diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 00000000000..4a26e5f0233 Binary files /dev/null and b/.DS_Store differ diff --git a/.gitignore b/.gitignore index 4e6417a74ef..1bc0ae472a4 100644 --- a/.gitignore +++ b/.gitignore @@ -8,14 +8,10 @@ *.etcd *.log *.swp -/etcd /hack/insta-discovery/.env *.coverprofile *.test hack/tls-setup/certs -.idea -*.iml -/contrib/mixin/manifests /contrib/raftexample/raftexample /contrib/raftexample/raftexample-* /vendor @@ -23,15 +19,5 @@ hack/tls-setup/certs *.tmp *.bak .gobincache/ -.DS_Store -/Documentation/dev-guide/api_reference_v3.md -/Documentation/dev-guide/api_concurrency_reference_v3.md - -/tools/etcd-dump-db/etcd-dump-db -/tools/etcd-dump-logs/etcd-dump-logs -/tools/etcd-dump-metrics/etcd-dump-metrics -/tools/local-tester/bridge/bridge -/tools/proto-annotations/proto-annotations -/tools/benchmark/benchmark -/out -/etcd-dump-logs +default.etcd +raftexample/db/* diff --git a/.golangci.yaml b/.golangci.yaml deleted file mode 100644 index d169aa4e7fa..00000000000 --- a/.golangci.yaml +++ /dev/null @@ -1,41 +0,0 @@ -run: - timeout: 30m - skip-files: - - "^zz_generated.*" - -issues: - max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source - exclude-rules: - # exclude ineffassing linter for generated files for conversion - - path: conversion\.go - linters: - - ineffassign - -linters: - disable-all: true - enable: # please keep this alphabetized - # Don't use soon to deprecated[1] linters that lead to false - # https://github.com/golangci/golangci-lint/issues/1841 - # - deadcode - # - structcheck - # - varcheck - - goimports - - ineffassign - - revive - - staticcheck - - stylecheck - - unused - - unconvert # Remove unnecessary type conversions - -linters-settings: # please keep this alphabetized - goimports: - local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages. 
- staticcheck: - checks: - - "all" - - "-SA1019" # TODO(fix) Using a deprecated function, variable, constant or field - - "-SA2002" # TODO(fix) Called testing.T.FailNow or SkipNow in a goroutine, which isn’t allowed - stylecheck: - checks: - - "ST1019" # Importing the same package multiple times. diff --git a/.header b/.header deleted file mode 100644 index 0446af6d877..00000000000 --- a/.header +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. diff --git a/ADOPTERS.md b/ADOPTERS.md deleted file mode 100644 index c6c294637d3..00000000000 --- a/ADOPTERS.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -title: Production users ---- - -This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check-in on how etcd is working in the field and update this list. - -## All Kubernetes Users - -- *Application*: https://kubernetes.io/ -- *Environments*: AWS, OpenStack, Azure, Google Cloud, Huawei Cloud, Bare Metal, etc - -**This is a meta user; please feel free to document specific Kubernetes clusters!** - -All Kubernetes clusters use etcd as their primary data store. 
This means etcd's users include such companies as [Niantic, Inc Pokemon Go](https://cloudplatform.googleblog.com/2016/09/bringing-Pokemon-GO-to-life-on-Google-Cloud.html), [Box](https://blog.box.com/blog/kubernetes-box-microservices-maximum-velocity/), [CoreOS](https://coreos.com/tectonic), [Ticketmaster](https://www.youtube.com/watch?v=wqXVKneP0Hg), [Salesforce](https://www.salesforce.com) and many many more. - -## discovery.etcd.io - -- *Application*: https://github.com/coreos/discovery.etcd.io -- *Launched*: Feb. 2014 -- *Cluster Size*: 5 members, 5 discovery proxies -- *Order of Data Size*: 100s of Megabytes -- *Operator*: CoreOS, brandon.philips@coreos.com -- *Environment*: AWS -- *Backups*: Periodic async to S3 - -discovery.etcd.io is the longest continuously running etcd backed service that we know about. It is the basis of automatic cluster bootstrap and was launched in Feb. 2014: https://coreos.com/blog/etcd-0.3.0-released/. - -## OpenTable - -- *Application*: OpenTable internal service discovery and cluster configuration management -- *Launched*: May 2014 -- *Cluster Size*: 3 members each in 6 independent clusters; approximately 50 nodes reading / writing -- *Order of Data Size*: 10s of MB -- *Operator*: OpenTable, Inc; sschlansker@opentable.com -- *Environment*: AWS, VMWare -- *Backups*: None, all data can be re-created if necessary. - -## cycoresys.com - -- *Application*: multiple -- *Launched*: Jul. 2014 -- *Cluster Size*: 3 members, _n_ proxies -- *Order of Data Size*: 100s of kilobytes -- *Operator*: CyCore Systems, Inc, sys@cycoresys.com -- *Environment*: Baremetal -- *Backups*: Periodic sync to Ceph RadosGW and DigitalOcean VM - -CyCore Systems provides architecture and engineering for computing systems. This cluster provides microservices, virtual machines, databases, storage clusters to a number of clients. It is built on CoreOS machines, with each machine in the cluster running etcd as a peer or proxy. 
- -## Radius Intelligence - -- *Application*: multiple internal tools, Kubernetes clusters, bootstrappable system configs -- *Launched*: June 2015 -- *Cluster Size*: 2 clusters of 5 and 3 members; approximately a dozen nodes read/write -- *Order of Data Size*: 100s of kilobytes -- *Operator*: Radius Intelligence; jcderr@radius.com -- *Environment*: AWS, CoreOS, Kubernetes -- *Backups*: None, all data can be recreated if necessary. - -Radius Intelligence uses Kubernetes running CoreOS to containerize and scale internal toolsets. Examples include running [JetBrains TeamCity][teamcity] and internal AWS security and cost reporting tools. etcd clusters back these clusters as well as provide some basic environment bootstrapping configuration keys. - -## Vonage - -- *Application*: kubernetes, vault backend, system configuration for microservices, scheduling, locks (future - service discovery) -- *Launched*: August 2015 -- *Cluster Size*: 2 clusters of 5 members in 2 DCs, n local proxies 1-to-1 with microservice, (ssl and SRV look up) -- *Order of Data Size*: kilobytes -- *Operator*: Vonage [devAdmin][raoofm] -- *Environment*: VMWare, AWS -- *Backups*: Daily snapshots on VMs. Backups done for upgrades. - -## PD - -- *Application*: embed etcd -- *Launched*: Mar 2016 -- *Cluster Size*: 3 or 5 members -- *Order of Data Size*: megabytes -- *Operator*: PingCAP, Inc. -- *Environment*: Bare Metal, AWS, etc. -- *Backups*: None. - -PD(Placement Driver) is the central controller in the TiDB cluster. It saves the cluster meta information, schedule the data, allocate the global unique timestamp for the distributed transaction, etc. It embeds etcd to supply high availability and auto failover. 
- -## Huawei - -- *Application*: System configuration for overlay network (Canal) -- *Launched*: June 2016 -- *Cluster Size*: 3 members for each cluster -- *Order of Data Size*: kilobytes -- *Operator*: Huawei Euler Department -- *Environment*: [Huawei Cloud](http://www.hwclouds.com/product/cce.html) -- *Backups*: None, all data can be recreated if necessary. - -[teamcity]: https://www.jetbrains.com/teamcity/ -[raoofm]:https://github.com/raoofm - -## Qiniu Cloud - -- *Application*: system configuration for microservices, distributed locks -- *Launched*: Jan. 2016 -- *Cluster Size*: 3 members each with several clusters -- *Order of Data Size*: kilobytes -- *Operator*: Pandora, chenchao@qiniu.com -- *Environment*: Baremetal -- *Backups*: None, all data can be recreated if necessary - -## QingCloud - -- *Application*: [QingCloud][qingcloud] appcenter cluster for service discovery as [metad][metad] backend. -- *Launched*: December 2016 -- *Cluster Size*: 1 cluster of 3 members per user. -- *Order of Data Size*: kilobytes -- *Operator*: [yunify][yunify] -- *Environment*: QingCloud IaaS -- *Backups*: None, all data can be recreated if necessary. - -[metad]:https://github.com/yunify/metad -[yunify]:https://github.com/yunify -[qingcloud]:https://qingcloud.com/ - - -## Yandex - -- *Application*: system configuration for services, service discovery -- *Launched*: March 2016 -- *Cluster Size*: 3 clusters of 5 members -- *Order of Data Size*: several gigabytes -- *Operator*: Yandex; [nekto0n][nekto0n] -- *Environment*: Bare Metal -- *Backups*: None - -[nekto0n]:https://github.com/nekto0n - -## Tencent Games - -- *Application*: Meta data and configuration data for service discovery, Kubernetes, etc. -- *Launched*: Jan. 
2015 -- *Cluster Size*: 3 members each with 10s of clusters -- *Order of Data Size*: 10s of Megabytes -- *Operator*: Tencent Game Operations Department -- *Environment*: Baremetal -- *Backups*: Periodic sync to backup server - -In Tencent games, we use Docker and Kubernetes to deploy and run our applications, and use etcd to save meta data for service discovery, Kubernetes, etc. - -## Hyper.sh - -- *Application*: Kubernetes, distributed locks, etc. -- *Launched*: April 2016 -- *Cluster Size*: 1 cluster of 3 members -- *Order of Data Size*: 10s of MB -- *Operator*: Hyper.sh -- *Environment*: Baremetal -- *Backups*: None, all data can be recreated if necessary. - -In [hyper.sh][hyper.sh], the container service is backed by [hypernetes][hypernetes], a multi-tenant kubernetes distro. Moreover, we use etcd to coordinate the multiple manage services and store global meta data. - -[hypernetes]:https://github.com/hyperhq/hypernetes -[Hyper.sh]:https://www.hyper.sh - -## Meitu -- *Application*: system configuration for services, service discovery, kubernetes in test environment -- *Launched*: October 2015 -- *Cluster Size*: 1 cluster of 3 members -- *Order of Data Size*: megabytes -- *Operator*: Meitu, hxj@meitu.com, [shafreeck][shafreeck] -- *Environment*: Bare Metal -- *Backups*: None, all data can be recreated if necessary. - -[shafreeck]:https://github.com/shafreeck - -## Grab -- *Application*: system configuration for services, service discovery -- *Launched*: June 2016 -- *Cluster Size*: 1 cluster of 7 members -- *Order of Data Size*: megabytes -- *Operator*: Grab, [taxitan][taxitan], [reterVision][reterVision] -- *Environment*: AWS -- *Backups*: None, all data can be recreated if necessary. - -[taxitan]:https://github.com/taxitan -[reterVision]:https://github.com/reterVision - -## DaoCloud.io - -- *Application*: container management -- *Launched*: Sep. 2015 -- *Cluster Size*: 1000+ deployments, each deployment contains a 3 node cluster. 
-- *Order of Data Size*: 100s of Megabytes -- *Operator*: daocloud.io -- *Environment*: Baremetal and virtual machines -- *Backups*: None, all data can be recreated if necessary. - -In [DaoCloud][DaoCloud], we use Docker and Swarm to deploy and run our applications, and we use etcd to save metadata for service discovery. - -[DaoCloud]:https://www.daocloud.io - -## Branch.io - -- *Application*: Kubernetes -- *Launched*: April 2016 -- *Cluster Size*: Multiple clusters, multiple sizes -- *Order of Data Size*: 100s of Megabytes -- *Operator*: branch.io -- *Environment*: AWS, Kubernetes -- *Backups*: EBS volume backups - -At [Branch][branch], we use kubernetes heavily as our core microservice platform for staging and production. - -[branch]: https://branch.io - -## Baidu Waimai - -- *Application*: SkyDNS, Kubernetes, UDC, CMDB and other distributed systems -- *Launched*: April. 2016 -- *Cluster Size*: 3 clusters of 5 members -- *Order of Data Size*: several gigabytes -- *Operator*: Baidu Waimai Operations Department -- *Environment*: CentOS 6.5 -- *Backups*: backup scripts - -## Salesforce.com - -- *Application*: Kubernetes -- *Launched*: Jan 2017 -- *Cluster Size*: Multiple clusters of 3 members -- *Order of Data Size*: 100s of Megabytes -- *Operator*: Salesforce.com (krmayankk@github) -- *Environment*: BareMetal -- *Backups*: None, all data can be recreated - -## Hosted Graphite - -- *Application*: Service discovery, locking, ephemeral application data -- *Launched*: January 2017 -- *Cluster Size*: 2 clusters of 7 members -- *Order of Data Size*: Megabytes -- *Operator*: Hosted Graphite (sre@hostedgraphite.com) -- *Environment*: Bare Metal -- *Backups*: None, all data is considered ephemeral. 
- -## Transwarp - -- *Application*: Transwarp Data Cloud, Transwarp Operating System, Transwarp Data Hub, Sophon -- *Launched*: January 2016 -- *Cluster Size*: Multiple clusters, multiple sizes -- *Order of Data Size*: Megabytes -- *Operator*: Trasnwarp Operating System -- *Environment*: Bare Metal, Container -- *Backups*: backup scripts diff --git a/CHANGELOG/CHANGELOG-2.3.md b/CHANGELOG/CHANGELOG-2.3.md deleted file mode 100644 index 0b54062b1d8..00000000000 --- a/CHANGELOG/CHANGELOG-2.3.md +++ /dev/null @@ -1,16 +0,0 @@ - - -
- - -## [v2.3.8](https://github.com/etcd-io/etcd/releases/tag/v2.3.8) (2017-02-17) - -See [code changes](https://github.com/etcd-io/etcd/compare/v2.3.7...v2.3.8). - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- diff --git a/CHANGELOG/CHANGELOG-3.0.md b/CHANGELOG/CHANGELOG-3.0.md deleted file mode 100644 index bc11c80a5f0..00000000000 --- a/CHANGELOG/CHANGELOG-3.0.md +++ /dev/null @@ -1,291 +0,0 @@ - - -
- - -## [v3.0.16](https://github.com/etcd-io/etcd/releases/tag/v3.0.16) (2016-11-13) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.15...v3.0.16) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Go - -- Compile with [*Go 1.6.4*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.15](https://github.com/etcd-io/etcd/releases/tag/v3.0.15) (2016-11-11) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.14...v3.0.15) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Fixed - -- Fix cancel watch request with wrong range end. - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.14](https://github.com/etcd-io/etcd/releases/tag/v3.0.14) (2016-11-04) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.13...v3.0.14) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Added - -- v3 `etcdctl migrate` command now supports `--no-ttl` flag to discard keys on transform. - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.13](https://github.com/etcd-io/etcd/releases/tag/v3.0.13) (2016-10-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.12...v3.0.13) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.12](https://github.com/etcd-io/etcd/releases/tag/v3.0.12) (2016-10-07) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.11...v3.0.12) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.11](https://github.com/etcd-io/etcd/releases/tag/v3.0.11) (2016-10-07) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.10...v3.0.11) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Added - -- Server returns previous key-value (optional) - - `clientv3.WithPrevKV` option - - v3 etcdctl `put,watch,del --prev-kv` flag - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.10](https://github.com/etcd-io/etcd/releases/tag/v3.0.10) (2016-09-23) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.9...v3.0.10) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.9](https://github.com/etcd-io/etcd/releases/tag/v3.0.9) (2016-09-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.8...v3.0.9) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Added - -- Warn on domain names on listen URLs (v3.2 will reject domain names). - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.8](https://github.com/etcd-io/etcd/releases/tag/v3.0.8) (2016-09-09) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.7...v3.0.8) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Other - -- Allow only IP addresses in listen URLs (domain names are rejected). - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.7](https://github.com/etcd-io/etcd/releases/tag/v3.0.7) (2016-08-31) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.6...v3.0.7) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Other - -- SRV records only allow A records (RFC 2052). - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.6](https://github.com/etcd-io/etcd/releases/tag/v3.0.6) (2016-08-19) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.5...v3.0.6) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.5](https://github.com/etcd-io/etcd/releases/tag/v3.0.5) (2016-08-19) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.4...v3.0.5) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Other - -- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given. - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.4](https://github.com/etcd-io/etcd/releases/tag/v3.0.4) (2016-07-27) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.3...v3.0.4) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Added - -- v2 `etcdctl ls` command now supports `--output=json`. -- Add /var/lib/etcd directory to etcd official Docker image. - -### Other - -- v2 auth can now use common name from TLS certificate when `--client-cert-auth` is enabled. - -### Go - -- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.3](https://github.com/etcd-io/etcd/releases/tag/v3.0.3) (2016-07-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.2...v3.0.3) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Other - -- Revert Dockerfile to use `CMD`, instead of `ENTRYPOINT`, to support `etcdctl` run. - - Docker commands for v3.0.2 won't work without specifying executable binary paths. -- v3 etcdctl default endpoints are now `127.0.0.1:2379`. - -### Go - -- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.2](https://github.com/etcd-io/etcd/releases/tag/v3.0.2) (2016-07-08) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.1...v3.0.2) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Other - -- Dockerfile uses `ENTRYPOINT`, instead of `CMD`, to run etcd without binary path specified. - -### Go - -- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.1](https://github.com/etcd-io/etcd/releases/tag/v3.0.1) (2016-07-01) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.0...v3.0.1) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Go - -- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6). - - -
- - -## [v3.0.0](https://github.com/etcd-io/etcd/releases/tag/v3.0.0) (2016-06-30) - -See [code changes](https://github.com/etcd-io/etcd/compare/v2.3.0...v3.0.0) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).** - -### Go - -- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6). - - -
- diff --git a/CHANGELOG/CHANGELOG-3.1.md b/CHANGELOG/CHANGELOG-3.1.md deleted file mode 100644 index 0c97517a7e2..00000000000 --- a/CHANGELOG/CHANGELOG-3.1.md +++ /dev/null @@ -1,574 +0,0 @@ - - -Previous change logs can be found at [CHANGELOG-3.0](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.0.md). - -
- -## [v3.1.21](https://github.com/etcd-io/etcd/releases/tag/v3.1.21) (2019-TBD) - -### etcdctl v3 - -- [Strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443) with etcdctl v2 -- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540). - - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532). - - The command output is changed. Previously, if endpoint is unreachable, the command output is - "\ is unhealthy: failed to connect: \". This change unified the error message, all error types - now have the same output "\ is unhealthy: failed to commit proposal: \". - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646). - -
- -## [v3.1.20](https://github.com/etcd-io/etcd/releases/tag/v3.1.20) (2018-10-10) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.19...v3.1.20) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Improved - -- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates message send to a peer failed. -- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that local node might have slow network. -- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information. -- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats. - - Previously, it only samples the TCP connection for snapshot messages. -- Display all registered [gRPC metrics at start](https://github.com/etcd-io/etcd/pull/10034). -- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. 
-- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric. -- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric. - -### client v3 - -- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.19](https://github.com/etcd-io/etcd/releases/tag/v3.1.19) (2018-07-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.18...v3.1.19) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Improved - -- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric. -- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric. -- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric. - - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`. - - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB. - - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB. - - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete. - - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation. -- Add [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) Prometheus metric. - - In addition to [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819). -- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric. 
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`. - - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB. - - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB. - - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete. - - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation. - -### client v3 - -- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952). - - If `<-chan *clientv3LeaseKeepAliveResponse` from `clientv3.Lease.KeepAlive` was never consumed or channel is full, client was [sending keepalive request every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of expected rate of every "TTL / 3" duration. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.18](https://github.com/etcd-io/etcd/releases/tag/v3.1.18) (2018-06-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.17...v3.1.18) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric. - - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.17](https://github.com/etcd-io/etcd/releases/tag/v3.1.17) (2018-06-06) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.16...v3.1.17) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- Fix [v3 snapshot recovery](https://github.com/etcd-io/etcd/issues/7628). - - A follower receives a leader snapshot to be persisted as a `[SNAPSHOT-INDEX].snap.db` file on disk. - - Now, server [ensures that the incoming snapshot be persisted on disk before loading it](https://github.com/etcd-io/etcd/pull/7876). - - Otherwise, index mismatch happens and triggers server-side panic (e.g. newer WAL entry with outdated snapshot index). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.16](https://github.com/etcd-io/etcd/releases/tag/v3.1.16) (2018-05-31) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.15...v3.1.16) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775). - - Let's assume that a watcher had been requested with a future revision X and sent to node A that became network-partitioned thereafter. Meanwhile, cluster makes progress. Then when the partition gets removed, the leader sends a snapshot to node A. Previously if the snapshot's latest revision is still lower than the watch revision X, **etcd server panicked** during snapshot restore operation. - - Now, this server-side panic has been fixed. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.15](https://github.com/etcd-io/etcd/releases/tag/v3.1.15) (2018-05-09) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.14...v3.1.15) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- Purge old [`*.snap.db` snapshot files](https://github.com/etcd-io/etcd/pull/7967). - - Previously, etcd did not respect `--max-snapshots` flag to purge old `*.snap.db` files. - - Now, etcd purges old `*.snap.db` files to keep maximum `--max-snapshots` number of files on disk. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.14](https://github.com/etcd-io/etcd/releases/tag/v3.1.14) (2018-04-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.13...v3.1.14) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric. - -### etcd server - -- Add [`--initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward. - - By default, `--initial-election-tick-advance=true`, then local member fast-forwards election ticks to speed up "initial" leader election trigger. - - This benefits the case of larger election ticks. For instance, cross datacenter deployment may require longer election timeout of 10-second. If true, local node does not need wait up to 10-second. Instead, forwards its election ticks to 8-second, and have only 2-second left before leader election. - - Major assumptions are that: cluster has no active leader thus advancing ticks enables faster leader election. Or cluster already has an established leader, and rejoining follower is likely to receive heartbeats from the leader after tick advance and before election timeout. - - However, when network from leader to rejoining follower is congested, and the follower does not receive leader heartbeat within left election ticks, disruptive election has to happen thus affecting cluster availabilities. - - Now, this can be disabled by setting `--initial-election-tick-advance=false`. 
- - Disabling this would slow down initial bootstrap process for cross datacenter deployments. Make tradeoffs by configuring `--initial-election-tick-advance` at the cost of slow initial bootstrap. - - If single-node, it advances ticks regardless. - - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.13](https://github.com/etcd-io/etcd/releases/tag/v3.1.13) (2018-03-29) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.12...v3.1.13) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Improved - -- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333). - - Previously, etcd fast-forwards election ticks on server start, with only one tick left for leader election. This is to speed up start phase, without having to wait until all election ticks elapse. Advancing election ticks is useful for cross datacenter deployments with larger election timeouts. However, it was affecting cluster availability if the last tick elapses before leader contacts the restarted node. - - Now, when etcd restarts, it adjusts election ticks with more than one tick left, thus more time for leader to prevent disruptive restart. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add missing [`etcd_network_peer_sent_failures_total` count](https://github.com/etcd-io/etcd/pull/9437). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.12](https://github.com/etcd-io/etcd/releases/tag/v3.1.12) (2018-03-08) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.11...v3.1.12) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9297). - - "unsynced" watcher is watcher that needs to be in sync with events that have happened. - - That is, "unsynced" watcher is the slow watcher that was requested on old revision. - - "unsynced" watcher restore operation was not correctly populating its underlying watcher group. - - Which possibly causes [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086). - - A node gets network partitioned with a watcher on a future revision, and falls behind receiving a leader snapshot after partition gets removed. When applying this snapshot, etcd watch storage moves current synced watchers to unsynced since sync watchers might have become stale during network partition. And reset synced watcher group to restart watcher routines. Previously, there was a bug when moving from synced watcher group to unsynced, thus client would miss events when the watcher was requested to the network-partitioned node. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.11](https://github.com/etcd-io/etcd/releases/tag/v3.1.11) (2017-11-28) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.10...v3.1.11) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- [#8411](https://github.com/etcd-io/etcd/issues/8411),[#8806](https://github.com/etcd-io/etcd/pull/8806) backport "mvcc: sending events after restore" -- [#8009](https://github.com/etcd-io/etcd/issues/8009),[#8902](https://github.com/etcd-io/etcd/pull/8902) backport coreos/bbolt v1.3.1-coreos.5 - -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.1.10](https://github.com/etcd-io/etcd/releases/tag/v3.1.10) (2017-07-14) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.9...v3.1.10) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Added - -- Tag docker images with minor versions. - - e.g. `docker pull quay.io/coreos/etcd:v3.1` to fetch latest v3.1 versions. - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - Fix panic on `net/http.CloseNotify` - - -
- - -## [v3.1.9](https://github.com/etcd-io/etcd/releases/tag/v3.1.9) (2017-06-09) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.8...v3.1.9) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- Allow v2 snapshot over 512MB. - -### Go - -- Compile with [*Go 1.7.6*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.8](https://github.com/etcd-io/etcd/releases/tag/v3.1.8) (2017-05-19) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.7...v3.1.8) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.7](https://github.com/etcd-io/etcd/releases/tag/v3.1.7) (2017-04-28) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.6...v3.1.7) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.6](https://github.com/etcd-io/etcd/releases/tag/v3.1.6) (2017-04-19) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.5...v3.1.6) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- Fill in Auth API response header. -- Remove auth check in Status API. - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.5](https://github.com/etcd-io/etcd/releases/tag/v3.1.5) (2017-03-27) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.4...v3.1.5) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd server - -- Fix raft memory leak issue. -- Fix Windows file path issues. - -### Other - -- Add `/etc/nsswitch.conf` file to alpine-based Docker image. - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.4](https://github.com/etcd-io/etcd/releases/tag/v3.1.4) (2017-03-22) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.3...v3.1.4) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.3](https://github.com/etcd-io/etcd/releases/tag/v3.1.3) (2017-03-10) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.2...v3.1.3) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd gateway - -- Fix `etcd gateway` schema handling in DNS discovery. -- Fix sd_notify behaviors in `gateway`, `grpc-proxy`. - -### gRPC Proxy - -- Fix sd_notify behaviors in `gateway`, `grpc-proxy`. - -### Other - -- Use machine default host when advertise URLs are default values(`localhost:2379,2380`) AND if listen URL is `0.0.0.0`. - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.2](https://github.com/etcd-io/etcd/releases/tag/v3.1.2) (2017-02-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.1...v3.1.2) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### etcd gateway - -- Fix `etcd gateway` with multiple endpoints. - -### Other - -- Use IPv4 default host, by default (when IPv4 and IPv6 are available). - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.1](https://github.com/etcd-io/etcd/releases/tag/v3.1.1) (2017-02-17) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.0...v3.1.1) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Go - -- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7). - - -
- - -## [v3.1.0](https://github.com/etcd-io/etcd/releases/tag/v3.1.0) (2017-01-20) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.0...v3.1.0) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).** - -### Improved - -- Faster linearizable reads (implements Raft [read-index](https://github.com/etcd-io/etcd/pull/6212)). -- v3 authentication API is now stable. - -### Breaking Changes - -- Deprecated following gRPC metrics in favor of [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). - - `etcd_grpc_requests_total` - - `etcd_grpc_requests_failed_total` - - `etcd_grpc_active_streams` - - `etcd_grpc_unary_requests_duration_seconds` - -### Dependency - -- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) to [**`ugorji/go@9c7f9b7`**](https://github.com/ugorji/go/commit/9c7f9b7a2bc3a520f7c7b30b34b7f85f47fe27b6), and [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/6945). - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given. - - `TLSConfig.ServerName` is ignored with user-provided certificates for backwards compatibility; to be deprecated. - - For example, `etcd --discovery-srv=example.com` will only authenticate peers/clients when the provided certs have root domain `example.com` as an entry in Subject Alternative Name (SAN) field. - -### etcd server - -- Automatic leadership transfer when leader steps down. -- etcd flags - - `--strict-reconfig-check` flag is set by default. - - Add `--log-output` flag. - - Add `--metrics` flag. 
-- etcd uses default route IP if advertise URL is not given. -- Cluster rejects removing members if quorum will be lost. -- Discovery now has upper limit for waiting on retries. -- Warn on binding listeners through domain names; to be deprecated. -- v3.0 and v3.1 with `--auto-compaction-retention=10` run periodic compaction on v3 key-value store for every 10-hour. - - Compactor only supports periodic compaction. - - Compactor records latest revisions every 5-minute, until it reaches the first compaction period (e.g. 10-hour). - - In order to retain key-value history of last compaction period, it uses the last revision that was fetched before compaction period, from the revision records that were collected every 5-minute. - - When `--auto-compaction-retention=10`, compactor uses revision 100 for compact revision where revision 100 is the latest revision fetched from 10 hours ago. - - If compaction succeeds or requested revision has already been compacted, it resets period timer and starts over with new historical revision records (e.g. restart revision collect and compact for the next 10-hour period). - - If compaction fails, it retries in 5 minutes. - -### client v3 - -- Add `SetEndpoints` method; update endpoints at runtime. -- Add `Sync` method; auto-update endpoints at runtime. -- Add `Lease TimeToLive` API; fetch lease information. -- replace Config.Logger field with global logger. -- Get API responses are sorted in ascending order by default. - -### etcdctl v3 - -- Add `lease timetolive` command. -- Add `--print-value-only` flag to get command. -- Add `--dest-prefix` flag to make-mirror command. -- `get` command responses are sorted in ascending order by default. - -### gRPC Proxy - -- Experimental gRPC proxy feature. - -### Other - -- `recipes` now conform to sessions defined in `clientv3/concurrency`. -- ACI has symlinks to `/usr/local/bin/etcd*`. - -### Go - -- Compile with [*Go 1.7.4*](https://golang.org/doc/devel/release.html#go1.7). - - -
- diff --git a/CHANGELOG/CHANGELOG-3.2.md b/CHANGELOG/CHANGELOG-3.2.md deleted file mode 100644 index 095ff6e9f2a..00000000000 --- a/CHANGELOG/CHANGELOG-3.2.md +++ /dev/null @@ -1,1021 +0,0 @@ - - -Previous change logs can be found at [CHANGELOG-3.1](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.1.md). - - -## v3.2.33 (TBD) - -
- -## [v3.2.32](https://github.com/etcd-io/etcd/releases/tag/v3.2.32) (2021-03-28) -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.31...v3.2.32) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -### Package `wal` -- add wal slice bound check to make sure entry index is not greater than the number of entries -- check slice size in decodeRecord -- fix panic when decoder not set - -### Package `fileutil` -- fix constant for linux locking - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.2.31](https://github.com/etcd-io/etcd/releases/tag/v3.2.31) (2020-08-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.30...v3.2.31) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -### auth, etcdserver - -- Improve [`runtime.FDUsage` call pattern to reduce objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986). -- [attaching a fake root token when calling `LeaseRevoke`](https://github.com/etcd-io/etcd/pull/11691). - - fix a data corruption bug caused by lease expiration when authentication is enabled and upgrading cluster from etcd-3.2 to etcd-3.3 - -### Package `runtime` - -- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214). - -### Metrics, Monitoring - -- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.2.30](https://github.com/etcd-io/etcd/releases/tag/v3.2.30) (2020-04-01) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.29...v3.2.30) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -### Package `wal` - -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). - -### Metrics, Monitoring - -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.2.29](https://github.com/etcd-io/etcd/releases/tag/v3.2.29) (2020-03-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.28...v3.2.29) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -### etcd server - -- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613). -- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704). - -### client v3 - -- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687). - - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.28](https://github.com/etcd-io/etcd/releases/tag/v3.2.28) (2019-11-10) - -### Improved - -- Add `etcd --experimental-peer-skip-client-san-verification` to [skip verification of peer client address](https://github.com/etcd-io/etcd/pull/11195). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11271) Prometheus metric. - -### etcdserver - -- Fix [`wait purge file loop during shutdown`](https://github.com/etcd-io/etcd/pull/11308). - - Previously, during shutdown etcd could accidentally remove needed wal files, resulting in catastrophic error `etcdserver: open wal error: wal: file not found.` during startup. - - Now, etcd makes sure the purge file loop exits before server signals stop of the raft node. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.27](https://github.com/etcd-io/etcd/releases/tag/v3.2.27) (2019-09-17) - -### etcdctl v3 - -- [Strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443) with etcdctl v2 -- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540). - - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532). - - The command output is changed. Previously, if endpoint is unreachable, the command output is - "\<endpoint\> is unhealthy: failed to connect: \<error message\>". This change unified the error message, all error types - now have the same output "\<endpoint\> is unhealthy: failed to commit proposal: \<error message\>". -- Fix [`etcdctl snapshot status` to not modify snapshot file](https://github.com/etcd-io/etcd/pull/11157). - - For example, start etcd `v3.3.10` - - Write some data - - Use etcdctl `v3.3.10` to save snapshot - - Somehow, upgrading Kubernetes fails, thus rolling back to previous version etcd `v3.2.24` - - Run etcdctl `v3.2.24` `snapshot status` against the snapshot file saved from `v3.3.10` server - - Run etcdctl `v3.2.24` `snapshot restore` fails with `"expected sha256 [12..."` - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646). -- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. -- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.26](https://github.com/etcd-io/etcd/releases/tag/v3.2.26) (2019-01-11) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.25...v3.2.26) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### gRPC Proxy - -- Fix [memory leak in cache layer](https://github.com/etcd-io/etcd/pull/10327). - -### Security, Authentication - -- Disable [CommonName authentication for gRPC-gateway](https://github.com/etcd-io/etcd/pull/10366) gRPC-gateway proxy requests to etcd server use the etcd client server TLS certificate. If that certificate contains CommonName we do not want to use that for authentication as it could lead to permission escalation. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.25](https://github.com/etcd-io/etcd/releases/tag/v3.2.25) (2018-10-10) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.24...v3.2.25) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Improved - -- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates message send to a peer failed. -- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that local node might have slow network. -- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information. -- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats. - - Previously, it only samples the TCP connection for snapshot messages. -- Display all registered [gRPC metrics at start](https://github.com/etcd-io/etcd/pull/10032). -- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. 
-- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric. -- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric. - -### client v3 - -- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.24](https://github.com/etcd-io/etcd/releases/tag/v3.2.24) (2018-07-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.23...v3.2.24) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Improved - -- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric. -- Add [`etcd_server_heartbeat_send_failures_total`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric. -- Add [`etcd_server_slow_apply_total`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric. -- Add [`etcd_disk_backend_defrag_duration_seconds`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric. -- Add [`etcd_mvcc_hash_duration_seconds`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric. -- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric. -- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric. - - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`. - - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB. - - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB. - - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete. 
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation. -- Add [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) Prometheus metric. - - In addition to [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819). -- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric. - - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_server_quota_backend_bytes`. - - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB. - - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB. - - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete. - - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation. - -### gRPC Proxy - -- Add [flags for specifying TLS for connecting to proxy](https://github.com/etcd-io/etcd/pull/9894): - - Add `grpc-proxy start --cert-file`, `grpc-proxy start --key-file` and `grpc-proxy start --trusted-ca-file` flags. -- Add [`grpc-proxy start --metrics-addr` flag for specifying a separate metrics listen address](https://github.com/etcd-io/etcd/pull/9894). - -### client v3 - -- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952). - - If `<-chan *clientv3LeaseKeepAliveResponse` from `clientv3.Lease.KeepAlive` was never consumed or channel is full, client was [sending keepalive request every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of expected rate of every "TTL / 3" duration. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.23](https://github.com/etcd-io/etcd/releases/tag/v3.2.23) (2018-06-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.22...v3.2.23) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Improved - -- Improve [slow request apply warning log](https://github.com/etcd-io/etcd/pull/9288). - - e.g. `read-only range request "key:\"/a\" range_end:\"/b\" " with result "range_response_count:3 size:96" took too long (97.966µs) to execute`. - - Redact [request value field](https://github.com/etcd-io/etcd/pull/9822). - - Provide [response size](https://github.com/etcd-io/etcd/pull/9826). -- Add [backoff on watch retries on transient errors](https://github.com/etcd-io/etcd/pull/9840). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric. - - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.22](https://github.com/etcd-io/etcd/releases/tag/v3.2.22) (2018-06-06) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.21...v3.2.22) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Security, Authentication - -- Support TLS cipher suite whitelisting. - - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320). - - TLS handshake fails when client hello is requested with invalid cipher suites. - - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag. - - If empty, Go auto-populates the list. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.21](https://github.com/etcd-io/etcd/releases/tag/v3.2.21) (2018-05-31) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.20...v3.2.21) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Fix [auth storage panic when simple token provider is disabled](https://github.com/etcd-io/etcd/pull/8695). -- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775). - - Let's assume that a watcher had been requested with a future revision X and sent to node A that became network-partitioned thereafter. Meanwhile, cluster makes progress. Then when the partition gets removed, the leader sends a snapshot to node A. Previously if the snapshot's latest revision is still lower than the watch revision X, **etcd server panicked** during snapshot restore operation. - - Now, this server-side panic has been fixed. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.20](https://github.com/etcd-io/etcd/releases/tag/v3.2.20) (2018-05-09) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.19...v3.2.20) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Purge old [`*.snap.db` snapshot files](https://github.com/etcd-io/etcd/pull/7967). - - Previously, etcd did not respect `--max-snapshots` flag to purge old `*.snap.db` files. - - Now, etcd purges old `*.snap.db` files to keep maximum `--max-snapshots` number of files on disk. - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.19](https://github.com/etcd-io/etcd/releases/tag/v3.2.19) (2018-04-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.18...v3.2.19) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Fix [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/9557) Prometheus metric. -- Fix [race conditions in v2 server stat collecting](https://github.com/etcd-io/etcd/pull/9562). -- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric. - -### Security, Authentication - -- Fix [TLS reload](https://github.com/etcd-io/etcd/pull/9570) when [certificate SAN field only includes IP addresses but no domain names](https://github.com/etcd-io/etcd/issues/9541). - - In Go, server calls `(*tls.Config).GetCertificate` for TLS reload if and only if server's `(*tls.Config).Certificates` field is not empty, or `(*tls.ClientHelloInfo).ServerName` is not empty with a valid SNI from the client. Previously, etcd always populates `(*tls.Config).Certificates` on the initial client TLS handshake, as non-empty. Thus, client was always expected to supply a matching SNI in order to pass the TLS verification and to trigger `(*tls.Config).GetCertificate` to reload TLS assets. 
- - However, a certificate whose SAN field does [not include any domain names but only IP addresses](https://github.com/etcd-io/etcd/issues/9541) would request `*tls.ClientHelloInfo` with an empty `ServerName` field, thus failing to trigger the TLS reload on initial TLS handshake; this becomes a problem when expired certificates need to be replaced online. - - Now, `(*tls.Config).Certificates` is created empty on initial TLS client handshake, first to trigger `(*tls.Config).GetCertificate`, and then to populate rest of the certificates on every new TLS connection, even when client SNI is empty (e.g. cert only includes IPs). - -### etcd server - -- Add [`etcd --initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward. - - By default, `etcd --initial-election-tick-advance=true`, then local member fast-forwards election ticks to speed up "initial" leader election trigger. - - This benefits the case of larger election ticks. For instance, cross datacenter deployment may require longer election timeout of 10-second. If true, local node does not need wait up to 10-second. Instead, forwards its election ticks to 8-second, and have only 2-second left before leader election. - - Major assumptions are that: cluster has no active leader thus advancing ticks enables faster leader election. Or cluster already has an established leader, and rejoining follower is likely to receive heartbeats from the leader after tick advance and before election timeout. - - However, when network from leader to rejoining follower is congested, and the follower does not receive leader heartbeat within left election ticks, disruptive election has to happen thus affecting cluster availabilities. - - Now, this can be disabled by setting `--initial-election-tick-advance=false`. - - Disabling this would slow down initial bootstrap process for cross datacenter deployments. 
Make tradeoffs by configuring `--initial-election-tick-advance` at the cost of slow initial bootstrap. - - If single-node, it advances ticks regardless. - - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.18](https://github.com/etcd-io/etcd/releases/tag/v3.2.18) (2018-03-29) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.17...v3.2.18) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Improved - -- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333). - - Previously, etcd fast-forwards election ticks on server start, with only one tick left for leader election. This is to speed up start phase, without having to wait until all election ticks elapse. Advancing election ticks is useful for cross datacenter deployments with larger election timeouts. However, it was affecting cluster availability if the last tick elapses before leader contacts the restarted node. - - Now, when etcd restarts, it adjusts election ticks with more than one tick left, thus more time for leader to prevent disruptive restart. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add missing [`etcd_network_peer_sent_failures_total` count](https://github.com/etcd-io/etcd/pull/9437). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.17](https://github.com/etcd-io/etcd/releases/tag/v3.2.17) (2018-03-08) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.16...v3.2.17) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Fix [server panic on invalid Election Proclaim/Resign HTTP(S) requests](https://github.com/etcd-io/etcd/pull/9379). - - Previously, wrong-formatted HTTP requests to Election API could trigger panic in etcd server. - - e.g. `curl -L http://localhost:2379/v3/election/proclaim -X POST -d '{"value":""}'`, `curl -L http://localhost:2379/v3/election/resign -X POST -d '{"value":""}'`. -- Prevent [overflow by large `TTL` values for `Lease` `Grant`](https://github.com/etcd-io/etcd/pull/9399). - - `TTL` parameter to `Grant` request is unit of second. - - Leases with too large `TTL` values exceeding `math.MaxInt64` [expire in unexpected ways](https://github.com/etcd-io/etcd/issues/9374). - - Server now returns `rpctypes.ErrLeaseTTLTooLarge` to client, when the requested `TTL` is larger than *9,000,000,000 seconds* (which is >285 years). - - Again, etcd `Lease` is meant for short-periodic keepalives or sessions, in the range of seconds or minutes. Not for hours or days! -- Enable etcd server [`raft.Config.CheckQuorum` when starting with `ForceNewCluster`](https://github.com/etcd-io/etcd/pull/9347). - -### Proxy v2 - -- Fix [v2 proxy leaky HTTP requests](https://github.com/etcd-io/etcd/pull/9336). - -### Go - -- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.16](https://github.com/etcd-io/etcd/releases/tag/v3.2.16) (2018-02-12) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.15...v3.2.16) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9297). - - "unsynced" watcher is watcher that needs to be in sync with events that have happened. - - That is, "unsynced" watcher is the slow watcher that was requested on old revision. - - "unsynced" watcher restore operation was not correctly populating its underlying watcher group. - - Which possibly causes [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086). - - A node gets network partitioned with a watcher on a future revision, and falls behind receiving a leader snapshot after partition gets removed. When applying this snapshot, etcd watch storage moves current synced watchers to unsynced since sync watchers might have become stale during network partition. And reset synced watcher group to restart watcher routines. Previously, there was a bug when moving from synced watcher group to unsynced, thus client would miss events when the watcher was requested to the network-partitioned node. - -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.15](https://github.com/etcd-io/etcd/releases/tag/v3.2.15) (2018-01-22) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.14...v3.2.15) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Prevent [server panic from member update/add](https://github.com/etcd-io/etcd/pull/9174) with [wrong scheme URLs](https://github.com/etcd-io/etcd/issues/9173). -- Log [user context cancel errors on stream APIs in debug level with TLS](https://github.com/etcd-io/etcd/pull/9178). - -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.14](https://github.com/etcd-io/etcd/releases/tag/v3.2.14) (2018-01-11) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.13...v3.2.14) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Improved - -- Log [user context cancel errors on stream APIs in debug level](https://github.com/etcd-io/etcd/pull/9105). - -### etcd server - -- Fix [`mvcc/backend.defragdb` nil-pointer dereference on create bucket failure](https://github.com/etcd-io/etcd/pull/9119). - -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.13](https://github.com/etcd-io/etcd/releases/tag/v3.2.13) (2018-01-02) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.12...v3.2.13) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Remove [verbose error messages on stream cancel and gRPC info-level logs](https://github.com/etcd-io/etcd/pull/9080) in server-side. -- Fix [gRPC server panic on `GracefulStop` TLS-enabled server](https://github.com/etcd-io/etcd/pull/8987). - -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.12](https://github.com/etcd-io/etcd/releases/tag/v3.2.12) (2017-12-20) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.11...v3.2.12) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Dependency - -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases/tag) from [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4) to [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5). -- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.3`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3) to [**`v1.3.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.0). - -### etcd server - -- Fix [error message of `Revision` compactor](https://github.com/etcd-io/etcd/pull/8999) in server-side. - -### client v3 - -- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/etcd-io/etcd/clientv3#Config). - - Fix [exceeded response size limit error in client-side](https://github.com/etcd-io/etcd/issues/9043). - - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099). - - In previous versions(v3.2.10, v3.2.11), client response size was limited to only 4 MiB. - - `MaxCallSendMsgSize` default value is 2 MiB, if not configured. - - `MaxCallRecvMsgSize` default value is `math.MaxInt32`, if not configured. - -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.11](https://github.com/etcd-io/etcd/releases/tag/v3.2.11) (2017-12-05) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.10...v3.2.11) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Dependency - -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases/tag) from [**`v1.7.3`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.3) to [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4). - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- Log [more details on TLS handshake failures](https://github.com/etcd-io/etcd/pull/8952/files). - -### client v3 - -- Fix racey grpc-go's server handler transport `WriteStatus` call to prevent [TLS-enabled etcd server crash](https://github.com/etcd-io/etcd/issues/8904). -- Add [gRPC RPC failure warnings](https://github.com/etcd-io/etcd/pull/8939) to help debug such issues in the future. - -### Documentation - -- Remove `--listen-metrics-urls` flag in monitoring document (non-released in `v3.2.x`, planned for `v3.3.x`). - -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.10](https://github.com/etcd-io/etcd/releases/tag/v3.2.10) (2017-11-16) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.9...v3.2.10) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Dependency - -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases/tag) from [**`v1.2.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.2.1) to [**`v1.7.3`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.3). -- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.2.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.2.0) to [**`v1.3`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3). - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- Revert [discovery SRV auth `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/etcd-io/etcd/pull/8651) to support non-wildcard subject alternative names in the certs (see [issue #8445](https://github.com/etcd-io/etcd/issues/8445) for more contexts). - - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `etcd.local` (**not `*.etcd.local`**) as an entry in Subject Alternative Name (SAN) field. - -### etcd server - -- Replace backend key-value database `boltdb/bolt` with [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to address [backend database size issue](https://github.com/etcd-io/etcd/issues/8009). - -### client v3 - -- Rewrite balancer to handle [network partitions](https://github.com/etcd-io/etcd/issues/8711). 
- -### Go - -- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.9](https://github.com/etcd-io/etcd/releases/tag/v3.2.9) (2017-10-06) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.8...v3.2.9) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- Update `golang.org/x/crypto/bcrypt` (see [golang/crypto@6c586e1](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117)). -- Fix discovery SRV bootstrapping to [authenticate `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/etcd-io/etcd/pull/8651), in order to support sub-domain wildcard matching (see [issue #8445](https://github.com/etcd-io/etcd/issues/8445) for more contexts). - - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `*.etcd.local` as an entry in Subject Alternative Name (SAN) field. - -### Go - -- Compile with [*Go 1.8.4*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.8](https://github.com/etcd-io/etcd/releases/tag/v3.2.8) (2017-09-29) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.7...v3.2.8) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### client v2 - -- Fix v2 client failover to next endpoint on mutable operation. - -### gRPC Proxy - -- Handle [`KeysOnly` flag](https://github.com/etcd-io/etcd/pull/8552). - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.7](https://github.com/etcd-io/etcd/releases/tag/v3.2.7) (2017-09-01) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.6...v3.2.7) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Security, Authentication - -- Fix [server-side auth so concurrent auth operations do not return old revision error](https://github.com/etcd-io/etcd/pull/8306). - -### client v3 - -- Fix [`concurrency/stm` Put with serializable snapshot](https://github.com/etcd-io/etcd/pull/8439). - - Use store revision from first fetch to resolve write conflicts instead of modified revision. - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.6](https://github.com/etcd-io/etcd/releases/tag/v3.2.6) (2017-08-21) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.5...v3.2.6) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Fix watch restore from snapshot. -- Fix multiple URLs for `--listen-peer-urls` flag. -- Add `--enable-pprof` flag to etcd configuration file format. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Fix `etcd_debugging_mvcc_keys_total` inconsistency. - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.5](https://github.com/etcd-io/etcd/releases/tag/v3.2.5) (2017-08-04) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.4...v3.2.5) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcdctl v3 - -- Return non-zero exit code on unhealthy `endpoint health`. - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- [Server supports reverse-lookup on wildcard DNS `SAN`](https://github.com/etcd-io/etcd/pull/8281). For instance, if peer cert contains only DNS names (no IP addresses) in Subject Alternative Name (SAN) field, server first reverse-lookups the remote IP address to get a list of names mapping to that address (e.g. `nslookup IPADDR`). Then accepts the connection if those names have a matching name with peer cert's DNS names (either by exact or wildcard match). If none is matched, server forward-lookups each DNS entry in peer cert (e.g. look up `example.default.svc` when the entry is `*.example.default.svc`), and accepts connection only when the host's resolved addresses have the matching IP address with the peer's remote IP address. For example, peer B's CSR (with `cfssl`) SAN field is `["*.example.default.svc", "*.example.default.svc.cluster.local"]` when peer B's remote IP address is `10.138.0.2`. When peer B tries to join the cluster, peer A reverse-lookup the IP `10.138.0.2` to get the list of host names. And either exact or wildcard match the host names with peer B's cert DNS names in Subject Alternative Name (SAN) field. If none of reverse/forward lookups worked, it returns an error `"tls: "10.138.0.2" does not match any of DNSNames ["*.example.default.svc","*.example.default.svc.cluster.local"]`. 
See [issue#8268](https://github.com/etcd-io/etcd/issues/8268) for more detail. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Fix unreachable `/metrics` endpoint when `--enable-v2=false`. - -### gRPC Proxy - -- Handle [`PrevKv` flag](https://github.com/etcd-io/etcd/pull/8366). - -### Other - -- Add container registry `gcr.io/etcd-development/etcd`. - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.4](https://github.com/etcd-io/etcd/releases/tag/v3.2.4) (2017-07-19) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.3...v3.2.4) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Do not block on active client stream when stopping server - -### gRPC proxy - -- Fix gRPC proxy Snapshot RPC error handling - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.3](https://github.com/etcd-io/etcd/releases/tag/v3.2.3) (2017-07-14) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.2...v3.2.3) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### client v3 - -- Let clients establish unlimited streams - -### Other - -- Tag docker images with minor versions - - e.g. `docker pull quay.io/coreos/etcd:v3.2` to fetch latest v3.2 versions - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.2](https://github.com/etcd-io/etcd/releases/tag/v3.2.2) (2017-07-07) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.1...v3.2.2) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Improved - -- Rate-limit lease revoke on expiration. -- Extend leases on promote to avoid queueing effect on lease expiration. - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- [Server accepts connections if IP matches, without checking DNS entries](https://github.com/etcd-io/etcd/pull/8223). For instance, if peer cert contains IP addresses and DNS names in Subject Alternative Name (SAN) field, and the remote IP address matches one of those IP addresses, server just accepts connection without further checking the DNS names. For example, peer B's CSR (with `cfssl`) SAN field is `["invalid.domain", "10.138.0.2"]` when peer B's remote IP address is `10.138.0.2` and `invalid.domain` is a invalid host. When peer B tries to join the cluster, peer A successfully authenticates B, since Subject Alternative Name (SAN) field has a valid matching IP address. See [issue#8206](https://github.com/etcd-io/etcd/issues/8206) for more detail. - -### etcd server - -- Accept connection with matched IP SAN but no DNS match. - - Don't check DNS entries in certs if there's a matching IP. - -### gRPC gateway - -- Use user-provided listen address to connect to gRPC gateway. - - `net.Listener` rewrites IPv4 0.0.0.0 to IPv6 [::], breaking IPv6 disabled hosts. - - Only v3.2.0, v3.2.1 are affected. - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.1](https://github.com/etcd-io/etcd/releases/tag/v3.2.1) (2017-06-23) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.0...v3.2.1) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### etcd server - -- Fix backend database in-memory index corruption issue on restore (only 3.2.0 is affected). - -### gRPC gateway - -- Fix Txn marshaling. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Fix backend database size debugging metrics. - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- - -## [v3.2.0](https://github.com/etcd-io/etcd/releases/tag/v3.2.0) (2017-06-09) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.0...v3.2.0) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).** - -### Improved - -- Improve backend read concurrency. - -### Breaking Changes - -- Increased [`--snapshot-count` default value from 10,000 to 100,000](https://github.com/etcd-io/etcd/pull/7160). - - Higher snapshot count means it holds Raft entries in memory for longer before discarding old entries. - - It is a trade-off between less frequent snapshotting and [higher memory usage](https://github.com/kubernetes/kubernetes/issues/60589#issuecomment-371977156). - - Use a lower `--snapshot-count` value for lower memory usage. - - Use a higher `--snapshot-count` value for better availability of slow followers (less frequent snapshots from leader). -- `clientv3.Lease.TimeToLive` returns `LeaseTimeToLiveResponse.TTL == -1` on lease not found. -- `clientv3.NewFromConfigFile` is moved to `clientv3/yaml.NewConfig`. -- `embed.Etcd.Peers` field is now `[]*peerListener`. -- Rejects domain names for `--listen-peer-urls` and `--listen-client-urls` (3.1 only prints out warnings), since [domain name is invalid for network interface binding](https://github.com/etcd-io/etcd/issues/6336). - -### Dependency - -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.0.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.0.4) to [**`v1.2.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.2.1). -- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) to [**`v1.2.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.2.0). 
- -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_disk_backend_snapshot_duration_seconds`](https://github.com/etcd-io/etcd/pull/7892) -- Add `etcd_debugging_server_lease_expired_total` metrics. - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- [TLS certificates get reloaded on every client connection](https://github.com/etcd-io/etcd/pull/7829). This is useful when replacing expiry certs without stopping etcd servers; it can be done by overwriting old certs with new ones. Refreshing certs for every connection should not have too much overhead, but can be improved in the future, with caching layer. Example tests can be found [here](https://github.com/etcd-io/etcd/blob/b041ce5d514a4b4aaeefbffb008f0c7570a18986/integration/v3_grpc_test.go#L1601-L1757). -- [Server denies incoming peer certs with wrong IP `SAN`](https://github.com/etcd-io/etcd/pull/7687). For instance, if peer cert contains any IP addresses in Subject Alternative Name (SAN) field, server authenticates a peer only when the remote IP address matches one of those IP addresses. This is to prevent unauthorized endpoints from joining the cluster. For example, peer B's CSR (with `cfssl`) SAN field is `["*.example.default.svc", "*.example.default.svc.cluster.local", "10.138.0.27"]` when peer B's actual IP address is `10.138.0.2`, not `10.138.0.27`. When peer B tries to join the cluster, peer A will reject B with the error `x509: certificate is valid for 10.138.0.27, not 10.138.0.2`, because B's remote IP address does not match the one in Subject Alternative Name (SAN) field. -- [Server resolves TLS `DNSNames` when checking `SAN`](https://github.com/etcd-io/etcd/pull/7767). 
For instance, if peer cert contains only DNS names (no IP addresses) in Subject Alternative Name (SAN) field, server authenticates a peer only when forward-lookups (`dig b.com`) on those DNS names have matching IP with the remote IP address. For example, peer B's CSR (with `cfssl`) SAN field is `["b.com"]` when peer B's remote IP address is `10.138.0.2`. When peer B tries to join the cluster, peer A looks up the incoming host `b.com` to get the list of IP addresses (e.g. `dig b.com`). And rejects B if the list does not contain the IP `10.138.0.2`, with the error `tls: 10.138.0.2 does not match any of DNSNames ["b.com"]`. -- Auth support JWT token. - -### etcd server - -- RPCs - - Add Election, Lock service. -- Native client `etcdserver/api/v3client` - - client "embedded" in the server. -- Logging, monitoring - - Server warns large snapshot operations. -- Add `etcd --enable-v2` flag to enable v2 API server. - - `etcd --enable-v2=true` by default. -- Add `etcd --auth-token` flag. -- v3.2 compactor runs [every hour](https://github.com/etcd-io/etcd/pull/7875). - - Compactor only supports periodic compaction. - - Compactor continues to record latest revisions every 5-minute. - - For every hour, it uses the last revision that was fetched before compaction period, from the revision records that were collected every 5-minute. - - That is, for every hour, compactor discards historical data created before compaction period. - - The retention window of compaction period moves to next hour. - - For instance, when hourly writes are 100 and `--auto-compaction-retention=10`, v3.1 compacts revision 1000, 2000, and 3000 for every 10-hour, while v3.2 compacts revision 1000, 1100, and 1200 for every 1-hour. - - If compaction succeeds or requested revision has already been compacted, it resets period timer and removes used compacted revision from historical revision records (e.g. start next revision collect and compaction from previously collected revisions). 
- - If compaction fails, it retries in 5 minutes. -- Allow snapshot over 512MB. - -### client v3 - -- STM prefetching. -- Add namespace feature. -- Add `ErrOldCluster` with server version checking. -- Translate `WithPrefix()` into `WithFromKey()` for empty key. - -### etcdctl v3 - -- Add `check perf` command. -- Add `etcdctl --from-key` flag to role grant-permission command. -- `lock` command takes an optional command to execute. - -### gRPC Proxy - -- Proxy endpoint discovery. -- Namespaces. -- Coalesce lease requests. - -### etcd gateway - -- Support [DNS SRV priority](https://github.com/etcd-io/etcd/pull/7882) for [smart proxy routing](https://github.com/etcd-io/etcd/issues/4378). - -### Other - -- v3 client - - concurrency package's elections updated to match RPC interfaces. - - let client dial endpoints not in the balancer. -- Release - - Annotate acbuild with supports-systemd-notify. - - Add `nsswitch.conf` to Docker container image. - - Add ppc64le, arm64(experimental) builds. - -### Go - -- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8). - - -
- diff --git a/CHANGELOG/CHANGELOG-3.3.md b/CHANGELOG/CHANGELOG-3.3.md deleted file mode 100644 index 8addba112f6..00000000000 --- a/CHANGELOG/CHANGELOG-3.3.md +++ /dev/null @@ -1,1121 +0,0 @@ - - -Previous change logs can be found at [CHANGELOG-3.2](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.2.md). - -
- -## v3.3.27 (2021-10-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.26...v3.3.27) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Other - -- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs: - - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption - - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc - - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp - - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out of bound reads. - -
- -## v3.3.26 (2021-10-03) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.25...v3.3.26) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Package `clientv3` - -- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready. - -### Package `fileutil` - -- Fix [constant](https://github.com/etcd-io/etcd/pull/12440) for linux locking. - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - -
- -## v3.3.25 (2020-08-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.23...v3.3.25) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Security - -- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd use any existing directory that has a permission different than 700 on Linux and 777 on Windows. - - -## [v3.3.24](https://github.com/etcd-io/etcd/releases/tag/v3.3.24) (2020-08-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.23...v3.3.24) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Package `etcd server` - -- Fix [`int64` convert panic in raft logger](https://github.com/etcd-io/etcd/pull/12106). - - Fix [kubernetes/kubernetes#91937](https://github.com/kubernetes/kubernetes/issues/91937). - -### Package `runtime` - -- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214). - -### Metrics, Monitoring - -- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - - -
- - - -## [v3.3.23](https://github.com/etcd-io/etcd/releases/tag/v3.3.23) (2020-07-16) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.22...v3.3.23) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Breaking Changes - -- Fix [incorrect package dependency when etcd clientv3 used as library](https://github.com/etcd-io/etcd/issues/12068). -- Changed behavior on [existing dir permission](https://github.com/etcd-io/etcd/pull/11798). - - Previously, the permission was not checked on existing data directory and the directory used for automatically generating self-signed certificates for TLS connections with clients. Now a check is added to make sure those directories, if they already exist, have the desired permission of 700 on Linux and 777 on Windows. - -### Package `wal` - -### etcd server -- Fix [watch stream got closed if one watch request is not permitted](https://github.com/etcd-io/etcd/pull/11758). -- Add [etcd --auth-token-ttl](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings. -- Improve [runtime.FDUsage objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986). -- Improve [mvcc.watchResponse channel Memory Usage](https://github.com/etcd-io/etcd/pull/11987). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - - -
- - -## [v3.3.22](https://github.com/etcd-io/etcd/releases/tag/v3.3.22) (2020-05-20) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.21...v3.3.22) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Package `wal` - -- Add [missing CRC checksum check in WAL validate method otherwise causes panic](https://github.com/etcd-io/etcd/pull/11924). - - See https://github.com/etcd-io/etcd/issues/11918. - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.3.21](https://github.com/etcd-io/etcd/releases/tag/v3.3.21) (2020-05-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.20...v3.3.21) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### `etcdctl` - -- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896). - -### Package `clientv3` - -- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896). - -### etcd server - -- Improve logging around snapshot send and receive. -- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670). -- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817). -- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888). - - Previously, server restore fails if it had crashed after persisting raft hard state but before saving snapshot. - - See https://github.com/etcd-io/etcd/issues/10219 for more. - -### Package `auth` - -- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652). - -### Metrics, Monitoring - -- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.3.20](https://github.com/etcd-io/etcd/releases/tag/v3.3.20) (2020-04-01) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.19...v3.3.20) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Package `wal` - -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). - -### Metrics, Monitoring - -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.3.19](https://github.com/etcd-io/etcd/releases/tag/v3.3.19) (2020-03-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.18...v3.3.19) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### client v3 - -- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687). - - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys. - -### etcd server - -- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613). -- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704). - -### etcdctl v3 - -- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11638) command to prevent potential timeout. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687). - -### gRPC Proxy - -- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler. - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.3.18](https://github.com/etcd-io/etcd/releases/tag/v3.3.18) (2019-11-26) - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11261) Prometheus metric. -- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric. - -### etcdserver - -- Fix [`wait purge file loop during shutdown`](https://github.com/etcd-io/etcd/pull/11308). - - Previously, during shutdown etcd could accidentally remove needed wal files, resulting in catastrophic error `etcdserver: open wal error: wal: file not found.` during startup. - - Now, etcd makes sure the purge file loop exits before server signals stop of the raft node. - - -
- - -## [v3.3.17](https://github.com/etcd-io/etcd/releases/tag/v3.3.17) (2019-10-11) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.16...v3.3.17) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -### Release details - -This release replaces 3.3.16. - -Due to the etcd 3.3.16 release being incorrectly released (see details below), please use this release instead. - - -
- - -## [v3.3.16](https://github.com/etcd-io/etcd/releases/tag/v3.3.16) (2019-10-10) - -**WARNING: This is a bad release! Please use etcd 3.3.17 instead. See https://github.com/etcd-io/etcd/issues/11241 for details.** - -### Issues with release - -- go mod for 'v3.3.16' may return a different hash if retrieved from a go mod proxy than if retrieved directly from github. Depending on this version is unsafe. See https://github.com/etcd-io/etcd/issues/11241 for details. -- The binaries and docker image for this release have been published and will be left as-is, but will not be signed since this is a bad release. - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.15...v3.3.16) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Improved - -- Add `etcd --experimental-peer-skip-client-san-verification` to [skip verification of peer client address](https://github.com/etcd-io/etcd/pull/11196). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. -- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. - -### Dependency - -- Upgrade [`github.com/coreos/bbolt`](https://github.com/etcd-io/bbolt/releases) from [**`v1.3.1-coreos.6`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.1-coreos.6) to [**`v1.3.3`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.3). 
- -### etcdctl v3 - -- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11194) command to prevent potential timeout. - -### Go - -- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. - -### client v3 - -- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184). - - Fix ["kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch" (kubernetes#83028)](https://github.com/kubernetes/kubernetes/issues/83028). -- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211). - - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550). - - -
- - -## [v3.3.15](https://github.com/etcd-io/etcd/releases/tag/v3.3.15) (2019-08-19) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.14...v3.3.15) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -NOTE: This patch release had to include some new features from 3.4, while trying to minimize the difference between client balancer implementation. This release fixes ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102). - -### Breaking Changes - -- Revert "Migrate dependency management tool from `glide` to [Go module](https://github.com/etcd-io/etcd/pull/10063)". - - Now, etcd >= v3.3.15 uses `glide` for dependency management. - - See [kubernetes#81434](https://github.com/kubernetes/kubernetes/pull/81434) for more contexts. - -### Go - -- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045). -- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. - - -
- - -## [v3.3.14](https://github.com/etcd-io/etcd/releases/tag/v3.3.14) (2019-08-16) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.13...v3.3.14) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -- [v3.3.14-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.14-rc.0) (2019-08-15), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.14-beta.0...v3.3.14-rc.0). -- [v3.3.14-beta.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.14-beta.0) (2019-08-14), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.13...v3.3.14-beta.0). - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -NOTE: This patch release had to include some new features from 3.4, while trying to minimize the difference between client balancer implementation. This release fixes ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102). - -### Breaking Changes - -- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106). - - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911). - - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911). - - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102). - - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. to block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`. 
-- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045). -- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. -- Migrate dependency management tool from `glide` to [Go module](https://github.com/etcd-io/etcd/pull/10063). - - <= 3.3 puts `vendor` directory under `cmd/vendor` directory to [prevent conflicting transitive dependencies](https://github.com/etcd-io/etcd/issues/4913). - - 3.4 moves `cmd/vendor` directory to `vendor` at repository root. - - Remove recursive symlinks in `cmd` directory. - - Now `go get/install/build` on `etcd` packages (e.g. `clientv3`, `tools/benchmark`) enforce builds with etcd `vendor` directory. -- Deprecated `latest` [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tag. - - **`docker pull gcr.io/etcd-development/etcd:latest` would not be up-to-date**. -- Deprecated [minor](https://semver.org/) version [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tags. - - `docker pull gcr.io/etcd-development/etcd:v3.3` would still work but may be stale. - - **`docker pull gcr.io/etcd-development/etcd:v3.4` would not work**. - - Use **`docker pull gcr.io/etcd-development/etcd:v3.3.14`** instead, with the exact patch version. -- Deprecated [ACIs from official release](https://github.com/etcd-io/etcd/pull/9059). - - [AppC was officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016. - - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore. - - `*.aci` files are not available from `v3.4` release. - -### etcd server - -- Add [`rpctypes.ErrLeaderChanged`](https://github.com/etcd-io/etcd/pull/10094). 
- - Now linearizable requests with read index would fail fast when there is a leadership change, instead of waiting until context timeout. -- Fix [race condition in `rafthttp` transport pause/resume](https://github.com/etcd-io/etcd/pull/10826). - -### API - -- Add [`watch_id` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9065) to allow user-provided watch ID to `mvcc`. - - Corresponding `watch_id` is returned via `etcdserverpb.WatchResponse`, if any. -- Add [`fragment` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9291) to request etcd server to [split watch events](https://github.com/etcd-io/etcd/issues/9294) when the total size of events exceeds `etcd --max-request-bytes` flag value plus gRPC-overhead 512 bytes. - - The default server-side request bytes limit is `embed.DefaultMaxRequestBytes` which is 1.5 MiB plus gRPC-overhead 512 bytes. - - If watch response events exceed this server-side request limit and watch request is created with `fragment` field `true`, the server will split watch events into a set of chunks, each of which is a subset of watch events below server-side request limit. - - Useful when client-side has limited bandwidths. - - For example, watch response contains 10 events, where each event is 1 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB. Then, server will send 10 separate fragmented events to the client. - - For example, watch response contains 5 events, where each event is 2 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB. Then, server will try to send 5 separate fragmented events to the client, and the client will error with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`. - - Client must implement fragmented watch event merge (which `clientv3` does in etcd v3.4). -- Add [`WatchRequest.WatchProgressRequest`](https://github.com/etcd-io/etcd/pull/9869). 
- - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams. - - Think of it as `WithProgressNotify` that can be triggered manually. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_network_snapshot_send_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric. -- Add [`etcd_network_snapshot_receive_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric. -- Add [`etcd_server_snapshot_apply_in_progress_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric. - -### client v3 - -- Fix [gRPC panic "send on closed channel](https://github.com/etcd-io/etcd/issues/9956) by upgrading [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5) to [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0). -- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106). - - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911). - - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911). - - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102). - - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. to block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`. 
- -### etcdctl v3 - -- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540). - - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532). - - The command output is changed. Previously, if endpoint is unreachable, the command output is - "\ is unhealthy: failed to connect: \". This change unified the error message, all error types - now have the same output "\ is unhealthy: failed to commit proposal: \". -- Add [missing newline in `etcdctl endpoint health`](https://github.com/etcd-io/etcd/pull/10793). - -### Package `pkg/adt` - -- Change [`pkg/adt.IntervalTree` from `struct` to `interface`](https://github.com/etcd-io/etcd/pull/10959). - - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt). -- Improve [`pkg/adt.IntervalTree` test coverage](https://github.com/etcd-io/etcd/pull/10959). - - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt). -- Fix [Red-Black tree to maintain black-height property](https://github.com/etcd-io/etcd/pull/10978). - - Previously, delete operation violates [black-height property](https://github.com/etcd-io/etcd/issues/10965). - -### Go - -- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045). -- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. - - -
- - -## [v3.3.13](https://github.com/etcd-io/etcd/releases/tag/v3.3.13) (2019-05-02) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.12...v3.3.13) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Improved - -- Improve [heartbeat send failure logging](https://github.com/etcd-io/etcd/pull/10663). -- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646). - -### client v3 - -- Fix [`(*Client).Endpoints()` method race condition](https://github.com/etcd-io/etcd/pull/10595). - -### Package `wal` - -- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603). - -### Dependency - -- Migrate [`github.com/ugorji/go/codec`](https://github.com/ugorji/go/releases) to [**`github.com/json-iterator/go`**](https://github.com/json-iterator/go) (See [#10667](https://github.com/etcd-io/etcd/pull/10667) for more). -- Migrate [`github.com/ghodss/yaml`](https://github.com/ghodss/yaml/releases) to [**`sigs.k8s.io/yaml`**](https://github.com/kubernetes-sigs/yaml) (See [#10718](https://github.com/etcd-io/etcd/pull/10718) for more). - -### Go - -- Compile with [*Go 1.10.8*](https://golang.org/doc/devel/release.html#go1.10). - - -
- - -## [v3.3.12](https://github.com/etcd-io/etcd/releases/tag/v3.3.12) (2019-02-07) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.11...v3.3.12) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### etcdctl v3 - -- [Strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443) with etcdctl v2 - -### Go - -- Compile with [*Go 1.10.8*](https://golang.org/doc/devel/release.html#go1.10). - - -
- - -## [v3.3.11](https://github.com/etcd-io/etcd/releases/tag/v3.3.11) (2019-01-11) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.10...v3.3.11) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### gRPC Proxy - -- Fix [memory leak in cache layer](https://github.com/etcd-io/etcd/pull/10327). - -### Security, Authentication - -- Disable [CommonName authentication for gRPC-gateway](https://github.com/etcd-io/etcd/pull/10366) gRPC-gateway proxy requests to etcd server use the etcd client server TLS certificate. If that certificate contains CommonName we do not want to use that for authentication as it could lead to permission escalation. - -### Go - -- Compile with [*Go 1.10.7*](https://golang.org/doc/devel/release.html#go1.10). - - -
- - -## [v3.3.10](https://github.com/etcd-io/etcd/releases/tag/v3.3.10) (2018-10-10) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.9...v3.3.10) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Improved - -- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates message send to a peer failed. -- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that local node might have slow network. -- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information. -- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error. - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats. - - Previously, it only samples the TCP connection for snapshot messages. -- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. 
-- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric. -- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric. - -### client v3 - -- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package. - -### Go - -- Compile with [*Go 1.10.4*](https://golang.org/doc/devel/release.html#go1.10). - - -
- - -## [v3.3.9](https://github.com/etcd-io/etcd/releases/tag/v3.3.9) (2018-07-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.8...v3.3.9) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Improved - -- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897). - -### Security, Authentication - -- Compile with [*Go 1.10.3*](https://golang.org/doc/devel/release.html#go1.10) to support [crypto/x509 "Name Constraints"](https://github.com/etcd-io/etcd/issues/9912). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric. -- Add [`etcd_server_heartbeat_send_failures_total`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric. -- Add [`etcd_server_slow_apply_total`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric. -- Add [`etcd_disk_backend_defrag_duration_seconds`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric. -- Add [`etcd_mvcc_hash_duration_seconds`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric. -- Add [`etcd_mvcc_hash_rev_duration_seconds`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric. -- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric. -- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric. - - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`. 
-  - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
-  - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
-  - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
-  - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-- Add [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) Prometheus metric.
-  - In addition to [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819).
-- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric.
-  - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
-  - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
-  - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
-  - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
-  - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-
-### client v3
-
-- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952).
-  - If `<-chan *clientv3.LeaseKeepAliveResponse` from `clientv3.Lease.KeepAlive` was never consumed or channel is full, client was [sending keepalive request every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of expected rate of every "TTL / 3" duration.
-
-### Go
-
-- Compile with [*Go 1.9.6*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
- - -## [v3.3.8](https://github.com/etcd-io/etcd/releases/tag/v3.3.8) (2018-06-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.7...v3.3.8) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Improved - -- Improve [slow request apply warning log](https://github.com/etcd-io/etcd/pull/9288). - - e.g. `read-only range request "key:\"/a\" range_end:\"/b\" " with result "range_response_count:3 size:96" took too long (97.966µs) to execute`. - - Redact [request value field](https://github.com/etcd-io/etcd/pull/9822). - - Provide [response size](https://github.com/etcd-io/etcd/pull/9826). -- Add [backoff on watch retries on transient errors](https://github.com/etcd-io/etcd/pull/9840). - -### Go - -- Compile with [*Go 1.9.7*](https://golang.org/doc/devel/release.html#go1.9). - - -
- - -## [v3.3.7](https://github.com/etcd-io/etcd/releases/tag/v3.3.7) (2018-06-06) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.6...v3.3.7) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Security, Authentication - -- Support TLS cipher suite whitelisting. - - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320). - - TLS handshake fails when client hello is requested with invalid cipher suites. - - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag. - - If empty, Go auto-populates the list. - -### etcdctl v3 - -- Fix [`etcdctl move-leader` command for TLS-enabled endpoints](https://github.com/etcd-io/etcd/pull/9807). - -### Go - -- Compile with [*Go 1.9.6*](https://golang.org/doc/devel/release.html#go1.9). - - -
- - -## [v3.3.6](https://github.com/etcd-io/etcd/releases/tag/v3.3.6) (2018-05-31) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.5...v3.3.6) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### etcd server - -- Allow [empty auth token](https://github.com/etcd-io/etcd/pull/9369). - - Previously, when auth token is an empty string, it returns [`failed to initialize the etcd server: auth: invalid auth options` error](https://github.com/etcd-io/etcd/issues/9349). -- Fix [auth storage panic on server lease revoke routine with JWT token](https://github.com/etcd-io/etcd/issues/9695). -- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775). - - Let's assume that a watcher had been requested with a future revision X and sent to node A that became network-partitioned thereafter. Meanwhile, cluster makes progress. Then when the partition gets removed, the leader sends a snapshot to node A. Previously if the snapshot's latest revision is still lower than the watch revision X, **etcd server panicked** during snapshot restore operation. - - Now, this server-side panic has been fixed. - -### Go - -- Compile with [*Go 1.9.6*](https://golang.org/doc/devel/release.html#go1.9). - - -
- - -## [v3.3.5](https://github.com/etcd-io/etcd/releases/tag/v3.3.5) (2018-05-09) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.4...v3.3.5) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### etcdctl v3 - -- Fix [`etcdctl watch [key] [range_end] -- [exec-command…]`](https://github.com/etcd-io/etcd/pull/9688) parsing. - - Previously, `ETCDCTL_API=3 ./bin/etcdctl watch foo -- echo watch event received` panicked. - -### Go - -- Compile with [*Go 1.9.6*](https://golang.org/doc/devel/release.html#go1.9). - - -
- - -## [v3.3.4](https://github.com/etcd-io/etcd/releases/tag/v3.3.4) (2018-04-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.3...v3.3.4) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric. -- Fix [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/9557) Prometheus metric. -- Fix [race conditions in v2 server stat collecting](https://github.com/etcd-io/etcd/pull/9562). - -### Security, Authentication - -- Fix [TLS reload](https://github.com/etcd-io/etcd/pull/9570) when [certificate SAN field only includes IP addresses but no domain names](https://github.com/etcd-io/etcd/issues/9541). - - In Go, server calls `(*tls.Config).GetCertificate` for TLS reload if and only if server's `(*tls.Config).Certificates` field is not empty, or `(*tls.ClientHelloInfo).ServerName` is not empty with a valid SNI from the client. Previously, etcd always populates `(*tls.Config).Certificates` on the initial client TLS handshake, as non-empty. Thus, client was always expected to supply a matching SNI in order to pass the TLS verification and to trigger `(*tls.Config).GetCertificate` to reload TLS assets. 
- - However, a certificate whose SAN field does [not include any domain names but only IP addresses](https://github.com/etcd-io/etcd/issues/9541) would request `*tls.ClientHelloInfo` with an empty `ServerName` field, thus failing to trigger the TLS reload on initial TLS handshake; this becomes a problem when expired certificates need to be replaced online. - - Now, `(*tls.Config).Certificates` is created empty on initial TLS client handshake, first to trigger `(*tls.Config).GetCertificate`, and then to populate rest of the certificates on every new TLS connection, even when client SNI is empty (e.g. cert only includes IPs). - -### etcd server - -- Add [`etcd --initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward. - - By default, `etcd --initial-election-tick-advance=true`, then local member fast-forwards election ticks to speed up "initial" leader election trigger. - - This benefits the case of larger election ticks. For instance, cross datacenter deployment may require longer election timeout of 10-second. If true, local node does not need wait up to 10-second. Instead, forwards its election ticks to 8-second, and have only 2-second left before leader election. - - Major assumptions are that: cluster has no active leader thus advancing ticks enables faster leader election. Or cluster already has an established leader, and rejoining follower is likely to receive heartbeats from the leader after tick advance and before election timeout. - - However, when network from leader to rejoining follower is congested, and the follower does not receive leader heartbeat within left election ticks, disruptive election has to happen thus affecting cluster availabilities. - - Now, this can be disabled by setting `--initial-election-tick-advance=false`. - - Disabling this would slow down initial bootstrap process for cross datacenter deployments. 
Make tradeoffs by configuring `etcd --initial-election-tick-advance` at the cost of slow initial bootstrap. - - If single-node, it advances ticks regardless. - - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333). - -### Package `embed` - -- Add [`embed.Config.InitialElectionTickAdvance`](https://github.com/etcd-io/etcd/pull/9591) to enable/disable initial election tick fast-forward. - - `embed.NewConfig()` would return `*embed.Config` with `InitialElectionTickAdvance` as true by default. - -### Go - -- Compile with [*Go 1.9.5*](https://golang.org/doc/devel/release.html#go1.9). - - -
-
-
-## [v3.3.3](https://github.com/etcd-io/etcd/releases/tag/v3.3.3) (2018-03-29)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.2...v3.3.3) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333).
-  - Previously, etcd fast-forwards election ticks on server start, with only one tick left for leader election. This is to speed up start phase, without having to wait until all election ticks elapse. Advancing election ticks is useful for cross datacenter deployments with larger election timeouts. However, it was affecting cluster availability if the last tick elapses before leader contacts the restarted node.
-  - Now, when etcd restarts, it adjusts election ticks with more than one tick left, thus more time for leader to prevent disruptive restart.
-- Adjust [periodic compaction retention window](https://github.com/etcd-io/etcd/pull/9485).
-  - e.g. `etcd --auto-compaction-mode=revision --auto-compaction-retention=1000` automatically `Compact` on `"latest revision" - 1000` every 5-minute (when latest revision is 30000, compact on revision 29000).
-  - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=72h` automatically `Compact` with 72-hour retention window for every 7.2-hour. **Now, `Compact` happens, for every 1-hour but still with 72-hour retention window.**
-  - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` automatically `Compact` with 30-minute retention window for every 3-minute. **Now, `Compact` happens, for every 30-minute but still with 30-minute retention window.**
-  - Periodic compactor keeps recording latest revisions for every compaction period when given period is less than 1-hour, or for every 1-hour when given compaction period is greater than 1-hour (e.g. 1-hour when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`).
-  - For every compaction period or 1-hour, compactor uses the last revision that was fetched before compaction period, to discard historical data.
-  - The retention window of compaction period moves for every given compaction period or hour.
-  - For instance, when hourly writes are 100 and `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`, `v3.2.x`, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 2400, 2640, and 2880 for every 2.4-hour, while `v3.3.3` *or later* compacts revision 2400, 2500, 2600 for every 1-hour.
-  - Furthermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 30000, 33000, and 36000, for every 3-minute, while `v3.3.3` *or later* compacts revision 30000, 60000, and 90000, for every 30-minute.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add missing [`etcd_network_peer_sent_failures_total` count](https://github.com/etcd-io/etcd/pull/9437).
-
-### Go
-
-- Compile with [*Go 1.9.5*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
- - -## [v3.3.2](https://github.com/etcd-io/etcd/releases/tag/v3.3.2) (2018-03-08) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.1...v3.3.2) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### etcd server - -- Fix [server panic on invalid Election Proclaim/Resign HTTP(S) requests](https://github.com/etcd-io/etcd/pull/9379). - - Previously, wrong-formatted HTTP requests to Election API could trigger panic in etcd server. - - e.g. `curl -L http://localhost:2379/v3/election/proclaim -X POST -d '{"value":""}'`, `curl -L http://localhost:2379/v3/election/resign -X POST -d '{"value":""}'`. -- Fix [revision-based compaction retention parsing](https://github.com/etcd-io/etcd/pull/9339). - - Previously, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` was [translated to revision retention 3600000000000](https://github.com/etcd-io/etcd/issues/9337). - - Now, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` is correctly parsed as revision retention 1. -- Prevent [overflow by large `TTL` values for `Lease` `Grant`](https://github.com/etcd-io/etcd/pull/9399). - - `TTL` parameter to `Grant` request is unit of second. - - Leases with too large `TTL` values exceeding `math.MaxInt64` [expire in unexpected ways](https://github.com/etcd-io/etcd/issues/9374). - - Server now returns `rpctypes.ErrLeaseTTLTooLarge` to client, when the requested `TTL` is larger than *9,000,000,000 seconds* (which is >285 years). - - Again, etcd `Lease` is meant for short-periodic keepalives or sessions, in the range of seconds or minutes. Not for hours or days! -- Enable etcd server [`raft.Config.CheckQuorum` when starting with `ForceNewCluster`](https://github.com/etcd-io/etcd/pull/9347). 
- -### Proxy v2 - -- Fix [v2 proxy leaky HTTP requests](https://github.com/etcd-io/etcd/pull/9336). - -### Go - -- Compile with [*Go 1.9.4*](https://golang.org/doc/devel/release.html#go1.9). - - -
- - -## [v3.3.1](https://github.com/etcd-io/etcd/releases/tag/v3.3.1) (2018-02-12) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0...v3.3.1) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Improved - -- Add [warnings on requests taking too long](https://github.com/etcd-io/etcd/pull/9288). - - e.g. `etcdserver: read-only range request "key:\"\\000\" range_end:\"\\000\" " took too long [3.389041388s] to execute` - -### etcd server - -- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9281). - - "unsynced" watcher is watcher that needs to be in sync with events that have happened. - - That is, "unsynced" watcher is the slow watcher that was requested on old revision. - - "unsynced" watcher restore operation was not correctly populating its underlying watcher group. - - Which possibly causes [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086). - - A node gets network partitioned with a watcher on a future revision, and falls behind receiving a leader snapshot after partition gets removed. When applying this snapshot, etcd watch storage moves current synced watchers to unsynced since sync watchers might have become stale during network partition. And reset synced watcher group to restart watcher routines. Previously, there was a bug when moving from synced watcher group to unsynced, thus client would miss events when the watcher was requested to the network-partitioned node. - -### Go - -- Compile with [*Go 1.9.4*](https://golang.org/doc/devel/release.html#go1.9). - - -
- - -## [v3.3.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.0) (2018-02-01) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.0...v3.3.0) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes. - -- [v3.3.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.0) (2018-02-01), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.4...v3.3.0). -- [v3.3.0-rc.4](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.4) (2018-01-22), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.3...v3.3.0-rc.4). -- [v3.3.0-rc.3](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.3) (2018-01-17), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.2...v3.3.0-rc.3). -- [v3.3.0-rc.2](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.2) (2018-01-11), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.1...v3.3.0-rc.2). -- [v3.3.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.1) (2018-01-02), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.0...v3.3.0-rc.1). -- [v3.3.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.0) (2017-12-20), see [code changes](https://github.com/etcd-io/etcd/compare/v3.2.0...v3.3.0-rc.0). - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).** - -### Improved - -- Use [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to replace [`boltdb/bolt`](https://github.com/boltdb/bolt#project-status). - - Fix [etcd database size grows until `mvcc: database space exceeded`](https://github.com/etcd-io/etcd/issues/8009). 
-- [Support database size larger than 8GiB](https://github.com/etcd-io/etcd/pull/7525) (8GiB is now a suggested maximum size for normal environments) -- [Reduce memory allocation](https://github.com/etcd-io/etcd/pull/8428) on [Range operations](https://github.com/etcd-io/etcd/pull/8475). -- [Rate limit](https://github.com/etcd-io/etcd/pull/8099) and [randomize](https://github.com/etcd-io/etcd/pull/8101) lease revoke on restart or leader elections. - - Prevent [spikes in Raft proposal rate](https://github.com/etcd-io/etcd/issues/8096). -- Support `clientv3` balancer failover under [network faults/partitions](https://github.com/etcd-io/etcd/issues/8711). -- Better warning on [mismatched `etcd --initial-cluster`](https://github.com/etcd-io/etcd/pull/8083) flag. - - etcd compares `etcd --initial-advertise-peer-urls` against corresponding `etcd --initial-cluster` URLs with forward-lookup. - - If resolved IP addresses of `etcd --initial-advertise-peer-urls` and `etcd --initial-cluster` do not match (e.g. [due to DNS error](https://github.com/etcd-io/etcd/pull/9210)), etcd will exit with errors. - - v3.2 error: `etcd --initial-cluster must include s1=https://s1.test:2380 given --initial-advertise-peer-urls=https://s1.test:2380`. - - v3.3 error: `failed to resolve https://s1.test:2380 to match --initial-cluster=s1=https://s1.test:2380 (failed to resolve "https://s1.test:2380" (error ...))`. - -### Breaking Changes - -- Require [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4) or [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5). - - Deprecate [`metadata.Incoming/OutgoingContext`](https://github.com/etcd-io/etcd/pull/7896). - - Deprecate `grpclog.Logger`, upgrade to [`grpclog.LoggerV2`](https://github.com/etcd-io/etcd/pull/8533). - - Deprecate [`grpc.ErrClientConnTimeout`](https://github.com/etcd-io/etcd/pull/8505) errors in `clientv3`. 
- - Use [`MaxRecvMsgSize` and `MaxSendMsgSize`](https://github.com/etcd-io/etcd/pull/8437) to limit message size, in etcd server. -- Translate [gRPC status error in v3 client `Snapshot` API](https://github.com/etcd-io/etcd/pull/9038). -- v3 `etcdctl` [`lease timetolive LEASE_ID`](https://github.com/etcd-io/etcd/issues/9028) on expired lease now prints [`"lease LEASE_ID already expired"`](https://github.com/etcd-io/etcd/pull/9047). - - <=3.2 prints `"lease LEASE_ID granted with TTL(0s), remaining(-1s)"`. -- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3alpha` with [`/v3beta`](https://github.com/etcd-io/etcd/pull/8880). - - To deprecate [`/v3alpha`](https://github.com/etcd-io/etcd/issues/8125) in v3.4. - - In v3.3, `curl -L http://localhost:2379/v3alpha/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback to `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'`, but `curl -L http://localhost:2379/v3alpha/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` won't work in v3.4. Use `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead. -- Change `etcd --auto-compaction-retention` flag to [accept string values](https://github.com/etcd-io/etcd/pull/8563) with [finer granularity](https://github.com/etcd-io/etcd/issues/8503). - - Now that `etcd --auto-compaction-retention` accepts string values, etcd configuration YAML file `auto-compaction-retention` field must be changed to `string` type. - - Previously, `--config-file etcd.config.yaml` can have `auto-compaction-retention: 24` field, now must be `auto-compaction-retention: "24"` or `auto-compaction-retention: "24h"`. 
- - If configured as `etcd --auto-compaction-mode periodic --auto-compaction-retention "24h"`, the time duration value for `etcd --auto-compaction-retention` flag must be valid for [`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration) function in Go. - -### Dependency - -- Upgrade [`boltdb/bolt`](https://github.com/boltdb/bolt#project-status) from [**`v1.3.0`**](https://github.com/boltdb/bolt/releases/tag/v1.3.0) to [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) [**`v1.3.1-coreos.6`**](https://github.com/coreos/bbolt/releases/tag/v1.3.1-coreos.6). -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.2.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.2.1) to [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5). -- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) to [**`v1.1`**](https://github.com/ugorji/go/releases/tag/v1.1), and [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/8721). -- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) to [**`ugorji/go@54210f4e0`**](https://github.com/ugorji/go/commit/54210f4e076c57f351166f0ed60e67d3fca57a36), and [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/8574). -- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.2.2`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.2.2) to [**`v1.3.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.0). -- Upgrade [`golang.org/x/crypto/bcrypt`](https://github.com/golang/crypto) to [**`golang/crypto@6c586e17d`**](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117). - -### Metrics, Monitoring - -See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. 
- -- Add [`etcd --listen-metrics-urls`](https://github.com/etcd-io/etcd/pull/8242) flag for additional `/metrics` and `/health` endpoints. - - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/etcd-io/etcd/issues/8060). -- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric. - - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948). -- Add [`etcd_debugging_mvcc_db_compaction_keys_total`](https://github.com/etcd-io/etcd/pull/8280) Prometheus metric. -- Add [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/8064) Prometheus metric. - - To improve [lease revoke monitoring](https://github.com/etcd-io/etcd/issues/8050). -- Document [Prometheus 2.0 rules](https://github.com/etcd-io/etcd/pull/8879). -- Initialize gRPC server [metrics with zero values](https://github.com/etcd-io/etcd/pull/8878). -- Fix [range/put/delete operation metrics](https://github.com/etcd-io/etcd/pull/8054) with transaction. - - `etcd_debugging_mvcc_range_total` - - `etcd_debugging_mvcc_put_total` - - `etcd_debugging_mvcc_delete_total` - - `etcd_debugging_mvcc_txn_total` -- Fix [`etcd_debugging_mvcc_keys_total`](https://github.com/etcd-io/etcd/pull/8390) on restore. -- Fix [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/8120) on restore. - - Also change to [`prometheus.NewGaugeFunc`](https://github.com/etcd-io/etcd/pull/8150). - -### Security, Authentication - -See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details. - -- Add [CRL based connection rejection](https://github.com/etcd-io/etcd/pull/8124) to manage [revoked certs](https://github.com/etcd-io/etcd/issues/4034). -- Document [TLS authentication changes](https://github.com/etcd-io/etcd/pull/8895). - - [Server accepts connections if IP matches, without checking DNS entries](https://github.com/etcd-io/etcd/pull/8223). 
For instance, if peer cert contains IP addresses and DNS names in Subject Alternative Name (SAN) field, and the remote IP address matches one of those IP addresses, server just accepts connection without further checking the DNS names. - - [Server supports reverse-lookup on wildcard DNS `SAN`](https://github.com/etcd-io/etcd/pull/8281). For instance, if peer cert contains only DNS names (no IP addresses) in Subject Alternative Name (SAN) field, server first reverse-lookups the remote IP address to get a list of names mapping to that address (e.g. `nslookup IPADDR`). Then accepts the connection if those names have a matching name with peer cert's DNS names (either by exact or wildcard match). If none is matched, server forward-lookups each DNS entry in peer cert (e.g. look up `example.default.svc` when the entry is `*.example.default.svc`), and accepts connection only when the host's resolved addresses have the matching IP address with the peer's remote IP address. -- Add [`etcd --peer-cert-allowed-cn`](https://github.com/etcd-io/etcd/pull/8616) flag. - - To support [CommonName(CN) based auth](https://github.com/etcd-io/etcd/issues/8262) for inter peer connection. -- [Swap priority](https://github.com/etcd-io/etcd/pull/8594) of cert CommonName(CN) and username + password. - - To address ["username and password specified in the request should take priority over CN in the cert"](https://github.com/etcd-io/etcd/issues/8584). -- Protect [lease revoke with auth](https://github.com/etcd-io/etcd/pull/8031). -- Provide user's role on [auth permission error](https://github.com/etcd-io/etcd/pull/8164). -- Fix [auth store panic with disabled token](https://github.com/etcd-io/etcd/pull/8695). - -### etcd server - -- Add [`etcd --experimental-initial-corrupt-check`](https://github.com/etcd-io/etcd/pull/8554) flag to [check cluster database hashes before serving client/peer traffic](https://github.com/etcd-io/etcd/issues/8313). 
- - `etcd --experimental-initial-corrupt-check=false` by default. - - v3.4 will enable `--initial-corrupt-check=true` by default. -- Add [`etcd --experimental-corrupt-check-time`](https://github.com/etcd-io/etcd/pull/8420) flag to [raise corrupt alarm monitoring](https://github.com/etcd-io/etcd/issues/7125). - - `etcd --experimental-corrupt-check-time=0s` disabled by default. -- Add [`etcd --experimental-enable-v2v3`](https://github.com/etcd-io/etcd/pull/8407) flag to [emulate v2 API with v3](https://github.com/etcd-io/etcd/issues/6925). - - `etcd --experimental-enable-v2v3=false` by default. -- Add [`etcd --max-txn-ops`](https://github.com/etcd-io/etcd/pull/7976) flag to [configure maximum number operations in transaction](https://github.com/etcd-io/etcd/issues/7826). -- Add [`etcd --max-request-bytes`](https://github.com/etcd-io/etcd/pull/7968) flag to [configure maximum client request size](https://github.com/etcd-io/etcd/issues/7923). - - If not configured, it defaults to 1.5 MiB. -- Add [`etcd --client-crl-file`, `--peer-crl-file`](https://github.com/etcd-io/etcd/pull/8124) flags for [Certificate revocation list](https://github.com/etcd-io/etcd/issues/4034). -- Add [`etcd --peer-cert-allowed-cn`](https://github.com/etcd-io/etcd/pull/8616) flag to support [CN-based auth for inter-peer connection](https://github.com/etcd-io/etcd/issues/8262). -- Add [`etcd --listen-metrics-urls`](https://github.com/etcd-io/etcd/pull/8242) flag for additional `/metrics` and `/health` endpoints. - - Support [additional (non) TLS `/metrics` endpoints for a TLS-enabled cluster](https://github.com/etcd-io/etcd/pull/8282). - - e.g. `etcd --listen-metrics-urls=https://localhost:2378,http://localhost:9379` to serve `/metrics` and `/health` on secure port 2378 and insecure port 9379. - - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/etcd-io/etcd/issues/8060). 
-- Add [`etcd --auto-compaction-mode`](https://github.com/etcd-io/etcd/pull/8123) flag to [support revision-based compaction](https://github.com/etcd-io/etcd/issues/8098). -- Change `etcd --auto-compaction-retention` flag to [accept string values](https://github.com/etcd-io/etcd/pull/8563) with [finer granularity](https://github.com/etcd-io/etcd/issues/8503). - - Now that `etcd --auto-compaction-retention` accepts string values, etcd configuration YAML file `auto-compaction-retention` field must be changed to `string` type. - - Previously, `etcd --config-file etcd.config.yaml` can have `auto-compaction-retention: 24` field, now must be `auto-compaction-retention: "24"` or `auto-compaction-retention: "24h"`. - - If configured as `--auto-compaction-mode periodic --auto-compaction-retention "24h"`, the time duration value for `etcd --auto-compaction-retention` flag must be valid for [`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration) function in Go. - - e.g. `etcd --auto-compaction-mode=revision --auto-compaction-retention=1000` automatically `Compact` on `"latest revision" - 1000` every 5-minute (when latest revision is 30000, compact on revision 29000). - - e.g. `etcd --auto-compaction-mode=periodic --auto-compaction-retention=72h` automatically `Compact` with 72-hour retention window, for every 7.2-hour. - - e.g. `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` automatically `Compact` with 30-minute retention window, for every 3-minute. - - Periodic compactor continues to record latest revisions for every 1/10 of given compaction period (e.g. 1-hour when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=10h`). - - For every 1/10 of given compaction period, compactor uses the last revision that was fetched before compaction period, to discard historical data. - - The retention window of compaction period moves for every 1/10 of given compaction period. 
- - For instance, when hourly writes are 100 and `--auto-compaction-retention=10`, v3.1 compacts revision 1000, 2000, and 3000 for every 10-hour, while v3.2.x, v3.3.0, v3.3.1, and v3.3.2 compact revision 1000, 1100, and 1200 for every 1-hour. Furthermore, when writes per minute are 1000, v3.3.0, v3.3.1, and v3.3.2 with `--auto-compaction-mode=periodic --auto-compaction-retention=30m` compact revision 30000, 33000, and 36000, for every 3-minute with finer granularity. - - Whether compaction succeeds or not, this process repeats for every 1/10 of given compaction period. If compaction succeeds, it just removes compacted revision from historical revision records. -- Add [`etcd --grpc-keepalive-min-time`, `etcd --grpc-keepalive-interval`, `etcd --grpc-keepalive-timeout`](https://github.com/etcd-io/etcd/pull/8535) flags to configure server-side keepalive policies. -- Serve [`/health` endpoint as unhealthy](https://github.com/etcd-io/etcd/pull/8272) when [alarm (e.g. `NOSPACE`) is raised or there's no leader](https://github.com/etcd-io/etcd/issues/8207). - - Define [`etcdhttp.Health`](https://godoc.org/github.com/coreos/etcd/etcdserver/api/etcdhttp#Health) struct with JSON encoder. - - Note that `"health"` field is [`string` type, not `bool`](https://github.com/etcd-io/etcd/pull/9143). - - e.g. `{"health":"false"}`, `{"health":"true"}` - - [Remove `"errors"` field](https://github.com/etcd-io/etcd/pull/9162) since `v3.3.0-rc.3` (did exist only in `v3.3.0-rc.0`, `v3.3.0-rc.1`, `v3.3.0-rc.2`). -- Move [logging setup to embed package](https://github.com/etcd-io/etcd/pull/8810) - - Disable gRPC server info-level logs by default (can be enabled with `etcd --debug` flag). -- Use [monotonic time in Go 1.9](https://github.com/etcd-io/etcd/pull/8507) for `lease` package. -- Warn on [empty hosts in advertise URLs](https://github.com/etcd-io/etcd/pull/8384). - - Address [advertise client URLs accepts empty hosts](https://github.com/etcd-io/etcd/issues/8379). 
- - etcd v3.4 will exit on this error. - - e.g. `etcd --advertise-client-urls=http://:2379`. -- Warn on [shadowed environment variables](https://github.com/etcd-io/etcd/pull/8385). - - Address [error on shadowed environment variables](https://github.com/etcd-io/etcd/issues/8380). - - etcd v3.4 will exit on this error. - -### API - -- Support [ranges in transaction comparisons](https://github.com/etcd-io/etcd/pull/8025) for [disconnected linearized reads](https://github.com/etcd-io/etcd/issues/7924). -- Add [nested transactions](https://github.com/etcd-io/etcd/pull/8102) to extend [proxy use cases](https://github.com/etcd-io/etcd/issues/7857). -- Add [lease comparison target in transaction](https://github.com/etcd-io/etcd/pull/8324). -- Add [lease list](https://github.com/etcd-io/etcd/pull/8358). -- Add [hash by revision](https://github.com/etcd-io/etcd/pull/8263) for [better corruption checking against boltdb](https://github.com/etcd-io/etcd/issues/8016). - -### client v3 - -- Add [health balancer](https://github.com/etcd-io/etcd/pull/8545) to fix [watch API hangs](https://github.com/etcd-io/etcd/issues/7247), improve [endpoint switch under network faults](https://github.com/etcd-io/etcd/issues/7941). -- [Refactor balancer](https://github.com/etcd-io/etcd/pull/8840) and add [client-side keepalive pings](https://github.com/etcd-io/etcd/pull/8199) to handle [network partitions](https://github.com/etcd-io/etcd/issues/8711). -- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/coreos/etcd/clientv3#Config). - - Fix [exceeded response size limit error in client-side](https://github.com/etcd-io/etcd/issues/9043). - - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099). - - In previous versions(v3.2.10, v3.2.11), client response size was limited to only 4 MiB. - - `MaxCallSendMsgSize` default value is 2 MiB, if not configured. 
- - `MaxCallRecvMsgSize` default value is `math.MaxInt32`, if not configured. -- Accept [`Compare_LEASE`](https://github.com/etcd-io/etcd/pull/8324) in [`clientv3.Compare`](https://godoc.org/github.com/coreos/etcd/clientv3#Compare). -- Add [`LeaseValue` helper](https://github.com/etcd-io/etcd/pull/8488) to `Cmp` `LeaseID` values in `Txn`. -- Add [`MoveLeader`](https://github.com/etcd-io/etcd/pull/8153) to `Maintenance`. -- Add [`HashKV`](https://github.com/etcd-io/etcd/pull/8351) to `Maintenance`. -- Add [`Leases`](https://github.com/etcd-io/etcd/pull/8358) to `Lease`. -- Add [`clientv3/ordering`](https://github.com/etcd-io/etcd/pull/8092) to enforce [ordering in serialized requests](https://github.com/etcd-io/etcd/issues/7623). -- Fix ["put at-most-once" violation](https://github.com/etcd-io/etcd/pull/8335). -- Fix [`WatchResponse.Canceled`](https://github.com/etcd-io/etcd/pull/8283) on [compacted watch request](https://github.com/etcd-io/etcd/issues/8231). -- Fix [`concurrency/stm` `Put` with serializable snapshot](https://github.com/etcd-io/etcd/pull/8439). - - Use store revision from first fetch to resolve write conflicts instead of modified revision. - -### etcdctl v3 - -- Add [`etcdctl --discovery-srv`](https://github.com/etcd-io/etcd/pull/8462) flag. -- Add [`etcdctl --keepalive-time`, `--keepalive-timeout`](https://github.com/etcd-io/etcd/pull/8663) flags. -- Add [`etcdctl lease list`](https://github.com/etcd-io/etcd/pull/8358) command. -- Add [`etcdctl lease keep-alive --once`](https://github.com/etcd-io/etcd/pull/8775) flag. -- Make [`lease timetolive LEASE_ID`](https://github.com/etcd-io/etcd/issues/9028) on expired lease print [`lease LEASE_ID already expired`](https://github.com/etcd-io/etcd/pull/9047). - - <=3.2 prints `lease LEASE_ID granted with TTL(0s), remaining(-1s)`. -- Add [`etcdctl snapshot restore --wal-dir`](https://github.com/etcd-io/etcd/pull/9124) flag. -- Add [`etcdctl defrag --data-dir`](https://github.com/etcd-io/etcd/pull/8367) flag. 
-- Add [`etcdctl move-leader`](https://github.com/etcd-io/etcd/pull/8153) command. -- Add [`etcdctl endpoint hashkv`](https://github.com/etcd-io/etcd/pull/8351) command. -- Add [`etcdctl endpoint --cluster`](https://github.com/etcd-io/etcd/pull/8143) flag, equivalent to [v2 `etcdctl cluster-health`](https://github.com/etcd-io/etcd/issues/8117). -- Make `etcdctl endpoint health` command terminate with [non-zero exit code on unhealthy status](https://github.com/etcd-io/etcd/pull/8342). -- Add [`etcdctl lock --ttl`](https://github.com/etcd-io/etcd/pull/8370) flag. -- Support [`etcdctl watch [key] [range_end] -- [exec-command…]`](https://github.com/etcd-io/etcd/pull/8919), equivalent to [v2 `etcdctl exec-watch`](https://github.com/etcd-io/etcd/issues/8814). - - Make `etcdctl watch -- [exec-command]` set environmental variables [`ETCD_WATCH_REVISION`, `ETCD_WATCH_EVENT_TYPE`, `ETCD_WATCH_KEY`, `ETCD_WATCH_VALUE`](https://github.com/etcd-io/etcd/pull/9142) for each event. -- Support [`etcdctl watch` with environmental variables `ETCDCTL_WATCH_KEY` and `ETCDCTL_WATCH_RANGE_END`](https://github.com/etcd-io/etcd/pull/9142). -- Enable [`clientv3.WithRequireLeader(context.Context)` for `watch`](https://github.com/etcd-io/etcd/pull/8672) command. -- Print [`"del"` instead of `"delete"`](https://github.com/etcd-io/etcd/pull/8297) in `txn` interactive mode. -- Print [`ETCD_INITIAL_ADVERTISE_PEER_URLS` in `member add`](https://github.com/etcd-io/etcd/pull/8332). -- Fix [`etcdctl snapshot status` to not modify snapshot file](https://github.com/etcd-io/etcd/pull/8815). 
- - For example, start etcd `v3.3.10` - - Write some data - - Use etcdctl `v3.3.10` to save snapshot - - Somehow, upgrading Kubernetes fails, thus rolling back to previous version etcd `v3.2.24` - - Run etcdctl `v3.2.24` `snapshot status` against the snapshot file saved from `v3.3.10` server - - Run etcdctl `v3.2.24` `snapshot restore` fails with `"expected sha256 [12..."` - -### etcdctl v3 - -- Handle [empty key permission](https://github.com/etcd-io/etcd/pull/8514) in `etcdctl`. - -### etcdctl v2 - -- Add [`etcdctl backup --with-v3`](https://github.com/etcd-io/etcd/pull/8479) flag. - -### gRPC Proxy - -- Add [`grpc-proxy start --experimental-leasing-prefix`](https://github.com/etcd-io/etcd/pull/8341) flag. - - For disconnected linearized reads. - - Based on [V system leasing](https://github.com/etcd-io/etcd/issues/6065). - - See ["Disconnected consistent reads with etcd" blog post](https://coreos.com/blog/coreos-labs-disconnected-consistent-reads-with-etcd). -- Add [`grpc-proxy start --experimental-serializable-ordering`](https://github.com/etcd-io/etcd/pull/8315) flag. - - To ensure serializable reads have monotonically increasing store revisions across endpoints. -- Add [`grpc-proxy start --metrics-addr`](https://github.com/etcd-io/etcd/pull/8242) flag for an additional `/metrics` endpoint. - - Set `--metrics-addr=http://[HOST]:9379` to serve `/metrics` in insecure port 9379. -- Serve [`/health` endpoint in grpc-proxy](https://github.com/etcd-io/etcd/pull/8322). -- Add [`grpc-proxy start --debug`](https://github.com/etcd-io/etcd/pull/8994) flag. -- Add [`grpc-proxy start --max-send-bytes`](https://github.com/etcd-io/etcd/pull/9250) flag to [configure maximum client request size](https://github.com/etcd-io/etcd/issues/7923). -- Add [`grpc-proxy start --max-recv-bytes`](https://github.com/etcd-io/etcd/pull/9250) flag to [configure maximum client request size](https://github.com/etcd-io/etcd/issues/7923). 
-- Fix [Snapshot API error handling](https://github.com/etcd-io/etcd/commit/dbd16d52fbf81e5fd806d21ff5e9148d5bf203ab). -- Fix [KV API `PrevKv` flag handling](https://github.com/etcd-io/etcd/pull/8366). -- Fix [KV API `KeysOnly` flag handling](https://github.com/etcd-io/etcd/pull/8552). - -### gRPC gateway - -- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3alpha` with [`/v3beta`](https://github.com/etcd-io/etcd/pull/8880). - - To deprecate [`/v3alpha`](https://github.com/etcd-io/etcd/issues/8125) in v3.4. - - In v3.3, `curl -L http://localhost:2379/v3alpha/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback to `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'`, but `curl -L http://localhost:2379/v3alpha/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` won't work in v3.4. Use `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead. -- Support ["authorization" token](https://github.com/etcd-io/etcd/pull/7999). -- Support [websocket for bi-directional streams](https://github.com/etcd-io/etcd/pull/8257). - - Fix [`Watch` API with gRPC gateway](https://github.com/etcd-io/etcd/issues/8237). -- Upgrade gRPC gateway to [v1.3.0](https://github.com/etcd-io/etcd/issues/8838). - -### etcd server - -- Fix [backend database in-memory index corruption](https://github.com/etcd-io/etcd/pull/8127) issue on restore (only 3.2.0 is affected). -- Fix [watch restore from snapshot](https://github.com/etcd-io/etcd/pull/8427). -- Fix [`mvcc/backend.defragdb` nil-pointer dereference on create bucket failure](https://github.com/etcd-io/etcd/pull/9119). -- Fix [server crash](https://github.com/etcd-io/etcd/pull/8010) on [invalid transaction request from gRPC gateway](https://github.com/etcd-io/etcd/issues/7889). 
-- Prevent [server panic from member update/add](https://github.com/etcd-io/etcd/pull/9174) with [wrong scheme URLs](https://github.com/etcd-io/etcd/issues/9173). -- Make [peer dial timeout longer](https://github.com/etcd-io/etcd/pull/8599). - - See [coreos/etcd-operator#1300](https://github.com/etcd-io/etcd-operator/issues/1300) for more detail. -- Make server [wait up to request time-out](https://github.com/etcd-io/etcd/pull/8267) with [pending RPCs](https://github.com/etcd-io/etcd/issues/8224). -- Fix [`grpc.Server` panic on `GracefulStop`](https://github.com/etcd-io/etcd/pull/8987) with [TLS-enabled server](https://github.com/etcd-io/etcd/issues/8916). -- Fix ["multiple peer URLs cannot start" issue](https://github.com/etcd-io/etcd/issues/8383). -- Fix server-side auth so [concurrent auth operations do not return old revision error](https://github.com/etcd-io/etcd/pull/8442). -- Handle [WAL renaming failure on Windows](https://github.com/etcd-io/etcd/pull/8286). -- Upgrade [`coreos/go-systemd`](https://github.com/coreos/go-systemd/releases) to `v15` (see https://github.com/coreos/go-systemd/releases/tag/v15). -- [Put back `/v2/machines`](https://github.com/etcd-io/etcd/pull/8062) endpoint for python-etcd wrapper. - -### client v2 - -- [Fail-over v2 client](https://github.com/etcd-io/etcd/pull/8519) to next endpoint on [oneshot failure](https://github.com/etcd-io/etcd/issues/8515). - -### Package `raft` - -- Add [non-voting member](https://github.com/etcd-io/etcd/pull/8751). - - To implement [Raft thesis 4.2.1 Catching up new servers](https://github.com/etcd-io/etcd/issues/8568). - - `Learner` node does not vote or promote itself. - -### Other - -- Support previous two minor versions (see our [new release policy](https://github.com/etcd-io/etcd/pull/8805)). -- `v3.3.x` is the last release cycle that supports `ACI`. - - [AppC was officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016. 
- - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore. - - `*.aci` files won't be available from etcd v3.4 release. -- Add container registry [`gcr.io/etcd-development/etcd`](https://gcr.io/etcd-development/etcd). - - [quay.io/coreos/etcd](https://quay.io/coreos/etcd) is still supported as secondary. - -### Go - -- Require [*Go 1.9+*](https://github.com/etcd-io/etcd/issues/6174). -- Compile with [*Go 1.9.3*](https://golang.org/doc/devel/release.html#go1.9). -- Deprecate [`golang.org/x/net/context`](https://github.com/etcd-io/etcd/pull/8511). - - -
- diff --git a/CHANGELOG/CHANGELOG-3.4.md b/CHANGELOG/CHANGELOG-3.4.md deleted file mode 100644 index 77caa2bfb73..00000000000 --- a/CHANGELOG/CHANGELOG-3.4.md +++ /dev/null @@ -1,1199 +0,0 @@ - - -Previous change logs can be found at [CHANGELOG-3.3](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.3.md). - -
- -## v3.4.24 (TBD) - -### etcd server -- Fix [etcdserver might promote a non-started learner](https://github.com/etcd-io/etcd/pull/15097). -- Improve [mvcc: reduce count-only range overhead](https://github.com/etcd-io/etcd/pull/15099) -- Improve [mvcc: push down RangeOptions.limit argv into index tree to reduce memory overhead](https://github.com/etcd-io/etcd/pull/15137) -- Improve [server: set multiple concurrentReadTx instances share one txReadBuffer](https://github.com/etcd-io/etcd/pull/15195) - -### Dependency -- Upgrade [github.com/grpc-ecosystem/grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [v1.9.5](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.9.5) to [v1.11.0](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.11.0). -- Bump bbolt to [v1.3.7](https://github.com/etcd-io/etcd/pull/15223). - -### Other -- Updated [base image from base-debian11 to static-debian11 and removed dependency on busybox](https://github.com/etcd-io/etcd/pull/15038). - -### Package `netutil` -- Fix [consistently format IPv6 addresses for comparison](https://github.com/etcd-io/etcd/pull/15188) - -
- -## v3.4.23 (2022-12-21) - -### Package `clientv3` -- Fix [Refreshing token on CommonName based authentication causes segmentation violation in client](https://github.com/etcd-io/etcd/pull/14792). - -### etcd server -- Fix [Remove memberID from data corrupt alarm](https://github.com/etcd-io/etcd/pull/14853). -- Fix [nil pointer panic for readonly txn due to nil response](https://github.com/etcd-io/etcd/pull/14900). - -### Security -- Use [distroless base image](https://github.com/etcd-io/etcd/pull/15017) to address critical Vulnerabilities. -- Bumped [some dependencies](https://github.com/etcd-io/etcd/pull/15019) to address some HIGH Vulnerabilities. - -### Go -- Require [Go 1.17+](https://github.com/etcd-io/etcd/pull/15019). -- Compile with [Go 1.17+](https://go.dev/doc/devel/release#go1.17) - -
- -## v3.4.22 (2022-11-02) - -### etcd server -- Fix [memberID equals zero in corruption alarm](https://github.com/etcd-io/etcd/pull/14530) -- Fix [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14548) -- Fix [avoid closing a watch with ID 0 incorrectly](https://github.com/etcd-io/etcd/pull/14562) -- Fix [auth: fix data consistency issue caused by recovery from snapshot](https://github.com/etcd-io/etcd/pull/14649) - -### Package `netutil` -- Fix [netutil: add url comparison without resolver to URLStringsEqual](https://github.com/etcd-io/etcd/pull/14577) - -### Package `clientv3` -- Fix [Add backoff before retry when watch stream returns unavailable](https://github.com/etcd-io/etcd/pull/14581). - -### etcd grpc-proxy -- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14601) flag to support adding configurable cipher list. - -
- -## v3.4.21 (2022-09-15) - -### etcd server -- Fix [Durability API guarantee broken in single node cluster](https://github.com/etcd-io/etcd/pull/14423) -- Fix [Panic due to nil log object](https://github.com/etcd-io/etcd/pull/14420) -- Fix [authentication data not loaded on member startup](https://github.com/etcd-io/etcd/pull/14410) - -### etcdctl v3 - -- Fix [etcdctl move-leader may fail for multiple endpoints](https://github.com/etcd-io/etcd/pull/14441) - -
- -## v3.4.20 (2022-08-06) - -### Package `clientv3` - -- Fix [filter learners members during autosync](https://github.com/etcd-io/etcd/pull/14236). - -### etcd server -- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14251) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32. -- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/14253) flag to enable checkpoint persisting. -- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/14253), requires enabling checkpoint persisting. -- Fix [Protect rangePermCache with a RW lock correctly](https://github.com/etcd-io/etcd/pull/14230) -- Fix [raft: postpone MsgReadIndex until first commit in the term](https://github.com/etcd-io/etcd/pull/14258) -- Fix [etcdserver: resend ReadIndex request on empty apply request](https://github.com/etcd-io/etcd/pull/14269) -- Fix [remove temp files in snap dir when etcdserver starting](https://github.com/etcd-io/etcd/pull/14246) -- Fix [Etcdserver is still in progress of processing LeaseGrantRequest when it receives a LeaseKeepAliveRequest on the same leaseID](https://github.com/etcd-io/etcd/pull/14177) -- Fix [Grant lease with negative ID can possibly cause db out of sync](https://github.com/etcd-io/etcd/pull/14239) -- Fix [Allow non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/14254) - -
- -## v3.4.19 (2022-07-12) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.18...v3.4.19) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### etcd server -- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13475). -- Fix [Defrag unsets backend options](https://github.com/etcd-io/etcd/pull/13713). -- Fix [lease leak issue due to tokenProvider isn't enabled when restoring auth store from a snapshot](https://github.com/etcd-io/etcd/pull/13206). -- Fix [the race condition between goroutine and channel on the same leases to be revoked](https://github.com/etcd-io/etcd/pull/14150). -- Fix [lessor may continue to schedule checkpoint after stepping down leader role](https://github.com/etcd-io/etcd/pull/14150). - -### Package `clientv3` -- Fix [a bug of not refreshing expired tokens](https://github.com/etcd-io/etcd/pull/13999). - -### Dependency -- Upgrade [go.etcd.io/bbolt](https://github.com/etcd-io/bbolt/releases) from [v1.3.3](https://github.com/etcd-io/bbolt/releases/tag/v1.3.3) to [v1.3.6](https://github.com/etcd-io/bbolt/releases/tag/v1.3.6). - -### Security -- Upgrade [golang.org/x/crypto](https://github.com/etcd-io/etcd/pull/14179) to v0.0.0-20220411220226-7b82a4e95df4 to address [CVE-2022-27191 ](https://github.com/advisories/GHSA-8c26-wmh5-6g9v). -- Upgrade [gopkg.in/yaml.v2](https://github.com/etcd-io/etcd/pull/14192) to v2.4.0 to address [CVE-2019-11254](https://github.com/advisories/GHSA-wxc4-f4m6-wwqv). - -### Go -- Require [Go 1.16+](https://github.com/etcd-io/etcd/pull/14136). -- Compile with [Go 1.16+](https://go.dev/doc/devel/release#go1.16). -- etcd uses [go modules](https://github.com/etcd-io/etcd/pull/14136) (instead of vendor dir) to track dependencies. - -
- -## v3.4.18 (2021-10-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.17...v3.4.18) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### Metrics, Monitoring - -See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. - -- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13397). - -### Other - -- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs: - - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption - - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc - - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp - - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out of bound reads. - -
- -## v3.4.17 (2021-10-03) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.16...v3.4.17) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### `etcdctl` - -- Fix [etcdctl check datascale command](https://github.com/etcd-io/etcd/pull/11896) to work with https endpoints. - -### gRPC gateway - -- Add [`MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/13077) support for http client. - -### Dependency - -- Replace [`github.com/dgrijalva/jwt-go with github.com/golang-jwt/jwt'](https://github.com/etcd-io/etcd/pull/13378). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - -
- -## v3.4.16 (2021-05-11) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.15...v3.4.16) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### etcd server - -- Add [`--experimental-warning-apply-duration`](https://github.com/etcd-io/etcd/pull/12448) flag which allows apply duration threshold to be configurable. -- Fix [`--unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/12751) to still write-out data avoiding corruption (most of the time). -- Reduce [around 30% memory allocation by logging range response size without marshal](https://github.com/etcd-io/etcd/pull/12871). -- Add [exclude alarms from health check conditionally](https://github.com/etcd-io/etcd/pull/12880). - -### Metrics - -- Fix [incorrect metrics generated when clients cancel watches](https://github.com/etcd-io/etcd/pull/12803) back-ported from (https://github.com/etcd-io/etcd/pull/12196). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.15](https://github.com/etcd-io/etcd/releases/tag/v3.4.15) (2021-02-26) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.14...v3.4.15) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### etcd server - -- Log [successful etcd server-side health check in debug level](https://github.com/etcd-io/etcd/pull/12677). -- Fix [64 KB websocket notification message limit](https://github.com/etcd-io/etcd/pull/12402). - -### Package `fileutil` - -- Fix [`F_OFD_` constants](https://github.com/etcd-io/etcd/pull/12444). - -### Dependency - -- Bump up [`gorilla/websocket` to v1.4.2](https://github.com/etcd-io/etcd/pull/12645). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.14](https://github.com/etcd-io/etcd/releases/tag/v3.4.14) (2020-11-25) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.13...v3.4.14) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### Package `clientv3` - -- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready. - -### etcd server - -- [Fix server panic](https://github.com/etcd-io/etcd/pull/12288) when force-new-cluster flag is enabled in a cluster which had learner node. - -### Package `netutil` - -- Remove [`netutil.DropPort/RecoverPort/SetLatency/RemoveLatency`](https://github.com/etcd-io/etcd/pull/12491). - - These are not used anymore. They were only used for older versions of functional testing. - - Removed to adhere to best security practices, minimize arbitrary shell invocation. - -### `tools/etcd-dump-metrics` - -- Implement [input validation to prevent arbitrary shell invocation](https://github.com/etcd-io/etcd/pull/12491). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -

## [v3.4.13](https://github.com/etcd-io/etcd/releases/tag/v3.4.13) (2020-08-24)

See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.12...v3.4.13) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.

### Security

- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd uses any existing directory that has a permission different than 700 on Linux and 777 on Windows.

### Go

- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).

- - -## [v3.4.12](https://github.com/etcd-io/etcd/releases/tag/v3.4.12) (2020-08-19) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.11...v3.4.12) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### etcd server - -- Fix [server panic in slow writes warnings](https://github.com/etcd-io/etcd/issues/12197). - - Fixed via [PR#12238](https://github.com/etcd-io/etcd/pull/12238). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - - -
- - - -## [v3.4.11](https://github.com/etcd-io/etcd/releases/tag/v3.4.11) (2020-08-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.10...v3.4.11) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### etcd server - -- Improve [`runtime.FDUsage` call pattern to reduce objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986). -- Add [`etcd --experimental-watch-progress-notify-interval`](https://github.com/etcd-io/etcd/pull/12216) flag to make watch progress notify interval configurable. - -### Package `clientv3` - -- Remove [excessive watch cancel logging messages](https://github.com/etcd-io/etcd/pull/12187). - - See [kubernetes/kubernetes#93450](https://github.com/kubernetes/kubernetes/issues/93450). - -### Package `runtime` - -- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214). - -### Metrics, Monitoring - -- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214). -- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13397). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - - - -
- - - - -## [v3.4.10](https://github.com/etcd-io/etcd/releases/tag/v3.4.10) (2020-07-16) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.9...v3.4.10) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### Package `etcd server` - -- Add [`--unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/11946) flag. - - Setting the flag disables all uses of fsync, which is unsafe and will cause data loss. This flag makes it possible to run an etcd node for testing and development without placing lots of load on the file system. -- Add [etcd --auth-token-ttl](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings. -- Improve [runtime.FDUsage objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986). -- Improve [mvcc.watchResponse channel Memory Usage](https://github.com/etcd-io/etcd/pull/11987). -- Fix [`int64` convert panic in raft logger](https://github.com/etcd-io/etcd/pull/12106). - - Fix [kubernetes/kubernetes#91937](https://github.com/kubernetes/kubernetes/issues/91937). - -### Breaking Changes - -- Changed behavior on [existing dir permission](https://github.com/etcd-io/etcd/pull/11798). - - Previously, the permission was not checked on existing data directory and the directory used for automatically generating self-signed certificates for TLS connections with clients. Now a check is added to make sure those directories, if already exist, has a desired permission of 700 on Linux and 777 on Windows. - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.9](https://github.com/etcd-io/etcd/releases/tag/v3.4.9) (2020-05-20) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.8...v3.4.9) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### Package `wal` - -- Add [missing CRC checksum check in WAL validate method otherwise causes panic](https://github.com/etcd-io/etcd/pull/11924). - - See https://github.com/etcd-io/etcd/issues/11918. - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.8](https://github.com/etcd-io/etcd/releases/tag/v3.4.8) (2020-05-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.7...v3.4.8) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### `etcdctl` - -- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896). - -### Package `clientv3` - -- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896). - -### etcd server - -- Improve logging around snapshot send and receive. -- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670). -- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817). -- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888). - - Previously, server restore fails if it had crashed after persisting raft hard state but before saving snapshot. - - See https://github.com/etcd-io/etcd/issues/10219 for more. - -### Package Auth - -- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652). - -### Metrics, Monitoring - -- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.7](https://github.com/etcd-io/etcd/releases/tag/v3.4.7) (2020-04-01) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.6...v3.4.7) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### etcd server - -- Improve [compaction performance when latest index is greater than 1-million](https://github.com/etcd-io/etcd/pull/11734). - -### Package `wal` - -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). - -### Metrics, Monitoring - -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.6](https://github.com/etcd-io/etcd/releases/tag/v3.4.6) (2020-03-29) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.5...v3.4.6) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -### Package `lease` - -- Fix [memory leak in follower nodes](https://github.com/etcd-io/etcd/pull/11731). - - https://github.com/etcd-io/etcd/issues/11495 - - https://github.com/etcd-io/etcd/issues/11730 - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.5](https://github.com/etcd-io/etcd/releases/tag/v3.4.5) (2020-03-18) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.4...v3.4.5) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).** - -### etcd server - -- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704). - -### client v3 - -- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687). - - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys. - -### etcdctl v3 - -- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11638) command to prevent potential timeout. - -### Metrics, Monitoring - -See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. - -- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687). - -### gRPC Proxy - -- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler. - -### Go - -- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.4](https://github.com/etcd-io/etcd/releases/tag/v3.4.4) (2020-02-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.3...v3.4.4) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).** - -### etcd server - -- Fix [`wait purge file loop during shutdown`](https://github.com/etcd-io/etcd/pull/11308). - - Previously, during shutdown etcd could accidentally remove needed wal files, resulting in catastrophic error `etcdserver: open wal error: wal: file not found.` during startup. - - Now, etcd makes sure the purge file loop exits before server signals stop of the raft node. -- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613). -- Fix [quorum protection logic when promoting a learner](https://github.com/etcd-io/etcd/pull/11640). -- Improve [peer corruption checker](https://github.com/etcd-io/etcd/pull/11621) to work when peer mTLS is enabled. - -### Metrics, Monitoring - -See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric. -- Fix bug where [etcd_debugging_mvcc_db_compaction_keys_total is always 0](https://github.com/etcd-io/etcd/pull/11400). - -### Auth - -- Fix [NoPassword check when adding user through GRPC gateway](https://github.com/etcd-io/etcd/pull/11418) ([issue#11414](https://github.com/etcd-io/etcd/issues/11414)) -- Fix bug where [some auth related messages are logged at wrong level](https://github.com/etcd-io/etcd/pull/11586) - - -
- - -## [v3.4.3](https://github.com/etcd-io/etcd/releases/tag/v3.4.3) (2019-10-24) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.2...v3.4.3) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).** - -### Metrics, Monitoring - -See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Change [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11254) Prometheus metrics to include only major and minor version. - -### Go - -- Compile with [*Go 1.12.12*](https://golang.org/doc/devel/release.html#go1.12). - - -
- - -## [v3.4.2](https://github.com/etcd-io/etcd/releases/tag/v3.4.2) (2019-10-11) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.1...v3.4.2) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).** - -### etcdctl v3 - -- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11194) command to prevent potential timeout. - -### etcdserver - -- Add [`tracing`](https://github.com/etcd-io/etcd/pull/11179) to range, put and compact requests in etcdserver. - -### Go - -- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. - -### client v3 - -- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184). - - Fix ["kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch" (kubernetes#83028)](https://github.com/kubernetes/kubernetes/issues/83028). -- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211). - - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550). - - -
- - -## [v3.4.1](https://github.com/etcd-io/etcd/releases/tag/v3.4.1) (2019-09-17) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.4.1) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).** - -### Metrics, Monitoring - -See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. -- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. - -### etcd server - -- Fix [secure server logging message](https://github.com/etcd-io/etcd/commit/8b053b0f44c14ac0d9f39b9b78c17c57d47966eb). -- Remove [redundant `%` characters in file descriptor warning message](https://github.com/etcd-io/etcd/commit/d5f79adc9cea9ec8c93669526464b0aa19ed417b). - -### Package `embed` - -- Add [`embed.Config.ZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/11148) to allow creating a custom zap logger. - -### Dependency - -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0) to [**`v1.23.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.1). - -### Go - -- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. - - -
- - -## v3.4.0 (2019-08-30) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0...v3.4.0) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes. - -- [v3.4.0](https://github.com/etcd-io/etcd/releases/tag/v3.4.0) (2019-08-30), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.4...v3.4.0). -- [v3.4.0-rc.4](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.4) (2019-08-29), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.3...v3.4.0-rc.4). -- [v3.4.0-rc.3](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.3) (2019-08-27), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.2...v3.4.0-rc.3). -- [v3.4.0-rc.2](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.2) (2019-08-23), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.1...v3.4.0-rc.2). -- [v3.4.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.1) (2019-08-15), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.0...v3.4.0-rc.1). -- [v3.4.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.0) (2019-08-12), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0...v3.4.0-rc.0). - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).** - -### Documentation - -- etcd now has a new website! Please visit https://etcd.io. - -### Improved - -- Add Raft learner: [etcd#10725](https://github.com/etcd-io/etcd/pull/10725), [etcd#10727](https://github.com/etcd-io/etcd/pull/10727), [etcd#10730](https://github.com/etcd-io/etcd/pull/10730). - - User guide: [runtime-configuration document](https://etcd.io/docs/latest/op-guide/runtime-configuration/#add-a-new-member-as-learner). - - API change: [API reference document](https://etcd.io/docs/latest/dev-guide/api_reference_v3/). 
- - More details on implementation: [learner design document](https://etcd.io/docs/latest/learning/design-learner/) and [implementation task list](https://github.com/etcd-io/etcd/issues/10537). -- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106). - - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911). - - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911). - - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102). - - Fix [gRPC panic "send on closed channel](https://github.com/etcd-io/etcd/issues/9956). - - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. To block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`. -- Add [backoff on watch retries on transient errors](https://github.com/etcd-io/etcd/pull/9840). -- Add [jitter to watch progress notify](https://github.com/etcd-io/etcd/pull/9278) to prevent [spikes in `etcd_network_client_grpc_sent_bytes_total`](https://github.com/etcd-io/etcd/issues/9246). -- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that local node might have slow network. -- Improve [slow request apply warning log](https://github.com/etcd-io/etcd/pull/9288). - - e.g. `read-only range request "key:\"/a\" range_end:\"/b\" " with result "range_response_count:3 size:96" took too long (97.966µs) to execute`. - - Redact [request value field](https://github.com/etcd-io/etcd/pull/9822). - - Provide [response size](https://github.com/etcd-io/etcd/pull/9826). 
-- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates message send to a peer failed. -- Improve [TLS setup error logging](https://github.com/etcd-io/etcd/pull/9518) to help debug [TLS-enabled cluster configuring issues](https://github.com/etcd-io/etcd/issues/9400). -- Improve [long-running concurrent read transactions under light write workloads](https://github.com/etcd-io/etcd/pull/9296). - - Previously, periodic commit on pending writes blocks incoming read transactions, even if there is no pending write. - - Now, periodic commit operation does not block concurrent read transactions, thus improves long-running read transaction performance. -- Make [backend read transactions fully concurrent](https://github.com/etcd-io/etcd/pull/10523). - - Previously, ongoing long-running read transactions block writes and future reads. - - With this change, write throughput is increased by 70% and P99 write latency is reduced by 90% in the presence of long-running reads. -- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897). -- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333). - - Previously, etcd fast-forwards election ticks on server start, with only one tick left for leader election. This is to speed up start phase, without having to wait until all election ticks elapse. Advancing election ticks is useful for cross datacenter deployments with larger election timeouts. However, it was affecting cluster availability if the last tick elapses before leader contacts the restarted node. - - Now, when etcd restarts, it adjusts election ticks with more than one tick left, thus more time for leader to prevent disruptive restart. 
- Add [Raft Pre-Vote feature](https://github.com/etcd-io/etcd/pull/9352) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333).
  - For instance, a flaky(or rejoining) member may drop in and out, and start campaign. This member will end up with a higher term, and ignore all incoming messages with lower term. In this case, a new leader eventually need to get elected, thus disruptive to cluster availability. Raft implements Pre-Vote phase to prevent this kind of disruptions. If enabled, Raft runs an additional phase of election to check if pre-candidate can get enough votes to win an election.
- Adjust [periodic compaction retention window](https://github.com/etcd-io/etcd/pull/9485).
  - e.g. `etcd --auto-compaction-mode=revision --auto-compaction-retention=1000` automatically `Compact` on `"latest revision" - 1000` every 5-minute (when latest revision is 30000, compact on revision 29000).
  - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h` automatically `Compact` with 24-hour retention window for every 2.4-hour. Now, `Compact` happens for every 1-hour.
  - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` automatically `Compact` with 30-minute retention window for every 3-minute. Now, `Compact` happens for every 30-minute.
  - Periodic compactor keeps recording latest revisions for every compaction period when given period is less than 1-hour, or for every 1-hour when given compaction period is greater than 1-hour (e.g. 1-hour when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`).
  - For every compaction period or 1-hour, compactor uses the last revision that was fetched before compaction period, to discard historical data.
  - The retention window of compaction period moves for every given compaction period or hour.
- - For instance, when hourly writes are 100 and `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`, `v3.2.x`, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 2400, 2640, and 2880 for every 2.4-hour, while `v3.3.3` *or later* compacts revision 2400, 2500, 2600 for every 1-hour. - - Furthermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 30000, 33000, and 36000, for every 3-minute, while `v3.3.3` *or later* compacts revision 30000, 60000, and 90000, for every 30-minute. -- Improve [lease expire/revoke operation performance](https://github.com/etcd-io/etcd/pull/9418), address [lease scalability issue](https://github.com/etcd-io/etcd/issues/9496). -- Make [Lease `Lookup` non-blocking with concurrent `Grant`/`Revoke`](https://github.com/etcd-io/etcd/pull/9229). -- Make etcd server return `raft.ErrProposalDropped` on internal Raft proposal drop in [v3 applier](https://github.com/etcd-io/etcd/pull/9549) and [v2 applier](https://github.com/etcd-io/etcd/pull/9558). - - e.g. a node is removed from cluster, or [`raftpb.MsgProp` arrives at current leader while there is an ongoing leadership transfer](https://github.com/etcd-io/etcd/issues/8975). -- Add [`snapshot`](https://github.com/etcd-io/etcd/pull/9118) package for easier snapshot workflow (see [`godoc.org/github.com/etcd/clientv3/snapshot`](https://godoc.org/github.com/etcd-io/etcd/clientv3/snapshot) for more). 
-- Improve [functional tester](https://github.com/etcd-io/etcd/tree/main/functional) coverage: [proxy layer to run network fault tests in CI](https://github.com/etcd-io/etcd/pull/9081), [TLS is enabled both for server and client](https://github.com/etcd-io/etcd/pull/9534), [liveness mode](https://github.com/etcd-io/etcd/issues/9230), [shuffle test sequence](https://github.com/etcd-io/etcd/issues/9381), [membership reconfiguration failure cases](https://github.com/etcd-io/etcd/pull/9564), [disastrous quorum loss and snapshot recover from a seed member](https://github.com/etcd-io/etcd/pull/9565), [embedded etcd](https://github.com/etcd-io/etcd/pull/9572). -- Improve [index compaction blocking](https://github.com/etcd-io/etcd/pull/9511) by using a copy on write clone to avoid holding the lock for the traversal of the entire index. -- Update [JWT methods](https://github.com/etcd-io/etcd/pull/9883) to allow for use of any supported signature method/algorithm. -- Add [Lease checkpointing](https://github.com/etcd-io/etcd/pull/9924) to persist remaining TTLs to the consensus log periodically so that long lived leases progress toward expiry in the presence of leader elections and server restarts. - - Enabled by experimental flag "--experimental-enable-lease-checkpoint". -- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information. -- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error. -- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603). -- Improve [heartbeat send failure logging](https://github.com/etcd-io/etcd/pull/10663). -- Support [users with no password](https://github.com/etcd-io/etcd/pull/9817) for reducing security risk introduced by leaked password. 
The users can only be authenticated with `CommonName` based auth. -- Add `etcd --experimental-peer-skip-client-san-verification` to [skip verification of peer client address](https://github.com/etcd-io/etcd/pull/10524). -- Add `etcd --experimental-compaction-batch-limit` to [sets the maximum revisions deleted in each compaction batch](https://github.com/etcd-io/etcd/pull/11034). -- Reduced default compaction batch size from 10k revisions to 1k revisions to improve p99 latency during compactions and reduced wait between compactions from 100ms to 10ms. - -### Breaking Changes - -- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106). - - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911). - - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911). - - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102). - - Fix [gRPC panic "send on closed channel](https://github.com/etcd-io/etcd/issues/9956). - - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. To block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`. -- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045). - - Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. -- Migrate dependency management tool from `glide` to [Go module](https://github.com/etcd-io/etcd/pull/10063). - - <= 3.3 puts `vendor` directory under `cmd/vendor` directory to [prevent conflicting transitive dependencies](https://github.com/etcd-io/etcd/issues/4913). 
- - 3.4 moves `cmd/vendor` directory to `vendor` at repository root. - - Remove recursive symlinks in `cmd` directory. - - Now `go get/install/build` on `etcd` packages (e.g. `clientv3`, `tools/benchmark`) enforce builds with etcd `vendor` directory. -- Deprecated `latest` [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tag. - - **`docker pull gcr.io/etcd-development/etcd:latest` would not be up-to-date**. -- Deprecated [minor](https://semver.org/) version [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tags. - - `docker pull gcr.io/etcd-development/etcd:v3.3` would still work. - - **`docker pull gcr.io/etcd-development/etcd:v3.4` would not work**. - - Use **`docker pull gcr.io/etcd-development/etcd:v3.4.x`** instead, with the exact patch version. -- Deprecated [ACIs from official release](https://github.com/etcd-io/etcd/pull/9059). - - [AppC was officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016. - - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore. - - `*.aci` files are not available from `v3.4` release. -- Move [`"github.com/coreos/etcd"`](https://github.com/etcd-io/etcd/issues/9965) to [`"github.com/etcd-io/etcd"`](https://github.com/etcd-io/etcd/issues/9965). - - Change import path to `"go.etcd.io/etcd"`. - - e.g. `import "go.etcd.io/etcd/raft"`. -- Make [`ETCDCTL_API=3 etcdctl` default](https://github.com/etcd-io/etcd/issues/9600). - - Now, `etcdctl set foo bar` must be `ETCDCTL_API=2 etcdctl set foo bar`. - - Now, `ETCDCTL_API=3 etcdctl put foo bar` could be just `etcdctl put foo bar`. -- Make [`etcd --enable-v2=false` default](https://github.com/etcd-io/etcd/pull/10935). -- Make [`embed.DefaultEnableV2` `false` default](https://github.com/etcd-io/etcd/pull/10935). -- **Deprecated `etcd --ca-file` flag**. 
Use [`etcd --trusted-ca-file`](https://github.com/etcd-io/etcd/pull/9470) instead (`etcd --ca-file` flag has been marked deprecated since v2.1). -- **Deprecated `etcd --peer-ca-file` flag**. Use [`etcd --peer-trusted-ca-file`](https://github.com/etcd-io/etcd/pull/9470) instead (`etcd --peer-ca-file` flag has been marked deprecated since v2.1). -- **Deprecated `pkg/transport.TLSInfo.CAFile` field**. Use [`pkg/transport.TLSInfo.TrustedCAFile`](https://github.com/etcd-io/etcd/pull/9470) instead (`CAFile` field has been marked deprecated since v2.1). -- Exit on [empty hosts in advertise URLs](https://github.com/etcd-io/etcd/pull/8786). - - Address [advertise client URLs accepts empty hosts](https://github.com/etcd-io/etcd/issues/8379). - - e.g. exit with error on `--advertise-client-urls=http://:2379`. - - e.g. exit with error on `--initial-advertise-peer-urls=http://:2380`. -- Exit on [shadowed environment variables](https://github.com/etcd-io/etcd/pull/9382). - - Address [error on shadowed environment variables](https://github.com/etcd-io/etcd/issues/8380). - - e.g. exit with error on `ETCD_NAME=abc etcd --name=def`. - - e.g. exit with error on `ETCD_INITIAL_CLUSTER_TOKEN=abc etcd --initial-cluster-token=def`. - - e.g. exit with error on `ETCDCTL_ENDPOINTS=abc.com ETCDCTL_API=3 etcdctl endpoint health --endpoints=def.com`. -- Change [`etcdserverpb.AuthRoleRevokePermissionRequest/key,range_end` fields type from `string` to `bytes`](https://github.com/etcd-io/etcd/pull/9433). -- Deprecating `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) instead. -- Deprecating `etcd_debugging_mvcc_put_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_put_total`](https://github.com/etcd-io/etcd/pull/10962) instead. -- Deprecating `etcd_debugging_mvcc_delete_total` Prometheus metric (to be removed in v3.5). 
Use [`etcd_mvcc_delete_total`](https://github.com/etcd-io/etcd/pull/10962) instead.
- Deprecating `etcd_debugging_mvcc_range_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_range_total`](https://github.com/etcd-io/etcd/pull/10968) instead.
- Deprecating `etcd_debugging_mvcc_txn_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_txn_total`](https://github.com/etcd-io/etcd/pull/10968) instead.
- Rename `etcdserver.ServerConfig.SnapCount` field to `etcdserver.ServerConfig.SnapshotCount`, to be consistent with the flag name `etcd --snapshot-count`.
- Rename `embed.Config.SnapCount` field to [`embed.Config.SnapshotCount`](https://github.com/etcd-io/etcd/pull/9745), to be consistent with the flag name `etcd --snapshot-count`.
- Change [`embed.Config.CorsInfo` in `*cors.CORSInfo` type to `embed.Config.CORS` in `map[string]struct{}` type](https://github.com/etcd-io/etcd/pull/9490).
- Deprecated [`embed.Config.SetupLogging`](https://github.com/etcd-io/etcd/pull/9572).
  - Now logger is set up automatically based on [`embed.Config.Logger`, `embed.Config.LogOutputs`, `embed.Config.Debug` fields](https://github.com/etcd-io/etcd/pull/9572).
- Rename [`etcd --log-output` to `etcd --log-outputs`](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs.
  - **`etcd --log-output`** will be deprecated in v3.5.
- Rename [**`embed.Config.LogOutput`** to **`embed.Config.LogOutputs`**](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs.
- Change [**`embed.Config.LogOutputs`** type from `string` to `[]string`](https://github.com/etcd-io/etcd/pull/9579) to support multiple log outputs.
  - Now that `etcd --log-outputs` accepts multiple writers, etcd configuration YAML file `log-outputs` field must be changed to `[]string` type.
  - Previously, `etcd --config-file etcd.config.yaml` can have `log-outputs: default` field, now must be `log-outputs: [default]`.
-- Deprecating [`etcd --debug`](https://github.com/etcd-io/etcd/pull/10947) flag. Use `etcd --log-level=debug` flag instead. - - v3.5 will deprecate `etcd --debug` flag in favor of `etcd --log-level=debug`. -- Change v3 `etcdctl snapshot` exit codes with [`snapshot` package](https://github.com/etcd-io/etcd/pull/9118/commits/df689f4280e1cce4b9d61300be13ca604d41670a). - - Exit on error with exit code 1 (no more exit code 5 or 6 on `snapshot save/restore` commands). -- Deprecated [`grpc.ErrClientConnClosing`](https://github.com/etcd-io/etcd/pull/10981). - - `clientv3` and `proxy/grpcproxy` now does not return `grpc.ErrClientConnClosing`. - - `grpc.ErrClientConnClosing` has been [deprecated in gRPC >= 1.10](https://github.com/grpc/grpc-go/pull/1854). - - Use `clientv3.IsConnCanceled(error)` or `google.golang.org/grpc/status.FromError(error)` instead. -- Deprecated [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3beta` with [`/v3`](https://github.com/etcd-io/etcd/pull/9298). - - Deprecated [`/v3alpha`](https://github.com/etcd-io/etcd/pull/9298). - - To deprecate [`/v3beta`](https://github.com/etcd-io/etcd/issues/9189) in v3.5. - - In v3.4, `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback to `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'`, but `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` won't work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead. -- Change [`wal` package function signatures](https://github.com/etcd-io/etcd/pull/9572) to support [structured logger and logging to file](https://github.com/etcd-io/etcd/issues/9438) in server-side. - - Previously, `Open(dirpath string, snap walpb.Snapshot) (*WAL, error)`, now `Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error)`. 
- - Previously, `OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error)`, now `OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error)`. - - Previously, `Repair(dirpath string) bool`, now `Repair(lg *zap.Logger, dirpath string) bool`. - - Previously, `Create(dirpath string, metadata []byte) (*WAL, error)`, now `Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error)`. -- Remove [`pkg/cors` package](https://github.com/etcd-io/etcd/pull/9490). -- Move internal packages to `etcdserver`. - - `"github.com/coreos/etcd/alarm"` to `"go.etcd.io/etcd/etcdserver/api/v3alarm"`. - - `"github.com/coreos/etcd/compactor"` to `"go.etcd.io/etcd/etcdserver/api/v3compactor"`. - - `"github.com/coreos/etcd/discovery"` to `"go.etcd.io/etcd/etcdserver/api/v2discovery"`. - - `"github.com/coreos/etcd/etcdserver/auth"` to `"go.etcd.io/etcd/etcdserver/api/v2auth"`. - - `"github.com/coreos/etcd/etcdserver/membership"` to `"go.etcd.io/etcd/etcdserver/api/membership"`. - - `"github.com/coreos/etcd/etcdserver/stats"` to `"go.etcd.io/etcd/etcdserver/api/v2stats"`. - - `"github.com/coreos/etcd/error"` to `"go.etcd.io/etcd/etcdserver/api/v2error"`. - - `"github.com/coreos/etcd/rafthttp"` to `"go.etcd.io/etcd/etcdserver/api/rafthttp"`. - - `"github.com/coreos/etcd/snap"` to `"go.etcd.io/etcd/etcdserver/api/snap"`. - - `"github.com/coreos/etcd/store"` to `"go.etcd.io/etcd/etcdserver/api/v2store"`. -- Change [snapshot file permissions](https://github.com/etcd-io/etcd/pull/9977): On Linux, the snapshot file changes from readable by all (mode 0644) to readable by the user only (mode 0600). -- Change [`pkg/adt.IntervalTree` from `struct` to `interface`](https://github.com/etcd-io/etcd/pull/10959). - - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt). -- Release branch `/version` defines version `3.4.x-pre`, instead of `3.4.y+git`. 
- - Use `3.4.5-pre`, instead of `3.4.4+git`. - -### Dependency - -- Upgrade [`github.com/coreos/bbolt`](https://github.com/etcd-io/bbolt/releases) from [**`v1.3.1-coreos.6`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.1-coreos.6) to [`go.etcd.io/bbolt`](https://github.com/etcd-io/bbolt/releases) [**`v1.3.3`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.3). -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5) to [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0). -- Migrate [`github.com/ugorji/go/codec`](https://github.com/ugorji/go/releases) to [**`github.com/json-iterator/go`**](https://github.com/json-iterator/go), to [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/9494) (See [#10667](https://github.com/etcd-io/etcd/pull/10667) for more). -- Migrate [`github.com/ghodss/yaml`](https://github.com/ghodss/yaml/releases) to [**`sigs.k8s.io/yaml`**](https://github.com/kubernetes-sigs/yaml) (See [#10687](https://github.com/etcd-io/etcd/pull/10687) for more). -- Upgrade [`golang.org/x/crypto`](https://github.com/golang/crypto) from [**`crypto@9419663f5`**](https://github.com/golang/crypto/commit/9419663f5a44be8b34ca85f08abc5fe1be11f8a3) to [**`crypto@0709b304e793`**](https://github.com/golang/crypto/commit/0709b304e793a5edb4a2c0145f281ecdc20838a4). -- Upgrade [`golang.org/x/net`](https://github.com/golang/net) from [**`net@66aacef3d`**](https://github.com/golang/net/commit/66aacef3dd8a676686c7ae3716979581e8b03c47) to [**`net@adae6a3d119a`**](https://github.com/golang/net/commit/adae6a3d119ae4890b46832a2e88a95adc62b8e7). -- Upgrade [`golang.org/x/sys`](https://github.com/golang/sys) from [**`sys@ebfc5b463`**](https://github.com/golang/sys/commit/ebfc5b4631820b793c9010c87fd8fef0f39eb082) to [**`sys@c7b8b68b1456`**](https://github.com/golang/sys/commit/c7b8b68b14567162c6602a7c5659ee0f26417c18). 
-- Upgrade [`golang.org/x/text`](https://github.com/golang/text) from [**`text@b19bf474d`**](https://github.com/golang/text/commit/b19bf474d317b857955b12035d2c5acb57ce8b01) to [**`v0.3.0`**](https://github.com/golang/text/releases/tag/v0.3.0). -- Upgrade [`golang.org/x/time`](https://github.com/golang/time) from [**`time@c06e80d93`**](https://github.com/golang/time/commit/c06e80d9300e4443158a03817b8a8cb37d230320) to [**`time@fbb02b229`**](https://github.com/golang/time/commit/fbb02b2291d28baffd63558aa44b4b56f178d650). -- Upgrade [`github.com/golang/protobuf`](https://github.com/golang/protobuf/releases) from [**`golang/protobuf@1e59b77b5`**](https://github.com/golang/protobuf/commit/1e59b77b52bf8e4b449a57e6f79f21226d571845) to [**`v1.3.2`**](https://github.com/golang/protobuf/releases/tag/v1.3.2). -- Upgrade [`gopkg.in/yaml.v2`](https://github.com/go-yaml/yaml/releases) from [**`yaml@cd8b52f82`**](https://github.com/go-yaml/yaml/commit/cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b) to [**`yaml@5420a8b67`**](https://github.com/go-yaml/yaml/commit/5420a8b6744d3b0345ab293f6fcba19c978f1183). -- Upgrade [`github.com/dgrijalva/jwt-go`](https://github.com/dgrijalva/jwt-go/releases) from [**`v3.0.0`**](https://github.com/dgrijalva/jwt-go/releases/tag/v3.0.0) to [**`v3.2.0`**](https://github.com/dgrijalva/jwt-go/releases/tag/v3.2.0). -- Upgrade [`github.com/soheilhy/cmux`](https://github.com/soheilhy/cmux/releases) from [**`v0.1.3`**](https://github.com/soheilhy/cmux/releases/tag/v0.1.3) to [**`v0.1.4`**](https://github.com/soheilhy/cmux/releases/tag/v0.1.4). -- Upgrade [`github.com/google/btree`](https://github.com/google/btree/releases) from [**`google/btree@925471ac9`**](https://github.com/google/btree/commit/925471ac9e2131377a91e1595defec898166fe49) to [**`v1.0.0`**](https://github.com/google/btree/releases/tag/v1.0.0). 
-- Upgrade [`github.com/spf13/cobra`](https://github.com/spf13/cobra/releases) from [**`spf13/cobra@1c44ec8d3`**](https://github.com/spf13/cobra/commit/1c44ec8d3f1552cac48999f9306da23c4d8a288b) to [**`v0.0.3`**](https://github.com/spf13/cobra/releases/tag/v0.0.3). -- Upgrade [`github.com/spf13/pflag`](https://github.com/spf13/pflag/releases) from [**`v1.0.0`**](https://github.com/spf13/pflag/releases/tag/v1.0.0) to [**`spf13/pflag@1ce0cc6db`**](https://github.com/spf13/pflag/commit/1ce0cc6db4029d97571db82f85092fccedb572ce). -- Upgrade [`github.com/coreos/go-systemd`](https://github.com/coreos/go-systemd/releases) from [**`v15`**](https://github.com/coreos/go-systemd/releases/tag/v15) to [**`v17`**](https://github.com/coreos/go-systemd/releases/tag/v17). -- Upgrade [`github.com/prometheus/client_golang`](https://github.com/prometheus/client_golang/releases) from [**``prometheus/client_golang@5cec1d042``**](https://github.com/prometheus/client_golang/commit/5cec1d0429b02e4323e042eb04dafdb079ddf568) to [**`v1.0.0`**](https://github.com/prometheus/client_golang/releases/tag/v1.0.0). -- Upgrade [`github.com/grpc-ecosystem/go-grpc-prometheus`](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases) from [**``grpc-ecosystem/go-grpc-prometheus@0dafe0d49``**](https://github.com/grpc-ecosystem/go-grpc-prometheus/commit/0dafe0d496ea71181bf2dd039e7e3f44b6bd11a7) to [**`v1.2.0`**](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0). -- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.3.1`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.1) to [**`v1.4.1`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.4.1). -- Migrate [`github.com/kr/pty`](https://github.com/kr/pty/releases) to [**`github.com/creack/pty`**](https://github.com/creack/pty/releases/tag/v1.1.7), as the later has replaced the original module. 
-- Upgrade [`github.com/gogo/protobuf`](https://github.com/gogo/protobuf/releases) from [**`v1.0.0`**](https://github.com/gogo/protobuf/releases/tag/v1.0.0) to [**`v1.2.1`**](https://github.com/gogo/protobuf/releases/tag/v1.2.1). - -### Metrics, Monitoring - -See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. - -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric. -- Add [`etcd_network_active_peers`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric. - - Let's say `"7339c4e5e833c029"` server `/metrics` returns `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="729934363faa4a24"} 1` and `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="b548c2511513015"} 1`. This indicates that the local node `"7339c4e5e833c029"` currently has two active remote peers `"729934363faa4a24"` and `"b548c2511513015"` in a 3-node cluster. 
If the node `"b548c2511513015"` is down, the local node `"7339c4e5e833c029"` will show `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="729934363faa4a24"} 1` and `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="b548c2511513015"} 0`. -- Add [`etcd_network_disconnected_peers_total`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric. - - If a remote peer `"b548c2511513015"` is down, the local node `"7339c4e5e833c029"` server `/metrics` would return `etcd_network_disconnected_peers_total{Local="7339c4e5e833c029",Remote="b548c2511513015"} 1`, while active peer metrics will show `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="729934363faa4a24"} 1` and `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="b548c2511513015"} 0`. -- Add [`etcd_network_server_stream_failures_total`](https://github.com/etcd-io/etcd/pull/9760) Prometheus metric. - - e.g. `etcd_network_server_stream_failures_total{API="lease-keepalive",Type="receive"} 1` - - e.g. `etcd_network_server_stream_failures_total{API="watch",Type="receive"} 1` -- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats. - - Previously, it only samples the TCP connection for snapshot messages. -- Increase [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric histogram upper-bound. - - Previously, highest bucket only collects requests taking 0.8192 seconds or more. - - Now, highest buckets collect 0.8192 seconds, 1.6384 seconds, and 3.2768 seconds or more. -- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric. -- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric. -- Add [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/10257) Prometheus metric. -- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric. 
- - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948). -- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric. -- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric. -- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric. -- Add [`etcd_server_heartbeat_send_failures_total`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric. -- Add [`etcd_server_slow_apply_total`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric. -- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric. -- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric. - - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`. - - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB. - - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB. - - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete. - - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation. -- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric. - - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`. - - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB. - - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB. - - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete. 
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation. -- Add [`etcd_mvcc_db_open_read_transactions`](https://github.com/etcd-io/etcd/pull/10523/commits/ad80752715aaed449629369687c5fd30eb1bda76) Prometheus metric. -- Add [`etcd_snap_fsync_duration_seconds`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric. -- Add [`etcd_disk_backend_defrag_duration_seconds`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric. -- Add [`etcd_mvcc_hash_duration_seconds`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric. -- Add [`etcd_mvcc_hash_rev_duration_seconds`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric. -- Add [`etcd_debugging_disk_backend_commit_rebalance_duration_seconds`](https://github.com/etcd-io/etcd/pull/9834) Prometheus metric. -- Add [`etcd_debugging_disk_backend_commit_spill_duration_seconds`](https://github.com/etcd-io/etcd/pull/9834) Prometheus metric. -- Add [`etcd_debugging_disk_backend_commit_write_duration_seconds`](https://github.com/etcd-io/etcd/pull/9834) Prometheus metric. -- Add [`etcd_debugging_lease_granted_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric. -- Add [`etcd_debugging_lease_revoked_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric. -- Add [`etcd_debugging_lease_renewed_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric. -- Add [`etcd_debugging_lease_ttl_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric. -- Add [`etcd_network_snapshot_send_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric. -- Add [`etcd_network_snapshot_receive_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric. -- Add [`etcd_server_snapshot_apply_in_progress_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric. 
-- Add [`etcd_server_is_learner`](https://github.com/etcd-io/etcd/pull/10731) Prometheus metric. -- Add [`etcd_server_learner_promote_failures`](https://github.com/etcd-io/etcd/pull/10731) Prometheus metric. -- Add [`etcd_server_learner_promote_successes`](https://github.com/etcd-io/etcd/pull/10731) Prometheus metric. -- Increase [`etcd_debugging_mvcc_index_compaction_pause_duration_milliseconds`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric histogram upper-bound. - - Previously, highest bucket only collects requests taking 1.024 seconds or more. - - Now, highest buckets collect 1.024 seconds, 2.048 seconds, and 4.096 seconds or more. -- Fix missing [`etcd_network_peer_sent_failures_total`](https://github.com/etcd-io/etcd/pull/9437) Prometheus metric count. -- Fix [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/9557) Prometheus metric. -- Fix [race conditions in v2 server stat collecting](https://github.com/etcd-io/etcd/pull/9562). -- Change [gRPC proxy to expose etcd server endpoint /metrics](https://github.com/etcd-io/etcd/pull/10618). - - The metrics that were exposed via the proxy were not etcd server members but instead the proxy itself. -- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646). -- Deprecating `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) instead. -- Deprecating `etcd_debugging_mvcc_put_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_put_total`](https://github.com/etcd-io/etcd/pull/10962) instead. -- Deprecating `etcd_debugging_mvcc_delete_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_delete_total`](https://github.com/etcd-io/etcd/pull/10962) instead. -- Deprecating `etcd_debugging_mvcc_range_total` Prometheus metric (to be removed in v3.5). 
Use [`etcd_mvcc_range_total`](https://github.com/etcd-io/etcd/pull/10968) instead.
- Deprecating `etcd_debugging_mvcc_txn_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_txn_total`](https://github.com/etcd-io/etcd/pull/10968) instead.

### Security, Authentication

See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.

- Support TLS cipher suite whitelisting.
  - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320).
  - TLS handshake fails when client hello is requested with invalid cipher suites.
  - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag.
  - If empty, Go auto-populates the list.
- Add [`etcd --host-whitelist`](https://github.com/etcd-io/etcd/pull/9372) flag, [`etcdserver.Config.HostWhitelist`](https://github.com/etcd-io/etcd/pull/9372), and [`embed.Config.HostWhitelist`](https://github.com/etcd-io/etcd/pull/9372), to prevent ["DNS Rebinding"](https://en.wikipedia.org/wiki/DNS_rebinding) attack.
  - Any website can simply create an authorized DNS name, and direct DNS to `"localhost"` (or any other address). Then, all HTTP endpoints of etcd server listening on `"localhost"` become accessible, thus vulnerable to [DNS rebinding attacks (CVE-2018-5702)](https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2).
  - Client origin enforce policy works as follows:
    - If client connection is secure via HTTPS, allow any hostnames.
    - If client connection is not secure and `"HostWhitelist"` is not empty, only allow HTTP requests whose Host field is listed in whitelist.
  - By default, `"HostWhitelist"` is `"*"`, which means insecure server allows all client HTTP requests.
  - Note that the client origin policy is enforced whether authentication is enabled or not, for tighter controls.
  - When specifying hostnames, loopback addresses are not added automatically. To allow loopback interfaces, add them to whitelist manually (e.g.
`"localhost"`, `"127.0.0.1"`, etc.). - - e.g. `etcd --host-whitelist example.com`, then the server will reject all HTTP requests whose Host field is not `example.com` (also rejects requests to `"localhost"`). -- Support [`etcd --cors`](https://github.com/etcd-io/etcd/pull/9490) in v3 HTTP requests (gRPC gateway). -- Support [`ttl` field for `etcd` Authentication JWT token](https://github.com/etcd-io/etcd/pull/8302). - - e.g. `etcd --auth-token jwt,pub-key=,priv-key=,sign-method=,ttl=5m`. -- Allow empty token provider in [`etcdserver.ServerConfig.AuthToken`](https://github.com/etcd-io/etcd/pull/9369). -- Fix [TLS reload](https://github.com/etcd-io/etcd/pull/9570) when [certificate SAN field only includes IP addresses but no domain names](https://github.com/etcd-io/etcd/issues/9541). - - In Go, server calls `(*tls.Config).GetCertificate` for TLS reload if and only if server's `(*tls.Config).Certificates` field is not empty, or `(*tls.ClientHelloInfo).ServerName` is not empty with a valid SNI from the client. Previously, etcd always populates `(*tls.Config).Certificates` on the initial client TLS handshake, as non-empty. Thus, client was always expected to supply a matching SNI in order to pass the TLS verification and to trigger `(*tls.Config).GetCertificate` to reload TLS assets. - - However, a certificate whose SAN field does [not include any domain names but only IP addresses](https://github.com/etcd-io/etcd/issues/9541) would request `*tls.ClientHelloInfo` with an empty `ServerName` field, thus failing to trigger the TLS reload on initial TLS handshake; this becomes a problem when expired certificates need to be replaced online. - - Now, `(*tls.Config).Certificates` is created empty on initial TLS client handshake, first to trigger `(*tls.Config).GetCertificate`, and then to populate rest of the certificates on every new TLS connection, even when client SNI is empty (e.g. cert only includes IPs). 
- -### etcd server - -- Add [`rpctypes.ErrLeaderChanged`](https://github.com/etcd-io/etcd/pull/10094). - - Now linearizable requests with read index would fail fast when there is a leadership change, instead of waiting until context timeout. -- Add [`etcd --initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward. - - By default, `etcd --initial-election-tick-advance=true`, then local member fast-forwards election ticks to speed up "initial" leader election trigger. - - This benefits the case of larger election ticks. For instance, cross datacenter deployment may require longer election timeout of 10-second. If true, local node does not need wait up to 10-second. Instead, forwards its election ticks to 8-second, and have only 2-second left before leader election. - - Major assumptions are that: cluster has no active leader thus advancing ticks enables faster leader election. Or cluster already has an established leader, and rejoining follower is likely to receive heartbeats from the leader after tick advance and before election timeout. - - However, when network from leader to rejoining follower is congested, and the follower does not receive leader heartbeat within left election ticks, disruptive election has to happen thus affecting cluster availabilities. - - Now, this can be disabled by setting `etcd --initial-election-tick-advance=false`. - - Disabling this would slow down initial bootstrap process for cross datacenter deployments. Make tradeoffs by configuring `etcd --initial-election-tick-advance` at the cost of slow initial bootstrap. - - If single-node, it advances ticks regardless. - - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333). -- Add [`etcd --pre-vote`](https://github.com/etcd-io/etcd/pull/9352) flag to enable to run an additional Raft election phase. - - For instance, a flaky(or rejoining) member may drop in and out, and start campaign. 
This member will end up with a higher term, and ignore all incoming messages with lower term. In this case, a new leader eventually needs to get elected, thus disruptive to cluster availability. Raft implements Pre-Vote phase to prevent this kind of disruption. If enabled, Raft runs an additional phase of election to check if pre-candidate can get enough votes to win an election.
  - `etcd --pre-vote=false` by default.
  - v3.5 will enable `etcd --pre-vote=true` by default.
- Add `etcd --experimental-compaction-batch-limit` to [set the maximum revisions deleted in each compaction batch](https://github.com/etcd-io/etcd/pull/11034).
- Reduced default compaction batch size from 10k revisions to 1k revisions to improve p99 latency during compactions and reduced wait between compactions from 100ms to 10ms.
- Add [`etcd --discovery-srv-name`](https://github.com/etcd-io/etcd/pull/8690) flag to support custom DNS SRV name with discovery.
  - If not given, etcd queries `_etcd-server-ssl._tcp.[YOUR_HOST]` and `_etcd-server._tcp.[YOUR_HOST]`.
  - If `etcd --discovery-srv-name="foo"`, then query `_etcd-server-ssl-foo._tcp.[YOUR_HOST]` and `_etcd-server-foo._tcp.[YOUR_HOST]`.
  - Useful for operating multiple etcd clusters under the same domain.
- Support TLS cipher suite whitelisting.
  - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320).
  - TLS handshake fails when client hello is requested with invalid cipher suites.
  - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag.
  - If empty, Go auto-populates the list.
- Support [`etcd --cors`](https://github.com/etcd-io/etcd/pull/9490) in v3 HTTP requests (gRPC gateway).
- Rename [`etcd --log-output` to `etcd --log-outputs`](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs.
  - **`etcd --log-output` will be deprecated in v3.5**.
-- Add [`etcd --logger`](https://github.com/etcd-io/etcd/pull/9572) flag to support [structured logger and multiple log outputs](https://github.com/etcd-io/etcd/issues/9438) in server-side. - - **`etcd --logger=capnslog` will be deprecated in v3.5**. - - Main motivation is to promote automated etcd monitoring, rather than looking back server logs when it starts breaking. Future development will make etcd log as few as possible, and make etcd easier to monitor with metrics and alerts. - - `etcd --logger=capnslog --log-outputs=default` is the default setting and same as previous etcd server logging format. - - `etcd --logger=zap --log-outputs=default` is not supported when `etcd --logger=zap`. - - Use `etcd --logger=zap --log-outputs=stderr` instead. - - Or, use `etcd --logger=zap --log-outputs=systemd/journal` to send logs to the local systemd journal. - - Previously, if etcd parent process ID (PPID) is 1 (e.g. run with systemd), `etcd --logger=capnslog --log-outputs=default` redirects server logs to local systemd journal. And if write to journald fails, it writes to `os.Stderr` as a fallback. - - However, even with PPID 1, it can fail to dial systemd journal (e.g. run embedded etcd with Docker container). Then, [every single log write will fail](https://github.com/etcd-io/etcd/pull/9729) and fall back to `os.Stderr`, which is inefficient. - - To avoid this problem, systemd journal logging must be configured manually. - - `etcd --logger=zap --log-outputs=stderr` will log server operations in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig) and writes logs to `os.Stderr`. Use this to override journald log redirects. - - `etcd --logger=zap --log-outputs=stdout` will log server operations in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig) and writes logs to `os.Stdout` Use this to override journald log redirects. 
- - `etcd --logger=zap --log-outputs=a.log` will log server operations in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig) and writes logs to the specified file `a.log`. - - `etcd --logger=zap --log-outputs=a.log,b.log,c.log,stdout` [writes server logs to multiple files `a.log`, `b.log` and `c.log` at the same time](https://github.com/etcd-io/etcd/pull/9579) and outputs to `os.Stderr`, in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig). - - `etcd --logger=zap --log-outputs=/dev/null` will discard all server logs. -- Add [`etcd --log-level`](https://github.com/etcd-io/etcd/pull/10947) flag to support log level. - - v3.5 will deprecate `etcd --debug` flag in favor of `etcd --log-level=debug`. -- Add [`etcd --backend-batch-limit`](https://github.com/etcd-io/etcd/pull/10283) flag. -- Add [`etcd --backend-batch-interval`](https://github.com/etcd-io/etcd/pull/10283) flag. -- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9281). - - "unsynced" watcher is watcher that needs to be in sync with events that have happened. - - That is, "unsynced" watcher is the slow watcher that was requested on old revision. - - "unsynced" watcher restore operation was not correctly populating its underlying watcher group. - - Which possibly causes [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086). - - A node gets network partitioned with a watcher on a future revision, and falls behind receiving a leader snapshot after partition gets removed. When applying this snapshot, etcd watch storage moves current synced watchers to unsynced since sync watchers might have become stale during network partition. And reset synced watcher group to restart watcher routines. Previously, there was a bug when moving from synced watcher group to unsynced, thus client would miss events when the watcher was requested to the network-partitioned node. 
-- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775). - - Let's assume that a watcher had been requested with a future revision X and sent to node A that became network-partitioned thereafter. Meanwhile, cluster makes progress. Then when the partition gets removed, the leader sends a snapshot to node A. Previously if the snapshot's latest revision is still lower than the watch revision X, **etcd server panicked** during snapshot restore operation. - - Now, this server-side panic has been fixed. -- Fix [server panic on invalid Election Proclaim/Resign HTTP(S) requests](https://github.com/etcd-io/etcd/pull/9379). - - Previously, wrong-formatted HTTP requests to Election API could trigger panic in etcd server. - - e.g. `curl -L http://localhost:2379/v3/election/proclaim -X POST -d '{"value":""}'`, `curl -L http://localhost:2379/v3/election/resign -X POST -d '{"value":""}'`. -- Fix [revision-based compaction retention parsing](https://github.com/etcd-io/etcd/pull/9339). - - Previously, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` was [translated to revision retention 3600000000000](https://github.com/etcd-io/etcd/issues/9337). - - Now, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` is correctly parsed as revision retention 1. -- Prevent [overflow by large `TTL` values for `Lease` `Grant`](https://github.com/etcd-io/etcd/pull/9399). - - `TTL` parameter to `Grant` request is unit of second. - - Leases with too large `TTL` values exceeding `math.MaxInt64` [expire in unexpected ways](https://github.com/etcd-io/etcd/issues/9374). - - Server now returns `rpctypes.ErrLeaseTTLTooLarge` to client, when the requested `TTL` is larger than *9,000,000,000 seconds* (which is >285 years). - - Again, etcd `Lease` is meant for short-periodic keepalives or sessions, in the range of seconds or minutes. Not for hours or days! -- Fix [expired lease revoke](https://github.com/etcd-io/etcd/pull/10693). 
- - Fix ["the key is not deleted when the bound lease expires"](https://github.com/etcd-io/etcd/issues/10686). -- Enable etcd server [`raft.Config.CheckQuorum` when starting with `ForceNewCluster`](https://github.com/etcd-io/etcd/pull/9347). -- Allow [non-WAL files in `etcd --wal-dir` directory](https://github.com/etcd-io/etcd/pull/9743). - - Previously, existing files such as [`lost+found`](https://github.com/etcd-io/etcd/issues/7287) in WAL directory prevent etcd server boot. - - Now, WAL directory that contains only `lost+found` or a file that's not suffixed with `.wal` is considered non-initialized. -- Fix [`ETCD_CONFIG_FILE` env variable parsing in `etcd`](https://github.com/etcd-io/etcd/pull/10762). -- Fix [race condition in `rafthttp` transport pause/resume](https://github.com/etcd-io/etcd/pull/10826). -- Fix [server crash from creating an empty role](https://github.com/etcd-io/etcd/pull/10907). - - Previously, creating a role with an empty name crashed etcd server with an error code `Unavailable`. - - Now, creating a role with an empty name is not allowed with an error code `InvalidArgument`. - -### API - -- Add `isLearner` field to `etcdserverpb.Member`, `etcdserverpb.MemberAddRequest` and `etcdserverpb.StatusResponse` as part of [raft learner implementation](https://github.com/etcd-io/etcd/pull/10725). -- Add `MemberPromote` rpc to `etcdserverpb.Cluster` interface and the corresponding `MemberPromoteRequest` and `MemberPromoteResponse` as part of [raft learner implementation](https://github.com/etcd-io/etcd/pull/10725). -- Add [`snapshot`](https://github.com/etcd-io/etcd/pull/9118) package for snapshot restore/save operations (see [`godoc.org/github.com/etcd/clientv3/snapshot`](https://godoc.org/github.com/coreos/etcd/clientv3/snapshot) for more). -- Add [`watch_id` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9065) to allow user-provided watch ID to `mvcc`. 
- - Corresponding `watch_id` is returned via `etcdserverpb.WatchResponse`, if any. -- Add [`fragment` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9291) to request etcd server to [split watch events](https://github.com/etcd-io/etcd/issues/9294) when the total size of events exceeds `etcd --max-request-bytes` flag value plus gRPC-overhead 512 bytes. - - The default server-side request bytes limit is `embed.DefaultMaxRequestBytes` which is 1.5 MiB plus gRPC-overhead 512 bytes. - - If watch response events exceed this server-side request limit and watch request is created with `fragment` field `true`, the server will split watch events into a set of chunks, each of which is a subset of watch events below server-side request limit. - - Useful when client-side has limited bandwidths. - - For example, watch response contains 10 events, where each event is 1 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB. Then, server will send 10 separate fragmented events to the client. - - For example, watch response contains 5 events, where each event is 2 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB. Then, server will try to send 5 separate fragmented events to the client, and the client will error with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`. - - Client must implement fragmented watch event merge (which `clientv3` does in etcd v3.4). -- Add [`raftAppliedIndex` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9176) for current Raft applied index. -- Add [`errors` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9206) for server-side error. - - e.g. `"etcdserver: no leader", "NOSPACE", "CORRUPT"` -- Add [`dbSizeInUse` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9256) for actual DB size after compaction. 
-- Add [`WatchRequest.WatchProgressRequest`](https://github.com/etcd-io/etcd/pull/9869). - - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams. - - Think of it as `WithProgressNotify` that can be triggered manually. - -Note: **v3.5 will deprecate `etcd --log-package-levels` flag for `capnslog`**; `etcd --logger=zap --log-outputs=stderr` will be the default. **v3.5 will deprecate `[CLIENT-URL]/config/local/log` endpoint.** - -### Package `embed` - -- Add [`embed.Config.CipherSuites`](https://github.com/etcd-io/etcd/pull/9801) to specify a list of supported cipher suites for TLS handshake between client/server and peers. - - If empty, Go auto-populates the list. - - Both `embed.Config.ClientTLSInfo.CipherSuites` and `embed.Config.CipherSuites` cannot be non-empty at the same time. - - If not empty, specify either `embed.Config.ClientTLSInfo.CipherSuites` or `embed.Config.CipherSuites`. -- Add [`embed.Config.InitialElectionTickAdvance`](https://github.com/etcd-io/etcd/pull/9591) to enable/disable initial election tick fast-forward. - - `embed.NewConfig()` would return `*embed.Config` with `InitialElectionTickAdvance` as true by default. -- Define [`embed.CompactorModePeriodic`](https://godoc.org/github.com/etcd-io/etcd/embed#pkg-variables) for `compactor.ModePeriodic`. -- Define [`embed.CompactorModeRevision`](https://godoc.org/github.com/etcd-io/etcd/embed#pkg-variables) for `compactor.ModeRevision`. -- Change [`embed.Config.CorsInfo` in `*cors.CORSInfo` type to `embed.Config.CORS` in `map[string]struct{}` type](https://github.com/etcd-io/etcd/pull/9490). -- Remove [`embed.Config.SetupLogging`](https://github.com/etcd-io/etcd/pull/9572). - - Now logger is set up automatically based on [`embed.Config.Logger`, `embed.Config.LogOutputs`, `embed.Config.Debug` fields](https://github.com/etcd-io/etcd/pull/9572). 
-- Add [`embed.Config.Logger`](https://github.com/etcd-io/etcd/pull/9518) to support [structured logger `zap`](https://github.com/uber-go/zap) in server-side. -- Add [`embed.Config.LogLevel`](https://github.com/etcd-io/etcd/pull/10947). -- Rename `embed.Config.SnapCount` field to [`embed.Config.SnapshotCount`](https://github.com/etcd-io/etcd/pull/9745), to be consistent with the flag name `etcd --snapshot-count`. -- Rename [**`embed.Config.LogOutput`** to **`embed.Config.LogOutputs`**](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs. -- Change [**`embed.Config.LogOutputs`** type from `string` to `[]string`](https://github.com/etcd-io/etcd/pull/9579) to support multiple log outputs. -- Add [`embed.Config.BackendBatchLimit`](https://github.com/etcd-io/etcd/pull/10283) field. -- Add [`embed.Config.BackendBatchInterval`](https://github.com/etcd-io/etcd/pull/10283) field. -- Make [`embed.DefaultEnableV2` `false` default](https://github.com/etcd-io/etcd/pull/10935). - -### Package `pkg/adt` - -- Change [`pkg/adt.IntervalTree` from `struct` to `interface`](https://github.com/etcd-io/etcd/pull/10959). - - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt). -- Improve [`pkg/adt.IntervalTree` test coverage](https://github.com/etcd-io/etcd/pull/10959). - - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt). -- Fix [Red-Black tree to maintain black-height property](https://github.com/etcd-io/etcd/pull/10978). - - Previously, delete operation violates [black-height property](https://github.com/etcd-io/etcd/issues/10965). - -### Package `integration` - -- Add [`CLUSTER_DEBUG` to enable test cluster logging](https://github.com/etcd-io/etcd/pull/9678). - - Deprecated `capnslog` in integration tests. 
- -### client v3 - -- Add [`MemberAddAsLearner`](https://github.com/etcd-io/etcd/pull/10725) to `Clientv3.Cluster` interface. This API is used to add a learner member to etcd cluster. -- Add [`MemberPromote`](https://github.com/etcd-io/etcd/pull/10727) to `Clientv3.Cluster` interface. This API is used to promote a learner member in etcd cluster. -- Client may receive [`rpctypes.ErrLeaderChanged`](https://github.com/etcd-io/etcd/pull/10094) from server. - - Now linearizable requests with read index would fail fast when there is a leadership change, instead of waiting until context timeout. -- Add [`WithFragment` `OpOption`](https://github.com/etcd-io/etcd/pull/9291) to support [watch events fragmentation](https://github.com/etcd-io/etcd/issues/9294) when the total size of events exceeds `etcd --max-request-bytes` flag value plus gRPC-overhead 512 bytes. - - Watch fragmentation is disabled by default. - - The default server-side request bytes limit is `embed.DefaultMaxRequestBytes` which is 1.5 MiB plus gRPC-overhead 512 bytes. - - If watch response events exceed this server-side request limit and watch request is created with `fragment` field `true`, the server will split watch events into a set of chunks, each of which is a subset of watch events below server-side request limit. - - Useful when client-side has limited bandwidths. - - For example, watch response contains 10 events, where each event is 1 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB. Then, server will send 10 separate fragmented events to the client. - - For example, watch response contains 5 events, where each event is 2 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB. Then, server will try to send 5 separate fragmented events to the client, and the client will error with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`. 
-- Add [`Watcher.RequestProgress` method](https://github.com/etcd-io/etcd/pull/9869). - - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams. - - Think of it as `WithProgressNotify` that can be triggered manually. -- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952). - - If `<-chan *clientv3.LeaseKeepAliveResponse` from `clientv3.Lease.KeepAlive` was never consumed or channel is full, client was [sending keepalive request every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of expected rate of every "TTL / 3" duration. -- Change [snapshot file permissions](https://github.com/etcd-io/etcd/pull/9977): On Linux, the snapshot file changes from readable by all (mode 0644) to readable by the user only (mode 0600). -- Client may choose to send keepalive pings to server using [`PermitWithoutStream`](https://github.com/etcd-io/etcd/pull/10146). - - By setting `PermitWithoutStream` to true, client can send keepalive pings to server without any active streams(RPCs). In other words, it allows sending keepalive pings with unary or simple RPC calls. - - `PermitWithoutStream` is set to false by default. -- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package. -- Fix [`(*Client).Endpoints()` method race condition](https://github.com/etcd-io/etcd/pull/10595). -- Deprecated [`grpc.ErrClientConnClosing`](https://github.com/etcd-io/etcd/pull/10981). - - `clientv3` and `proxy/grpcproxy` now does not return `grpc.ErrClientConnClosing`. - - `grpc.ErrClientConnClosing` has been [deprecated in gRPC >= 1.10](https://github.com/grpc/grpc-go/pull/1854). - - Use `clientv3.IsConnCanceled(error)` or `google.golang.org/grpc/status.FromError(error)` instead. - -### etcdctl v3 - -- Make [`ETCDCTL_API=3 etcdctl` default](https://github.com/etcd-io/etcd/issues/9600). 
- - Now, `etcdctl set foo bar` must be `ETCDCTL_API=2 etcdctl set foo bar`. - - Now, `ETCDCTL_API=3 etcdctl put foo bar` could be just `etcdctl put foo bar`. -- Add [`etcdctl member add --learner` and `etcdctl member promote`](https://github.com/etcd-io/etcd/pull/10725) to add and promote raft learner member in etcd cluster. -- Add [`etcdctl --password`](https://github.com/etcd-io/etcd/pull/9730) flag. - - To support [`:` character in user name](https://github.com/etcd-io/etcd/issues/9691). - - e.g. `etcdctl --user user --password password get foo` -- Add [`etcdctl user add --new-user-password`](https://github.com/etcd-io/etcd/pull/9730) flag. -- Add [`etcdctl check datascale`](https://github.com/etcd-io/etcd/pull/9185) command. -- Add [`etcdctl check datascale --auto-compact, --auto-defrag`](https://github.com/etcd-io/etcd/pull/9351) flags. -- Add [`etcdctl check perf --auto-compact, --auto-defrag`](https://github.com/etcd-io/etcd/pull/9330) flags. -- Add [`etcdctl defrag --cluster`](https://github.com/etcd-io/etcd/pull/9390) flag. -- Add ["raft applied index" field to `endpoint status`](https://github.com/etcd-io/etcd/pull/9176). -- Add ["errors" field to `endpoint status`](https://github.com/etcd-io/etcd/pull/9206). -- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540). - - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532). -- Add [missing newline in `etcdctl endpoint health`](https://github.com/etcd-io/etcd/pull/10793). -- Fix [`etcdctl watch [key] [range_end] -- [exec-command…]`](https://github.com/etcd-io/etcd/pull/9688) parsing. - - Previously, `ETCDCTL_API=3 etcdctl watch foo -- echo watch event received` panicked. -- Fix [`etcdctl move-leader` command for TLS-enabled endpoints](https://github.com/etcd-io/etcd/pull/9807). -- Add [`progress` command to `etcdctl watch --interactive`](https://github.com/etcd-io/etcd/pull/9869). 
- - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams. - - Think of it as `WithProgressNotify` that can be triggered manually. -- Add [timeout](https://github.com/etcd-io/etcd/pull/10301) to `etcdctl snapshot save`. - - User can specify timeout of `etcdctl snapshot save` command using flag `--command-timeout`. - - Fix etcdctl to [strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443) - -### gRPC proxy - -- Fix [etcd server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775). - - Let's assume that a watcher had been requested with a future revision X and sent to node A that became network-partitioned thereafter. Meanwhile, cluster makes progress. Then when the partition gets removed, the leader sends a snapshot to node A. Previously if the snapshot's latest revision is still lower than the watch revision X, **etcd server panicked** during snapshot restore operation. - - Especially, gRPC proxy was affected, since it detects a leader loss with a key `"proxy-namespace__lostleader"` and a watch revision `"int64(math.MaxInt64 - 2)"`. - - Now, this server-side panic has been fixed. -- Fix [memory leak in cache layer](https://github.com/etcd-io/etcd/pull/10327). -- Change [gRPC proxy to expose etcd server endpoint /metrics](https://github.com/etcd-io/etcd/pull/10618). - - The metrics that were exposed via the proxy were not etcd server members but instead the proxy itself. 
- - In v3.4, `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback to `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'`, but `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` won't work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead. -- Add API endpoints [`/{v3beta,v3}/lease/leases, /{v3beta,v3}/lease/revoke, /{v3beta,v3}/lease/timetolive`](https://github.com/etcd-io/etcd/pull/9450). - - To deprecate [`/{v3beta,v3}/kv/lease/leases, /{v3beta,v3}/kv/lease/revoke, /{v3beta,v3}/kv/lease/timetolive`](https://github.com/etcd-io/etcd/issues/9430) in v3.5. -- Support [`etcd --cors`](https://github.com/etcd-io/etcd/pull/9490) in v3 HTTP requests (gRPC gateway). - -### Package `raft` - -- Fix [deadlock during PreVote migration process](https://github.com/etcd-io/etcd/pull/8525). -- Add [`raft.ErrProposalDropped`](https://github.com/etcd-io/etcd/pull/9067). - - Now [`(r *raft) Step` returns `raft.ErrProposalDropped`](https://github.com/etcd-io/etcd/pull/9137) if a proposal has been ignored. - - e.g. a node is removed from cluster, or [`raftpb.MsgProp` arrives at current leader while there is an ongoing leadership transfer](https://github.com/etcd-io/etcd/issues/8975). -- Improve [Raft `becomeLeader` and `stepLeader`](https://github.com/etcd-io/etcd/pull/9073) by keeping track of latest `pb.EntryConfChange` index. - - Previously record `pendingConf` boolean field scanning the entire tail of the log, which can delay heartbeat send. -- Fix [missing learner nodes on `(n *node) ApplyConfChange`](https://github.com/etcd-io/etcd/pull/9116). -- Add [`raft.Config.MaxUncommittedEntriesSize`](https://github.com/etcd-io/etcd/pull/10167) to limit the total size of the uncommitted entries in bytes. - - Once exceeded, raft returns `raft.ErrProposalDropped` error. 
- - Prevent [unbounded Raft log growth](https://github.com/cockroachdb/cockroach/issues/27772). - - There was a bug in [PR#10167](https://github.com/etcd-io/etcd/pull/10167) but fixed via [PR#10199](https://github.com/etcd-io/etcd/pull/10199). -- Add [`raft.Ready.CommittedEntries` pagination using `raft.Config.MaxSizePerMsg`](https://github.com/etcd-io/etcd/pull/9982). - - This prevents out-of-memory errors if the raft log has become very large and commits all at once. - - Fix [correctness bug in CommittedEntries pagination](https://github.com/etcd-io/etcd/pull/10063). -- Optimize [message send flow control](https://github.com/etcd-io/etcd/pull/9985). - - Leader now sends more append entries if it has more non-empty entries to send after updating flow control information. - - Now, Raft allows multiple in-flight append messages. -- Optimize [memory allocation when boxing slice in `maybeCommit`](https://github.com/etcd-io/etcd/pull/10679). - - By boxing a heap-allocated slice header instead of the slice header on the stack, we can avoid an allocation when passing through the sort.Interface interface. -- Avoid [memory allocation in Raft entry `String` method](https://github.com/etcd-io/etcd/pull/10680). -- Avoid [multiple memory allocations when merging stable and unstable log](https://github.com/etcd-io/etcd/pull/10684). -- Extract [progress tracking into own component](https://github.com/etcd-io/etcd/pull/10683). - - Add [package `raft/tracker`](https://github.com/etcd-io/etcd/pull/10807). - - Optimize [string representation of `Progress`](https://github.com/etcd-io/etcd/pull/10882). -- Make [relationship between `node` and `RawNode` explicit](https://github.com/etcd-io/etcd/pull/10803). -- Prevent [learners from becoming leader](https://github.com/etcd-io/etcd/pull/10822). -- Add [package `raft/quorum` to reason about committed indexes as well as vote outcomes for both majority and joint quorums](https://github.com/etcd-io/etcd/pull/10779). 
- - Bundle [Voters and Learner into `raft/tracker.Config` struct](https://github.com/etcd-io/etcd/pull/10865). -- Use [membership sets in progress tracking](https://github.com/etcd-io/etcd/pull/10779). -- Implement [joint quorum computation](https://github.com/etcd-io/etcd/pull/10779). -- Refactor [`raft/node.go` to centralize configuration change application](https://github.com/etcd-io/etcd/pull/10865). -- Allow [voter to become learner through snapshot](https://github.com/etcd-io/etcd/pull/10864). -- Add [package `raft/confchange` to internally support joint consensus](https://github.com/etcd-io/etcd/pull/10779). -- Use [`RawNode` for node's event loop](https://github.com/etcd-io/etcd/pull/10892). -- Add [`RawNode.Bootstrap` method](https://github.com/etcd-io/etcd/pull/10892). -- Add [`raftpb.ConfChangeV2` to use joint quorums](https://github.com/etcd-io/etcd/pull/10914). - - `raftpb.ConfChange` continues to work as today: it allows carrying out a single configuration change. A `pb.ConfChange` proposal gets added to the Raft log as such and is thus also observed by the app during Ready handling, and fed back to ApplyConfChange. - - `raftpb.ConfChangeV2` allows joint configuration changes but will continue to carry out configuration changes in "one phase" (i.e. without ever entering a joint config) when this is possible. - - `raftpb.ConfChangeV2` messages initiate configuration changes. They support both the simple "one at a time" membership change protocol and full Joint Consensus allowing for arbitrary changes in membership. -- Change [`raftpb.ConfState.Nodes` to `raftpb.ConfState.Voters`](https://github.com/etcd-io/etcd/pull/10914). -- Allow [learners to vote, but still learners do not count in quorum](https://github.com/etcd-io/etcd/pull/10998). - - necessary in the situation in which a learner has been promoted (i.e. is now a voter) but has not learned about this yet. -- Fix [restoring joint consensus](https://github.com/etcd-io/etcd/pull/11003). 
-- Visit [`Progress` in stable order](https://github.com/etcd-io/etcd/pull/11004). -- Proactively [probe newly added followers](https://github.com/etcd-io/etcd/pull/11037). - - The general expectation in `tracker.Progress.Next == c.LastIndex` is that the follower has no log at all (and will thus likely need a snapshot), though the app may have applied a snapshot out of band before adding the replica (thus making the first index the better choice). - - Previously, when the leader applied a new configuration that added voters, it would not immediately probe these voters, delaying when they would be caught up. - -### Package `wal` - -- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603). -- Fix [`wal` directory cleanup on creation failures](https://github.com/etcd-io/etcd/pull/10689). - -### Tooling - -- Add [`etcd-dump-logs --entry-type`](https://github.com/etcd-io/etcd/pull/9628) flag to support WAL log filtering by entry type. -- Add [`etcd-dump-logs --stream-decoder`](https://github.com/etcd-io/etcd/pull/9790) flag to support custom decoder. -- Add [`SHA256SUMS`](https://github.com/etcd-io/etcd/pull/11087) file to release assets. - - etcd maintainers are a distributed team, this change allows for releases to be cut and validation provided without requiring a signing key. - -### Go - -- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045). -- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes. - -### Dockerfile - -- [Rebase etcd image from Alpine to Debian](https://github.com/etcd-io/etcd/pull/10805) to improve security and maintenance effort for etcd release. - -
- diff --git a/CHANGELOG/CHANGELOG-3.5.md b/CHANGELOG/CHANGELOG-3.5.md deleted file mode 100644 index 5919842ff74..00000000000 --- a/CHANGELOG/CHANGELOG-3.5.md +++ /dev/null @@ -1,467 +0,0 @@ - - -Previous change logs can be found at [CHANGELOG-3.4](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.4.md). - -
- -## v3.5.8 (TBD) - -### Package `netutil` -- Fix [consistently format IPv6 addresses for comparison](https://github.com/etcd-io/etcd/pull/15187) - -### Dependency -- Bump bbolt to [v1.3.7](https://github.com/etcd-io/etcd/pull/15222). - -### Other -- [Remove nsswitch.conf from docker image](https://github.com/etcd-io/etcd/pull/15161) - -
- -## v3.5.7 (2023-01-20) - -### etcd server -- Fix [Remove memberID from data corrupt alarm](https://github.com/etcd-io/etcd/pull/14852). -- Fix [Allow non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/14884). -- Fix [nil pointer panic for readonly txn due to nil response](https://github.com/etcd-io/etcd/pull/14899). -- Fix [The last record which was partially synced to disk isn't automatically repaired](https://github.com/etcd-io/etcd/pull/15069). -- Fix [etcdserver might promote a non-started learner](https://github.com/etcd-io/etcd/pull/15096). - -### Package `clientv3` -- Reverted the fix to [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14995). - -### Security -- Use [distroless base image](https://github.com/etcd-io/etcd/pull/15016) to address critical Vulnerabilities. -- Updated [base image from base-debian11 to static-debian11 and removed dependency on busybox](https://github.com/etcd-io/etcd/pull/15037). -- Bumped [some dependencies](https://github.com/etcd-io/etcd/pull/15018) to address some HIGH Vulnerabilities. - -### Go -- Require [Go 1.17+](https://github.com/etcd-io/etcd/pull/15019). -- Compile with [Go 1.17+](https://go.dev/doc/devel/release#go1.17) - -
- -## v3.5.6 (2022-11-21) - -### etcd server -- Fix [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14547) -- Fix [avoid closing a watch with ID 0 incorrectly](https://github.com/etcd-io/etcd/pull/14563) -- Fix [auth: fix data consistency issue caused by recovery from snapshot](https://github.com/etcd-io/etcd/pull/14648) -- Fix [revision might be inconsistency between members when etcd crashes during processing defragmentation operation](https://github.com/etcd-io/etcd/pull/14733) -- Fix [timestamp in inconsistent format](https://github.com/etcd-io/etcd/pull/14799) -- Fix [Failed resolving host due to lost DNS record](https://github.com/etcd-io/etcd/pull/14573) - -### Package `clientv3` -- Fix [Add backoff before retry when watch stream returns unavailable](https://github.com/etcd-io/etcd/pull/14582). -- Fix [stack overflow error in double barrier](https://github.com/etcd-io/etcd/pull/14658) -- Fix [Refreshing token on CommonName based authentication causes segmentation violation in client](https://github.com/etcd-io/etcd/pull/14790). - -### etcd grpc-proxy -- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14500) flag to support adding configurable cipher list. - -
- -## v3.5.5 (2022-09-15) - -### Deprecations -- Deprecated [SetKeepAlive and SetKeepAlivePeriod in limitListenerConn](https://github.com/etcd-io/etcd/pull/14366). - -### Package `clientv3` -- Fix [do not overwrite authTokenBundle on dial](https://github.com/etcd-io/etcd/pull/14132). -- Fix [IsOptsWithPrefix returns false even if WithPrefix() is included](https://github.com/etcd-io/etcd/pull/14187). - -### etcd server -- [Build official darwin/arm64 artifacts](https://github.com/etcd-io/etcd/pull/14436). -- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14219) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32. -- Add [`etcd --experimental-compact-hash-check-enabled --experimental-compact-hash-check-time`](https://github.com/etcd-io/etcd/issues/14039) flags to support enabling reliable corruption detection on compacted revisions. -- Fix [unexpected error during txn](https://github.com/etcd-io/etcd/issues/14110). -- Fix [lease leak issue due to tokenProvider isn't enabled when restoring auth store from a snapshot](https://github.com/etcd-io/etcd/pull/13205). -- Fix [the race condition between goroutine and channel on the same leases to be revoked](https://github.com/etcd-io/etcd/pull/14087). -- Fix [lessor may continue to schedule checkpoint after stepping down leader role](https://github.com/etcd-io/etcd/pull/14087). -- Fix [Restrict the max size of each WAL entry to the remaining size of the WAL file](https://github.com/etcd-io/etcd/pull/14127). 
-- Fix [Protect rangePermCache with a RW lock correctly](https://github.com/etcd-io/etcd/pull/14227) -- Fix [memberID equals zero in corruption alarm](https://github.com/etcd-io/etcd/pull/14272) -- Fix [Durability API guarantee broken in single node cluster](https://github.com/etcd-io/etcd/pull/14424) -- Fix [etcd fails to start after performing alarm list operation and then power off/on](https://github.com/etcd-io/etcd/pull/14429) -- Fix [authentication data not loaded on member startup](https://github.com/etcd-io/etcd/pull/14409) - -### etcdctl v3 - -- Fix [etcdctl move-leader may fail for multiple endpoints](https://github.com/etcd-io/etcd/pull/14434) - - -### Other -- [Bump golang.org/x/crypto to latest version](https://github.com/etcd-io/etcd/pull/13996) to address [CVE-2022-27191](https://github.com/advisories/GHSA-8c26-wmh5-6g9v). -- [Bump OpenTelemetry to 1.0.1 and gRPC to 1.41.0](https://github.com/etcd-io/etcd/pull/14312). - -
- -## v3.5.4 (2022-04-24) - -### etcd server -- Fix [etcd panic on startup (auth enabled)](https://github.com/etcd-io/etcd/pull/13946) - -### package `client/pkg/v3` - -- [Revert the change of trimming the trailing dot from SRV.Target](https://github.com/etcd-io/etcd/pull/13950) returned by DNS lookup - - -
- -## v3.5.3 (2022-04-13) - -### etcd server -- Fix [Provide a better liveness probe for when etcd runs as a Kubernetes pod](https://github.com/etcd-io/etcd/pull/13706) -- Fix [inconsistent log format](https://github.com/etcd-io/etcd/pull/13864) -- Fix [Inconsistent revision and data occurs](https://github.com/etcd-io/etcd/pull/13908) -- Fix [Etcdserver is still in progress of processing LeaseGrantRequest when it receives a LeaseKeepAliveRequest on the same leaseID](https://github.com/etcd-io/etcd/pull/13932) -- Fix [consistent_index coming from snapshot is overwritten by the old local value](https://github.com/etcd-io/etcd/pull/13933) -- [Update container base image snapshot](https://github.com/etcd-io/etcd/pull/13862) -- Fix [Defrag unsets backend options](https://github.com/etcd-io/etcd/pull/13701). - -### package `client/pkg/v3` - -- [Trim the suffix dot from the target](https://github.com/etcd-io/etcd/pull/13714) in SRV records returned by DNS lookup - -### etcdctl v3 - -- [Always print the raft_term in decimal](https://github.com/etcd-io/etcd/pull/13727) when displaying member list in json. - -
- -## [v3.5.2](https://github.com/etcd-io/etcd/releases/tag/v3.5.2) (2022-02-01) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.1...v3.5.2) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes. - -### etcd server -- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13476). -- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to enable checkpoint persisting. -- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/13508), requires enabling checkpoint persisting. -- Fix [assertion failed due to tx closed when recovering v3 backend from a snapshot db](https://github.com/etcd-io/etcd/pull/13501) -- Fix [segmentation violation(SIGSEGV) error due to premature unlocking of watchableStore](https://github.com/etcd-io/etcd/pull/13541) - -
- -## [v3.5.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.1) (2021-10-15) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.5.1) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes. - -### etcd server - -- Fix [self-signed-cert-validity parameter cannot be specified in the config file](https://github.com/etcd-io/etcd/pull/13237). -- Fix [ensure that cluster members stored in v2store and backend are in sync](https://github.com/etcd-io/etcd/pull/13348) - -### etcd client - -- [Fix etcd client sends invalid :authority header](https://github.com/etcd-io/etcd/issues/13192) - -### package clientv3 - -- Endpoints self identify now as `etcd-endpoints://{id}/{authority}` where authority is based on first endpoint passed, for example `etcd-endpoints://0xc0009d8540/localhost:2079` - -### Other - -- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs: - - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption - - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc - - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp - - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out of bound reads. - -
- -## v3.5.0 (2021-06) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes. - -- [v3.5.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0) (2021 TBD), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.1...v3.5.0). -- [v3.5.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.1) (2021-06-10), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.0...v3.5.0-rc.1). -- [v3.5.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.0) (2021-06-04), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.4...v3.5.0-rc.0). -- [v3.5.0-beta.4](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.4) (2021-05-26), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.3...v3.5.0-beta.4). -- [v3.5.0-beta.3](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.3) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.2...v3.5.0-beta.3). -- [v3.5.0-beta.2](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.2) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.1...v3.5.0-beta.2). -- [v3.5.0-beta.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.1) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0-beta.1). - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/).** - -### Breaking Changes - -- `go.etcd.io/etcd` Go packages have moved to `go.etcd.io/etcd/{api,pkg,raft,client,etcdctl,server,raft,tests}/v3` to follow the [Go modules](https://github.com/golang/go/wiki/Modules) conventions -- `go.etcd.io/clientv3/snapshot` SnapshotManager class have moved to `go.etcd.io/clientv3/etcdctl`. 
- The method `snapshot.Save` to download a snapshot from the remote server was preserved in 'go.etcd.io/clientv3/snapshot`. -- `go.etcd.io/client' package got migrated to 'go.etcd.io/client/v2'. -- Changed behavior of clientv3 API [MemberList](https://github.com/etcd-io/etcd/pull/11639). - - Previously, it is directly served with server's local data, which could be stale. - - Now, it is served with linearizable guarantee. If the server is disconnected from quorum, `MemberList` call will fail. -- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports [`/v3`](TODO) endpoint. - - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298). - - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` doesn't work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead. -- **`etcd --experimental-enable-v2v3` flag remains experimental and to be deprecated.** - - v2 storage emulation feature will be deprecated in the next release. - - etcd 3.5 is the last version that supports V2 API. Flags `--enable-v2` and `--experimental-enable-v2v3` [are now deprecated](https://github.com/etcd-io/etcd/pull/12940) and will be removed in etcd v3.6 release. -- **`etcd --experimental-backend-bbolt-freelist-type` flag has been deprecated.** Use **`etcd --backend-bbolt-freelist-type`** instead. The default type is hashmap and it is stable now. -- **`etcd --debug` flag has been deprecated.** Use **`etcd --log-level=debug`** instead. -- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947). -- **`etcd --log-output` flag has been deprecated.** Use **`etcd --log-outputs`** instead. -- **`etcd --logger=zap --log-outputs=stderr`** is now the default. -- **`etcd --logger=capnslog` flag value has been deprecated.** -- **`etcd --logger=zap --log-outputs=default` flag value is not supported.**. - - Use `etcd --logger=zap --log-outputs=stderr`. 
- - Or, use `etcd --logger=zap --log-outputs=systemd/journal` to send logs to the local systemd journal. - - Previously, if etcd parent process ID (PPID) is 1 (e.g. run with systemd), `etcd --logger=capnslog --log-outputs=default` redirects server logs to local systemd journal. And if write to journald fails, it writes to `os.Stderr` as a fallback. - - However, even with PPID 1, it can fail to dial systemd journal (e.g. run embedded etcd with Docker container). Then, [every single log write will fail](https://github.com/etcd-io/etcd/pull/9729) and fall back to `os.Stderr`, which is inefficient. - - To avoid this problem, systemd journal logging must be configured manually. -- **`etcd --log-outputs=stderr`** is now the default. -- **`etcd --log-package-levels` flag for `capnslog` has been deprecated.** Now, **`etcd --logger=zap --log-outputs=stderr`** is the default. -- **`[CLIENT-URL]/config/local/log` endpoint has been deprecated, as is `etcd --log-package-levels` flag.** - - `curl http://127.0.0.1:2379/config/local/log -XPUT -d '{"Level":"DEBUG"}'` won't work. - - Please use `etcd --logger=zap --log-outputs=stderr` instead. -- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead. -- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead. -- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead. -- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead. -- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead. -- Main branch `/version` outputs `3.5.0-pre`, instead of `3.4.0+git`. -- Changed `proxy` package function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11614). 
- - Previously, `NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`, now `NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`. - - Previously, `Register(c *clientv3.Client, prefix string, addr string, ttl int)`, now `Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{}`. - - Previously, `NewHandler(t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`, now `NewHandler(lg *zap.Logger, t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`. -- Changed `pkg/flags` function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11616). - - Previously, `SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error`, now `SetFlagsFromEnv(lg *zap.Logger, prefix string, fs *flag.FlagSet) error`. - - Previously, `SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error`, now `SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error`. -- ClientV3 supports [grpc resolver API](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go). - - Endpoints can be managed using [endpoints.Manager](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/endpoints/endpoints.go) - - Previously supported [GRPCResolver was decomissioned](https://github.com/etcd-io/etcd/pull/12675). Use [resolver](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go) instead. -- Turned on [--pre-vote by default](https://github.com/etcd-io/etcd/pull/12770). Should prevent disrupting RAFT leader by an individual member. -- [ETCD_CLIENT_DEBUG env](https://github.com/etcd-io/etcd/pull/12786): Now supports log levels (debug, info, warn, error, dpanic, panic, fatal). Only when set, overrides application-wide grpc logging settings. 
-- [Embed Etcd.Close()](https://github.com/etcd-io/etcd/pull/12828) needs to be called exactly once and closes Etcd.Err() stream.
-- [Embed Etcd does not override global/grpc logger](https://github.com/etcd-io/etcd/pull/12861) by default any longer. If desired, please call `embed.Config::SetupGlobalLoggers()` explicitly.
-- [Embed Etcd custom logger should be configured using simpler builder `NewZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/12973).
-- Client errors of `context cancelled` or `context deadline exceeded` are exposed as `codes.Canceled` and `codes.DeadlineExceeded`, instead of `codes.Unknown`.
-
-
-### Storage format changes
-- [WAL log's snapshots persists raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12735)
-- [Backend persists raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12962) in the `meta` bucket `confState` key.
-- [Backend persists applied term](https://github.com/etcd-io/etcd/pull/) in the `meta` bucket.
-- Backend persists `downgrade` in the `cluster` bucket
-
-### Security
-
-- Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864).
-- Changed [the format of WAL entries related to auth for not keeping password as a plain text](https://github.com/etcd-io/etcd/pull/11943).
-- Add third party [Security Audit Report](https://github.com/etcd-io/etcd/pull/12201).
-- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd uses any existing directory that has a permission different than 700 on Linux and 777 on Windows.
-- Add optional [`ClientCertFile` and `ClientKeyFile`](https://github.com/etcd-io/etcd/pull/12705) options for peer and client tls configuration when split certificates are used.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. 
- -Note that any `etcd_debugging_*` metrics are experimental and subject to change. - -- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead. -- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead. -- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead. -- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead. -- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead. -- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. -- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric. -- Change [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11254) Prometheus metrics to include only major and minor version. -- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric. -- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687). -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). -- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3). -- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214). -- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13395). - -### etcd server - - - Add [don't attempt to grant nil permission to a role](https://github.com/etcd-io/etcd/pull/13086). - - Add [don't activate alarms w/missing AlarmType](https://github.com/etcd-io/etcd/pull/13084). 
- - Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864). - - Automatically [create parent directory if it does not exist](https://github.com/etcd-io/etcd/pull/9626) (fix [issue#9609](https://github.com/etcd-io/etcd/issues/9609)). - - v4.0 will configure `etcd --enable-v2=true --enable-v2v3=/aaa` to enable v2 API server that is backed by **v3 storage**. -- [`etcd --backend-bbolt-freelist-type`] flag is now stable. - - `etcd --experimental-backend-bbolt-freelist-type` has been deprecated. -- Support [downgrade API](https://github.com/etcd-io/etcd/pull/11715). -- Deprecate v2 apply on cluster version. [Use v3 request to set cluster version and recover cluster version from v3 backend](https://github.com/etcd-io/etcd/pull/11427). -- [Use v2 api to update cluster version to support mixed version cluster during upgrade](https://github.com/etcd-io/etcd/pull/12988). -- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613). -- Fix [quorum protection logic when promoting a learner](https://github.com/etcd-io/etcd/pull/11640). -- Improve [peer corruption checker](https://github.com/etcd-io/etcd/pull/11621) to work when peer mTLS is enabled. -- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704). -- Log [successful etcd server-side health check in debug level](https://github.com/etcd-io/etcd/pull/12677). -- Improve [compaction performance when latest index is greater than 1-million](https://github.com/etcd-io/etcd/pull/11734). -- [Refactor consistentindex](https://github.com/etcd-io/etcd/pull/11699). -- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670). -- Improve [count-only range performance](https://github.com/etcd-io/etcd/pull/11771). 
-- Remove [redundant storage restore operation to shorten the startup time](https://github.com/etcd-io/etcd/pull/11779). - - With 40 million key test data,it can shorten the startup time from 5 min to 2.5 min. -- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817). -- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888). - - Previously, server restore fails if it had crashed after persisting raft hard state but before saving snapshot. - - See https://github.com/etcd-io/etcd/issues/10219 for more. - - Add [missing CRC checksum check in WAL validate method otherwise causes panic](https://github.com/etcd-io/etcd/pull/11924). - - See https://github.com/etcd-io/etcd/issues/11918. -- Improve logging around snapshot send and receive. -- [Push down RangeOptions.limit argv into index tree to reduce memory overhead](https://github.com/etcd-io/etcd/pull/11990). -- Add [reason field for /health response](https://github.com/etcd-io/etcd/pull/11983). -- Add [exclude alarms from health check conditionally](https://github.com/etcd-io/etcd/pull/12880). -- Add [`etcd --unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/11946) flag. - - Setting the flag disables all uses of fsync, which is unsafe and will cause data loss. This flag makes it possible to run an etcd node for testing and development without placing lots of load on the file system. -- Add [`etcd --auth-token-ttl`](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings. -- Improve [`runtime.FDUsage` call pattern to reduce objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986). -- Improve [mvcc.watchResponse channel Memory Usage](https://github.com/etcd-io/etcd/pull/11987). -- Log [expensive request info in UnaryInterceptor](https://github.com/etcd-io/etcd/pull/12086). -- [Fix invalid Go type in etcdserverpb](https://github.com/etcd-io/etcd/pull/12000). 
-- [Improve healthcheck by using v3 range request and its corresponding timeout](https://github.com/etcd-io/etcd/pull/12195). -- Add [`etcd --experimental-watch-progress-notify-interval`](https://github.com/etcd-io/etcd/pull/12216) flag to make watch progress notify interval configurable. -- Fix [server panic in slow writes warnings](https://github.com/etcd-io/etcd/issues/12197). - - Fixed via [PR#12238](https://github.com/etcd-io/etcd/pull/12238). -- [Fix server panic](https://github.com/etcd-io/etcd/pull/12288) when force-new-cluster flag is enabled in a cluster which had learner node. -- Add [`etcd --self-signed-cert-validity`](https://github.com/etcd-io/etcd/pull/12429) flag to support setting certificate expiration time. - - Notice, certificates generated by etcd are valid for 1 year by default when specifying the auto-tls or peer-auto-tls option. -- Add [`etcd --experimental-warning-apply-duration`](https://github.com/etcd-io/etcd/pull/12448) flag which allows apply duration threshold to be configurable. -- Add [`etcd --experimental-memory-mlock`](https://github.com/etcd-io/etcd/pull/TODO) flag which prevents etcd memory pages to be swapped out. -- Add [`etcd --socket-reuse-port`](https://github.com/etcd-io/etcd/pull/12702) flag - - Setting this flag enables `SO_REUSEPORT` which allows rebind of a port already in use. User should take caution when using this flag to ensure flock is properly enforced. -- Add [`etcd --socket-reuse-address`](https://github.com/etcd-io/etcd/pull/12702) flag - - Setting this flag enables `SO_REUSEADDR` which allows binding to an address in `TIME_WAIT` state, improving etcd restart time. -- Reduce [around 30% memory allocation by logging range response size without marshal](https://github.com/etcd-io/etcd/pull/12871). -- `ETCD_VERIFY="all"` environment triggers [additional verification of consistency](https://github.com/etcd-io/etcd/pull/12901) of etcd data-dir files. 
-- Add [`etcd --enable-log-rotation`](https://github.com/etcd-io/etcd/pull/12774) boolean flag which enables log rotation if true. -- Add [`etcd --log-rotation-config-json`](https://github.com/etcd-io/etcd/pull/12774) flag which allows passthrough of JSON config to configure log rotation for a file output target. -- Add experimental distributed tracing boolean flag [`--experimental-enable-distributed-tracing`](https://github.com/etcd-io/etcd/pull/12919) which enables tracing. -- Add [`etcd --experimental-distributed-tracing-address`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows configuring the OpenTelemetry collector address. -- Add [`etcd --experimental-distributed-tracing-service-name`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows changing the default "etcd" service name. -- Add [`etcd --experimental-distributed-tracing-instance-id`](https://github.com/etcd-io/etcd/pull/12919) string flag which configures an instance ID, which must be unique per etcd instance. -- Add [`--experimental-bootstrap-defrag-threshold-megabytes`](https://github.com/etcd-io/etcd/pull/12941) which configures a threshold for the unused db size and etcdserver will automatically perform defragmentation on bootstrap when it exceeds this value. The functionality is disabled if the value is 0. - -### Package `runtime` - -- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214). - -### Package `embed` - -- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947). - - Use `embed.Config.LogLevel` instead. -- Add [`embed.Config.ZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/11147) to allow creating a custom zap logger. -- Replace [global `*zap.Logger` with etcd server logger object](https://github.com/etcd-io/etcd/pull/12212). -- Add [`embed.Config.EnableLogRotation`](https://github.com/etcd-io/etcd/pull/12774) which enables log rotation if true. 
-- Add [`embed.Config.LogRotationConfigJSON`](https://github.com/etcd-io/etcd/pull/12774) to allow passthrough of JSON config to configure log rotation for a file output target.
-- Add [`embed.Config.ExperimentalEnableDistributedTracing`](https://github.com/etcd-io/etcd/pull/12919) which enables experimental distributed tracing if true.
-- Add [`embed.Config.ExperimentalDistributedTracingAddress`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default collector address.
-- Add [`embed.Config.ExperimentalDistributedTracingServiceName`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default "etcd" service name.
-- Add [`embed.Config.ExperimentalDistributedTracingServiceInstanceID`](https://github.com/etcd-io/etcd/pull/12919) which allows configuring an instance ID, which must be unique per etcd instance.
-
-### Package `clientv3`
-
-- Remove [excessive watch cancel logging messages](https://github.com/etcd-io/etcd/pull/12187).
- - See [kubernetes/kubernetes#93450](https://github.com/kubernetes/kubernetes/issues/93450).
-- Add [`TryLock`](https://github.com/etcd-io/etcd/pull/11104) method to `clientv3/concurrency/Mutex`. A non-blocking method on `Mutex` which does not wait to get lock on the Mutex, returns immediately if Mutex is locked by another session.
-- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184).
- - Fix [`"kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch"`](https://github.com/kubernetes/kubernetes/issues/83028).
-- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211).
- - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550).
-- Fix [errors caused by grpc changing balancer/resolver API](https://github.com/etcd-io/etcd/pull/11564). 
This change is compatible with grpc >= [v1.26.0](https://github.com/grpc/grpc-go/releases/tag/v1.26.0), but is not compatible with < v1.26.0 version. -- Use [ServerName as the authority](https://github.com/etcd-io/etcd/pull/11574) after bumping to grpc v1.26.0. Remove workaround in [#11184](https://github.com/etcd-io/etcd/pull/11184). -- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687). - - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys. -- Fix [watch leak caused by lazy cancellation](https://github.com/etcd-io/etcd/pull/11850). When clients cancel their watches, a cancel request will now be immediately sent to the server instead of waiting for the next watch event. -- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896). -- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready. -- Improve [clientv3:get AuthToken gracefully without extra connection](https://github.com/etcd-io/etcd/pull/12165). -- Changed [clientv3 dialing code](https://github.com/etcd-io/etcd/pull/12671) to use grpc resolver API instead of custom balancer. - - Endpoints self identify now as `etcd-endpoints://{id}/#initially={list of endpoints}` e.g. `etcd-endpoints://0xc0009d8540/#initially=[localhost:2079]` -- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896). - -### Package `lease` - -- Fix [memory leak in follower nodes](https://github.com/etcd-io/etcd/pull/11731). - - https://github.com/etcd-io/etcd/issues/11495 - - https://github.com/etcd-io/etcd/issues/11730 -- Make sure [grant/revoke won't be applied repeatedly after restarting etcd](https://github.com/etcd-io/etcd/pull/11935). - -### Package `wal` - -- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738). 
-- Handle [out-of-range slice bound in `ReadAll` and entry limit in `decodeRecord`](https://github.com/etcd-io/etcd/pull/11793).
-
-### etcdctl v3
-
-- Fix `etcdctl member add` command to prevent potential timeout. ([PR#11194](https://github.com/etcd-io/etcd/pull/11194) and [PR#11638](https://github.com/etcd-io/etcd/pull/11638))
-- Add [`etcdctl watch --progress-notify`](https://github.com/etcd-io/etcd/pull/11462) flag.
-- Add [`etcdctl auth status`](https://github.com/etcd-io/etcd/pull/11536) command to check if authentication is enabled
-- Add [`etcdctl get --count-only`](https://github.com/etcd-io/etcd/pull/11743) flag for output type `fields`.
-- Add [`etcdctl member list -w=json --hex`](https://github.com/etcd-io/etcd/pull/11812) flag to print memberListResponse in hex format json.
-- Changed [`etcdctl lock exec-command`](https://github.com/etcd-io/etcd/pull/12829) to return exit code of exec-command.
-- [New tool: `etcdutl`](https://github.com/etcd-io/etcd/pull/12971) incorporated functionality of: `etcdctl snapshot status|restore`, `etcdctl backup`, `etcdctl defrag --data-dir ...`.
-- [ETCDCTL_API=3 `etcdctl migrate`](https://github.com/etcd-io/etcd/pull/12971) has been decommissioned. Use etcd <=v3.4 to restore v2 storage.
-
-### gRPC gateway
-
-- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports [`/v3`](TODO) endpoint.
- - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298).
- - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` doesn't work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-- Set [`enable-grpc-gateway`](https://github.com/etcd-io/etcd/pull/12297) flag to true when using a config file to keep the defaults the same as the command line configuration.
-
-### gRPC Proxy
-
-- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler. 
-- Add [gRPC keepalive related flags](https://github.com/etcd-io/etcd/pull/11711) `grpc-keepalive-min-time`, `grpc-keepalive-interval` and `grpc-keepalive-timeout`. -- [Fix grpc watch proxy hangs when failed to cancel a watcher](https://github.com/etcd-io/etcd/pull/12030) . -- Add [metrics handler for grpcproxy self](https://github.com/etcd-io/etcd/pull/12107). -- Add [health handler for grpcproxy self](https://github.com/etcd-io/etcd/pull/12114). - -### Auth - -- Fix [NoPassword check when adding user through GRPC gateway](https://github.com/etcd-io/etcd/pull/11418) ([issue#11414](https://github.com/etcd-io/etcd/issues/11414)) -- Fix bug where [some auth related messages are logged at wrong level](https://github.com/etcd-io/etcd/pull/11586) -- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652). -- [Improve checkPassword performance](https://github.com/etcd-io/etcd/pull/11735). -- [Add authRevision field in AuthStatus](https://github.com/etcd-io/etcd/pull/11659). -- Fix [a bug of not refreshing expired tokens](https://github.com/etcd-io/etcd/pull/13308). -- -### API - -- Add [`/v3/auth/status`](https://github.com/etcd-io/etcd/pull/11536) endpoint to check if authentication is enabled -- [Add `Linearizable` field to `etcdserverpb.MemberListRequest`](https://github.com/etcd-io/etcd/pull/11639). -- [Learner support Snapshot RPC](https://github.com/etcd-io/etcd/pull/12890/). - -### Package `netutil` - -- Remove [`netutil.DropPort/RecoverPort/SetLatency/RemoveLatency`](https://github.com/etcd-io/etcd/pull/12491). - - These are not used anymore. They were only used for older versions of functional testing. - - Removed to adhere to best security practices, minimize arbitrary shell invocation. - -### `tools/etcd-dump-metrics` - -- Implement [input validation to prevent arbitrary shell invocation](https://github.com/etcd-io/etcd/pull/12491). 
- -### Dependency - -- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0) to [**`v1.37.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.37.0). -- Upgrade [`go.uber.org/zap`](https://github.com/uber-go/zap/releases) from [**`v1.14.1`**](https://github.com/uber-go/zap/releases/tag/v1.14.1) to [**`v1.16.0`**](https://github.com/uber-go/zap/releases/tag/v1.16.0). - -### Platforms - -- etcd now [officially supports `arm64`](https://github.com/etcd-io/etcd/pull/12929). - - See https://github.com/etcd-io/etcd/pull/12928 for adding automated tests with `arm64` EC2 instances (Graviton 2). - - See https://github.com/etcd-io/website/pull/273 for new platform support tier policies. - -### Release - -- Add s390x build support ([PR#11548](https://github.com/etcd-io/etcd/pull/11548) and [PR#11358](https://github.com/etcd-io/etcd/pull/11358)) - -### Go - -- Require [*Go 1.16+*](https://github.com/etcd-io/etcd/pull/11110). -- Compile with [*Go 1.16+*](https://golang.org/doc/devel/release.html#go1.16) -- etcd uses [go modules](https://github.com/etcd-io/etcd/pull/12279) (instead of vendor dir) to track dependencies. - -### Project Governance - -- The etcd team has added, a well defined and openly discussed, project [governance](https://github.com/etcd-io/etcd/pull/11175). - - -
- diff --git a/CHANGELOG/CHANGELOG-3.6.md b/CHANGELOG/CHANGELOG-3.6.md deleted file mode 100644 index 0d8924bf245..00000000000 --- a/CHANGELOG/CHANGELOG-3.6.md +++ /dev/null @@ -1,96 +0,0 @@ - - -Previous change logs can be found at [CHANGELOG-3.5](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.5.md). - -
- -## v3.6.0 (TBD) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.6.0). - -### Breaking Changes - -- `etcd` will no longer start on data dir created by newer versions (for example etcd v3.6 will not run on v3.7+ data dir). To downgrade data dir please check out `etcdutl migrate` command. -- `etcd` doesn't support serving client requests on the peer listen endpoints (--listen-peer-urls). See [pull/13565](https://github.com/etcd-io/etcd/pull/13565). -- `etcdctl` will sleep(2s) in case of range delete without `--range` flag. See [pull/13747](https://github.com/etcd-io/etcd/pull/13747) -- Applications which depend on etcd v3.6 packages must be built with go version >= v1.18. - -### Deprecations - -- Deprecated [V2 discovery](https://etcd.io/docs/v3.5/dev-internal/discovery_protocol/). -- Deprecated [SetKeepAlive and SetKeepAlivePeriod in limitListenerConn](https://github.com/etcd-io/etcd/pull/14356). -- Removed [etcdctl defrag --data-dir](https://github.com/etcd-io/etcd/pull/13793). -- Removed [etcdctl snapshot status](https://github.com/etcd-io/etcd/pull/13809). -- Removed [etcdctl snapshot restore](https://github.com/etcd-io/etcd/pull/13809). -- Removed [etcdutl snapshot save](https://github.com/etcd-io/etcd/pull/13809). - - -### etcdctl v3 - -- Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13133). -- When print endpoint status, [show db size in use](https://github.com/etcd-io/etcd/pull/13639) -- [Always print the raft_term in decimal](https://github.com/etcd-io/etcd/pull/13711) when displaying member list in json. -- [Add one more field `storageVersion`](https://github.com/etcd-io/etcd/pull/13773) into the response of command `etcdctl endpoint status`. -- Add [`--max-txn-ops`](https://github.com/etcd-io/etcd/pull/14340) flag to make-mirror command. -- Display [field `hash_revision`](https://github.com/etcd-io/etcd/pull/14812) for `etcdctl endpoint hash` command. 
- -### etcdutl v3 - -- Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13142). -- Add `migrate` command for downgrading/upgrading etcd data dir files. - -### Package `server` - -- Package `mvcc` was moved to `storage/mvcc` -- Package `mvcc/backend` was moved to `storage/backend` -- Package `mvcc/buckets` was moved to `storage/schema` -- Package `wal` was moved to `storage/wal` -- Package `datadir` was moved to `storage/datadir` - -### Package `raft` -- Send empty `MsgApp` when entry in-flight limits are exceeded. See [pull/14633](https://github.com/etcd-io/etcd/pull/14633). -- Add [MaxInflightBytes](https://github.com/etcd-io/etcd/pull/14624) setting in `raft.Config` for better flow control of entries. -- [Decouple raft from etcd](https://github.com/etcd-io/etcd/issues/14713). Migrated raft to a separate [repository](https://github.com/etcd-io/raft), and renamed raft module to `go.etcd.io/raft/v3`. - -### etcd server - -- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format. -- Add [`etcd --experimental-max-learners`](https://github.com/etcd-io/etcd/pull/13377) flag to allow configuration of learner max membership. -- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to handle upgrade from v3.5.2 clusters with this feature enabled. -- Add [`etcdctl make-mirror --rev`](https://github.com/etcd-io/etcd/pull/13519) flag to support incremental mirror. -- Add [`etcd --experimental-wait-cluster-ready-timeout`](https://github.com/etcd-io/etcd/pull/13525) flag to wait for cluster to be ready before serving client requests. -- Add [v3 discovery](https://github.com/etcd-io/etcd/pull/13635) to bootstrap a new etcd cluster. -- Add [field `storage`](https://github.com/etcd-io/etcd/pull/13772) into the response body of endpoint `/version`. 
-- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14169) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32. -- Add [`etcd grpc-proxy --experimental-enable-grpc-logging`](https://github.com/etcd-io/etcd/pull/14266) flag to logging all grpc requests and responses. -- Add [`etcd --experimental-compact-hash-check-enabled --experimental-compact-hash-check-time`](https://github.com/etcd-io/etcd/issues/14039) flags to support enabling reliable corruption detection on compacted revisions. -- Add [Protection on maintenance request when auth is enabled](https://github.com/etcd-io/etcd/pull/14663). -- Graduated [`--experimental-warning-unary-request-duration` to `--warning-unary-request-duration`](https://github.com/etcd-io/etcd/pull/14414). Note the experimental flag is deprecated and will be decommissioned in v3.7. -- Add [field `hash_revision` into `HashKVResponse`](https://github.com/etcd-io/etcd/pull/14537). -- Add [`etcd --experimental-snapshot-catch-up-entries`](https://github.com/etcd-io/etcd/pull/15033) flag to configure number of entries for a slow follower to catch up after compacting the the raft storage entries and defaults to 5k. - -### etcd grpc-proxy - -- Add [`etcd grpc-proxy start --endpoints-auto-sync-interval`](https://github.com/etcd-io/etcd/pull/14354) flag to enable and configure interval of auto sync of endpoints with server. -- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14308) flag to support adding configurable cipher list. - -### tools/benchmark - -- [Add etcd client autoSync flag](https://github.com/etcd-io/etcd/pull/13416) - -### Metrics, Monitoring - -See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release. - -- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13371). -- Add [`etcd_debugging_server_alarms`](https://github.com/etcd-io/etcd/pull/14276). 
- -### Go -- Require [Go 1.19+](https://github.com/etcd-io/etcd/pull/14463). -- Compile with [Go 1.19+](https://golang.org/doc/devel/release.html#go1.19). Please refer to [gc-guide](https://go.dev/doc/gc-guide) to configure `GOGC` and `GOMEMLIMIT` properly. - -### Other - -- Use Distroless as base image to make the image less vulnerable and reduce image size. - -
diff --git a/CHANGELOG/CHANGELOG-4.0.md b/CHANGELOG/CHANGELOG-4.0.md deleted file mode 100644 index 860e5efd072..00000000000 --- a/CHANGELOG/CHANGELOG-4.0.md +++ /dev/null @@ -1,44 +0,0 @@ - - -Previous change logs can be found at [CHANGELOG-3.x](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.x.md). - -
- -## v4.0.0 (TBD) - -See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v4.0.0) and [v4.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_4_0/) for any breaking changes. - -**Again, before running upgrades from any previous release, please make sure to read change logs below and [v4.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_4_0/).** - -### Breaking Changes - -- [Secure etcd by default](https://github.com/etcd-io/etcd/issues/9475)? -- Deprecate [`etcd --proxy*`](TODO) flags; **no more v2 proxy**. -- Deprecate [v2 storage backend](https://github.com/etcd-io/etcd/issues/9232); **no more v2 store**. - - v2 API is still supported via [v2 emulation](TODO). -- Deprecate [`etcdctl backup`](TODO) command. -- `clientv3.Client.KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)` is now [`clientv4.Client.KeepAlive(ctx context.Context, id LeaseID) <-chan *LeaseKeepAliveResponse`](TODO). - - Similar to `Watch`, [`KeepAlive` does not return errors](https://github.com/etcd-io/etcd/issues/7488). - - If there's an unknown server error, kill all open channels and create a new stream on the next `KeepAlive` call. -- Rename `github.com/coreos/client` to `github.com/coreos/clientv2`. -- [`etcd --experimental-initial-corrupt-check`](TODO) has been deprecated. - - Use [`etcd --initial-corrupt-check`](TODO) instead. -- [`etcd --experimental-corrupt-check-time`](TODO) has been deprecated. - - Use [`etcd --corrupt-check-time`](TODO) instead. -- Enable TLS 1.13, deprecate TLS cipher suites. - -### etcd server - -- [`etcd --initial-corrupt-check`](TODO) flag is now stable (`etcd --experimental-initial-corrupt-check` has been deprecated). - - `etcd --initial-corrupt-check=true` by default, to check cluster database hashes before serving client/peer traffic. -- [`etcd --corrupt-check-time`](TODO) flag is now stable (`etcd --experimental-corrupt-check-time` has been deprecated). 
- - `etcd --corrupt-check-time=12h` by default, to check cluster database hashes for every 12-hour. -- Enable TLS 1.13, deprecate TLS cipher suites. - -### Go - -- Require [*Go 2*](https://blog.golang.org/go2draft). - - -
- diff --git a/CHANGELOG/README.md b/CHANGELOG/README.md deleted file mode 100644 index 8f31bc34329..00000000000 --- a/CHANGELOG/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Change logs - -## Production recommendation - -The minimum recommended etcd versions to run in **production** are v3.4.8+ and v3.5.4+. Refer to the [versioning policy](https://etcd.io/docs/v3.5/op-guide/versioning/) for more details. - -### v3.5 data corruption issue - -Running etcd v3.5.2, v3.5.1 and v3.5.0 under high load can cause a data corruption issue. -If etcd process is killed, occasionally some committed transactions are not reflected on all the members. -Recommendation is to upgrade to v3.5.4+. - -If you have encountered data corruption, please follow instructions on https://etcd.io/docs/v3.5/op-guide/data_corruption/. - -## Change log rules -1. Each patch release only includes changes against previous patch release. -For example, the change log of v3.5.5 should only include items which are new to v3.5.4. -2. For the first release (e.g. 3.4.0, 3.5.0, 3.6.0, 4.0.0 etc.) for each minor or major -version, it only includes changes which are new to the first release of previous minor -or major version. For example, v3.5.0 should only include items which are new to v3.4.0, -and v3.6.0 should only include items which are new to v3.5.0. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 05cb8f8ed87..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,125 +0,0 @@ -# How to contribute - -etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. -This document outlines basics of contributing to etcd. 
- -This is a rough outline of what a contributor's workflow looks like: -* [Find something to work on](#Find-something-to-work-on) -* [Setup development environment](#Setup-development-environment) -* [Implement your change](#Implement-your-change) -* [Commit your change](#Commit-your-change) -* [Create a pull request](#Create-a-pull-request) -* [Get your pull request reviewed](#Get-your-pull-request-reviewed) - -If you have any questions about, please reach out using one of the methods listed in [contact]. - -[contact]: ./README.md#Contact - -## Learn more about etcd - -Before making a change please look through resources below to learn more about etcd and tools used for development. - -* Please learn about [Git](https://github.com/git-guides) version control system used in etcd. -* Read the [etcd learning resources](https://etcd.io/docs/v3.5/learning/) -* Read the [etcd contributing guides](https://github.com/etcd-io/etcd/tree/main/Documentation/contributor-guide) -* Watch [etcd deep dive](https://www.youtube.com/watch?v=D2pm6ufIt98&t=927s) -* Watch [etcd code walk through](https://www.youtube.com/watch?v=H3XaSF6wF7w) - -## Find something to work on - -All the work in etcd project is tracked in [github issue tracker]. -Issues should be properly labeled making it easy to find something for you. - -Depending on your interest and experience you should check different labels: -* If you are just starting, check issues labeled with [good first issue]. -* When you feel more conformable in your contributions, checkout [help wanted]. -* Advanced contributors can try to help with issues labeled [priority/important] covering most relevant work at the time. - -If any of aforementioned labels don't have unassigned issues, please [contact] one of the [maintainers] asking to triage more issues. 
- -[github issue tracker]: https://github.com/etcd-io/etcd/issues -[good first issue]: https://github.com/etcd-io/etcd/labels/good%20first%20issue -[help wanted]: https://github.com/etcd-io/etcd/labels/help%20wanted -[maintainers]: https://github.com/etcd-io/etcd/blob/main/MAINTAINERS -[priority/important]: https://github.com/etcd-io/etcd/labels/priority%2Fimportant - -## Setup development environment - -etcd supported development environments include only linux-amd64. -Bug reports for any non-supported environments will be ignored. -Supporting new environments requires introduction of proper tests and maintainer support that is currently lacking in etcd project. -If you want help etcd support your preferred environment, please [file an issue]. - -Setup environment: -- [Clone the repository](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository) -- Install Go by following [installation](https://go.dev/doc/install). Please check minimal go version in [go.mod file](./go.mod#L3). -- Install build tools (make): - - For ubuntu and debian run `sudo apt-get install build-essentials` -- Verify that everything is installed by running `make build` - -Note: `make build` runs with `-v`. Other build flags can be added through env `GO_BUILD_FLAGS`, **if required**. Eg., -```console -GO_BUILD_FLAGS="-buildmode=pie" make build -``` - -[file an issue]: https://github.com/etcd-io/etcd/issues/new/choose - -## Implement your change - -etcd code should follow coding style suggested by the Golang community. -See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details. - -Please ensure that your change passes static analysis (requires [golangci-lint](https://golangci-lint.run/usage/install/)): -- `make verify` to verify if all checks pass. -- `make verify-*` to verify a single check, for example `make verify-bom` to verify if bill-of-materials.json file is up-to-date. -- `make fix` to fix all checks. 
-- `make fix-*` to fix a single checks, for example `make fix-bom` to update bill-of-materials.json. - -Please ensure that your change passes tests. -- `make test-unit` to run unit tests. -- `make test-integration` to run integration tests. -- `make test-e2e` to run e2e tests. - -All changes are expected to come with unit test. -All new features are expected to have either e2e or integration tests. - -## Commit your change - -etcd follows a rough convention for commit messages: -* First line: - * Should start name of package (for example `etcdserver`, `etcdctl`) followed by `:` character. - * Describe the `what` behind the change -* Optionally author might provide the `why` behind the change in the main commit message body. -* Last line should be `Signed-off-by: firstname lastname ` (can be automatically generate by providing `--signoff` to git commit command). - -Example of commit message: -``` -etcdserver: add grpc interceptor to log info on incoming requests - -To improve debuggability of etcd v3. Added a grpc interceptor to log -info on incoming requests to etcd server. The log output includes -remote client info, request content (with value field redacted), request -handling latency, response size, etc. Uses zap logger if available, -otherwise uses capnslog. - -Signed-off-by: FirstName LastName -``` - -## Create a pull request - -Please follow [making a pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects#making-a-pull-request) guide. - -If you are still working on the pull request, you can convert it to draft by clicking `Convert to draft` link just below list of reviewers. - -Multiple small PRs are preferred over single large ones (>500 lines of code). - -## Get your pull request reviewed - -Before requesting review please ensure that all GitHub checks were successful. -It might happen that some unrelated tests on your PR are failing, due to their flakiness. 
-In such cases please [file an issue] to deflake the problematic test and ask one of [maintainers] to rerun the tests. - -If all checks were successful feel free to reach out for review from people that were involved in the original discussion or [maintainers]. -Depending on complexity of the PR it might require between 1 and 2 maintainers to approve your change before merging. - -Thanks for contributing! diff --git a/DCO b/DCO deleted file mode 100644 index 716561d5d28..00000000000 --- a/DCO +++ /dev/null @@ -1,36 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. 
diff --git a/Dockerfile-release.amd64 b/Dockerfile-release.amd64 deleted file mode 100644 index 4f2fcbed349..00000000000 --- a/Dockerfile-release.amd64 +++ /dev/null @@ -1,13 +0,0 @@ -FROM --platform=linux/amd64 gcr.io/distroless/static-debian11 - -ADD etcd /usr/local/bin/ -ADD etcdctl /usr/local/bin/ -ADD etcdutl /usr/local/bin/ - -WORKDIR /var/etcd/ -WORKDIR /var/lib/etcd/ - -EXPOSE 2379 2380 - -# Define default command. -CMD ["/usr/local/bin/etcd"] diff --git a/Dockerfile-release.arm64 b/Dockerfile-release.arm64 deleted file mode 100644 index c93763f661b..00000000000 --- a/Dockerfile-release.arm64 +++ /dev/null @@ -1,13 +0,0 @@ -FROM --platform=linux/arm64 gcr.io/distroless/static-debian11 - -ADD etcd /usr/local/bin/ -ADD etcdctl /usr/local/bin/ -ADD etcdutl /usr/local/bin/ - -WORKDIR /var/etcd/ -WORKDIR /var/lib/etcd/ - -EXPOSE 2379 2380 - -# Define default command. -CMD ["/usr/local/bin/etcd"] diff --git a/Dockerfile-release.ppc64le b/Dockerfile-release.ppc64le deleted file mode 100644 index 268e397410c..00000000000 --- a/Dockerfile-release.ppc64le +++ /dev/null @@ -1,13 +0,0 @@ -FROM --platform=linux/ppc64le gcr.io/distroless/static-debian11 - -ADD etcd /usr/local/bin/ -ADD etcdctl /usr/local/bin/ -ADD etcdutl /usr/local/bin/ - -WORKDIR /var/etcd/ -WORKDIR /var/lib/etcd/ - -EXPOSE 2379 2380 - -# Define default command. -CMD ["/usr/local/bin/etcd"] diff --git a/Dockerfile-release.s390x b/Dockerfile-release.s390x deleted file mode 100644 index 4a280551deb..00000000000 --- a/Dockerfile-release.s390x +++ /dev/null @@ -1,13 +0,0 @@ -FROM --platform=linux/s390x gcr.io/distroless/static-debian11 - -ADD etcd /usr/local/bin/ -ADD etcdctl /usr/local/bin/ -ADD etcdutl /usr/local/bin/ - -WORKDIR /var/etcd/ -WORKDIR /var/lib/etcd/ - -EXPOSE 2379 2380 - -# Define default command. 
-CMD ["/usr/local/bin/etcd"] diff --git a/Documentation/README.md b/Documentation/README.md deleted file mode 100644 index 5c1262f8e2e..00000000000 --- a/Documentation/README.md +++ /dev/null @@ -1,4 +0,0 @@ -This directory includes etcd project internal documentation for new and existing contributors. - -For user and developer documentation please go to [etcd.io](https://etcd.io/), -which is developed in [website](https://github.com/etcd-io/website/) repo. diff --git a/Documentation/contributor-guide/branch_management.md b/Documentation/contributor-guide/branch_management.md deleted file mode 100644 index 838a08c7213..00000000000 --- a/Documentation/contributor-guide/branch_management.md +++ /dev/null @@ -1,27 +0,0 @@ -# Branch management - -## Guide - -* New development occurs on the [main branch][main]. -* Main branch should always have a green build! -* Backwards-compatible bug fixes should target the main branch and subsequently be ported to stable branches. -* Once the main branch is ready for release, it will be tagged and become the new stable branch. - -The etcd team has adopted a *rolling release model* and supports two stable versions of etcd. - -### Main branch - -The `main` branch is our development branch. All new features land here first. - -To try new and experimental features, pull `main` and play with it. Note that `main` may not be stable because new features may introduce bugs. - -Before the release of the next stable version, feature PRs will be frozen. A [release manager](./release.md/#release-management) will be assigned to major/minor version and will lead the etcd community in test, bug-fix and documentation of the release for one to two weeks. - -### Stable branches - -All branches with prefix `release-` are considered _stable_ branches. - -After every minor release ([semver.org](https://semver.org/)), we will have a new stable branch for that release, managed by a [patch release manager](./release.md/#release-management). 
We will keep fixing the backwards-compatible bugs for the latest two stable releases. A _patch_ release to each supported release branch, incorporating any bug fixes, will be once every two weeks, given any patches. - -[main]: https://github.com/etcd-io/etcd/tree/main - diff --git a/Documentation/contributor-guide/features.md b/Documentation/contributor-guide/features.md deleted file mode 100644 index a88ac9a2548..00000000000 --- a/Documentation/contributor-guide/features.md +++ /dev/null @@ -1,83 +0,0 @@ -# Features - -This document provides an overview of etcd features and general development guidelines for adding and deprecating them. The project maintainers can override these guidelines per the need of the project following the project governance. - -## Overview - -The etcd features fall into three stages, experimental, stable, and unsafe. - -### Experimental - -Any new feature is usually added as an experimental feature. An experimental feature is characterized as below: -- Might be buggy due to a lack of user testing. Enabling the feature may not work as expected. -- Disabled by default when added initially. -- Support for such a feature may be dropped at any time without notice - - Feature related issues may be given lower priorities. - - It can be removed in the next minor or major release without following the feature deprecation policy unless it graduates to the stable future. - -### Stable - -A stable feature is characterized as below: -- Supported as part of the supported releases of etcd. -- May be enabled by default. -- Discontinuation of support must follow the feature deprecation policy. - -### Unsafe - -Unsafe features are rare and listed under the `Unsafe feature:` section in the etcd usage documentation. By default, they are disabled. They should be used with caution following documentation. An unsafe feature can be removed in the next minor or major release without following feature deprecation policy. 
- -## Development Guidelines - -### Adding a new feature - -Any new enhancements to the etcd are typically added as an experimental feature. The general development requirements are listed below. They can be somewhat flexible depending on the scope of the feature and review discussions, and will evolve over time. -- Open an issue - - It must provide a clear need for the proposed feature. - - It should list development work items as checkboxes. There must be one work item towards future graduation to the stable future. - - Label the issue with `type/feature` and `experimental`. - - Keep the issue open for tracking purpose until a decision is made on graduation. -- Open a Pull Request (PR) - - Provide unit tests. Integreation tests are also recommended as possible. - - Provide robust e2e test coverage. If the feature being added is complicated or quickly needed, maintainers can decide to go with e2e tests for basic coverage initially and have robust coverage added at the later time before feature graduation to the stable feature. - - Provide logs for proper debugging. - - Provide metrics and benchmarks as needed. - - The Feature should be disabled by default. - - Any configuration flags related to the implementation of the feature must be prefixed with `experimental` e.g. `--experimental-feature-name`. - - Add a CHANGELOG entry. -- At least two maintainers must approve feature requirements and related code changes. - -### Graduating an Experimental feature to Stable - -It is important that experimental features don't get stuck in that stage. They should be revisited and moved to the stable stage following the graduation steps as described here. - -#### Locate graduation candidate -Decide if an experimental feature is ready for graduation to the stable stage. -- Find the issue that was used to enable the experimental feature initially. One way to find such issues is to search for issues with `type/feature` and `experimental` labels. 
-- Fix any known open issues against the feature. -- Make sure the feature was enabled for at least one previous release. Check the PR(s) reference from the issue to see when the feature related code changes were merged. - -#### Provide implementation -If an experimental feature is found ready for graduation to the stable stage, open a Pull Request (PR) with the following changes. -- Add robust e2e tests if not already provided. -- Add a new stable feature flag identical to the experimental feature flag but without the `--experimental` prefix. -- Deprecate the experimental feature following the [feature deprecation policy](#Deprecating-a-feature). -- Implementation must ensure that both the graduated and deprecated experimental feature flags work as expected. Note that both these flags will co-exist for the timeframe described in the feature deprecation policy. -- Enable the graduated feature by default if needed. - -At least two maintainers must approve the work. Patch releases should not be considered for graduation. - -### Deprecating a feature - -#### Experimental -An experimental feature deprecates when it graduates to the stable stage. -- Add a deprecation message in the documentation of the experimental feature with a recommendation to use related stable feature. e.g. `DEPRECATED. Use instead.` -- Add a `deprecated` label in the issue that was initially used to enable the experimental feature. - -#### Stable -As the project evolves, a stable feature may sometimes need to be deprecated and removed. Such a situation should be handled using the steps below: -- Create an issue for tracking purpose. -- Add a deprecation message in the feature usage documentation before a planned release for feature deprecation. e.g. `To be deprecated in .`. If a new feature replaces the `To be deprecated` feature, then also provide a message saying so. e.g. `Use instead.`. -- Deprecate the feature in the planned release with a message as part of the feature usage documentation. 
e.g. `DEPRECATED`. If a new feature replaces the deprecated feature, then also provide a message saying so. e.g. `DEPRECATED. Use instead.`. -- Add a `deprecated` label in the related issue. - -Remove the deprecated feature in the following release. Close any related issue(s). At least two maintainers must approve the work. Patch releases should not be considered for deprecation. diff --git a/Documentation/contributor-guide/local_cluster.md b/Documentation/contributor-guide/local_cluster.md deleted file mode 100644 index 675674eec24..00000000000 --- a/Documentation/contributor-guide/local_cluster.md +++ /dev/null @@ -1,150 +0,0 @@ -# Set up local cluster - -For testing and development deployments, the quickest and easiest way is to configure a local cluster. For a production deployment, refer to the [clustering][clustering] section. - -## Local standalone cluster - -### Starting a cluster - -Run the following to deploy an etcd cluster as a standalone cluster: - -``` -$ ./etcd -... -``` - -If the `etcd` binary is not present in the current working directory, it might be located either at `$GOPATH/bin/etcd` or at `/usr/local/bin/etcd`. Run the command appropriately. - -The running etcd member listens on `localhost:2379` for client requests. - -### Interacting with the cluster - -Use `etcdctl` to interact with the running cluster: - -1. Store an example key-value pair in the cluster: - - ``` - $ ./etcdctl put foo bar - OK - ``` - - If OK is printed, storing key-value pair is successful. - -2. Retrieve the value of `foo`: - - ``` - $ ./etcdctl get foo - bar - ``` - - If `bar` is returned, interaction with the etcd cluster is working as expected. - -## Local multi-member cluster - -### Starting a cluster - -A `Procfile` at the base of the etcd git repository is provided to easily configure a local multi-member cluster. To start a multi-member cluster, navigate to the root of the etcd source tree and perform the following: - -1. 
Install `goreman` to control Procfile-based applications: - - ``` - $ go install github.com/mattn/goreman@latest - ``` - The installation will place executables in the $GOPATH/bin. If $GOPATH environment variable is not set, the tool will be installed into the $HOME/go/bin. Make sure that $PATH is set accordingly in your environment. - -2. Start a cluster with `goreman` using etcd's stock Procfile: - - ``` - $ goreman -f Procfile start - ``` - - The members start running. They listen on `localhost:2379`, `localhost:22379`, and `localhost:32379` respectively for client requests. - -### Interacting with the cluster - -Use `etcdctl` to interact with the running cluster: - -1. Print the list of members: - - ``` - $ etcdctl --write-out=table --endpoints=localhost:2379 member list - ``` - The list of etcd members are displayed as follows: - - ``` - +------------------+---------+--------+------------------------+------------------------+ - | ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | - +------------------+---------+--------+------------------------+------------------------+ - | 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:2380 | http://127.0.0.1:2379 | - | 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 | - | fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 | - +------------------+---------+--------+------------------------+------------------------+ - ``` - -2. Store an example key-value pair in the cluster: - - ``` - $ etcdctl put foo bar - OK - ``` - - If OK is printed, storing key-value pair is successful. - -### Testing fault tolerance - -To exercise etcd's fault tolerance, kill a member and attempt to retrieve the key. - -1. Identify the process name of the member to be stopped. - - The `Procfile` lists the properties of the multi-member cluster. For example, consider the member with the process name, `etcd2`. - -2. 
Stop the member: - - ``` - # kill etcd2 - $ goreman run stop etcd2 - ``` - -3. Store a key: - - ``` - $ etcdctl put key hello - OK - ``` - -4. Retrieve the key that is stored in the previous step: - - ``` - $ etcdctl get key - hello - ``` - -5. Retrieve a key from the stopped member: - - ``` - $ etcdctl --endpoints=localhost:22379 get key - ``` - - The command should display an error caused by connection failure: - - ``` - 2017/06/18 23:07:35 grpc: Conn.resetTransport failed to create client transport: connection error: desc = "transport: dial tcp 127.0.0.1:22379: getsockopt: connection refused"; Reconnecting to "localhost:22379" - Error: grpc: timed out trying to connect - ``` -6. Restart the stopped member: - - ``` - $ goreman run restart etcd2 - ``` - -7. Get the key from the restarted member: - - ``` - $ etcdctl --endpoints=localhost:22379 get key - hello - ``` - - Restarting the member re-establish the connection. `etcdctl` will now be able to retrieve the key successfully. To learn more about interacting with etcd, read [interacting with etcd section][interacting]. - -[clustering]: https://etcd.io/docs/latest/op-guide/clustering/ -[interacting]: https://etcd.io/docs/latest/dev-guide/interacting_v3/ diff --git a/Documentation/contributor-guide/logging.md b/Documentation/contributor-guide/logging.md deleted file mode 100644 index 9eb9032013e..00000000000 --- a/Documentation/contributor-guide/logging.md +++ /dev/null @@ -1,33 +0,0 @@ -# Logging Conventions - -etcd uses the [zap][zap] library for logging application output categorized into *levels*. A log message's level is determined according to these conventions: - -* Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. Usually not used in production. 
- * Examples: - * Send a normal message to a remote peer - * Write a log entry to disk - -* Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. Should rather not be logged more frequently than once per a few seconds in normal server's operation. - * Examples: - * Startup configuration - * Start to do snapshot - -* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. - * Examples: - * Failure to send raft message to a remote peer - * Failure to receive heartbeat message within the configured election timeout - -* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost. - * Examples: - * Failure to allocate disk space for WAL - -* Panic: Unrecoverable or unexpected error situation that requires stopping execution. - * Examples: - * Failure to create the database - -* Fatal: Unrecoverable or unexpected error situation that requires immediate exit. Mostly used in the test. 
- * Examples: - * Failure to find the data directory - * Failure to run a test function - -[zap]: https://github.com/uber-go/zap diff --git a/Documentation/contributor-guide/modules-future.svg b/Documentation/contributor-guide/modules-future.svg deleted file mode 100644 index 92d060a29fc..00000000000 --- a/Documentation/contributor-guide/modules-future.svg +++ /dev/null @@ -1,604 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - go.etcd.io/raft/v3 - - diff --git a/Documentation/contributor-guide/modules.md b/Documentation/contributor-guide/modules.md deleted file mode 100644 index a8551aa39eb..00000000000 --- a/Documentation/contributor-guide/modules.md +++ /dev/null @@ -1,94 +0,0 @@ -# Golang modules - -The etcd project (since version 3.5) is organized into multiple -[golang modules](https://golang.org/ref/mod) hosted in a [single repository](https://golang.org/ref/mod#vcs-dir). - -![modules graph](modules.svg) - -There are following modules: - - - **go.etcd.io/etcd/api/v3** - contains API definitions - (like protos & proto-generated libraries) that defines communication protocol - between etcd clients and server. - - - **go.etcd.io/etcd/pkg/v3** - collection of utility packages used by etcd - without being specific to etcd itself. A package belongs here - only if it could possibly be moved out into its own repository in the future. - Please avoid adding here code that has a lot of dependencies on its own, as - they automatically becoming dependencies of the client library - (that we want to keep lightweight). - - - **go.etcd.io/etcd/client/v3** - client library used to contact etcd over - the network (grpc). Recommended for all new usage of etcd. - - - **go.etcd.io/etcd/client/v2** - legacy client library used to contact etcd - over HTTP protocol. Deprecated. All new usage should depend on /v3 library. 
- - - **go.etcd.io/raft/v3** - implementation of distributed consensus - protocol. Should have no etcd specific code. Hosted in a separate repository: - https://github.com/etcd-io/raft. - - - **go.etcd.io/etcd/server/v3** - etcd implementation. - The code in this package is etcd internal and should not be consumed - by external projects. The package layout and API can change within the minor versions. - - - **go.etcd.io/etcd/etcdctl/v3** - a command line tool to access and manage etcd. - - - **go.etcd.io/etcd/tests/v3** - a module that contains all integration tests of etcd. - Notice: All unit-tests (fast and not requiring cross-module dependencies) - should be kept in the local modules to the code under the test. - - - **go.etcd.io/bbolt** - implementation of persistent b-tree. - Hosted in a separate repository: https://github.com/etcd-io/bbolt. - - -### Operations - -1. All etcd modules should be released in the same versions, e.g. - `go.etcd.io/etcd/client/v3@v3.5.10` must depend on `go.etcd.io/etcd/api/v3@v3.5.10`. - - The consistent updating of versions can by performed using: - ```shell script - % DRY_RUN=false TARGET_VERSION="v3.5.10" ./scripts/release_mod.sh update_versions - ``` -2. The released modules should be tagged according to https://golang.org/ref/mod#vcs-version rules, - i.e. each module should get its own tag. - The tagging can be performed using: - ```shell script - % DRY_RUN=false REMOTE_REPO="origin" ./scripts/release_mod.sh push_mod_tags - ``` - -3. All etcd modules should depend on the same versions of underlying dependencies. - This can be verified using: - ```shell script - % PASSES="dep" ./test.sh - ``` - -4. The go.mod files must not contain dependencies not being used and must - conform to `go mod tidy` format. - This is being verified by: - ``` - % PASSES="mod_tidy" ./test.sh - ``` - -5. To trigger actions across all modules (e.g. 
auto-format all files), please - use/expand the following script: - ```shell script - % ./scripts/fix.sh - ``` - -### Future - -As a North Star, we would like to evaluate etcd modules towards following model: - -![modules graph](modules-future.svg) - -This assumes: - - Splitting etcdmigrate/etcdadm out of etcdctl binary. - Thanks to this etcdctl would become clearly a command-line wrapper - around network client API, - while etcdmigrate/etcdadm would support direct physical operations on the - etcd storage files. - - Splitting etcd-proxy out of ./etcd binary, as it contains more experimental code - so carries additional risk & dependencies. - - Deprecation of support for v2 protocol. diff --git a/Documentation/contributor-guide/modules.svg b/Documentation/contributor-guide/modules.svg deleted file mode 100644 index 5a3c3b2c39e..00000000000 --- a/Documentation/contributor-guide/modules.svg +++ /dev/null @@ -1,489 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - go.etcd.io/raft/v3 - - diff --git a/Documentation/contributor-guide/release.md b/Documentation/contributor-guide/release.md deleted file mode 100644 index 44532a2eddb..00000000000 --- a/Documentation/contributor-guide/release.md +++ /dev/null @@ -1,75 +0,0 @@ -# Release - -The guide talks about how to release a new version of etcd. - -The procedure includes some manual steps for sanity checking, but it can probably be further scripted. Please keep this document up-to-date if making changes to the release process. - -## Release management - -etcd community members are assigned to manage the release each etcd major/minor version as well as manage patches -and to each stable release branch. The managers are responsible for communicating the timelines and status of each -release and for ensuring the stability of the release branch. 
- 

| Releases | Manager | -|------------------------|-------------------------------------------------------------| -| 3.4 patch (post 3.4.0) | Benjamin Wang [@ahrtr](https://github.com/ahrtr) | -| 3.5 patch (post 3.5.0) | Marek Siarkowicz [@serathius](https://github.com/serathius) | - -All release version numbers follow the format of [semantic versioning 2.0.0](http://semver.org/). - -### Major, minor version release, or its pre-release - -- Ensure the relevant milestone on GitHub is complete. All referenced issues should be closed, or moved elsewhere. -- Ensure the latest upgrade documentation is available. -- Bump [hardcoded MinClusterVersion in the repository](https://github.com/etcd-io/etcd/blob/v3.4.15/version/version.go#L29), if necessary. -- Add feature capability maps for the new version, if necessary. - -### Patch version release - -- To request a backport, developers submit cherrypick PRs targeting the release branch. The commits should not include merge commits. The commits should be restricted to bug fixes and security patches. -- The cherrypick PRs should target the appropriate release branch (`base:release--`). `hack/patch/cherrypick.sh` may be used to automatically generate cherrypick PRs. -- The release patch manager reviews the cherrypick PRs. Please discuss carefully what is backported to the patch release. Each patch release should be strictly better than its predecessor. -- The release patch manager will cherry-pick these commits starting from the oldest one into the stable branch. - -## Write release note - -- Write an introduction for the new release. For example, what major bugs we fixed, what new features we introduced or what performance improvements we made. -- Put `[GH XXXX]` at the head of the change line to reference the Pull Request that introduces the change. Moreover, add a link on it to jump to the Pull Request. -- Find PRs with the `release-note` label and explain them in the `NEWS` file, as a straightforward summary of changes for end-users.
- -## Build and push the release artifacts - -- Ensure `docker` is available. - -Run release script in root directory: - -``` -DRY_RUN=false ./scripts/release.sh ${VERSION} -``` - -It generates all release binaries and images under directory ./release. -Binaries are pushed to gcr.io and images are pushed to quay.io and gcr.io. - -## Publish release page in GitHub - -- Set release title as the version name. -- Follow the format of previous release pages. -- Attach the generated binaries and signatures. -- Select whether it is a pre-release. -- Publish the release! - -## Announce to the etcd-dev Googlegroup - -- Follow the format of [previous release emails](https://groups.google.com/forum/#!forum/etcd-dev). -- Make sure to include a list of authors that contributed since the previous release - something like the following might be handy: - -``` -git log ...${PREV_VERSION} --pretty=format:"%an" | sort | uniq | tr '\n' ',' | sed -e 's#,#, #g' -e 's#, $##' -``` - -- Send email to etcd-dev@googlegroups.com - -## Post release - -- Create new stable branch through `git push origin ${VERSION_MAJOR}.${VERSION_MINOR}` if this is a major stable release. This assumes `origin` corresponds to "https://github.com/etcd-io/etcd". -- Bump [hardcoded Version in the repository](https://github.com/etcd-io/etcd/blob/v3.4.15/version/version.go#L30) to the version `${VERSION}+git`. diff --git a/Documentation/contributor-guide/reporting_bugs.md b/Documentation/contributor-guide/reporting_bugs.md deleted file mode 100644 index 6804d369479..00000000000 --- a/Documentation/contributor-guide/reporting_bugs.md +++ /dev/null @@ -1,45 +0,0 @@ -# Reporting bugs - -If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist. 
- 

To make the bug report accurate and easy to understand, please try to create bug reports that are: - -- Specific. Include as many details as possible: which version, what environment, what configuration, etc. If the bug is related to running the etcd server, please attach the etcd log (the starting log with etcd configuration is especially important). - -- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce, so please include the steps that might lead to the problem. If possible, please attach the affected etcd data dir and stack trace to the bug report. - -- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. It would significantly slow down the speed to fix a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on etcd is out of scope, but we are happy to provide guidance in the right direction or help with using etcd itself. - -- Unique. Do not duplicate an existing bug report. - -- Scoped. One bug per report. Do not follow up with another bug inside one report. - -It may be worthwhile to read [Elika Etemad’s article on filing good bug reports][filing-good-bugs] before creating a bug report. - -We might ask for further information to locate a bug. A duplicated bug report will be closed. - -## Frequently asked questions - -### How to get a stack trace - -``` bash -$ kill -QUIT $PID -``` - -### How to get etcd version - -``` bash -$ etcd --version -``` - -### How to get etcd configuration and log when it runs as systemd service ‘etcd2.service’ - -``` bash -$ sudo systemctl cat etcd2 -$ sudo journalctl -u etcd2 -``` - -Due to an upstream systemd bug, journald may miss the last few log lines when its processes exit. If journalctl says etcd stopped without a fatal or panic message, try `sudo journalctl -f -t etcd2` to get the full log.
- 

[etcd-issue]: https://github.com/etcd-io/etcd/issues/new -[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/ diff --git a/Documentation/contributor-guide/trige_issues.md b/Documentation/contributor-guide/trige_issues.md deleted file mode 100644 index 91ff796a015..00000000000 --- a/Documentation/contributor-guide/trige_issues.md +++ /dev/null @@ -1,46 +0,0 @@ -# Issue triage guidelines - -## Purpose - -Speed up issue management. - -The `etcd` issues are listed at https://github.com/etcd-io/etcd/issues -and are identified with labels. For example, an issue that is identified -as a bug will eventually be set to label `area/bug`. New issues will -start out without any labels, but typically `etcd` maintainers and active contributors -add labels based on their findings. The detailed list of labels can be found at -https://github.com/kubernetes/kubernetes/labels - -Following are a few predetermined searches on issues for convenience: -* [Bugs](https://github.com/etcd-io/etcd/labels/area%2Fbug) -* [Help Wanted](https://github.com/etcd-io/etcd/labels/Help%20Wanted) -* [Longest untriaged issues](https://github.com/etcd-io/etcd/issues?utf8=%E2%9C%93&q=is%3Aopen+sort%3Aupdated-asc+) - -## Scope - -These guidelines serve as a primary document for triaging incoming issues in -`etcd`. Everyone is welcome to help manage issues and PRs but the work and responsibilities discussed in this document are created with `etcd` maintainers and active contributors in mind. - -## Validate if an issue is a bug - -Validate if the issue is indeed a bug. If not, add a comment with findings and close the trivial issue. For a non-trivial issue, wait to hear back from the issue reporter and see if there is any objection. If the issue reporter does not reply in 30 days, close the issue. If the problem cannot be reproduced or requires more information, leave a comment for the issue reporter.
- 

## Inactive issues - -Issues that lack enough information from the issue reporter should be closed if the issue reporter does not provide information in 60 days. - -## Duplicate issues - -If an issue is a duplicate, add a comment stating so along with a reference for the original issue and close it. - -## Issues that don't belong to etcd - -Sometimes issues are reported that actually belong to other projects that `etcd` uses. For example, `grpc` or `golang` issues. Such issues should be addressed by asking the reporter to open issues in the appropriate other project. Close the issue unless a maintainer and issue reporter see a need to keep it open for tracking purposes. - -## Verify important labels are in place - -Make sure that the issue has labels on areas it belongs to, proper assignees are added and a milestone is identified. If any of these labels are missing, add one. If labels cannot be assigned due to limited privilege or the correct label cannot be decided, that’s fine, contact maintainers if needed. - -## Poke issue owner if needed - -If an issue owned by a developer has no PR created in 30 days, contact the issue owner and ask for a PR or to release ownership if needed. diff --git a/Documentation/contributor-guide/trige_prs.md b/Documentation/contributor-guide/trige_prs.md deleted file mode 100644 index c2b43ea765f..00000000000 --- a/Documentation/contributor-guide/trige_prs.md +++ /dev/null @@ -1,28 +0,0 @@ -# PR management - -## Purpose - -Speed up PR management. - -The `etcd` PRs are listed at https://github.com/etcd-io/etcd/pulls -A PR can have various labels, milestone, reviewer etc.
The detailed list of labels can be found at -https://github.com/kubernetes/kubernetes/labels - -Following are a few example searches on PRs for convenience: -* [Open PRs for milestone etcd-v3.6](https://github.com/etcd-io/etcd/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+milestone%3Aetcd-v3.6) -* [PRs under investigation](https://github.com/etcd-io/etcd/labels/Investigating) - -## Scope - -These guidelines serve as a primary document for managing PRs in `etcd`. Everyone is welcome to help manage PRs but the work and responsibilities discussed in this document are created with `etcd` maintainers and active contributors in mind. - -## Handle inactive PRs -Poke the PR owner if review comments are not addressed in 15 days. If the PR owner does not reply in 90 days, update the PR with a new commit if possible. If not, the inactive PR should be closed after 180 days. - -## Poke reviewer if needed - -Reviewers are responsive in a timely fashion, but considering everyone is busy, give them some time after requesting review if a quick response is not provided. If a response is not provided in 10 days, feel free to contact them by adding a comment in the PR or sending an email or message on Slack. - -## Verify important labels are in place - -Make sure that appropriate reviewers are added to the PR. Also, make sure that a milestone is identified. If any of these or other important labels are missing, add them. If a correct label cannot be decided, leave a comment for the maintainers to do so as needed.
diff --git a/Documentation/dev-guide/apispec/swagger/rpc.swagger.json b/Documentation/dev-guide/apispec/swagger/rpc.swagger.json deleted file mode 100644 index ca896fb501c..00000000000 --- a/Documentation/dev-guide/apispec/swagger/rpc.swagger.json +++ /dev/null @@ -1,3054 +0,0 @@ -{ - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "swagger": "2.0", - "info": { - "title": "api/etcdserverpb/rpc.proto", - "version": "version not set" - }, - "paths": { - "/v3/auth/authenticate": { - "post": { - "tags": [ - "Auth" - ], - "summary": "Authenticate processes an authenticate request.", - "operationId": "Auth_Authenticate", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthenticateRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthenticateResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/disable": { - "post": { - "tags": [ - "Auth" - ], - "summary": "AuthDisable disables authentication.", - "operationId": "Auth_AuthDisable", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthDisableRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthDisableResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/enable": { - "post": { - "tags": [ - "Auth" - ], - "summary": "AuthEnable enables authentication.", - "operationId": "Auth_AuthEnable", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": 
"#/definitions/etcdserverpbAuthEnableRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthEnableResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/role/add": { - "post": { - "tags": [ - "Auth" - ], - "summary": "RoleAdd adds a new role. Role name cannot be empty.", - "operationId": "Auth_RoleAdd", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleAddRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleAddResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/role/delete": { - "post": { - "tags": [ - "Auth" - ], - "summary": "RoleDelete deletes a specified role.", - "operationId": "Auth_RoleDelete", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleDeleteRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleDeleteResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/role/get": { - "post": { - "tags": [ - "Auth" - ], - "summary": "RoleGet gets detailed role information.", - "operationId": "Auth_RoleGet", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleGetRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": 
"#/definitions/etcdserverpbAuthRoleGetResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/role/grant": { - "post": { - "tags": [ - "Auth" - ], - "summary": "RoleGrantPermission grants a permission of a specified key or range to a specified role.", - "operationId": "Auth_RoleGrantPermission", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/role/list": { - "post": { - "tags": [ - "Auth" - ], - "summary": "RoleList gets lists of all roles.", - "operationId": "Auth_RoleList", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleListRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleListResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/role/revoke": { - "post": { - "tags": [ - "Auth" - ], - "summary": "RoleRevokePermission revokes a key or range permission of a specified role.", - "operationId": "Auth_RoleRevokePermission", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleRevokePermissionRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": 
"#/definitions/etcdserverpbAuthRoleRevokePermissionResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/status": { - "post": { - "tags": [ - "Auth" - ], - "summary": "AuthStatus displays authentication status.", - "operationId": "Auth_AuthStatus", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthStatusRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/user/add": { - "post": { - "tags": [ - "Auth" - ], - "summary": "UserAdd adds a new user. User name cannot be empty.", - "operationId": "Auth_UserAdd", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserAddRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserAddResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/user/changepw": { - "post": { - "tags": [ - "Auth" - ], - "summary": "UserChangePassword changes the password of a specified user.", - "operationId": "Auth_UserChangePassword", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordResponse" - } - }, - "default": { - "description": "An unexpected 
error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/user/delete": { - "post": { - "tags": [ - "Auth" - ], - "summary": "UserDelete deletes a specified user.", - "operationId": "Auth_UserDelete", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserDeleteRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserDeleteResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/user/get": { - "post": { - "tags": [ - "Auth" - ], - "summary": "UserGet gets detailed user information.", - "operationId": "Auth_UserGet", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserGetRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserGetResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/user/grant": { - "post": { - "tags": [ - "Auth" - ], - "summary": "UserGrant grants a role to a specified user.", - "operationId": "Auth_UserGrantRole", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/user/list": { - "post": { - "tags": [ - "Auth" - ], 
- "summary": "UserList gets a list of all users.", - "operationId": "Auth_UserList", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserListRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserListResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/auth/user/revoke": { - "post": { - "tags": [ - "Auth" - ], - "summary": "UserRevokeRole revokes a role of specified user.", - "operationId": "Auth_UserRevokeRole", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/cluster/member/add": { - "post": { - "tags": [ - "Cluster" - ], - "summary": "MemberAdd adds a member into the cluster.", - "operationId": "Cluster_MemberAdd", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbMemberAddRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberAddResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/cluster/member/list": { - "post": { - "tags": [ - "Cluster" - ], - "summary": "MemberList lists all the members in the cluster.", - "operationId": "Cluster_MemberList", - "parameters": [ - { - "name": 
"body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbMemberListRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberListResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/cluster/member/promote": { - "post": { - "tags": [ - "Cluster" - ], - "summary": "MemberPromote promotes a member from raft learner (non-voting) to raft voting member.", - "operationId": "Cluster_MemberPromote", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbMemberPromoteRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberPromoteResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/cluster/member/remove": { - "post": { - "tags": [ - "Cluster" - ], - "summary": "MemberRemove removes an existing member from the cluster.", - "operationId": "Cluster_MemberRemove", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbMemberRemoveRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberRemoveResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/cluster/member/update": { - "post": { - "tags": [ - "Cluster" - ], - "summary": "MemberUpdate updates the member configuration.", - "operationId": "Cluster_MemberUpdate", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": 
{ - "$ref": "#/definitions/etcdserverpbMemberUpdateRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberUpdateResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/compaction": { - "post": { - "tags": [ - "KV" - ], - "summary": "Compact compacts the event history in the etcd key-value store. The key-value\nstore should be periodically compacted or the event history will continue to grow\nindefinitely.", - "operationId": "KV_Compact", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbCompactionRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbCompactionResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/deleterange": { - "post": { - "tags": [ - "KV" - ], - "summary": "DeleteRange deletes the given range from the key-value store.\nA delete request increments the revision of the key-value store\nand generates a delete event in the event history for every deleted key.", - "operationId": "KV_DeleteRange", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbDeleteRangeRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbDeleteRangeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/lease/leases": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseLeases lists all existing leases.", - 
"operationId": "Lease_LeaseLeases2", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseLeasesResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/lease/revoke": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.", - "operationId": "Lease_LeaseRevoke2", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseRevokeRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/lease/timetolive": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseTimeToLive retrieves lease information.", - "operationId": "Lease_LeaseTimeToLive2", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/put": { - "post": { - "tags": [ - "KV" - ], - "summary": "Put puts the given key into the key-value store.\nA put request increments the revision of the key-value store\nand generates one event in the event 
history.", - "operationId": "KV_Put", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbPutRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbPutResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/range": { - "post": { - "tags": [ - "KV" - ], - "summary": "Range gets the keys in the range from the key-value store.", - "operationId": "KV_Range", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbRangeRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbRangeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/kv/txn": { - "post": { - "tags": [ - "KV" - ], - "summary": "Txn processes multiple requests in a single transaction.\nA txn request increments the revision of the key-value store\nand generates events with the same revision for every completed request.\nIt is not allowed to modify the same key several times within one txn.", - "operationId": "KV_Txn", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbTxnRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbTxnResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/lease/grant": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseGrant creates a lease which expires if the server 
does not receive a keepAlive\nwithin a given time to live period. All keys attached to the lease will be expired and\ndeleted if the lease expires. Each expired key generates a delete event in the event history.", - "operationId": "Lease_LeaseGrant", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseGrantRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseGrantResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/lease/keepalive": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client\nto the server and streaming keep alive responses from the server to the client.", - "operationId": "Lease_LeaseKeepAlive", - "parameters": [ - { - "description": " (streaming inputs)", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseKeepAliveRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "title": "Stream result of etcdserverpbLeaseKeepAliveResponse", - "properties": { - "error": { - "$ref": "#/definitions/runtimeStreamError" - }, - "result": { - "$ref": "#/definitions/etcdserverpbLeaseKeepAliveResponse" - } - } - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/lease/leases": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseLeases lists all existing leases.", - "operationId": "Lease_LeaseLeases", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest" 
- } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseLeasesResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/lease/revoke": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.", - "operationId": "Lease_LeaseRevoke", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseRevokeRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/lease/timetolive": { - "post": { - "tags": [ - "Lease" - ], - "summary": "LeaseTimeToLive retrieves lease information.", - "operationId": "Lease_LeaseTimeToLive", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/maintenance/alarm": { - "post": { - "tags": [ - "Maintenance" - ], - "summary": "Alarm activates, deactivates, and queries alarms regarding cluster health.", - "operationId": "Maintenance_Alarm", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbAlarmRequest" - } - } - ], - "responses": { - "200": { - "description": "A 
successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbAlarmResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/maintenance/defragment": { - "post": { - "tags": [ - "Maintenance" - ], - "summary": "Defragment defragments a member's backend database to recover storage space.", - "operationId": "Maintenance_Defragment", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbDefragmentRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbDefragmentResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/maintenance/downgrade": { - "post": { - "tags": [ - "Maintenance" - ], - "summary": "Downgrade requests downgrades, verifies feasibility or cancels downgrade\non the cluster version.\nSupported since etcd 3.5.", - "operationId": "Maintenance_Downgrade", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbDowngradeRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbDowngradeResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/maintenance/hash": { - "post": { - "tags": [ - "Maintenance" - ], - "summary": "HashKV computes the hash of all MVCC keys up to a given revision.\nIt only iterates \"key\" bucket in backend storage.", - "operationId": "Maintenance_HashKV", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbHashKVRequest" - 
} - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbHashKVResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/maintenance/snapshot": { - "post": { - "tags": [ - "Maintenance" - ], - "summary": "Snapshot sends a snapshot of the entire backend from a member over a stream to a client.", - "operationId": "Maintenance_Snapshot", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbSnapshotRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "title": "Stream result of etcdserverpbSnapshotResponse", - "properties": { - "error": { - "$ref": "#/definitions/runtimeStreamError" - }, - "result": { - "$ref": "#/definitions/etcdserverpbSnapshotResponse" - } - } - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/maintenance/status": { - "post": { - "tags": [ - "Maintenance" - ], - "summary": "Status gets the status of the member.", - "operationId": "Maintenance_Status", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbStatusRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbStatusResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/maintenance/transfer-leadership": { - "post": { - "tags": [ - "Maintenance" - ], - "summary": "MoveLeader requests current leader node to transfer its leadership to transferee.", - "operationId": 
"Maintenance_MoveLeader", - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbMoveLeaderRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/etcdserverpbMoveLeaderResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - }, - "/v3/watch": { - "post": { - "tags": [ - "Watch" - ], - "summary": "Watch watches for events happening or that have happened. Both input and output\nare streams; the input stream is for creating and canceling watchers and the output\nstream sends events. One watch RPC can watch on multiple key ranges, streaming events\nfor several watches at once. The entire event history can be watched starting from the\nlast compaction revision.", - "operationId": "Watch_Watch", - "parameters": [ - { - "description": " (streaming inputs)", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/etcdserverpbWatchRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "title": "Stream result of etcdserverpbWatchResponse", - "properties": { - "error": { - "$ref": "#/definitions/runtimeStreamError" - }, - "result": { - "$ref": "#/definitions/etcdserverpbWatchResponse" - } - } - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - } - } - } - }, - "definitions": { - "AlarmRequestAlarmAction": { - "type": "string", - "default": "GET", - "enum": [ - "GET", - "ACTIVATE", - "DEACTIVATE" - ] - }, - "CompareCompareResult": { - "type": "string", - "default": "EQUAL", - "enum": [ - "EQUAL", - "GREATER", - "LESS", - "NOT_EQUAL" - ] - }, - "CompareCompareTarget": { - "type": "string", - "default": "VERSION", - "enum": 
[ - "VERSION", - "CREATE", - "MOD", - "VALUE", - "LEASE" - ] - }, - "DowngradeRequestDowngradeAction": { - "type": "string", - "default": "VALIDATE", - "enum": [ - "VALIDATE", - "ENABLE", - "CANCEL" - ] - }, - "EventEventType": { - "type": "string", - "default": "PUT", - "enum": [ - "PUT", - "DELETE" - ] - }, - "RangeRequestSortOrder": { - "type": "string", - "default": "NONE", - "enum": [ - "NONE", - "ASCEND", - "DESCEND" - ] - }, - "RangeRequestSortTarget": { - "type": "string", - "default": "KEY", - "enum": [ - "KEY", - "VERSION", - "CREATE", - "MOD", - "VALUE" - ] - }, - "WatchCreateRequestFilterType": { - "description": " - NOPUT: filter out put event.\n - NODELETE: filter out delete event.", - "type": "string", - "default": "NOPUT", - "enum": [ - "NOPUT", - "NODELETE" - ] - }, - "authpbPermission": { - "type": "object", - "title": "Permission is a single entity", - "properties": { - "key": { - "type": "string", - "format": "byte" - }, - "permType": { - "$ref": "#/definitions/authpbPermissionType" - }, - "range_end": { - "type": "string", - "format": "byte" - } - } - }, - "authpbPermissionType": { - "type": "string", - "default": "READ", - "enum": [ - "READ", - "WRITE", - "READWRITE" - ] - }, - "authpbUserAddOptions": { - "type": "object", - "properties": { - "no_password": { - "type": "boolean" - } - } - }, - "etcdserverpbAlarmMember": { - "type": "object", - "properties": { - "alarm": { - "description": "alarm is the type of alarm which has been raised.", - "$ref": "#/definitions/etcdserverpbAlarmType" - }, - "memberID": { - "description": "memberID is the ID of the member associated with the raised alarm.", - "type": "string", - "format": "uint64" - } - } - }, - "etcdserverpbAlarmRequest": { - "type": "object", - "properties": { - "action": { - "description": "action is the kind of alarm request to issue. 
The action\nmay GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a\nraised alarm.", - "$ref": "#/definitions/AlarmRequestAlarmAction" - }, - "alarm": { - "description": "alarm is the type of alarm to consider for this request.", - "$ref": "#/definitions/etcdserverpbAlarmType" - }, - "memberID": { - "description": "memberID is the ID of the member associated with the alarm. If memberID is 0, the\nalarm request covers all members.", - "type": "string", - "format": "uint64" - } - } - }, - "etcdserverpbAlarmResponse": { - "type": "object", - "properties": { - "alarms": { - "description": "alarms is a list of alarms associated with the alarm request.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbAlarmMember" - } - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAlarmType": { - "type": "string", - "default": "NONE", - "enum": [ - "NONE", - "NOSPACE", - "CORRUPT" - ] - }, - "etcdserverpbAuthDisableRequest": { - "type": "object" - }, - "etcdserverpbAuthDisableResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthEnableRequest": { - "type": "object" - }, - "etcdserverpbAuthEnableResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthRoleAddRequest": { - "type": "object", - "properties": { - "name": { - "description": "name is the name of the role to add to the authentication system.", - "type": "string" - } - } - }, - "etcdserverpbAuthRoleAddResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthRoleDeleteRequest": { - "type": "object", - "properties": { - "role": { - "type": "string" - } - } - }, - "etcdserverpbAuthRoleDeleteResponse": { - "type": "object", - "properties": { - "header": { - "$ref": 
"#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthRoleGetRequest": { - "type": "object", - "properties": { - "role": { - "type": "string" - } - } - }, - "etcdserverpbAuthRoleGetResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "perm": { - "type": "array", - "items": { - "$ref": "#/definitions/authpbPermission" - } - } - } - }, - "etcdserverpbAuthRoleGrantPermissionRequest": { - "type": "object", - "properties": { - "name": { - "description": "name is the name of the role which will be granted the permission.", - "type": "string" - }, - "perm": { - "description": "perm is the permission to grant to the role.", - "$ref": "#/definitions/authpbPermission" - } - } - }, - "etcdserverpbAuthRoleGrantPermissionResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthRoleListRequest": { - "type": "object" - }, - "etcdserverpbAuthRoleListResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "roles": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "etcdserverpbAuthRoleRevokePermissionRequest": { - "type": "object", - "properties": { - "key": { - "type": "string", - "format": "byte" - }, - "range_end": { - "type": "string", - "format": "byte" - }, - "role": { - "type": "string" - } - } - }, - "etcdserverpbAuthRoleRevokePermissionResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthStatusRequest": { - "type": "object" - }, - "etcdserverpbAuthStatusResponse": { - "type": "object", - "properties": { - "authRevision": { - "type": "string", - "format": "uint64", - "title": "authRevision is the current revision of auth store" - }, - "enabled": { - "type": "boolean" - }, - "header": { - "$ref": 
"#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthUserAddRequest": { - "type": "object", - "properties": { - "hashedPassword": { - "type": "string" - }, - "name": { - "type": "string" - }, - "options": { - "$ref": "#/definitions/authpbUserAddOptions" - }, - "password": { - "type": "string" - } - } - }, - "etcdserverpbAuthUserAddResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthUserChangePasswordRequest": { - "type": "object", - "properties": { - "hashedPassword": { - "description": "hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.", - "type": "string" - }, - "name": { - "description": "name is the name of the user whose password is being changed.", - "type": "string" - }, - "password": { - "description": "password is the new password for the user. Note that this field will be removed in the API layer.", - "type": "string" - } - } - }, - "etcdserverpbAuthUserChangePasswordResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthUserDeleteRequest": { - "type": "object", - "properties": { - "name": { - "description": "name is the name of the user to delete.", - "type": "string" - } - } - }, - "etcdserverpbAuthUserDeleteResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthUserGetRequest": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, - "etcdserverpbAuthUserGetResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "roles": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "etcdserverpbAuthUserGrantRoleRequest": { - "type": "object", - "properties": { - "role": { - 
"description": "role is the name of the role to grant to the user.", - "type": "string" - }, - "user": { - "description": "user is the name of the user which should be granted a given role.", - "type": "string" - } - } - }, - "etcdserverpbAuthUserGrantRoleResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthUserListRequest": { - "type": "object" - }, - "etcdserverpbAuthUserListResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "users": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "etcdserverpbAuthUserRevokeRoleRequest": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "role": { - "type": "string" - } - } - }, - "etcdserverpbAuthUserRevokeRoleResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbAuthenticateRequest": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "password": { - "type": "string" - } - } - }, - "etcdserverpbAuthenticateResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "token": { - "type": "string", - "title": "token is an authorized token that can be used in succeeding RPCs" - } - } - }, - "etcdserverpbCompactionRequest": { - "description": "CompactionRequest compacts the key-value store up to a given revision. 
All superseded keys\nwith a revision less than the compaction revision will be removed.", - "type": "object", - "properties": { - "physical": { - "description": "physical is set so the RPC will wait until the compaction is physically\napplied to the local database such that compacted entries are totally\nremoved from the backend database.", - "type": "boolean" - }, - "revision": { - "description": "revision is the key-value store revision for the compaction operation.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbCompactionResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbCompare": { - "type": "object", - "properties": { - "create_revision": { - "type": "string", - "format": "int64", - "title": "create_revision is the creation revision of the given key" - }, - "key": { - "description": "key is the subject key for the comparison operation.", - "type": "string", - "format": "byte" - }, - "lease": { - "description": "lease is the lease id of the given key.", - "type": "string", - "format": "int64" - }, - "mod_revision": { - "description": "mod_revision is the last modified revision of the given key.", - "type": "string", - "format": "int64" - }, - "range_end": { - "description": "range_end compares the given target to all keys in the range [key, range_end).\nSee RangeRequest for more details on key ranges.", - "type": "string", - "format": "byte" - }, - "result": { - "description": "result is logical comparison operation for this comparison.", - "$ref": "#/definitions/CompareCompareResult" - }, - "target": { - "description": "target is the key-value field to inspect for the comparison.", - "$ref": "#/definitions/CompareCompareTarget" - }, - "value": { - "description": "value is the value of the given key, in bytes.", - "type": "string", - "format": "byte" - }, - "version": { - "type": "string", - "format": "int64", - "title": "version is the version 
of the given key" - } - } - }, - "etcdserverpbDefragmentRequest": { - "type": "object" - }, - "etcdserverpbDefragmentResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbDeleteRangeRequest": { - "type": "object", - "properties": { - "key": { - "description": "key is the first key to delete in the range.", - "type": "string", - "format": "byte" - }, - "prev_kv": { - "description": "If prev_kv is set, etcd gets the previous key-value pairs before deleting it.\nThe previous key-value pairs will be returned in the delete response.", - "type": "boolean" - }, - "range_end": { - "description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is one bit larger than the given key, then the range is all the keys\nwith the prefix (the given key).\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument.", - "type": "string", - "format": "byte" - } - } - }, - "etcdserverpbDeleteRangeResponse": { - "type": "object", - "properties": { - "deleted": { - "description": "deleted is the number of keys deleted by the delete range request.", - "type": "string", - "format": "int64" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "prev_kvs": { - "description": "if prev_kv is set in the request, the previous key-value pairs will be returned.", - "type": "array", - "items": { - "$ref": "#/definitions/mvccpbKeyValue" - } - } - } - }, - "etcdserverpbDowngradeRequest": { - "type": "object", - "properties": { - "action": { - "description": "action is the kind of downgrade request to issue. 
The action may\nVALIDATE the target version, DOWNGRADE the cluster version,\nor CANCEL the current downgrading job.", - "$ref": "#/definitions/DowngradeRequestDowngradeAction" - }, - "version": { - "description": "version is the target version to downgrade.", - "type": "string" - } - } - }, - "etcdserverpbDowngradeResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "version": { - "description": "version is the current cluster version.", - "type": "string" - } - } - }, - "etcdserverpbHashKVRequest": { - "type": "object", - "properties": { - "revision": { - "description": "revision is the key-value store revision for the hash operation.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbHashKVResponse": { - "type": "object", - "properties": { - "compact_revision": { - "description": "compact_revision is the compacted revision of key-value store when hash begins.", - "type": "string", - "format": "int64" - }, - "hash": { - "description": "hash is the hash value computed from the responding member's MVCC keys up to a given revision.", - "type": "integer", - "format": "int64" - }, - "hash_revision": { - "description": "hash_revision is the revision up to which the hash is calculated.", - "type": "string", - "format": "int64" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbHashRequest": { - "type": "object" - }, - "etcdserverpbHashResponse": { - "type": "object", - "properties": { - "hash": { - "description": "hash is the hash value computed from the responding member's KV's backend.", - "type": "integer", - "format": "int64" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbLeaseGrantRequest": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the requested ID for the lease. 
If ID is set to 0, the lessor chooses an ID.", - "type": "string", - "format": "int64" - }, - "TTL": { - "description": "TTL is the advisory time-to-live in seconds. Expired lease will return -1.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbLeaseGrantResponse": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the lease ID for the granted lease.", - "type": "string", - "format": "int64" - }, - "TTL": { - "description": "TTL is the server chosen lease time-to-live in seconds.", - "type": "string", - "format": "int64" - }, - "error": { - "type": "string" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbLeaseKeepAliveRequest": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the lease ID for the lease to keep alive.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbLeaseKeepAliveResponse": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the lease ID from the keep alive request.", - "type": "string", - "format": "int64" - }, - "TTL": { - "description": "TTL is the new time-to-live for the lease.", - "type": "string", - "format": "int64" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbLeaseLeasesRequest": { - "type": "object" - }, - "etcdserverpbLeaseLeasesResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "leases": { - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbLeaseStatus" - } - } - } - }, - "etcdserverpbLeaseRevokeRequest": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the lease ID to revoke. 
When the ID is revoked, all associated keys will be deleted.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbLeaseRevokeResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbLeaseStatus": { - "type": "object", - "properties": { - "ID": { - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbLeaseTimeToLiveRequest": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the lease ID for the lease.", - "type": "string", - "format": "int64" - }, - "keys": { - "description": "keys is true to query all the keys attached to this lease.", - "type": "boolean" - } - } - }, - "etcdserverpbLeaseTimeToLiveResponse": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the lease ID from the keep alive request.", - "type": "string", - "format": "int64" - }, - "TTL": { - "description": "TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.", - "type": "string", - "format": "int64" - }, - "grantedTTL": { - "description": "GrantedTTL is the initial granted time in seconds upon lease creation/renewal.", - "type": "string", - "format": "int64" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "keys": { - "description": "Keys is the list of keys attached to this lease.", - "type": "array", - "items": { - "type": "string", - "format": "byte" - } - } - } - }, - "etcdserverpbMember": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the member ID for this member.", - "type": "string", - "format": "uint64" - }, - "clientURLs": { - "description": "clientURLs is the list of URLs the member exposes to clients for communication. 
If the member is not started, clientURLs will be empty.", - "type": "array", - "items": { - "type": "string" - } - }, - "isLearner": { - "description": "isLearner indicates if the member is raft learner.", - "type": "boolean" - }, - "name": { - "description": "name is the human-readable name of the member. If the member is not started, the name will be an empty string.", - "type": "string" - }, - "peerURLs": { - "description": "peerURLs is the list of URLs the member exposes to the cluster for communication.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "etcdserverpbMemberAddRequest": { - "type": "object", - "properties": { - "isLearner": { - "description": "isLearner indicates if the added member is raft learner.", - "type": "boolean" - }, - "peerURLs": { - "description": "peerURLs is the list of URLs the added member will use to communicate with the cluster.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "etcdserverpbMemberAddResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "member": { - "description": "member is the member information for the added member.", - "$ref": "#/definitions/etcdserverpbMember" - }, - "members": { - "description": "members is a list of all members after adding the new member.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbMember" - } - } - } - }, - "etcdserverpbMemberListRequest": { - "type": "object", - "properties": { - "linearizable": { - "type": "boolean" - } - } - }, - "etcdserverpbMemberListResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "members": { - "description": "members is a list of all members associated with the cluster.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbMember" - } - } - } - }, - "etcdserverpbMemberPromoteRequest": { - "type": "object", - "properties": { - "ID": 
{ - "description": "ID is the member ID of the member to promote.", - "type": "string", - "format": "uint64" - } - } - }, - "etcdserverpbMemberPromoteResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "members": { - "description": "members is a list of all members after promoting the member.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbMember" - } - } - } - }, - "etcdserverpbMemberRemoveRequest": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the member ID of the member to remove.", - "type": "string", - "format": "uint64" - } - } - }, - "etcdserverpbMemberRemoveResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "members": { - "description": "members is a list of all members after removing the member.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbMember" - } - } - } - }, - "etcdserverpbMemberUpdateRequest": { - "type": "object", - "properties": { - "ID": { - "description": "ID is the member ID of the member to update.", - "type": "string", - "format": "uint64" - }, - "peerURLs": { - "description": "peerURLs is the new list of URLs the member will use to communicate with the cluster.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "etcdserverpbMemberUpdateResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "members": { - "description": "members is a list of all members after updating the member.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbMember" - } - } - } - }, - "etcdserverpbMoveLeaderRequest": { - "type": "object", - "properties": { - "targetID": { - "description": "targetID is the node ID for the new leader.", - "type": "string", - "format": "uint64" - } - } - }, - "etcdserverpbMoveLeaderResponse": { - "type": 
"object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "etcdserverpbPutRequest": { - "type": "object", - "properties": { - "ignore_lease": { - "description": "If ignore_lease is set, etcd updates the key using its current lease.\nReturns an error if the key does not exist.", - "type": "boolean" - }, - "ignore_value": { - "description": "If ignore_value is set, etcd updates the key using its current value.\nReturns an error if the key does not exist.", - "type": "boolean" - }, - "key": { - "description": "key is the key, in bytes, to put into the key-value store.", - "type": "string", - "format": "byte" - }, - "lease": { - "description": "lease is the lease ID to associate with the key in the key-value store. A lease\nvalue of 0 indicates no lease.", - "type": "string", - "format": "int64" - }, - "prev_kv": { - "description": "If prev_kv is set, etcd gets the previous key-value pair before changing it.\nThe previous key-value pair will be returned in the put response.", - "type": "boolean" - }, - "value": { - "description": "value is the value, in bytes, to associate with the key in the key-value store.", - "type": "string", - "format": "byte" - } - } - }, - "etcdserverpbPutResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "prev_kv": { - "description": "if prev_kv is set in the request, the previous key-value pair will be returned.", - "$ref": "#/definitions/mvccpbKeyValue" - } - } - }, - "etcdserverpbRangeRequest": { - "type": "object", - "properties": { - "count_only": { - "description": "count_only when set returns only the count of the keys in the range.", - "type": "boolean" - }, - "key": { - "description": "key is the first key for the range. 
If range_end is not given, the request only looks up key.", - "type": "string", - "format": "byte" - }, - "keys_only": { - "description": "keys_only when set returns only the keys and not the values.", - "type": "boolean" - }, - "limit": { - "description": "limit is a limit on the number of keys returned for the request. When limit is set to 0,\nit is treated as no limit.", - "type": "string", - "format": "int64" - }, - "max_create_revision": { - "description": "max_create_revision is the upper bound for returned key create revisions; all keys with\ngreater create revisions will be filtered away.", - "type": "string", - "format": "int64" - }, - "max_mod_revision": { - "description": "max_mod_revision is the upper bound for returned key mod revisions; all keys with\ngreater mod revisions will be filtered away.", - "type": "string", - "format": "int64" - }, - "min_create_revision": { - "description": "min_create_revision is the lower bound for returned key create revisions; all keys with\nlesser create revisions will be filtered away.", - "type": "string", - "format": "int64" - }, - "min_mod_revision": { - "description": "min_mod_revision is the lower bound for returned key mod revisions; all keys with\nlesser mod revisions will be filtered away.", - "type": "string", - "format": "int64" - }, - "range_end": { - "description": "range_end is the upper bound on the requested range [key, range_end).\nIf range_end is '\\0', the range is all keys \u003e= key.\nIf range_end is key plus one (e.g., \"aa\"+1 == \"ab\", \"a\\xff\"+1 == \"b\"),\nthen the range request gets all keys prefixed with key.\nIf both key and range_end are '\\0', then the range request returns all keys.", - "type": "string", - "format": "byte" - }, - "revision": { - "description": "revision is the point-in-time of the key-value store to use for the range.\nIf revision is less or equal to zero, the range is over the newest key-value store.\nIf the revision has been compacted, ErrCompacted is returned as a 
response.", - "type": "string", - "format": "int64" - }, - "serializable": { - "description": "serializable sets the range request to use serializable member-local reads.\nRange requests are linearizable by default; linearizable requests have higher\nlatency and lower throughput than serializable requests but reflect the current\nconsensus of the cluster. For better performance, in exchange for possible stale reads,\na serializable range request is served locally without needing to reach consensus\nwith other nodes in the cluster.", - "type": "boolean" - }, - "sort_order": { - "description": "sort_order is the order for returned sorted results.", - "$ref": "#/definitions/RangeRequestSortOrder" - }, - "sort_target": { - "description": "sort_target is the key-value field to use for sorting.", - "$ref": "#/definitions/RangeRequestSortTarget" - } - } - }, - "etcdserverpbRangeResponse": { - "type": "object", - "properties": { - "count": { - "description": "count is set to the number of keys within the range when requested.", - "type": "string", - "format": "int64" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "kvs": { - "description": "kvs is the list of key-value pairs matched by the range request.\nkvs is empty when count is requested.", - "type": "array", - "items": { - "$ref": "#/definitions/mvccpbKeyValue" - } - }, - "more": { - "description": "more indicates if there are more keys to return in the requested range.", - "type": "boolean" - } - } - }, - "etcdserverpbRequestOp": { - "type": "object", - "properties": { - "request_delete_range": { - "$ref": "#/definitions/etcdserverpbDeleteRangeRequest" - }, - "request_put": { - "$ref": "#/definitions/etcdserverpbPutRequest" - }, - "request_range": { - "$ref": "#/definitions/etcdserverpbRangeRequest" - }, - "request_txn": { - "$ref": "#/definitions/etcdserverpbTxnRequest" - } - } - }, - "etcdserverpbResponseHeader": { - "type": "object", - "properties": { - "cluster_id": { - 
"description": "cluster_id is the ID of the cluster which sent the response.", - "type": "string", - "format": "uint64" - }, - "member_id": { - "description": "member_id is the ID of the member which sent the response.", - "type": "string", - "format": "uint64" - }, - "raft_term": { - "description": "raft_term is the raft term when the request was applied.", - "type": "string", - "format": "uint64" - }, - "revision": { - "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbResponseOp": { - "type": "object", - "properties": { - "response_delete_range": { - "$ref": "#/definitions/etcdserverpbDeleteRangeResponse" - }, - "response_put": { - "$ref": "#/definitions/etcdserverpbPutResponse" - }, - "response_range": { - "$ref": "#/definitions/etcdserverpbRangeResponse" - }, - "response_txn": { - "$ref": "#/definitions/etcdserverpbTxnResponse" - } - } - }, - "etcdserverpbSnapshotRequest": { - "type": "object" - }, - "etcdserverpbSnapshotResponse": { - "type": "object", - "properties": { - "blob": { - "description": "blob contains the next chunk of the snapshot in the snapshot stream.", - "type": "string", - "format": "byte" - }, - "header": { - "description": "header has the current key-value store information. 
The first header in the snapshot\nstream indicates the point in time of the snapshot.", - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "remaining_bytes": { - "type": "string", - "format": "uint64", - "title": "remaining_bytes is the number of blob bytes to be sent after this message" - }, - "version": { - "description": "local version of server that created the snapshot.\nIn cluster with binaries with different version, each cluster can return different result.\nInforms which etcd server version should be used when restoring the snapshot.", - "type": "string" - } - } - }, - "etcdserverpbStatusRequest": { - "type": "object" - }, - "etcdserverpbStatusResponse": { - "type": "object", - "properties": { - "dbSize": { - "description": "dbSize is the size of the backend database physically allocated, in bytes, of the responding member.", - "type": "string", - "format": "int64" - }, - "dbSizeInUse": { - "description": "dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.", - "type": "string", - "format": "int64" - }, - "errors": { - "description": "errors contains alarm/health information and status.", - "type": "array", - "items": { - "type": "string" - } - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "isLearner": { - "description": "isLearner indicates if the member is raft learner.", - "type": "boolean" - }, - "leader": { - "description": "leader is the member ID which the responding member believes is the current leader.", - "type": "string", - "format": "uint64" - }, - "raftAppliedIndex": { - "description": "raftAppliedIndex is the current raft applied index of the responding member.", - "type": "string", - "format": "uint64" - }, - "raftIndex": { - "description": "raftIndex is the current raft committed index of the responding member.", - "type": "string", - "format": "uint64" - }, - "raftTerm": { - "description": "raftTerm is the current raft term of the responding 
member.", - "type": "string", - "format": "uint64" - }, - "storageVersion": { - "description": "storageVersion is the version of the db file. It might be get updated with delay in relationship to the target cluster version.", - "type": "string" - }, - "version": { - "description": "version is the cluster protocol version used by the responding member.", - "type": "string" - } - } - }, - "etcdserverpbTxnRequest": { - "description": "From google paxosdb paper:\nOur implementation hinges around a powerful primitive which we call MultiOp. All other database\noperations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically\nand consists of three components:\n1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check\nfor the absence or presence of a value, or compare with a given value. Two different tests in the guard\nmay apply to the same or different entries in the database. All tests in the guard are applied and\nMultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise\nit executes f op (see item 3 below).\n2. A list of database operations called t op. Each operation in the list is either an insert, delete, or\nlookup operation, and applies to a single database entry. Two different operations in the list may apply\nto the same or different entries in the database. These operations are executed\nif guard evaluates to\ntrue.\n3. A list of database operations called f op. 
Like t op, but executed if guard evaluates to false.", - "type": "object", - "properties": { - "compare": { - "description": "compare is a list of predicates representing a conjunction of terms.\nIf the comparisons succeed, then the success requests will be processed in order,\nand the response will contain their respective responses in order.\nIf the comparisons fail, then the failure requests will be processed in order,\nand the response will contain their respective responses in order.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbCompare" - } - }, - "failure": { - "description": "failure is a list of requests which will be applied when compare evaluates to false.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbRequestOp" - } - }, - "success": { - "description": "success is a list of requests which will be applied when compare evaluates to true.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbRequestOp" - } - } - } - }, - "etcdserverpbTxnResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "responses": { - "description": "responses is a list of responses corresponding to the results from applying\nsuccess if succeeded is true or failure if succeeded is false.", - "type": "array", - "items": { - "$ref": "#/definitions/etcdserverpbResponseOp" - } - }, - "succeeded": { - "description": "succeeded is set to true if the compare evaluated to true or false otherwise.", - "type": "boolean" - } - } - }, - "etcdserverpbWatchCancelRequest": { - "type": "object", - "properties": { - "watch_id": { - "description": "watch_id is the watcher id to cancel so that no more events are transmitted.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbWatchCreateRequest": { - "type": "object", - "properties": { - "filters": { - "description": "filters filter the events at server side before it sends back to the watcher.", - 
"type": "array", - "items": { - "$ref": "#/definitions/WatchCreateRequestFilterType" - } - }, - "fragment": { - "description": "fragment enables splitting large revisions into multiple watch responses.", - "type": "boolean" - }, - "key": { - "description": "key is the key to register for watching.", - "type": "string", - "format": "byte" - }, - "prev_kv": { - "description": "If prev_kv is set, created watcher gets the previous KV before the event happens.\nIf the previous KV is already compacted, nothing will be returned.", - "type": "boolean" - }, - "progress_notify": { - "description": "progress_notify is set so that the etcd server will periodically send a WatchResponse with\nno events to the new watcher if there are no recent events. It is useful when clients\nwish to recover a disconnected watcher starting from a recent known revision.\nThe etcd server may decide how often it will send notifications based on current load.", - "type": "boolean" - }, - "range_end": { - "description": "range_end is the end of the range [key, range_end) to watch. If range_end is not given,\nonly the key argument is watched. If range_end is equal to '\\0', all keys greater than\nor equal to the key argument are watched.\nIf the range_end is one bit larger than the given key,\nthen all keys with the prefix (the given key) will be watched.", - "type": "string", - "format": "byte" - }, - "start_revision": { - "description": "start_revision is an optional revision to watch from (inclusive). No start_revision is \"now\".", - "type": "string", - "format": "int64" - }, - "watch_id": { - "description": "If watch_id is provided and non-zero, it will be assigned to this watcher.\nSince creating a watcher in etcd is not a synchronous operation,\nthis can be used ensure that ordering is correct when creating multiple\nwatchers on the same stream. 
Creating a watcher with an ID already in\nuse on the stream will cause an error to be returned.", - "type": "string", - "format": "int64" - } - } - }, - "etcdserverpbWatchProgressRequest": { - "description": "Requests that a watch stream progress status be sent in the watch response stream as soon as\npossible.", - "type": "object" - }, - "etcdserverpbWatchRequest": { - "type": "object", - "properties": { - "cancel_request": { - "$ref": "#/definitions/etcdserverpbWatchCancelRequest" - }, - "create_request": { - "$ref": "#/definitions/etcdserverpbWatchCreateRequest" - }, - "progress_request": { - "$ref": "#/definitions/etcdserverpbWatchProgressRequest" - } - } - }, - "etcdserverpbWatchResponse": { - "type": "object", - "properties": { - "cancel_reason": { - "description": "cancel_reason indicates the reason for canceling the watcher.", - "type": "string" - }, - "canceled": { - "description": "canceled is set to true if the response is for a cancel watch request.\nNo further events will be sent to the canceled watcher.", - "type": "boolean" - }, - "compact_revision": { - "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store.\n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again.", - "type": "string", - "format": "int64" - }, - "created": { - "description": "created is set to true if the response is for a create watch request.\nThe client should record the watch_id and expect to receive events for\nthe created watcher from the same stream.\nAll events sent to the created watcher will attach with the same watch_id.", - "type": "boolean" - }, - "events": { - "type": "array", - "items": { - "$ref": "#/definitions/mvccpbEvent" - } - }, - "fragment": { - "description": "fragment is true if large watch 
response was split over multiple responses.", - "type": "boolean" - }, - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "watch_id": { - "description": "watch_id is the ID of the watcher that corresponds to the response.", - "type": "string", - "format": "int64" - } - } - }, - "mvccpbEvent": { - "type": "object", - "properties": { - "kv": { - "description": "kv holds the KeyValue for the event.\nA PUT event contains current kv pair.\nA PUT event with kv.Version=1 indicates the creation of a key.\nA DELETE/EXPIRE event contains the deleted key with\nits modification revision set to the revision of deletion.", - "$ref": "#/definitions/mvccpbKeyValue" - }, - "prev_kv": { - "description": "prev_kv holds the key-value pair before the event happens.", - "$ref": "#/definitions/mvccpbKeyValue" - }, - "type": { - "description": "type is the kind of event. If type is a PUT, it indicates\nnew data has been stored to the key. If type is a DELETE,\nit indicates the key was deleted.", - "$ref": "#/definitions/EventEventType" - } - } - }, - "mvccpbKeyValue": { - "type": "object", - "properties": { - "create_revision": { - "description": "create_revision is the revision of last creation on this key.", - "type": "string", - "format": "int64" - }, - "key": { - "description": "key is the key in bytes. An empty key is not allowed.", - "type": "string", - "format": "byte" - }, - "lease": { - "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key.", - "type": "string", - "format": "int64" - }, - "mod_revision": { - "description": "mod_revision is the revision of last modification on this key.", - "type": "string", - "format": "int64" - }, - "value": { - "description": "value is the value held by the key, in bytes.", - "type": "string", - "format": "byte" - }, - "version": { - "description": "version is the version of the key. 
A deletion resets\nthe version to zero and any modification of the key\nincreases its version.", - "type": "string", - "format": "int64" - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "code": { - "type": "integer", - "format": "int32" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - }, - "error": { - "type": "string" - }, - "message": { - "type": "string" - } - } - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - }, - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "http_status": { - "type": "string" - }, - "message": { - "type": "string" - } - } - } - }, - "securityDefinitions": { - "ApiKey": { - "type": "apiKey", - "name": "Authorization", - "in": "header" - } - }, - "security": [ - { - "ApiKey": [] - } - ] -} \ No newline at end of file diff --git a/Documentation/dev-guide/apispec/swagger/v3election.swagger.json b/Documentation/dev-guide/apispec/swagger/v3election.swagger.json deleted file mode 100644 index 7238a44e792..00000000000 --- a/Documentation/dev-guide/apispec/swagger/v3election.swagger.json +++ /dev/null @@ -1,427 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "server/etcdserver/api/v3election/v3electionpb/v3election.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v3/election/campaign": { - "post": { - "summary": "Campaign waits to acquire leadership in an election, returning a LeaderKey\nrepresenting the leadership if successful. 
The LeaderKey can then be used\nto issue new values on the election, transactionally guard API requests on\nleadership still being held, and resign from the election.", - "operationId": "Election_Campaign", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v3electionpbCampaignResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v3electionpbCampaignRequest" - } - } - ], - "tags": [ - "Election" - ] - } - }, - "/v3/election/leader": { - "post": { - "summary": "Leader returns the current election proclamation, if any.", - "operationId": "Election_Leader", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v3electionpbLeaderResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v3electionpbLeaderRequest" - } - } - ], - "tags": [ - "Election" - ] - } - }, - "/v3/election/observe": { - "post": { - "summary": "Observe streams election proclamations in-order as made by the election's\nelected leaders.", - "operationId": "Election_Observe", - "responses": { - "200": { - "description": "A successful response.(streaming responses)", - "schema": { - "type": "object", - "properties": { - "result": { - "$ref": "#/definitions/v3electionpbLeaderResponse" - }, - "error": { - "$ref": "#/definitions/runtimeStreamError" - } - }, - "title": "Stream result of v3electionpbLeaderResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": 
"body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v3electionpbLeaderRequest" - } - } - ], - "tags": [ - "Election" - ] - } - }, - "/v3/election/proclaim": { - "post": { - "summary": "Proclaim updates the leader's posted value with a new value.", - "operationId": "Election_Proclaim", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v3electionpbProclaimResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v3electionpbProclaimRequest" - } - } - ], - "tags": [ - "Election" - ] - } - }, - "/v3/election/resign": { - "post": { - "summary": "Resign releases election leadership so other campaigners may acquire\nleadership on the election.", - "operationId": "Election_Resign", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v3electionpbResignResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v3electionpbResignRequest" - } - } - ], - "tags": [ - "Election" - ] - } - } - }, - "definitions": { - "etcdserverpbResponseHeader": { - "type": "object", - "properties": { - "cluster_id": { - "type": "string", - "format": "uint64", - "description": "cluster_id is the ID of the cluster which sent the response." - }, - "member_id": { - "type": "string", - "format": "uint64", - "description": "member_id is the ID of the member which sent the response." 
- }, - "revision": { - "type": "string", - "format": "int64", - "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number." - }, - "raft_term": { - "type": "string", - "format": "uint64", - "description": "raft_term is the raft term when the request was applied." - } - } - }, - "mvccpbKeyValue": { - "type": "object", - "properties": { - "key": { - "type": "string", - "format": "byte", - "description": "key is the key in bytes. An empty key is not allowed." - }, - "create_revision": { - "type": "string", - "format": "int64", - "description": "create_revision is the revision of last creation on this key." - }, - "mod_revision": { - "type": "string", - "format": "int64", - "description": "mod_revision is the revision of last modification on this key." - }, - "version": { - "type": "string", - "format": "int64", - "description": "version is the version of the key. A deletion resets\nthe version to zero and any modification of the key\nincreases its version." - }, - "value": { - "type": "string", - "format": "byte", - "description": "value is the value held by the key, in bytes." - }, - "lease": { - "type": "string", - "format": "int64", - "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key." 
- } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "runtimeStreamError": { - "type": "object", - "properties": { - "grpc_code": { - "type": "integer", - "format": "int32" - }, - "http_code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "http_status": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "v3electionpbCampaignRequest": { - "type": "object", - "properties": { - "name": { - "type": "string", - "format": "byte", - "description": "name is the election's identifier for the campaign." - }, - "lease": { - "type": "string", - "format": "int64", - "description": "lease is the ID of the lease attached to leadership of the election. If the\nlease expires or is revoked before resigning leadership, then the\nleadership is transferred to the next campaigner, if any." - }, - "value": { - "type": "string", - "format": "byte", - "description": "value is the initial proclaimed value set when the campaigner wins the\nelection." - } - } - }, - "v3electionpbCampaignResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "leader": { - "$ref": "#/definitions/v3electionpbLeaderKey", - "description": "leader describes the resources used for holding leadership of the election." 
- } - } - }, - "v3electionpbLeaderKey": { - "type": "object", - "properties": { - "name": { - "type": "string", - "format": "byte", - "description": "name is the election identifier that corresponds to the leadership key." - }, - "key": { - "type": "string", - "format": "byte", - "description": "key is an opaque key representing the ownership of the election. If the key\nis deleted, then leadership is lost." - }, - "rev": { - "type": "string", - "format": "int64", - "description": "rev is the creation revision of the key. It can be used to test for ownership\nof an election during transactions by testing the key's creation revision\nmatches rev." - }, - "lease": { - "type": "string", - "format": "int64", - "description": "lease is the lease ID of the election leader." - } - } - }, - "v3electionpbLeaderRequest": { - "type": "object", - "properties": { - "name": { - "type": "string", - "format": "byte", - "description": "name is the election identifier for the leadership information." - } - } - }, - "v3electionpbLeaderResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "kv": { - "$ref": "#/definitions/mvccpbKeyValue", - "description": "kv is the key-value pair representing the latest leader update." - } - } - }, - "v3electionpbProclaimRequest": { - "type": "object", - "properties": { - "leader": { - "$ref": "#/definitions/v3electionpbLeaderKey", - "description": "leader is the leadership hold on the election." - }, - "value": { - "type": "string", - "format": "byte", - "description": "value is an update meant to overwrite the leader's current value." 
- } - } - }, - "v3electionpbProclaimResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - }, - "v3electionpbResignRequest": { - "type": "object", - "properties": { - "leader": { - "$ref": "#/definitions/v3electionpbLeaderKey", - "description": "leader is the leadership to relinquish by resignation." - } - } - }, - "v3electionpbResignResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - } - } -} diff --git a/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json b/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json deleted file mode 100644 index 5a45bdd9b2a..00000000000 --- a/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json +++ /dev/null @@ -1,187 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "server/etcdserver/api/v3lock/v3lockpb/v3lock.proto", - "version": "version not set" - }, - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "paths": { - "/v3/lock/lock": { - "post": { - "summary": "Lock acquires a distributed shared lock on a given named lock.\nOn success, it will return a unique key that exists so long as the\nlock is held by the caller. This key can be used in conjunction with\ntransactions to safely ensure updates to etcd only occur while holding\nlock ownership. 
The lock is held until Unlock is called on the key or the\nlease associate with the owner expires.", - "operationId": "Lock_Lock", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v3lockpbLockResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v3lockpbLockRequest" - } - } - ], - "tags": [ - "Lock" - ] - } - }, - "/v3/lock/unlock": { - "post": { - "summary": "Unlock takes a key returned by Lock and releases the hold on lock. The\nnext Lock caller waiting for the lock will then be woken up and given\nownership of the lock.", - "operationId": "Lock_Unlock", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v3lockpbUnlockResponse" - } - }, - "default": { - "description": "An unexpected error response.", - "schema": { - "$ref": "#/definitions/runtimeError" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/v3lockpbUnlockRequest" - } - } - ], - "tags": [ - "Lock" - ] - } - } - }, - "definitions": { - "etcdserverpbResponseHeader": { - "type": "object", - "properties": { - "cluster_id": { - "type": "string", - "format": "uint64", - "description": "cluster_id is the ID of the cluster which sent the response." - }, - "member_id": { - "type": "string", - "format": "uint64", - "description": "member_id is the ID of the member which sent the response." - }, - "revision": { - "type": "string", - "format": "int64", - "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. 
All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number." - }, - "raft_term": { - "type": "string", - "format": "uint64", - "description": "raft_term is the raft term when the request was applied." - } - } - }, - "protobufAny": { - "type": "object", - "properties": { - "type_url": { - "type": "string" - }, - "value": { - "type": "string", - "format": "byte" - } - } - }, - "runtimeError": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "code": { - "type": "integer", - "format": "int32" - }, - "message": { - "type": "string" - }, - "details": { - "type": "array", - "items": { - "$ref": "#/definitions/protobufAny" - } - } - } - }, - "v3lockpbLockRequest": { - "type": "object", - "properties": { - "name": { - "type": "string", - "format": "byte", - "description": "name is the identifier for the distributed shared lock to be acquired." - }, - "lease": { - "type": "string", - "format": "int64", - "description": "lease is the ID of the lease that will be attached to ownership of the\nlock. If the lease expires or is revoked and currently holds the lock,\nthe lock is automatically released. Calls to Lock with the same lease will\nbe treated as a single acquisition; locking twice with the same lease is a\nno-op." - } - } - }, - "v3lockpbLockResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "key": { - "type": "string", - "format": "byte", - "description": "key is a key that will exist on etcd for the duration that the Lock caller\nowns the lock. Users should not modify this key or the lock may exhibit\nundefined behavior." - } - } - }, - "v3lockpbUnlockRequest": { - "type": "object", - "properties": { - "key": { - "type": "string", - "format": "byte", - "description": "key is the lock ownership key granted by Lock." 
- } - } - }, - "v3lockpbUnlockResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - } - } - } - } -} diff --git a/Documentation/postmortems/v3.5-data-inconsistency.md b/Documentation/postmortems/v3.5-data-inconsistency.md deleted file mode 100644 index 718097657b4..00000000000 --- a/Documentation/postmortems/v3.5-data-inconsistency.md +++ /dev/null @@ -1,142 +0,0 @@ -# v3.5 data inconsistency postmortem - -| | | -|---------|------------| -| Authors | serathius@ | -| Date | 2022-04-20 | -| Status | published | - -## Summary - -| | | -|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Summary | Code refactor in v3.5.0 resulted in consistent index not being saved atomically. Independent crash could lead to committed transactions are not reflected on all the members. | -| Impact | No user reported problems in production as triggering the issue required frequent crashes, however issue was critical enough to motivate a public statement. Main impact comes from loosing user trust into etcd reliability. | - -## Background - -etcd v3 state is preserved on disk in two forms write ahead log (WAL) and database state (DB). -etcd v3.5 also still maintains v2 state, however it's deprecated and not relevant to the issue in this postmortem. - -WAL stores history of changes for etcd state and database represents state at one point. -To know which point of history database is representing, it stores consistent index (CI). -It's a special metadata field that points to last entry in WAL that it has seen. - -When etcd is updating database state, it replays entries from WAL and updates the consistent index to point to new entry. -This operation is required to be [atomic](https://en.wikipedia.org/wiki/Atomic_commit). 
-A partial fail would mean that database and WAL would no longer match, so some entries would be either skipped (if only CI is updated) or executed twice (if only changes are applied). -This is especially important for distributed system like etcd, where there are multiple cluster members, each applying the WAL entries to their database. -Correctness of the system depends on assumption that every member of the cluster, while replying WAL entries, will reach the same state. - -## Root cause - -To simplify managing consistency index, etcd has introduced backend hooks in https://github.com/etcd-io/etcd/pull/12855. -Goal was to ensure that consistency index is always updated, by automatically triggering update during commit. -Implementation was as follows, before applying the WAL entries, etcd updated in memory value of consistent index. -As part of transaction commit process, a database hook would read the value of consistent index and store it to database. - -Problem is that in memory value of consistent index is shared, and there might be other in flight transactions apart from serial WAL apply flow. -So if we imagine scenario: -1. etcd server starts an apply workflow, and it just sets a new consistent index value. -2. The periodic commit is triggered, and it executes the backend hook and saves consistent index from apply workflow. -3. etcd server finished an apply workflow, saves new changes and saves same value of consistent index again. - -Between second and third point there is a very small window where consistent index is increased without applying entry from WAL. - -## Trigger - -If etcd crashed after consistency index is saved, but before to apply workflow finished it would lead to data inconsistency. -When recovering the data etcd would skip executing changes from failed apply workflow, assuming they have been already executed. - -This follows the issue reports and code used to reproduce the issue where trigger was etcd crashing under high request load. 
-Etcd v3.5.0 was released with bug (https://github.com/etcd-io/etcd/pull/13505) that could cause etcd to crash that was fixed in v3.5.1. -Apart from that all reports described etcd running under high memory pressure, causing it to go out of memory from time to time. -Reproduction run etcd under high stress and randomly killed one of the members using SIGKILL signal (not recoverable immediate process death). - -## Detection - -For single member cluster it is totally undetectable. -There is no mechanism or tool for verifying that state database matches WAL. - -In cluster with multiple members it would mean that one of the members that crashed, will missing changes from failed apply workflow. -This means that it will have different state of database and will return different hash via `HashKV` grpc call. - -There is an automatic mechanism to detect data inconsistency. -It can be executed during etcd start via `--experimental-initial-corrupt-check` and periodically via `--experimental-corrupt-check-time`. -Both checks however have a flaw, they depend on `HashKV` grpc method, which might fail causing the check to pass. - -In multi member etcd cluster, each member can run with different performance and be at different stage of applying the WAL log. -Comparing database hashes between multiple etcd members requires all hashes to be calculated at the same change. -This is done by requesting hash for the same `revision` (version of key value store). -However, it will not work if the provided revision is not available on the members. -This can happen on very slow members, or in cases where corruption has lead revision numbers to diverge. - -This means that for this issue, the corrupt check is only reliable during etcd start just after etcd crashes. - -## Impact - -We are not aware any cases of users reporting a data corruption in production environment. - -However, issue was critical enough to motivate a public statement. 
-Main impact comes from loosing user trust into etcd reliability. - -## Lessons learned - -### What went well - -* Multiple maintainers were able to work effectively on reproducing and fixing the issue. As they are in different timezones, there was always someone working on the issue. -* When fixing the main data inconsistency we have found multiple other edge cases that could lead to data corruption (https://github.com/etcd-io/etcd/issues/13514, https://github.com/etcd-io/etcd/issues/13922, https://github.com/etcd-io/etcd/issues/13937). - -### What went wrong - -* No users enable data corruption detection as it is still an experimental feature introduced in v3.3. All reported cases where detected manually making it almost impossible to reproduce. -* etcd has functional tests designed to detect such problems, however they are unmaintained, flaky and are missing crucial scenarios. -* etcd v3.5 release was not qualified as comprehensive as previous ones. Older maintainers run manual qualification process that is no longer known or executed. -* etcd apply code is so complicated that fixing the data inconsistency took almost 2 weeks and multiple tries. Fix needed to be so complicated that we needed to develop automatic validation for it (https://github.com/etcd-io/etcd/pull/13885). -* etcd v3.5 was recommended for production without enough insight on the production adoption. Production ready recommendations based on after some internal feedback... to get diverse usage, but the user's hold on till someone else will discover issues. - -### Where we got lucky - -* We reproduced the issue using etcd functional only because weird partition setup on workstation. Functional tests store etcd data under `/tmp` usually mounted to in memory filesystem. Problem was reproduced only because one of the maintainers has `/tmp` mounted to standard disk. - -## Action items - -Action items should directly address items listed in lessons learned. 
-We should double down on things that went well, fix things that went wrong, and stop depending on luck. - -Action fall under three types, and we should have at least one item per type. Types: -* Prevent - Prevent similar issues from occurring. In this case, what testing we should introduce to find data inconsistency issues before release, preventing publishing broken release. -* Detect - Be more effective in detecting when similar issues occur. In this case, improve mechanism to detect data inconsistency issue so users will be automatically informed. -* Mitigate - Reduce time to recovery for users. In this case, how we ensure that users are able to quickly fix data inconsistency. - -Actions should not be restricted to fixing the immediate issues and also propose long term strategic improvements. -To reflect this action items should have assigned priority: -* P0 - Critical for reliability of the v3.5 release. Should be prioritized this over all other work and backported to v3.5. -* P1 - Important for long term success of the project. Blocks v3.6 release. -* P2 - Stretch goals that would be nice to have for v3.6, however should not be blocking. 
- -| Action Item | Type | Priority | Bug | -|-------------------------------------------------------------------------------------|----------|----------|----------------------------------------------| -| etcd testing can reproduce historical data inconsistency issues | Prevent | P0 | https://github.com/etcd-io/etcd/issues/14045 | -| etcd detects data corruption by default | Detect | P0 | https://github.com/etcd-io/etcd/issues/14039 | -| etcd testing is high quality, easy to maintain and expand | Prevent | P1 | https://github.com/etcd-io/etcd/issues/13637 | -| etcd apply code should be easy to understand and validate correctness | Prevent | P1 | | -| Critical etcd features are not abandoned when contributors move on | Prevent | P1 | https://github.com/etcd-io/etcd/issues/13775 | -| etcd is continuously qualified with failure injection | Prevent | P1 | | -| etcd can reliably detect data corruption (hash is linearizable) | Detect | P1 | | -| etcd checks consistency of snapshots sent between leader and followers | Detect | P1 | https://github.com/etcd-io/etcd/issues/13973 | -| etcd recovery from data inconsistency procedures are documented and tested | Mitigate | P1 | | -| etcd can imminently detect and recover from data corruption (implement Merkle root) | Mitigate | P2 | https://github.com/etcd-io/etcd/issues/13839 | - -## Timeline - -| Date | Event | -|------------|-----------------------------------------------------------------------------------------------------------------------| -| 2021-05-08 | Pull request that caused data corruption was merged - https://github.com/etcd-io/etcd/pull/12855 | -| 2021-06-16 | Release v3.5.0 with data corruption was published - https://github.com/etcd-io/etcd/releases/tag/v3.5.0 | -| 2021-12-01 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13514 | -| 2021-01-28 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13654 | -| 2022-03-08 | Report of data corruption - 
https://github.com/etcd-io/etcd/issues/13766 | -| 2022-03-25 | Corruption confirmed by one of the maintainers - https://github.com/etcd-io/etcd/issues/13766#issuecomment-1078897588 | -| 2022-03-29 | Statement about the corruption was sent to etcd-dev@googlegroups.com and dev@kubernetes.io | -| 2022-04-24 | Release v3.5.3 with fix was published - https://github.com/etcd-io/etcd/releases/tag/v3.5.3 | diff --git a/GOVERNANCE.md b/GOVERNANCE.md deleted file mode 100644 index 8b7cad3b5aa..00000000000 --- a/GOVERNANCE.md +++ /dev/null @@ -1,101 +0,0 @@ -# etcd Governance - -## Principles - -The etcd community adheres to the following principles: - -- Open: etcd is open source. -- Welcoming and respectful: See [Code of Conduct](code-of-conduct.md). -- Transparent and accessible: Changes to the etcd code repository and CNCF related -activities (e.g. level, involvement, etc) are done in public. -- Merit: Ideas and contributions are accepted according to their technical merit for -the betterment of the project. For specific guidance on practical contribution steps -please see [CONTRIBUTING](./CONTRIBUTING.md) guide. - -## Maintainers - -Maintainers are first and foremost contributors that have shown they -are committed to the long term success of a project. Maintainership is about building -trust with the current maintainers of the project and being a person that they can -depend on to make decisions in the best interest of the project in a consistent manner. -The maintainers role can be a top-level or restricted to certain package/feature -depending upon their commitment in fulfilling the expected responsibilities as explained -below. 
- -### Top-level maintainer - -- Running the etcd release processes -- Ownership of test and debug infrastructure -- Triage GitHub issues to keep the issue count low (goal: under 100) -- Regularly review GitHub pull requests across all pkgs -- Providing cross pkg design review -- Monitor email aliases -- Participate when called upon in the [security disclosure and release process](security/README.md) -- General project maintenance - -### Package/feature maintainer - -- Ownership of test and debug failures in a pkg/feature -- Resolution of bugs triaged to a package/feature -- Regularly review pull requests to the pkg subsystem - -### Nomination and retiring of maintainers - -[Maintainers](./MAINTAINERS) file on the `main` branch reflects the latest -state of project maintainers. List of existing maintainers should be kept up to -date by existing maintainers to properly reflect community health and to gain -better understanding of recruiting need for new maintainers. Changes to list of -maintainers should be done by opening a pull request and CCing all the existing -maintainers. - -Contributors who are interested in becoming a maintainer, if performing relevant -responsibilities, should discuss their interest with the existing maintainers. -New maintainers must be nominated by an existing maintainer and must be elected -by a supermajority of maintainers with a fallback on lazy consensus after three -business weeks inactive voting period and as long as two maintainers are on board. - -Life priorities, interests, and passions can change. Maintainers can retire and -move to the [emeritus status](./README.md#etcd-emeritus-maintainers). If a -maintainer needs to step down, they should inform other maintainers, if possible, -help find someone to pick up the related work. At the very least, ensure the -related work can be continued. Afterward they can remove themselves from list of -existing maintainers. 
- -If a maintainer is not been performing their duties for period of 12 months, -they can be removed by other maintainers. In that case inactive maintainer will -be first notified via an email. If situation doesn't improve, they will be -removed. If an emeritus maintainer wants to regain an active role, they can do -so by renewing their contributions. Active maintainers should welcome such a move. -Retiring of other maintainers or regaining the status should require approval -of at least two active maintainers. - -## Reviewers - -[Reviewers](./MAINTAINERS) are contributors who have demonstrated greater skill in -reviewing the code contribution from other contributors. Their LGTM counts towards -merging a code change into the project. A reviewer is generally on the ladder towards -maintainership. New reviewers must be nominated by an existing maintainer and must be -elected by a supermajority of maintainers with a fallback on lazy consensus after three -business weeks inactive voting period and as long as two maintainers are on board. -Reviewers can be removed by a supermajority of the maintainers or can resign by notifying -the maintainers. - -## Decision making process - -Decisions are built on consensus between maintainers publicly. Proposals and ideas -can either be submitted for agreement via a GitHub issue or PR, or by sending an email -to `etcd-maintainers@googlegroups.com`. - -## Conflict resolution - -In general, we prefer that technical issues and maintainer membership are amicably -worked out between the persons involved. However, any technical dispute that has -reached an impasse with a subset of the community, any contributor may open a GitHub -issue or PR or send an email to `etcd-maintainers@googlegroups.com`. 
If the -maintainers themselves cannot decide an issue, the issue will be resolved by a -supermajority of the maintainers with a fallback on lazy consensus after three business -weeks inactive voting period and as long as two maintainers are on board. - -## Changes in Governance - -Changes in project governance could be initiated by opening a GitHub PR. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/MAINTAINERS b/MAINTAINERS deleted file mode 100644 index 253c562fda6..00000000000 --- a/MAINTAINERS +++ /dev/null @@ -1,20 +0,0 @@ -# The official list of maintainers and reviewers for the project maintenance. -# -# Refer to the GOVERNANCE.md for description of the roles. -# -# Names should be added to this file like so: -# Individual's name (@GITHUB_HANDLE) pkg:* -# Individual's name (@GITHUB_HANDLE) pkg:* -# -# Please keep the list sorted. 
- -# MAINTAINERS -Benjamin Wang (ahrtr@) pkg:* -Hitoshi Mitake (@mitake) pkg:* -Marek Siarkowicz (@serathius) pkg:* -Piotr Tabor (@ptabor) pkg:* -Sahdev Zala (@spzala) pkg:* -Sam Batschelet (@hexfusion) pkg:* -Tobias Grieger (@tbg) pkg:go.etcd.io/etcd/raft - -# REVIEWERS diff --git a/Makefile b/Makefile deleted file mode 100644 index c9d0ac44247..00000000000 --- a/Makefile +++ /dev/null @@ -1,172 +0,0 @@ -.PHONY: build -build: - GO_BUILD_FLAGS="${GO_BUILD_FLAGS} -v" ./scripts/build.sh - ./bin/etcd --version - ./bin/etcdctl version - ./bin/etcdutl version - -.PHONY: tools -tools: - GO_BUILD_FLAGS="${GO_BUILD_FLAGS} -v" ./scripts/build_tools.sh - -# Tests - -GO_TEST_FLAGS?= - -.PHONY: test -test: - PASSES="unit integration release e2e" ./scripts/test.sh $(GO_TEST_FLAGS) - -.PHONY: test-unit -test-unit: - PASSES="unit" ./scripts/test.sh $(GO_TEST_FLAGS) - -.PHONY: test-integration -test-integration: - PASSES="integration" ./scripts/test.sh $(GO_TEST_FLAGS) - -.PHONY: test-e2e -test-e2e: build - PASSES="e2e" ./scripts/test.sh $(GO_TEST_FLAGS) - -.PHONY: test-e2e-release -test-e2e-release: build - PASSES="release e2e" ./scripts/test.sh $(GO_TEST_FLAGS) - -.PHONY: test-linearizability -test-linearizability: - PASSES="linearizability" ./scripts/test.sh $(GO_TEST_FLAGS) - -.PHONY: fuzz -fuzz: - ./scripts/fuzzing.sh - -# Static analysis - -verify: verify-gofmt verify-bom verify-lint verify-dep verify-shellcheck verify-goword \ - verify-govet verify-license-header verify-receiver-name verify-mod-tidy verify-shellcheck \ - verify-shellws verify-proto-annotations verify-genproto -fix: fix-bom fix-lint - ./scripts/fix.sh - -.PHONY: verify-gofmt -verify-gofmt: - PASSES="gofmt" ./scripts/test.sh - -.PHONY: verify-bom -verify-bom: - PASSES="bom" ./scripts/test.sh - -.PHONY: update-bom -fix-bom: - ./scripts/updatebom.sh - -.PHONY: verify-dep -verify-dep: - PASSES="dep" ./scripts/test.sh - -.PHONY: verify-lint -verify-lint: - golangci-lint run - -.PHONY: update-lint -fix-lint: - 
golangci-lint run --fix - -.PHONY: verify-shellcheck -verify-shellcheck: - PASSES="shellcheck" ./scripts/test.sh - -.PHONY: verify-goword -verify-goword: - PASSES="goword" ./scripts/test.sh - -.PHONY: verify-govet -verify-govet: - PASSES="govet" ./scripts/test.sh - -.PHONY: verify-license-header -verify-license-header: - PASSES="license_header" ./scripts/test.sh - -.PHONY: verify-receiver-name -verify-receiver-name: - PASSES="receiver_name" ./scripts/test.sh - -.PHONY: verify-mod-tidy -verify-mod-tidy: - PASSES="mod_tidy" ./scripts/test.sh - -.PHONY: verify-shellws -verify-shellws: - PASSES="shellws" ./scripts/test.sh - -.PHONY: verify-proto-annotations -verify-proto-annotations: - PASSES="proto_annotations" ./scripts/test.sh - -.PHONY: verify-genproto -verify-genproto: - PASSES="genproto" ./scripts/test.sh - -# Failpoints - -GOFAIL_VERSION = $(shell cd tools/mod && go list -m -f {{.Version}} go.etcd.io/gofail) - -.PHONY: gofail-enable -gofail-enable: install-gofail - gofail enable server/etcdserver/ server/storage/backend/ server/storage/mvcc/ server/storage/wal/ - cd ./server && go get go.etcd.io/gofail@${GOFAIL_VERSION} - cd ./etcdutl && go get go.etcd.io/gofail@${GOFAIL_VERSION} - cd ./etcdctl && go get go.etcd.io/gofail@${GOFAIL_VERSION} - cd ./tests && go get go.etcd.io/gofail@${GOFAIL_VERSION} - -.PHONY: gofail-disable -gofail-disable: install-gofail - gofail disable server/etcdserver/ server/storage/backend/ server/storage/mvcc/ server/storage/wal/ - cd ./server && go mod tidy - cd ./etcdutl && go mod tidy - cd ./etcdctl && go mod tidy - cd ./tests && go mod tidy - -.PHONY: install-gofail -install-gofail: - cd tools/mod; go install go.etcd.io/gofail@${GOFAIL_VERSION} - -build-failpoints-release-3.5: - rm -rf /tmp/etcd-release-3.5/ - mkdir -p /tmp/etcd-release-3.5/ - cd /tmp/etcd-release-3.5/; \ - git clone --depth 1 --branch release-3.5 https://github.com/etcd-io/etcd.git .; \ - go get go.etcd.io/gofail@${GOFAIL_VERSION}; \ - (cd server; go get 
go.etcd.io/gofail@${GOFAIL_VERSION}); \ - (cd etcdctl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \ - (cd etcdutl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \ - FAILPOINTS=true ./build; - mkdir -p ./bin - cp /tmp/etcd-release-3.5/bin/etcd ./bin/etcd - -build-failpoints-release-3.4: - rm -rf /tmp/etcd-release-3.4/ - mkdir -p /tmp/etcd-release-3.4/ - cd /tmp/etcd-release-3.4/; \ - git clone --depth 1 --branch release-3.4 https://github.com/etcd-io/etcd.git .; \ - go get go.etcd.io/gofail@${GOFAIL_VERSION}; \ - FAILPOINTS=true ./build; - mkdir -p ./bin - cp /tmp/etcd-release-3.4/bin/etcd ./bin/etcd - -# Cleanup - -clean: - rm -f ./codecov - rm -rf ./covdir - rm -f ./bin/Dockerfile-release* - rm -rf ./bin/etcd* - rm -rf ./default.etcd - rm -rf ./tests/e2e/default.etcd - rm -rf ./release - rm -rf ./coverage/*.err ./coverage/*.out - rm -rf ./tests/e2e/default.proxy - rm -rf ./bin/shellcheck* - find ./ -name "127.0.0.1:*" -o -name "localhost:*" -o -name "*.log" -o -name "agent-*" -o -name "*.coverprofile" -o -name "testname-proxy-*" -delete diff --git a/Procfile b/Procfile index 92ef3763958..bf4c502506d 100644 --- a/Procfile +++ b/Procfile @@ -1,9 +1,7 @@ -# Use goreman to run `go install github.com/mattn/goreman@latest` +# Use goreman to run `go get github.com/mattn/goreman` # Change the path of bin/etcd if etcd is located elsewhere - -etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr -etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls 
http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr -etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr -#proxy: bin/etcd grpc-proxy start --endpoints=127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 --listen-addr=127.0.0.1:23790 --advertise-client-url=127.0.0.1:23790 --enable-pprof - -# A learner node can be started using Procfile.learner +etcd1: go run ./etcd_backend/main.go --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof +etcd2: go run ./etcd_backend/main.go --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof +etcd3: go run ./etcd_backend/main.go --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 
--initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof +# in future, use proxy to listen on 2379 +#proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2378 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof diff --git a/Procfile.learner b/Procfile.learner deleted file mode 100644 index 1517d3f2be8..00000000000 --- a/Procfile.learner +++ /dev/null @@ -1,12 +0,0 @@ -# Use goreman to run `go install github.com/mattn/goreman@latest` - -# 1. Start the cluster using Procfile -# 2. Add learner node to the cluster -# % etcdctl member add infra4 --peer-urls="http://127.0.0.1:42380" --learner=true - -# 3. Start learner node with goreman -# Change the path of bin/etcd if etcd is located elsewhere -etcd4: bin/etcd --name infra4 --listen-client-urls http://127.0.0.1:42379 --advertise-client-urls http://127.0.0.1:42379 --listen-peer-urls http://127.0.0.1:42380 --initial-advertise-peer-urls http://127.0.0.1:42380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra4=http://127.0.0.1:42380,infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state existing --enable-pprof --logger=zap --log-outputs=stderr - -# 4. 
The learner node can be promoted to voting member by the command -# % etcdctl member promote diff --git a/Procfile.v2 b/Procfile.v2 deleted file mode 100644 index c68511e56d4..00000000000 --- a/Procfile.v2 +++ /dev/null @@ -1,7 +0,0 @@ -# Use goreman to run `go install github.com/mattn/goreman@latest` -# Change the path of bin/etcd if etcd is located elsewhere -etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof -etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof -etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof -# in future, use proxy to listen on 2379 -#proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2378 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof diff --git a/README.md b/README.md index 3169e49bcc9..4aee0f16c1c 100644 --- a/README.md +++ b/README.md @@ -1,192 +1,675 @@ # etcd -[![Go Report 
Card](https://goreportcard.com/badge/github.com/etcd-io/etcd?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/etcd) -[![Coverage](https://codecov.io/gh/etcd-io/etcd/branch/main/graph/badge.svg)](https://codecov.io/gh/etcd-io/etcd) -[![Tests](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml) -[![codeql-analysis](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml) -[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/etcd) -[![Releases](https://img.shields.io/github/release/etcd-io/etcd/all.svg?style=flat-square)](https://github.com/etcd-io/etcd/releases) -[![LICENSE](https://img.shields.io/github/license/etcd-io/etcd.svg?style=flat-square)](https://github.com/etcd-io/etcd/blob/main/LICENSE) -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/etcd-io/etcd/badge)](https://api.securityscorecards.dev/projects/github.com/etcd-io/etcd) +v3.5.2 Etcd是分布式系统中最关键的数据的可靠的分布式键值存储,其重点是: -**Note**: The `main` branch may be in an *unstable or even broken state* during development. For stable versions, see [releases][github-release]. 
+自己看源码 用 2379:为客户端提供通讯 2380:为服务器间提供通讯 +![etcd ](./images/raft.png) -![etcd Logo](logos/etcd-horizontal-color.svg) +### 配置 -etcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being: +``` +peer-cert-allowed-cn 允许的客户端证书CommonName your name or your server's hostname +``` -* *Simple*: well-defined, user-facing API (gRPC) -* *Secure*: automatic TLS with optional client cert authentication -* *Fast*: benchmarked 10,000 writes/sec -* *Reliable*: properly distributed using Raft +## 空间占用整理 -etcd is written in Go and uses the [Raft][] consensus algorithm to manage a highly-available replicated log. +```模拟 +设置etcd存储大小 +etcd --quota-backend-bytes=$((16*1024*1024)) -etcd is used [in production by many companies](./ADOPTERS.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [locksmith][], [vulcand][], [Doorman][], and many others. Reliability is further ensured by [**rigorous testing**](https://github.com/etcd-io/etcd/tree/main/tests/functional). +写爆磁盘 +while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | etcdctl put key || break;done -See [etcdctl][etcdctl] for a simple command line client. +查看endpoint状态 +etcdctl --write-out=table endpoint status -[raft]: https://raft.github.io/ -[k8s]: http://kubernetes.io/ -[doorman]: https://github.com/youtube/doorman -[locksmith]: https://github.com/coreos/locksmith -[vulcand]: https://github.com/vulcand/vulcand -[etcdctl]: https://github.com/etcd-io/etcd/tree/main/etcdctl +查看alarm +etcdctl alarm list -## Maintainers +清理碎片 +etcdctl defrag -[MAINTAINERS](MAINTAINERS) strive to shape an inclusive open source project culture where users are heard and contributors feel respected and empowered. MAINTAINERS maintain productive relationships across different companies and disciplines. Read more about [MAINTAINERS role and responsibilities](GOVERNANCE.md#maintainers). 
+清理alarm +etcdctl alarm disarm -## Getting started +获取当前etcd数据的修订版本(revision) +rev=$(etcdctl -w json endpoint status | egrep -o -i '"revision":[0-9]*' | egrep -o '[0-9]*') -### Getting etcd +# 获取etcd当前版本号 +$ rev=$(etcdctl endpoint status --write-out="json" | egrep -o -i '"revision":[0-9]*' | egrep -o '[0-9].*') +整合压缩旧版本数据 执行压缩操作,指定压缩的版本号为当前版本号 +etcdctl compact $rev +执行碎片整理 +etcdctl defrag +解除告警 +etcdctl alarm disarm +备份以及查看备份数据信息 +etcdctl snapshot save backup.db +etcdctl snapshot status backup.db +``` -The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, and Docker on the [release page][github-release]. +``` +//--auto-compaction-mode=revision --auto-compaction-retention=1000 每5分钟自动压缩"latest revision" - 1000; +//--auto-compaction-mode=periodic --auto-compaction-retention=12h 每1小时自动压缩并保留12小时窗口. +👁etcd_backend/embed/config_test.go:TestAutoCompactionModeParse -For more installation guides, please check out [play.etcd.io](http://play.etcd.io) and [operating etcd](https://etcd.io/docs/latest/op-guide). +- 只保存一个小时的历史版本`etcd --auto-compaction-retention=1` +- 只保留最近的3个版本`etcdctl compact 3` +- 碎片整理`etcdctl defrag` +``` -[github-release]: https://github.com/etcd-io/etcd/releases -[branch-management]: https://etcd.io/docs/latest/branch_management +### URL -### Running etcd +``` +http://127.0.0.1:2379/members -First start a single-member cluster of etcd. 
+``` -If etcd is installed using the [pre-built release binaries][github-release], run it from the installation location as below: +### msgType + +| 消息类型 | 处理方 | 描述 | +| :--- | :--- | :--- | +| MsgHup | 节点支持 | 本地:开启选举,---->会触发vote或pre-vote | +| MsgBeat | Leader |本地:心跳,---->给peers发送Msghearbeat | +| MsgProp | Leader、Candidate、Follower | 本地:Propose -----> MsgApp | +| MsgApp | Candidate、Follower | 非本地:操作日志【复制、配置变更 req】 | +| MsgAppResp | Leader | 非本地:操作日志【复制 res】 | +| MsgVote | 节点支持 | 非本地:投票请求 | +| MsgVoteResp | Candidate | 非本地:投票相应 | +| MsgPreVote | 节点支持 | 非本地:预投票请求 | +| MsgPreVoteResp | Candidate | 非本地:预投票相应 | +| MsgSnap | Candidate、Follower | 非本地:leader向follower拷贝快照,响应是MsgAppResp,告诉leader继续复制之后的值 | +| MsgHeartbeat | Candidate、Follower | | +| MsgHeartbeatResp | Leader | | +| MsgUnreachable | Leader | 非本地:etcdserver通过这个消息告诉raft状态机某个follower不可达,让其发送消息的方式由pipeline切成ping-pong模式 | +| MsgSnapStatus | Leader | 非本地:etcdserver通过这个消息告诉raft状态机快照发送成功还是失败 | +| MsgCheckQuorum | Leader | | +| MsgTransferLeader | Leader、Follower | 非本地: | +| MsgTimeoutNow | Candidate、Follower | 非本地: | +| MsgReadIndex | Leader、Follower | 非本地: | +| MsgReadIndexResp | Follower | 非本地: | + +### issue + +- 1、CertFile与ClientCertFile KeyFile与ClientKeyFile的区别 + ``` + 在运行的过程中是配置的相同的; + 一般情况下,client与server是使用相同的ca进行的签发, 所有server端可以使用自己的私钥与证书验证client证书 + 但如果不是同一个ca签发的; 那么就需要一个与client相同ca签发的证书文件与key + + ``` +- 2、url + ``` + + ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") + ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined") + + --data-dir 指定节点的数据存储目录,这些数据包括节点ID,集群ID,集群初始化配置,Snapshot文件,若未指定—wal-dir,还会存储WAL文件; + --wal-dir 指定节点的was文件的存储目录,若指定了该参数,wal文件会和其他数据文件分开存储. 
+ # member + 这个参数是etcd服务器自己监听时用的,也就是说,监听本机上的哪个网卡,哪个端口 + --listen-client-urls DefaultListenClientURLs = "http://192.168.1.100:2379" + 和成员之间通信的地址.用于监听其他etcd member的url + --listen-peer-urls DefaultListenPeerURLs = "http://192.168.1.100:2380" + + # cluster + 就是客户端(etcdctl/curl等)跟etcd服务进行交互时请求的url + --advertise-client-urls http://127.0.0.1:2379,http://192.168.1.100:2379,http://10.10.10.10:2379 + 集群成员的 URL地址.且会通告群集的其余成员节点. + --initial-advertise-peer-urls http://127.0.0.1:12380 告知集群其他节点url. + # 集群中所有节点的信息 + --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' + + + 请求流程: + etcdctl endpoints=http://192.168.1.100:2379 --debug ls + 首先与endpoints建立链接, 获取配置在advertise-client-urls的参数 + 然后依次与每一个地址建立链接,直到操作成功 + + + --advertise-client-urls=https://192.168.1.100:2379 + --cert-file=/etc/kubernetes/pki/etcd/server.crt + --client-cert-auth=true + + --initial-advertise-peer-urls=https://192.168.1.100:2380 + --initial-cluster=k8s-master01=https://192.168.1.100:2380 + + --key-file=/etc/kubernetes/pki/etcd/server.key + --listen-client-urls=https://127.0.0.1:2379,https://192.168.1.100:2379 + --listen-metrics-urls=http://127.0.0.1:2381 + --listen-peer-urls=https://192.168.1.100:2380 + + --name=k8s-master01 + + --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt + --peer-client-cert-auth=true + --peer-key-file=/etc/kubernetes/pki/etcd/peer.key + + --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt + --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt + initial-advertise-peer-urls与initial-cluster要都包含 + + ``` +- 3 JournalLogOutput 日志 + ``` + systemd-journal是syslog 的补充,收集来自内核、启动过程早期阶段、标准输出、系统日志、守护进程启动和运行期间错误的信息, + 它会默认把日志记录到/run/log/journal中,仅保留一个月的日志,且系统重启后也会消失. + 但是当新建 /var/log/journal 目录后,它又会把日志记录到这个目录中,永久保存. + ``` + + +- checkquorum 过半机制: + ``` + 每隔一段时间,leader节点会尝试连接集群中的节点(发送心跳),如果发现自己可以连接到的节点个数没有超过半数,则主动切换成follower状态. + 这样在网络分区的情况下,旧的leader节点可以很快的知道自己已经过期了. 
+ ``` + + +- PreVote优化 + ``` + 当follower节点准备发起选举时候,先连接其他节点,并询问它们是否愿意参与选举(其他人是否能正常收到leader节点的信息),当有半数以上节点响应并参与则可以发起新一轮选举. + 解决分区之后节点重新恢复但term过大导致的leader选举问题 + ``` +- WAL + ``` + WAL全称是Write Ahead Log,是数据库中常用的持久化数据的方法.比如我们更新数据库的一条数据,如果直接找到这条数据并更新, + 可能会耗费比较长的时间.更快更安全的方式是先写一条Log数据到文件中,然后由后台线程来完成最终数据的更新,这条log中通常包含的是一条指令. + ``` +- 发送心跳消息的时候leader是怎么设置各个follower的commit? + +- leader收到follower的心跳响应之后会怎么去修改对应的follower元数据呢? + +- 快照 follower 当数据远落后于leader , leader会将快照发送过来 但由于网络原因,这一过程很慢 ,但是leader又生成了新的快照,wal没有旧的数据, 这时follower同步完,leader将最新新消息 + 发送follower , follower reject ,但是此时wal已经没有对应的wal 又会发送新的快照, 这就会陷入死循环.....how? 看完源码再说吧 + ![](./images/MsgReadIndex.png) +- JointConfig 为什么是两个 +- 哪些场景会出现 Follower 日志与 Leader 冲突? + ``` + leader崩溃的情况下可能(如老的leader可能还没有完全复制所有的日志条目),如果leader和follower出现持续崩溃会加剧这个现象. + follower可能会丢失一些在新的leader中有的日志条目,他也可能拥有一些leader没有的日志条目,或者两者都发生. + ``` +- follower如何删除无效日志? + ``` + leader处理不一致是通过强制follower直接复制自己的日志来解决了.因此在follower中的冲突的日志条目会被leader的日志覆盖. + leader会记录follower的日志复制进度nextIndex,如果follower在追加日志时一致性检查失败,就会拒绝请求,此时leader就会减小 nextIndex 值并进行重试,最终在某个位置让follower跟leader一致. + ``` +- 为什么WAL日志模块只通过追加,也能删除已持久化冲突的日志条目呢? + ``` + 其实这里 etcd 在实现上采用了一些比较有技巧的方法,在 WAL 日志中的确没删除废弃的日志条目,你可以在其中搜索到冲突的日志条目. + 只是 etcd 加载 WAL 日志时,发现一个 raft log index 位置上有多个日志条目的时候,会通过覆盖的方式,将最后写入的日志条目追加到 raft log 中, + 实现了删除冲突日志条目效果 + https://github.com/etcd-io/etcd/issues/12589 + ``` + +(2) electionElapsed + +当 electionElapsed 超时,发送 MsgCheckQuorum 给当前节点,当前节点收到消息之后,进行自我检查,判断是否能继续维持 Leader 状态,如果不能切换为Follower.同时如果节点正在进行 Leader 切换( +切换其他节点为Leader),当 electionElapsed 超时,说明 Leader 节点转移超时,会终止切换. 
-```bash -/tmp/etcd-download-test/etcd ``` +curl --------http---------> gateway ------------> etcd grpc server 2379 + 将http转换成了grpc -The etcd command can be simply run as such if it is moved to the system path as below: -```bash -mv /tmp/etcd-download-test/etcd /usr/local/bin/ -etcd + +127.0.0.1:2379 +1、HTTP2 +2、HTTP1 + ``` -This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication. +### module -Next, let's set a single key, and then retrieve it: +- github.com/soheilhy/cmux 可以在同一个listener上监听不同协议的请求 +- ``` -etcdctl put mykey "this is awesome" -etcdctl get mykey +etcdServer 会单独处理 Propose消息, 其余消息交给raft.step 来处理 [该函数,会随着节点角色的改变而发生改变] [会首先判断任期、索引,在判断消息类型] + +StartEtcd + 1、etcdserver.NewServer -> + heartbeat := time.Duration(cfg.TickMs) * time.Millisecond + MySelfStartRaft + newRaftNode + r.ticker = time.NewTicker(r.heartbeat) 创建定时器、心跳 + startNode -> + raft.StartNode -> + go n.run() + rd = n.rn.readyWithoutAccept() 获取待发送消息,会获取到r.msgs + readyc = n.readyc 待发送消息channel + - case pm := <-propc 网络发来的消息、除Propose消息 + - case m := <-n.recvc G 处理来自peer的消息 + - case cc := <-n.confc + - case <-n.tickc F取出数据 + n.rn.Tick() + rn.raft.tick() 根据角色调用自己的函数 + - r.tickElection + r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) 该函数是处理所有到来消息的入口 + r.send(pb.Message + r.msgs = append(r.msgs, m) 放入要发送的消息 + - r.tickHeartbeat + r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + - case readyc <- rd A放入数据 + - case <-advancec: + - case c := <-n.status: + - case <-n.stop: + tr.AddPeer + startPeer 与每个peer都建立一个链接 + r.Process + s.Process + s.r.Step(ctx, m) + n.step + stepWithWaitOption + case n.recvc <- m G 接收来自peer的消息 + + 2、e.Server.Start -> + EtcdServer.strat -> + s.start() + go s.run() + --> | # s.r=raftNode + --> | s.r.start(rh) + --> | go func() + --> | - case <-r.ticker.C: 接收定时器信号 + --> | r.tick() + --> | r.Tick() + --> | case n.tickc <- struct{}{} F放入数据、不会阻塞,有size + --> | - case rd := <-r.Ready() 获取可以发送的数据 A取出数据 + case r.applyc 
<- ap B放入数据 + r.transport.Send(msgs) 发出响应数据 + --> | - case <-r.stopped: + + - case ap := <-s.r.apply() B取出数据 + 读取applyc的数据,封装为JOB,放入调度器 + - case leases := <-expiredLeaseC + 处理过期租约 + - case err := <-s.errorc + 处理运行过程中出现的err,直接退出 + - getSyncC + - case <-s.stop: + 启动过程中失败 + + 3、e.servePeers + 4、e.serveClients + 5、e.serveMetrics + ``` -etcd is now running and serving client requests. For more, please check out: +![iShot2021-07-15 23.46.37](./images/unstable_index.png) -- [Interactive etcd playground](http://play.etcd.io) -- [Animated quick demo](https://etcd.io/docs/latest/demo) +``` +快照 + storage + unstable 的区别 +compacted <--- compacted <--- applied <--- committed <--- stable <--- unstable +WAL 日志 +11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 +-------------------------------------------------------------------|--MemoryStorage|file--|----------------- +-----压缩---------|---------------压缩---------------------|------------------------------------------------- +----快照----------------- |----------------快照--------------------- | storage: 落盘的 | unstable 内存中的 +----快照----------------- |----------------快照--------------------- | | 在没有被持久化之前如果遇到了换届选举,这个日志可能会被相同索引值的新日志覆盖 -### etcd TCP ports -The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. +每一条日志Entry需要经过unstable、stable、committed、applied、compacted五个阶段,接下来总结一下日志的状态转换过程: -[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt +刚刚收到一条日志会被存储在unstable中,日志在没有被持久化之前如果遇到了换届选举,这个日志可能会被相同索引值的新日志覆盖,这个一点可以在raftLog.maybeAppend()和unstable.truncateAndAppend()找到相关的处理逻辑. +unstable中存储的日志会被使用者写入持久存储(文件)中,这些持久化的日志就会从unstable转移到MemoryStorage中. +读者可能会问MemoryStorage并不是持久存储啊,其实日志是被双写了,文件和MemoryStorage各存储了一份,而raft包只能访问MemoryStorage中的内容.这样设计的目的是用内存缓冲文件中的日志,在频繁操作日志的时候性能会更高. +此处需要注意的是,MemoryStorage中的日志仅仅代表日志是可靠的,与提交和应用没有任何关系. 
+leader会搜集所有peer的接收日志状态,只要日志被超过半数以上的peer接收,那么就会提交该日志,peer接收到leader的数据包更新自己的已提交的最大索引值,这样小于等于该索引值的日志就是可以被提交的日志. +已经被提交的日志会被使用者获得,并逐条应用,进而影响使用者的数据状态. +已经被应用的日志意味着使用者已经把状态持久化在自己的存储中了,这条日志就可以删除了,避免日志一直追加造成存储无限增大的问题.不要忘了所有的日志都存储在MemoryStorage中,不删除已应用的日志对于内存是一种浪费,这也就是日志的compacted. -### Running a local etcd cluster +每次用户提交日志,该日志会保存到 MemoryStorage 以及wal里,每当raft发送给上层程序一批已经commited日志,就会触发maybeTriggerSnapshot,当用户apply以后 +判断是否进行触发 MemoryStorage 打快照,当打了快照以后,会把当前快照点10000条以前的记录从 MemoryStorage.ents去除掉 【俗称压缩】 -First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications. -Our [Procfile script](./Procfile) will set up a local example cluster. Start it with: -```bash -goreman start +MemoryStorage并不是持久存储啊,其实日志是被双写了,文件和MemoryStorage各存储了一份, +而raft包只能访问MemoryStorage中的内容.这样设计的目的是用内存缓冲文件中的日志,在频繁操作日志的时候性能会更高. +此处需要注意的是,MemoryStorage中的日志仅仅代表日志是可靠的,与提交和应用没有任何关系. ``` -This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and optionally etcd `grpc-proxy`, which runs locally and composes a cluster. +## Compact + +``` +1、新建Snapshot之后,一般会调用MemoryStorage.Compact()方法将MemoryStorage.ents中指定索引之前的Entry记录全部抛弃, +从而实现压缩MemoryStorage.ents 的目的,具体实现如下: [GC] +func (ms *MemoryStorage) Compact(compactIndex uint64) + +2、清除kvindex的修订版本,以及bolt.db里的历史数据 -Every cluster member and proxy accepts key value reads and key value writes. -Follow the steps in [Procfile.learner](./Procfile.learner) to add a learner node to the cluster. Start the learner node with: -```bash -goreman -f ./Procfile.learner start ``` -### Install etcd client v3 +### WAl数据日志数据 -```bash -go get go.etcd.io/etcd/client/v3 +``` +type Record struct { + Type int64 + Crc uint32 + Data []byte +} +- metadataType :1 元数据类型,元数据会保存当前的node id和cluster id. 
+ type Metadata struct { + NodeID uint64 + ClusterID uint64 + } +- entryType :2 日志条目 + type Entry struct { + Term uint64 + Index uint64 + Type EntryType + EntryNormal + # msgType + EntryConfChange + EntryConfChangeV2 + Data []byte + msg 👆🏻 + } +- stateType :3 当前Term,当前竞选者、当前已经commit的日志. +- crcType :4 存放crc校验字段 Data为nil +- snapshotType :5 快照的、日志的Index和Term + type Snapshot struct { + Index uint64 + Term uint64 + ConfState *raftpb.ConfState + } ``` -### Next steps +``` +raft commit->apply 的数据 封装在ready结构体里 <-r.Ready() + raftNode拿到该ready做一些处理,过滤出操作日志 publishEntries + 上层应用拿到过滤后的,将其应用到kvstore【【 +``` -Now it's time to dig into the full etcd API and other guides. +### 集群节点变更 -- Read the full [documentation][]. -- Explore the full gRPC [API][]. -- Set up a [multi-machine cluster][clustering]. -- Learn the [config format, env variables and flags][configuration]. -- Find [language bindings and tools][integrations]. -- Use TLS to [secure an etcd cluster][security]. -- [Tune etcd][tuning]. +``` +1、先检查是否有待应用的变更 +2、将变更信息放入raft unstable 等待发送----->发送,等到apply +3、apply 该变更 +case rd := <-r.Ready(): 从raft拿到要apply的消息 + case r.applyc <- ap: + go: + - ap := <-s.r.apply() + - s.applyAll(&ep, &ap) + - s.applyEntries(ep, apply) + - s.apply(ents, &ep.confState) + - case raftpb.EntryConfChange: + - s.applyConfChange(cc, confState, shouldApplyV3) + - *s.r.ApplyConfChange(cc) 获取应用配置变更之后的集群状态 + - cs := r.applyConfChange(cc) 返回应用配置变更之后的集群状态,已生效,只更新了quorum.JointConfig与peer信息 + - r.switchToConfig(cfg, prs) + - + | s.cluster.PromoteMember + | s.cluster.AddMember -----> 更新v2store[memory node tree]、backend[bolt.db] + | s.cluster.RemoveMember |---> 触发watcher + | s.cluster.UpdateRaftAttributes + + +r.Advance() -[documentation]: https://etcd.io/docs/latest -[api]: https://etcd.io/docs/latest/learning/api -[clustering]: https://etcd.io/docs/latest/op-guide/clustering -[configuration]: https://etcd.io/docs/latest/op-guide/configuration -[integrations]: https://etcd.io/docs/latest/integrations -[security]: 
https://etcd.io/docs/latest/op-guide/security -[tuning]: https://etcd.io/docs/latest/tuning +``` -## Contact +curl -H "X-Etcd-Cluster-ID:cdf818194e3a8c32" -H "X-PeerURLs:http://127.0.0.1:12345" -H "X-Min-Cluster-Version: 3.5.2" +-H "X-Server-Version:3.5.2" http://localhost:2380/raft/stream/message/8e9e05c52164694d +curl -X "POST" -H "X-Server-From:8e9e05c52164694d" "-H "X-Etcd-Cluster-ID:cdf818194e3a8c32" -H " +X-PeerURLs:http://127.0.0.1:12345" -H "X-Min-Cluster-Version: 3.5.2" -H "X-Server-Version:3.5.2" +-d "" http://localhost:2380/raft/stream/snapshot -- Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev) -- Slack: [#etcd](https://kubernetes.slack.com/messages/C3HD8ARJ5/details/) channel on Kubernetes ([get an invite](http://slack.kubernetes.io/)) -- [Community meetings](#Community-meetings) +humanize.Bytes net.SplitHostPort([2001:db8:1f70::999:de8:7648:6e8]:100)->[2001:db8:1f70::999:de8:7648:6e8] -### Community meetings +BoltDB本身已经实现了事务的隔离性、原子性、持久化、一致性,并提供了并发的单写+多读 -etcd contributors and maintainers have monthly (every four weeks) meetings at 11:00 AM (USA Pacific) on Thursday. +Linearizable、Serializable Linearizable Read (线性读),通俗地讲,就是读请求需要读到最新的已经提交的数据,不会读到旧数据 -An initial agenda will be posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or other agendas. +V3和V2版本的对比 etcd的v2版本有下面的一些问题 Watch 机制可靠性问题 etcd v2 是内存型、不支持保存 key 历史版本的数据库,只在内存中使用滑动窗口保存了最近的 1000 条变更事件,当 etcd server +写请求较多、网络波动时等场景,很容易出现事件丢失问题,进而又触发 client 数据全量拉取,产生大量 expensive request,甚至导致 etcd 雪崩. 
性能瓶颈问题 1、etcd v2早起使用的是 HTTP/1.x +API.HTTP/1.x 协议没有压缩机制,大量的请求可能导致 etcd 出现 CPU 高负载、OOM、丢包等问题; 2、etcd v2 client 会通过 HTTP 长连接轮询 Watch 事件,当 watcher 较多的时候,因 +HTTP/1.x 不支持多路复用,会创建大量的连接,消耗 server 端过多的 socket 和内存资源; 3、对于 key 中的 TTL过期时间,如果大量 key TTL 一样,也需要分别为每个 key 发起续期操作,当 key +较多的时候,这会显著增加集群负载、导致集群性能显著下降; 内存开销问题 etcd v2 在内存维护了一颗树来保存所有节点 key 及 value.在数据量场景略大的场景,如配置项较多、存储了大量 Kubernetes Events, +它会导致较大的内存开销,同时 etcd 需要定时把全量内存树持久化到磁盘.这会消耗大量的 CPU 和磁盘 I/O 资源,对系统的稳定性造成一定影响. etcd v3 的出现就是为了解决以上稳定性、扩展性、性能问题 1、在内存开销、Watch +事件可靠性、功能局限上,它通过引入 B-tree、boltdb 实现一个 MVCC 数据库,数据模型从层次型目录结构改成扁平的 key-value,提供稳定可靠的事件通知,实现了事务,支持多 key 原子更新,同时基于 boltdb +的持久化存储,显著降低了 etcd 的内存占用、避免了 etcd v2 定期生成快照时的昂贵的资源开销; 2、etcd v3 使用了 gRPC API,使用 protobuf 定义消息,消息编解码性能相比 JSON 超过 2 倍以上,并通过 +HTTP/2.0 多路复用机制,减少了大量 watcher 等场景下的连接数; 3、使用 Lease 优化 TTL 机制,每个 Lease 具有一个 TTL,相同的 TTL 的 key 关联一个 Lease,Lease +过期的时候自动删除相关联的所有 key,不再需要为每个 key 单独续期; 4、etcd v3 支持范围、分页查询,可避免大包等 expensive request. -Meeting recordings are uploaded to official etcd [YouTube channel]. +pb.Message.Entries = [ pb.InternalRaftRequest ] -Get calendar invitation by joining [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev) mailing group. 
+etcd中每新建一个key ,会为其分配一个主版本,同时还有一个sub版本,长度17byte 格式: 8byte_8byte 例如[00000002_00000000]---> 转换成bolt.db的键值就是 +00000000000000025f0000000000000000 -Join Hangouts Meet: [meet.google.com/umg-nrxn-qvs](https://meet.google.com/umg-nrxn-qvs) +### 线性一致性读流程 -Join by phone: +1 405-792-0633‬ PIN: ‪299 906‬# +``` +localNode.run() 一直死循环 + 判断是否有ready的数据,其中 r.readStates就是一项指标 + n.readyc <- ready + +--------------------------------- +raftNode.start + case rd := <-r.Ready(): 消费端: 获取ready数据,包含r.ReadStates = r.readStates + select { + case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: // 发送响应数据 + case <-time.After(internalTimeout): + r.lg.Warn("发送读状态超时", zap.Duration("timeout", internalTimeout)) + case <-r.stopped: + return + } +--------------------------------- +leader: + stepLeader; + case pb.MsgReadIndex: + 1、集群只有一个节点 + r.readStates = append(r.readStates, ReadState{Index:r.raftLog.committed, RequestCtx: 自增ID}) + 2、 + 引入pendingReadIndex、readIndexQueue 心跳广播 自增ID 等待大多数据集群节点回应 自增ID + case pb.MsgHeartbeatResp: + rss := r.readOnly.advance(m) + +rd.ReadStates +---------------------------------- +linearizableReadLoop 发送MsgReadIndex消息, + s.requestCurrentIndex + 1、s.sendReadIndex(自增ID) + s.r.ReadIndex 发送pb.MsgReadIndex消息,数据是自增ID + 2、case rs := <-s.r.readStateC: 等待响应 得到ReadState{Index:r.raftLog.committed, RequestCtx: 自增ID} + return r.raftLog.committed + r.raftLog.committed >= s.getAppliedIndex() 如果满足这个条件 + nr.notify(nil) 相当于往nc.c发消息 +-------------- +get +linearizeReadNotify 线性读,触发linearizableReadLoop,并等待结果 + 1、case s.readwaitc <- struct{}{}: 触发线性读 + 2、case <-nc.c: 等待结果 + return nc.err +继续在本节点读取数据 + +``` -[shared-meeting-notes]: https://docs.google.com/document/d/16XEGyPBisZvmmoIHSZzv__LoyOeluC5a4x353CX0SIM/edit -[YouTube channel]: https://www.youtube.com/channel/UC7tUWR24I5AR9NMsG-NYBlg +### 租约检查点机制 -## Contributing +``` +bug: + 如果租约在到期前,Leader切换, 那么它的租约会重置 +如何解决这个问题 + 每过一段时间,将每个key剩余多长时间同步到其他节点的db中,这样如果发生leader切换,租约的误差也只是 这个间隔 + - 定期同步租约剩余时间至其他节点的db +issue: + 第一发送 ttl 10s remainingTtl 5s 
+ 假设将该消息发送到对端,因为网络问题花了3秒, 那么当对端收到时,实际remainingTtl应该是2s,但还是变成了5s +- 如果时间这么长,那这个节点肯定出问题了,那么也不会成为leader + + +作者回复: +从原理上我们知道lease是leader在内存中维护过期最小堆的,因此续期操作client是必须要直接发送给leader的, +如果follower节点收到了keepalive请求,会转发给leader节点.续期操作不经过raft协议处理同步, +而leaseGrant/Revoke请求会经过raft协议同步给各个节点,因此任意节点都可以处理它. +``` -See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow. +``` +curl -L http://127.0.0.1:2379/version +curl -L http://192.168.59.156:2379/metrics +etcdctl cluster-health +etcdutl backup --data-dir /var/lib/etcd --backup-dir /tmp/etcd + +# no crt +etcdctl snap save a.db +etcdctl snapshot restore a.db +# crt +etcdctl snap save --cert=./cert/server.crt --cacert=./cert/ca.crt --key=./cert/server.key a.db +``` -## Reporting bugs +### leader trans -See [reporting bugs](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/reporting_bugs.md) for details about reporting any issues. +``` +Step() + case pb.MsgVote, pb.MsgPreVote: + 变更leader,....等操作 + + stepLeader() + case pb.MsgTransferLeader: + r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) // 给指定的节点发消息 + stepFollower() + case pb.MsgTimeoutNow: + r.hup(campaignTransfer) + # 给每一个节点发送 + r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) -## Reporting a security vulnerability +``` -See [security disclosure and release process](security/README.md) for details on how to report a security vulnerability and how the etcd team manages it. +证书解析认证性能极低 -## Issue and PR management +``` -See [issue triage guidelines](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_issues.md) for details on how issues are managed. 
+#创建一个admin role +etcdctl role add admin --user root:root +#分配一个可读写[hello,helly)范围数据的权限给admin role +etcdctl role grant-permission admin readwrite hello helly --user root:root +# 将用户alice和admin role关联起来,赋予admin权限给user +etcdctl user grant-role alice admin --user root:root +``` -See [PR management](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_prs.md) for guidelines on how pull requests are managed. +etcd 保存用户 key 与版本号映射关系的数据结构 B-tree,为什么 etcd 使用它而不使用哈希表、平衡二叉树? -## etcd Emeritus Maintainers +``` +从 etcd 的功能特性上分析, 因 etcd 支持范围查询,因此保存索引的数据结构也必须支持范围查询才行.所以哈希表不适合,而 B-tree 支持范围查询. +从性能上分析,平横二叉树每个节点只能容纳一个数据、导致树的高度较高,而 B-tree 每个节点可以容纳多个数据, +树的高度更低,更扁平,涉及的查找次数更少,具有优越的增、删、改、查性能. +``` + +你认为 etcd 为什么删除使用 lazy delete 方式呢? 相比同步 delete, 各有什么优缺点? + +``` +采用延迟删除 +1、为了保证key对应的watcher能够获取到key的所有状态信息,留给watcher时间做相应的处理. +2、实时从boltdb删除key,会可能触发树的不平衡,影响其他读写请求的性能. + +etcd要保存key的历史版本,直接删除就不能支持revision查询了; +lazy方式性能更高,空闲空间可以再利用; +``` + +当你突然删除大量 key 后,db 大小是立刻增加还是减少呢? -These emeritus maintainers dedicated a part of their career to etcd and reviewed code, triaged bugs and pushed the project forward over a substantial period of time. Their contribution is greatly appreciated. +``` +应该会增大,etcd不会立即把空间返回系统而是维护起来后续使用,维护空闲页面应该需要一些内存; +``` + +``` -* Fanmin Shi -* Anthony Romano -* Brandon Philips -* Joe Betz -* Gyuho Lee -* Jingyi Hu -* Wenjia Zhang -* Xiang Li -* Ben Darnell +$ etcdctl txn -i +compares: +mod("Alice") = "2" +mod("Bob") = "3" -### License +success requests (get, put, del): +put Alice 100 +put Bob 300 + +success requests (get, put, del): //对应Then语句 +put Alice 100 //Alice账号初始资金200减100 +put Bob 300 //Bob账号初始资金200加100 -etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. +failure requests (get, put, del): //对应Else语句 +get Alice +get Bob + + +``` + + +``` +若 etcd 节点内存不足,可能会导致 db 文件对应的内存页被换出. +当读请求命中的页未在内存中时,就会产生缺页异常,导致读过程中产生磁盘 IO. +这样虽然避免了 etcd 进程 OOM,但是此过程会产生较大的延时. 
+``` + + +./benchmark --conns=100 --clients=1000 range hello --consistency=l --total=500000 +./benchmark --conns=100 --clients=1000 range hello --consistency=s --total=500000 + +# 256byte +./benchmark --conns=100 --clients=1000 put --key-size=8 --sequential-keys --total=10000000 --val-size=256 +# 1m +./benchmark --conns=100 --clients=1000 put --key-size=8 --sequential-keys --total=500 --val-size=1024000 + + + + + +``` +{ + "Key":"a", + "Modified":{ + "Main":8, + "Sub":0 + }, + "Generations":[ + { + "VersionCount":4, + "Created":{ + "Main":2, + "Sub":0 + }, + "Revs":[ + { + "Main":2, + "Sub":0 + }, + { + "Main":3, + "Sub":0 + }, + { + "Main":4, + "Sub":0 + }, + { + "Main":5, + "Sub":0 + } + ] + }, + { + "VersionCount":3, + "Created":{ + "Main":6, + "Sub":0 + }, + "Revs":[ + { + "Main":6, + "Sub":0 + }, + { + "Main":7, + "Sub":0 + }, + { + "Main":8, + "Sub":0 + } + ] + } + ] +} +``` \ No newline at end of file diff --git a/api/LICENSE b/api/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/api/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/api/authpb/auth.pb.go b/api/authpb/auth.pb.go deleted file mode 100644 index 16affcd62cf..00000000000 --- a/api/authpb/auth.pb.go +++ /dev/null @@ -1,1158 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: auth.proto - -package authpb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Permission_Type int32 - -const ( - READ Permission_Type = 0 - WRITE Permission_Type = 1 - READWRITE Permission_Type = 2 -) - -var Permission_Type_name = map[int32]string{ - 0: "READ", - 1: "WRITE", - 2: "READWRITE", -} - -var Permission_Type_value = map[string]int32{ - "READ": 0, - "WRITE": 1, - "READWRITE": 2, -} - -func (x Permission_Type) String() string { - return proto.EnumName(Permission_Type_name, int32(x)) -} - -func (Permission_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{2, 0} -} - -type UserAddOptions struct { - NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAddOptions) Reset() { *m = UserAddOptions{} } -func (m *UserAddOptions) String() string { return proto.CompactTextString(m) } -func (*UserAddOptions) ProtoMessage() {} -func (*UserAddOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{0} -} -func (m *UserAddOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserAddOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserAddOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserAddOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAddOptions.Merge(m, src) -} -func (m *UserAddOptions) XXX_Size() int { - return m.Size() -} -func (m *UserAddOptions) XXX_DiscardUnknown() { - xxx_messageInfo_UserAddOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAddOptions proto.InternalMessageInfo - -// User is a single entry in the bucket authUsers -type User struct { - Name []byte 
`protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"` - Options *UserAddOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{1} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_User.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(m, src) -} -func (m *User) XXX_Size() int { - return m.Size() -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo - -// Permission is a single entity -type Permission struct { - PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Permission) Reset() { *m = Permission{} } -func (m *Permission) String() string { return proto.CompactTextString(m) } -func (*Permission) ProtoMessage() {} -func 
(*Permission) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{2} -} -func (m *Permission) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Permission.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Permission) XXX_Merge(src proto.Message) { - xxx_messageInfo_Permission.Merge(m, src) -} -func (m *Permission) XXX_Size() int { - return m.Size() -} -func (m *Permission) XXX_DiscardUnknown() { - xxx_messageInfo_Permission.DiscardUnknown(m) -} - -var xxx_messageInfo_Permission proto.InternalMessageInfo - -// Role is a single entry in the bucket authRoles -type Role struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{3} -} -func (m *Role) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Role.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Role) XXX_Merge(src proto.Message) { - xxx_messageInfo_Role.Merge(m, src) -} -func (m *Role) XXX_Size() int { - return m.Size() -} -func (m *Role) XXX_DiscardUnknown() { - 
xxx_messageInfo_Role.DiscardUnknown(m) -} - -var xxx_messageInfo_Role proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) - proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions") - proto.RegisterType((*User)(nil), "authpb.User") - proto.RegisterType((*Permission)(nil), "authpb.Permission") - proto.RegisterType((*Role)(nil), "authpb.Role") -} - -func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } - -var fileDescriptor_8bbd6f3875b0e874 = []byte{ - // 338 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, - 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, - 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, - 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, - 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, - 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, - 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, - 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, - 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, - 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, - 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, - 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, - 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, - 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, - 
0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, - 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, - 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, - 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, - 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb, - 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, - 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, - 0x00, 0x00, -} - -func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserAddOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.NoPassword { - i-- - if m.NoPassword { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *User) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *User) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if 
m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAuth(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Permission) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Permission) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if m.PermType != 0 { - i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Role) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], 
nil -} - -func (m *Role) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.KeyPermission) > 0 { - for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAuth(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - offset -= sovAuth(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *UserAddOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NoPassword { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *User) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovAuth(uint64(l)) - } - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovAuth(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Permission) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PermType != 0 { - n += 1 + sovAuth(uint64(m.PermType)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = 
len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Role) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.KeyPermission) > 0 { - for _, e := range m.KeyPermission { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovAuth(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *UserAddOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoPassword = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - 
} - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *User) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: User: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) - if m.Password == nil { - m.Password = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &UserAddOptions{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Permission) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Permission: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) - } - m.PermType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PermType |= Permission_Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyPermission = append(m.KeyPermission, &Permission{}) - if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuth(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAuth - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAuth - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAuth - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group") -) diff --git a/api/etcdserverpb/etcdserver.pb.go b/api/etcdserverpb/etcdserver.pb.go deleted file mode 100644 index 38434d09c56..00000000000 --- a/api/etcdserverpb/etcdserver.pb.go +++ /dev/null @@ -1,1002 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: etcdserver.proto - -package etcdserverpb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Request struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` - Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` - Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` - Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` - PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` - PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` - PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` - Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` - Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` - Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` - Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` - Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` - Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` - Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` - Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` - Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_09ffbeb3bebbce7e, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(m, src) -} -func (m *Request) XXX_Size() int { - return m.Size() -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -type Metadata struct { - NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` - ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_09ffbeb3bebbce7e, []int{1} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return m.Size() -} 
-func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Request)(nil), "etcdserverpb.Request") - proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") -} - -func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) } - -var fileDescriptor_09ffbeb3bebbce7e = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, - 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, - 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, - 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, - 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, - 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, - 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, - 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, - 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, - 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, - 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, - 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, - 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, - 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, - 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, - 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 
0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, - 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, - 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, - 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, - 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, - 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, - 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, - 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, - 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, -} - -func (m *Request) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Request) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Refresh != nil { - i-- - if *m.Refresh { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - } - i-- - if m.Stream { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) - i-- - dAtA[i] = 0x78 - i-- - if m.Quorum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x70 - i-- - if m.Sorted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x68 - i-- - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x60 - i = 
encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) - i-- - dAtA[i] = 0x58 - i-- - if m.Wait { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) - i-- - dAtA[i] = 0x48 - if m.PrevExist != nil { - i-- - if *m.PrevExist { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) - i-- - dAtA[i] = 0x38 - i -= len(m.PrevValue) - copy(dAtA[i:], m.PrevValue) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) - i-- - dAtA[i] = 0x32 - i-- - if m.Dir { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - i -= len(m.Val) - copy(dAtA[i:], m.Val) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) - i-- - dAtA[i] = 0x22 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x1a - i -= len(m.Method) - copy(dAtA[i:], m.Method) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) - i-- - dAtA[i] = 0x12 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) - i-- - dAtA[i] = 0x10 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { - offset -= 
sovEtcdserver(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Request) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.ID)) - l = len(m.Method) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Path) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Val) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 2 - l = len(m.PrevValue) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 1 + sovEtcdserver(uint64(m.PrevIndex)) - if m.PrevExist != nil { - n += 2 - } - n += 1 + sovEtcdserver(uint64(m.Expiration)) - n += 2 - n += 1 + sovEtcdserver(uint64(m.Since)) - n += 2 - n += 2 - n += 2 - n += 1 + sovEtcdserver(uint64(m.Time)) - n += 3 - if m.Refresh != nil { - n += 3 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Metadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.NodeID)) - n += 1 + sovEtcdserver(uint64(m.ClusterID)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovEtcdserver(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEtcdserver(x uint64) (n int) { - return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Request) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Request: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Method = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Val = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Dir = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrevValue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) - } - m.PrevIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PrevIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PrevExist = &b - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) - } - m.Expiration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Expiration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Wait = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) - } - m.Since = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Since |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Recursive = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sorted", 
wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sorted = bool(v != 0) - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Quorum = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - m.Time = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Time |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stream = bool(v != 0) - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Refresh = &b - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodeID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) - } - m.ClusterID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEtcdserver(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEtcdserver - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEtcdserver - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEtcdserver - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEtcdserver = fmt.Errorf("proto: unexpected end of group") -) diff --git a/api/etcdserverpb/raft_internal.pb.go b/api/etcdserverpb/raft_internal.pb.go deleted file mode 100644 index d59e65813f4..00000000000 --- a/api/etcdserverpb/raft_internal.pb.go +++ /dev/null @@ -1,2677 +0,0 @@ -// Code 
generated by protoc-gen-gogo. DO NOT EDIT. -// source: raft_internal.proto - -package etcdserverpb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - membershippb "go.etcd.io/etcd/api/v3/membershippb" - _ "go.etcd.io/etcd/api/v3/versionpb" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type RequestHeader struct { - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // username is a username that is associated with an auth token of gRPC connection - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - // auth_revision is a revision number of auth.authStore. 
It is not related to mvcc - AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestHeader) Reset() { *m = RequestHeader{} } -func (m *RequestHeader) String() string { return proto.CompactTextString(m) } -func (*RequestHeader) ProtoMessage() {} -func (*RequestHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{0} -} -func (m *RequestHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestHeader.Merge(m, src) -} -func (m *RequestHeader) XXX_Size() int { - return m.Size() -} -func (m *RequestHeader) XXX_DiscardUnknown() { - xxx_messageInfo_RequestHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestHeader proto.InternalMessageInfo - -// An InternalRaftRequest is the union of all requests which can be -// sent via raft. 
-type InternalRaftRequest struct { - Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"` - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"` - Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` - Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"` - DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"` - Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"` - Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"` - LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"` - LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"` - Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"` - LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint,proto3" json:"lease_checkpoint,omitempty"` - AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"` - AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"` - AuthStatus *AuthStatusRequest `protobuf:"bytes,1013,opt,name=auth_status,json=authStatus,proto3" json:"auth_status,omitempty"` - Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"` - AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"` - AuthUserDelete *AuthUserDeleteRequest 
`protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"` - AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"` - AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"` - AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"` - AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"` - AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"` - AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"` - AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"` - AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"` - AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"` - AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"` - AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"` - ClusterVersionSet *membershippb.ClusterVersionSetRequest 
`protobuf:"bytes,1300,opt,name=cluster_version_set,json=clusterVersionSet,proto3" json:"cluster_version_set,omitempty"` - ClusterMemberAttrSet *membershippb.ClusterMemberAttrSetRequest `protobuf:"bytes,1301,opt,name=cluster_member_attr_set,json=clusterMemberAttrSet,proto3" json:"cluster_member_attr_set,omitempty"` - DowngradeInfoSet *membershippb.DowngradeInfoSetRequest `protobuf:"bytes,1302,opt,name=downgrade_info_set,json=downgradeInfoSet,proto3" json:"downgrade_info_set,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } -func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } -func (*InternalRaftRequest) ProtoMessage() {} -func (*InternalRaftRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{1} -} -func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InternalRaftRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalRaftRequest.Merge(m, src) -} -func (m *InternalRaftRequest) XXX_Size() int { - return m.Size() -} -func (m *InternalRaftRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo - -type EmptyResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } -func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } -func 
(*EmptyResponse) ProtoMessage() {} -func (*EmptyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{2} -} -func (m *EmptyResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EmptyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_EmptyResponse.Merge(m, src) -} -func (m *EmptyResponse) XXX_Size() int { - return m.Size() -} -func (m *EmptyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_EmptyResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo - -// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? -// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. -// For avoiding misusage the field, we have an internal version of AuthenticateRequest. 
-type InternalAuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - // simple_token is generated in API layer (etcdserver/v3_server.go) - SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } -func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*InternalAuthenticateRequest) ProtoMessage() {} -func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{3} -} -func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src) -} -func (m *InternalAuthenticateRequest) XXX_Size() int { - return m.Size() -} -func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") - proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") - proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") - 
proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") -} - -func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) } - -var fileDescriptor_b4c9a9be0cfca103 = []byte{ - // 1054 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x56, 0x5d, 0x6f, 0x1b, 0x45, - 0x14, 0xad, 0xd3, 0x34, 0x89, 0xc7, 0x49, 0x9a, 0x4e, 0x52, 0x3a, 0x38, 0x92, 0x71, 0x03, 0x2d, - 0x01, 0x8a, 0x53, 0x1c, 0x78, 0xe1, 0x05, 0x5c, 0x3b, 0x4a, 0x83, 0x4a, 0x15, 0x6d, 0x0b, 0xaa, - 0x84, 0xd0, 0x32, 0xde, 0xbd, 0xb1, 0xb7, 0x59, 0xef, 0x2e, 0x33, 0x63, 0x37, 0x7d, 0xe5, 0x91, - 0x67, 0x40, 0xfc, 0x0c, 0x3e, 0xff, 0x43, 0x85, 0xf8, 0x28, 0xf0, 0x07, 0x20, 0xbc, 0xf0, 0x0e, - 0xbc, 0xa3, 0xf9, 0xd8, 0x5d, 0xaf, 0x3d, 0xce, 0xdb, 0xfa, 0xde, 0x73, 0xcf, 0x39, 0x33, 0x73, - 0xef, 0x78, 0xd0, 0x3a, 0xa3, 0x47, 0xc2, 0x0d, 0x22, 0x01, 0x2c, 0xa2, 0x61, 0x23, 0x61, 0xb1, - 0x88, 0xf1, 0x32, 0x08, 0xcf, 0xe7, 0xc0, 0x46, 0xc0, 0x92, 0x6e, 0x75, 0xa3, 0x17, 0xf7, 0x62, - 0x95, 0xd8, 0x91, 0x5f, 0x1a, 0x53, 0x5d, 0xcb, 0x31, 0x26, 0x52, 0x66, 0x89, 0x67, 0x3e, 0xeb, - 0x32, 0xb9, 0x43, 0x93, 0x60, 0x67, 0x04, 0x8c, 0x07, 0x71, 0x94, 0x74, 0xd3, 0x2f, 0x83, 0xb8, - 0x9e, 0x21, 0x06, 0x30, 0xe8, 0x02, 0xe3, 0xfd, 0x20, 0x49, 0xba, 0x63, 0x3f, 0x34, 0x6e, 0x8b, - 0xa1, 0x15, 0x07, 0x3e, 0x1e, 0x02, 0x17, 0xb7, 0x81, 0xfa, 0xc0, 0xf0, 0x2a, 0x9a, 0x3b, 0xe8, - 0x90, 0x52, 0xbd, 0xb4, 0x3d, 0xef, 0xcc, 0x1d, 0x74, 0x70, 0x15, 0x2d, 0x0d, 0xb9, 0x34, 0x3f, - 0x00, 0x32, 0x57, 0x2f, 0x6d, 0x97, 0x9d, 0xec, 0x37, 0xbe, 0x81, 0x56, 0xe8, 0x50, 0xf4, 0x5d, - 0x06, 0xa3, 0x40, 0x6a, 0x93, 0xf3, 0xb2, 0xec, 0xd6, 0xe2, 0xa7, 0xdf, 0x93, 0xf3, 0xbb, 0x8d, - 0xd7, 0x9c, 0x65, 0x99, 0x75, 0x4c, 0xf2, 0xcd, 0xc5, 0x4f, 0x54, 0xf8, 0xe6, 0xd6, 0x0f, 0x18, - 0xad, 0x1f, 0x98, 0x1d, 0x71, 0xe8, 0x91, 0x30, 0x06, 0xf0, 0x2e, 0x5a, 0xe8, 0x2b, 0x13, 0xc4, - 0xaf, 0x97, 0xb6, 0x2b, 0xcd, 0xcd, 
0xc6, 0xf8, 0x3e, 0x35, 0x0a, 0x3e, 0x1d, 0x03, 0x9d, 0xf2, - 0x7b, 0x0d, 0xcd, 0x8d, 0x9a, 0xca, 0x69, 0xa5, 0x79, 0xd9, 0x4a, 0xe0, 0xcc, 0x8d, 0x9a, 0xf8, - 0x26, 0xba, 0xc0, 0x68, 0xd4, 0x03, 0x65, 0xb9, 0xd2, 0xac, 0x4e, 0x20, 0x65, 0x2a, 0x85, 0x6b, - 0x20, 0x7e, 0x19, 0x9d, 0x4f, 0x86, 0x82, 0xcc, 0x2b, 0x3c, 0x29, 0xe2, 0x0f, 0x87, 0xe9, 0x22, - 0x1c, 0x09, 0xc2, 0x6d, 0xb4, 0xec, 0x43, 0x08, 0x02, 0x5c, 0x2d, 0x72, 0x41, 0x15, 0xd5, 0x8b, - 0x45, 0x1d, 0x85, 0x28, 0x48, 0x55, 0xfc, 0x3c, 0x26, 0x05, 0xc5, 0x49, 0x44, 0x16, 0x6c, 0x82, - 0xf7, 0x4f, 0xa2, 0x4c, 0x50, 0x9c, 0x44, 0xf8, 0x2d, 0x84, 0xbc, 0x78, 0x90, 0x50, 0x4f, 0xc8, - 0x63, 0x58, 0x54, 0x25, 0xcf, 0x15, 0x4b, 0xda, 0x59, 0x3e, 0xad, 0x1c, 0x2b, 0xc1, 0x6f, 0xa3, - 0x4a, 0x08, 0x94, 0x83, 0xdb, 0x63, 0x34, 0x12, 0x64, 0xc9, 0xc6, 0x70, 0x47, 0x02, 0xf6, 0x65, - 0x3e, 0x63, 0x08, 0xb3, 0x90, 0x5c, 0xb3, 0x66, 0x60, 0x30, 0x8a, 0x8f, 0x81, 0x94, 0x6d, 0x6b, - 0x56, 0x14, 0x8e, 0x02, 0x64, 0x6b, 0x0e, 0xf3, 0x98, 0x3c, 0x16, 0x1a, 0x52, 0x36, 0x20, 0xc8, - 0x76, 0x2c, 0x2d, 0x99, 0xca, 0x8e, 0x45, 0x01, 0xf1, 0x03, 0xb4, 0xa6, 0x65, 0xbd, 0x3e, 0x78, - 0xc7, 0x49, 0x1c, 0x44, 0x82, 0x54, 0x54, 0xf1, 0x0b, 0x16, 0xe9, 0x76, 0x06, 0x32, 0x34, 0x69, - 0xb3, 0xbe, 0xee, 0x5c, 0x0c, 0x8b, 0x00, 0xdc, 0x42, 0x15, 0xd5, 0xdd, 0x10, 0xd1, 0x6e, 0x08, - 0xe4, 0x6f, 0xeb, 0xae, 0xb6, 0x86, 0xa2, 0xbf, 0xa7, 0x00, 0xd9, 0x9e, 0xd0, 0x2c, 0x84, 0x3b, - 0x48, 0x8d, 0x80, 0xeb, 0x07, 0x5c, 0x71, 0xfc, 0xb3, 0x68, 0xdb, 0x14, 0xc9, 0xd1, 0xd1, 0x88, - 0x6c, 0x53, 0x68, 0x1e, 0xc3, 0xef, 0x18, 0x23, 0x5c, 0x50, 0x31, 0xe4, 0xe4, 0xbf, 0x99, 0x46, - 0xee, 0x29, 0xc0, 0xc4, 0xca, 0xde, 0xd0, 0x8e, 0x74, 0x0e, 0xdf, 0xd5, 0x8e, 0x20, 0x12, 0x81, - 0x47, 0x05, 0x90, 0x7f, 0x35, 0xd9, 0x4b, 0x45, 0xb2, 0x74, 0x3a, 0x5b, 0x63, 0xd0, 0xd4, 0x5a, - 0xa1, 0x1e, 0xef, 0x99, 0x2b, 0x40, 0xde, 0x09, 0x2e, 0xf5, 0x7d, 0xf2, 0xe3, 0xd2, 0xac, 0x25, - 0xbe, 0xc7, 0x81, 0xb5, 0x7c, 0xbf, 0xb0, 0x44, 0x13, 0xc3, 0x77, 0xd1, 
0x5a, 0x4e, 0xa3, 0x87, - 0x80, 0xfc, 0xa4, 0x99, 0x9e, 0xb7, 0x33, 0x99, 0xe9, 0x31, 0x64, 0xab, 0xb4, 0x10, 0x2e, 0xda, - 0xea, 0x81, 0x20, 0x3f, 0x9f, 0x69, 0x6b, 0x1f, 0xc4, 0x94, 0xad, 0x7d, 0x10, 0xb8, 0x87, 0x9e, - 0xcd, 0x69, 0xbc, 0xbe, 0x1c, 0x4b, 0x37, 0xa1, 0x9c, 0x3f, 0x8a, 0x99, 0x4f, 0x7e, 0xd1, 0x94, - 0xaf, 0xd8, 0x29, 0xdb, 0x0a, 0x7d, 0x68, 0xc0, 0x29, 0xfb, 0x33, 0xd4, 0x9a, 0xc6, 0x0f, 0xd0, - 0xc6, 0x98, 0x5f, 0x39, 0x4f, 0x2e, 0x8b, 0x43, 0x20, 0x4f, 0xb5, 0xc6, 0xf5, 0x19, 0xb6, 0xd5, - 0x2c, 0xc6, 0x79, 0xdb, 0x5c, 0xa2, 0x93, 0x19, 0xfc, 0x01, 0xba, 0x9c, 0x33, 0xeb, 0xd1, 0xd4, - 0xd4, 0xbf, 0x6a, 0xea, 0x17, 0xed, 0xd4, 0x66, 0x46, 0xc7, 0xb8, 0x31, 0x9d, 0x4a, 0xe1, 0xdb, - 0x68, 0x35, 0x27, 0x0f, 0x03, 0x2e, 0xc8, 0x6f, 0x9a, 0xf5, 0xaa, 0x9d, 0xf5, 0x4e, 0xc0, 0x45, - 0xa1, 0x8f, 0xd2, 0x60, 0xc6, 0x24, 0xad, 0x69, 0xa6, 0xdf, 0x67, 0x32, 0x49, 0xe9, 0x29, 0xa6, - 0x34, 0x98, 0x1d, 0xbd, 0x62, 0x92, 0x1d, 0xf9, 0x55, 0x79, 0xd6, 0xd1, 0xcb, 0x9a, 0xc9, 0x8e, - 0x34, 0xb1, 0xac, 0x23, 0x15, 0x8d, 0xe9, 0xc8, 0xaf, 0xcb, 0xb3, 0x3a, 0x52, 0x56, 0x59, 0x3a, - 0x32, 0x0f, 0x17, 0x6d, 0xc9, 0x8e, 0xfc, 0xe6, 0x4c, 0x5b, 0x93, 0x1d, 0x69, 0x62, 0xf8, 0x21, - 0xaa, 0x8e, 0xd1, 0xa8, 0x46, 0x49, 0x80, 0x0d, 0x02, 0xae, 0xfe, 0x7f, 0xbf, 0xd5, 0x9c, 0x37, - 0x66, 0x70, 0x4a, 0xf8, 0x61, 0x86, 0x4e, 0xf9, 0xaf, 0x50, 0x7b, 0x1e, 0x0f, 0xd0, 0x66, 0xae, - 0x65, 0x5a, 0x67, 0x4c, 0xec, 0x3b, 0x2d, 0xf6, 0xaa, 0x5d, 0x4c, 0x77, 0xc9, 0xb4, 0x1a, 0xa1, - 0x33, 0x00, 0xf8, 0x23, 0xb4, 0xee, 0x85, 0x43, 0x2e, 0x80, 0xb9, 0xe6, 0x2d, 0xe3, 0x72, 0x10, - 0xe4, 0x33, 0x64, 0x46, 0x60, 0xfc, 0x21, 0xd3, 0x68, 0x6b, 0xe4, 0xfb, 0x1a, 0x78, 0x0f, 0xc4, - 0xd4, 0xad, 0x77, 0xc9, 0x9b, 0x84, 0xe0, 0x87, 0xe8, 0x4a, 0xaa, 0xa0, 0xc9, 0x5c, 0x2a, 0x04, - 0x53, 0x2a, 0x9f, 0x23, 0x73, 0x0f, 0xda, 0x54, 0xde, 0x55, 0xb1, 0x96, 0x10, 0xcc, 0x26, 0xb4, - 0xe1, 0x59, 0x50, 0xf8, 0x43, 0x84, 0xfd, 0xf8, 0x51, 0xd4, 0x63, 0xd4, 0x07, 0x37, 0x88, 0x8e, - 0x62, 0x25, 
0xf3, 0x85, 0x96, 0xb9, 0x56, 0x94, 0xe9, 0xa4, 0xc0, 0x83, 0xe8, 0x28, 0xb6, 0x49, - 0xac, 0xf9, 0x13, 0x88, 0xfc, 0x31, 0x75, 0x11, 0xad, 0xec, 0x0d, 0x12, 0xf1, 0xd8, 0x01, 0x9e, - 0xc4, 0x11, 0x87, 0xad, 0xc7, 0x68, 0xf3, 0x8c, 0xeb, 0x1b, 0x63, 0x34, 0xaf, 0xde, 0x72, 0x25, - 0xf5, 0x96, 0x53, 0xdf, 0xf2, 0x8d, 0x97, 0xdd, 0x6a, 0xe6, 0x8d, 0x97, 0xfe, 0xc6, 0x57, 0xd1, - 0x32, 0x0f, 0x06, 0x49, 0x08, 0xae, 0x88, 0x8f, 0x41, 0x3f, 0xf1, 0xca, 0x4e, 0x45, 0xc7, 0xee, - 0xcb, 0x50, 0xe6, 0xe5, 0xd6, 0xc6, 0x93, 0x3f, 0x6b, 0xe7, 0x9e, 0x9c, 0xd6, 0x4a, 0x4f, 0x4f, - 0x6b, 0xa5, 0x3f, 0x4e, 0x6b, 0xa5, 0x2f, 0xff, 0xaa, 0x9d, 0xeb, 0x2e, 0xa8, 0x97, 0xe6, 0xee, - 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x30, 0x36, 0x53, 0xc6, 0x0b, 0x0b, 0x00, 0x00, -} - -func (m *RequestHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.AuthRevision != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) - i-- - dAtA[i] = 0x18 - } - if len(m.Username) > 0 { - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) - i-- - dAtA[i] = 0x12 - } - if m.ID != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func 
(m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.DowngradeInfoSet != nil { - { - size, err := m.DowngradeInfoSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x51 - i-- - dAtA[i] = 0xb2 - } - if m.ClusterMemberAttrSet != nil { - { - size, err := m.ClusterMemberAttrSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x51 - i-- - dAtA[i] = 0xaa - } - if m.ClusterVersionSet != nil { - { - size, err := m.ClusterVersionSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x51 - i-- - dAtA[i] = 0xa2 - } - if m.AuthRoleRevokePermission != nil { - { - size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0xa2 - } - if m.AuthRoleGrantPermission != nil { - { - size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x9a - } - if m.AuthRoleGet != nil { - { - size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x92 - } - if m.AuthRoleDelete != nil { - { - size, err 
:= m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x8a - } - if m.AuthRoleAdd != nil { - { - size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x82 - } - if m.AuthRoleList != nil { - { - size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x9a - } - if m.AuthUserList != nil { - { - size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x92 - } - if m.AuthUserRevokeRole != nil { - { - size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x8a - } - if m.AuthUserGrantRole != nil { - { - size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x82 - } - if m.AuthUserChangePassword != nil { - { - size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xfa - } - if m.AuthUserGet != nil { - { - size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] 
= 0xf2 - } - if m.AuthUserDelete != nil { - { - size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xea - } - if m.AuthUserAdd != nil { - { - size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xe2 - } - if m.AuthStatus != nil { - { - size, err := m.AuthStatus.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3f - i-- - dAtA[i] = 0xaa - } - if m.Authenticate != nil { - { - size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3f - i-- - dAtA[i] = 0xa2 - } - if m.AuthDisable != nil { - { - size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3f - i-- - dAtA[i] = 0x9a - } - if m.AuthEnable != nil { - { - size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xc2 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa2 - } - if m.LeaseCheckpoint != nil { - { - size, err := m.LeaseCheckpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if 
m.Alarm != nil { - { - size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.LeaseRevoke != nil { - { - size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.LeaseGrant != nil { - { - size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Compaction != nil { - { - size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.Txn != nil { - { - size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.DeleteRange != nil { - { - size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Put != nil { - { - size, err := m.Put.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Range != nil { - { - size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.V2 != nil { - { - size, err := m.V2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ID != 0 { - i = 
encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SimpleToken) > 0 { - i -= len(m.SimpleToken) - copy(dAtA[i:], m.SimpleToken) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) - i-- - dAtA[i] = 0x1a - } - if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { - offset -= sovRaftInternal(v) - base := 
offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RequestHeader) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - l = len(m.Username) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRevision != 0 { - n += 1 + sovRaftInternal(uint64(m.AuthRevision)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InternalRaftRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - if m.V2 != nil { - l = m.V2.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Range != nil { - l = m.Range.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Put != nil { - l = m.Put.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.DeleteRange != nil { - l = m.DeleteRange.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Txn != nil { - l = m.Txn.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Compaction != nil { - l = m.Compaction.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseGrant != nil { - l = m.LeaseGrant.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseRevoke != nil { - l = m.LeaseRevoke.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Alarm != nil { - l = m.Alarm.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseCheckpoint != nil { - l = m.LeaseCheckpoint.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Header != nil { - l = m.Header.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthEnable != nil { - l = m.AuthEnable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthDisable != nil { - l = m.AuthDisable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.Authenticate != nil { - l = m.Authenticate.Size() - n += 2 + l + 
sovRaftInternal(uint64(l)) - } - if m.AuthStatus != nil { - l = m.AuthStatus.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserAdd != nil { - l = m.AuthUserAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserDelete != nil { - l = m.AuthUserDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGet != nil { - l = m.AuthUserGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserChangePassword != nil { - l = m.AuthUserChangePassword.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGrantRole != nil { - l = m.AuthUserGrantRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserRevokeRole != nil { - l = m.AuthUserRevokeRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserList != nil { - l = m.AuthUserList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleList != nil { - l = m.AuthRoleList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleAdd != nil { - l = m.AuthRoleAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleDelete != nil { - l = m.AuthRoleDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleGet != nil { - l = m.AuthRoleGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleGrantPermission != nil { - l = m.AuthRoleGrantPermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleRevokePermission != nil { - l = m.AuthRoleRevokePermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.ClusterVersionSet != nil { - l = m.ClusterVersionSet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.ClusterMemberAttrSet != nil { - l = m.ClusterMemberAttrSet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.DowngradeInfoSet != nil { - l = m.DowngradeInfoSet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m 
*EmptyResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InternalAuthenticateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.SimpleToken) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRaftInternal(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRaftInternal(x uint64) (n int) { - return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RequestHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) - } - m.AuthRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AuthRevision |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.V2 == nil { - m.V2 = &Request{} - } - if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Range == nil { - m.Range = &RangeRequest{} - } - if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Put == nil { - m.Put = &PutRequest{} - } - if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteRange == nil { - m.DeleteRange = &DeleteRangeRequest{} - 
} - if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Txn == nil { - m.Txn = &TxnRequest{} - } - if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Compaction == nil { - m.Compaction = &CompactionRequest{} - } - if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseGrant == nil { - m.LeaseGrant = &LeaseGrantRequest{} - } - if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseRevoke == nil { - m.LeaseRevoke = &LeaseRevokeRequest{} - } - if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Alarm == nil { - m.Alarm = &AlarmRequest{} - } - if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseCheckpoint == nil { - m.LeaseCheckpoint = &LeaseCheckpointRequest{} - } - if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &RequestHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1000: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.AuthEnable == nil { - m.AuthEnable = &AuthEnableRequest{} - } - if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1011: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthDisable == nil { - m.AuthDisable = &AuthDisableRequest{} - } - if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1012: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Authenticate == nil { - m.Authenticate = &InternalAuthenticateRequest{} - } - if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1013: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthStatus", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthStatus == nil { - m.AuthStatus = &AuthStatusRequest{} - } - if err := m.AuthStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserAdd == nil { - m.AuthUserAdd = &AuthUserAddRequest{} - } - if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1101: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserDelete == nil { - m.AuthUserDelete = &AuthUserDeleteRequest{} - } - if err := 
m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1102: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGet == nil { - m.AuthUserGet = &AuthUserGetRequest{} - } - if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1103: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserChangePassword == nil { - m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} - } - if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1104: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGrantRole == nil { - m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} - } - if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1105: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserRevokeRole == nil { - m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} - } - if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1106: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserList == nil { - m.AuthUserList = &AuthUserListRequest{} - } - if err := 
m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1107: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleList == nil { - m.AuthRoleList = &AuthRoleListRequest{} - } - if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1200: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleAdd == nil { - m.AuthRoleAdd = &AuthRoleAddRequest{} - } - if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1201: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b 
< 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleDelete == nil { - m.AuthRoleDelete = &AuthRoleDeleteRequest{} - } - if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1202: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGet == nil { - m.AuthRoleGet = &AuthRoleGetRequest{} - } - if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1203: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGrantPermission == nil { - m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} - } - if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err 
- } - iNdEx = postIndex - case 1204: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleRevokePermission == nil { - m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} - } - if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1300: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersionSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClusterVersionSet == nil { - m.ClusterVersionSet = &membershippb.ClusterVersionSetRequest{} - } - if err := m.ClusterVersionSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1301: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterMemberAttrSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClusterMemberAttrSet == nil { - m.ClusterMemberAttrSet = &membershippb.ClusterMemberAttrSetRequest{} - } - if err := m.ClusterMemberAttrSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1302: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DowngradeInfoSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DowngradeInfoSet == nil { - m.DowngradeInfoSet = &membershippb.DowngradeInfoSetRequest{} - } - if err := m.DowngradeInfoSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - 
} - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SimpleToken = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRaftInternal(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRaftInternal - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupRaftInternal - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthRaftInternal - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupRaftInternal = fmt.Errorf("proto: unexpected end of group") -) diff --git a/api/etcdserverpb/raft_internal_stringer_test.go b/api/etcdserverpb/raft_internal_stringer_test.go deleted file mode 100644 index f6280e91351..00000000000 --- a/api/etcdserverpb/raft_internal_stringer_test.go +++ /dev/null @@ -1,31 
+0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserverpb_test - -import ( - "testing" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -// TestInvalidGoTypeIntPanic tests conditions that caused -// panic: invalid Go type int for field k8s_io.kubernetes.vendor.go_etcd_io.etcd.etcdserver.etcdserverpb.loggablePutRequest.value_size -// See https://github.com/kubernetes/kubernetes/issues/91937 for more details -func TestInvalidGoTypeIntPanic(t *testing.T) { - result := pb.NewLoggablePutRequest(&pb.PutRequest{}).String() - if result != "" { - t.Errorf("Got result: %s, expected empty string", result) - } -} diff --git a/api/etcdserverpb/rpc.pb.go b/api/etcdserverpb/rpc.pb.go deleted file mode 100644 index 0b68fe5a3e1..00000000000 --- a/api/etcdserverpb/rpc.pb.go +++ /dev/null @@ -1,26026 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: rpc.proto - -package etcdserverpb - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - authpb "go.etcd.io/etcd/api/v3/authpb" - mvccpb "go.etcd.io/etcd/api/v3/mvccpb" - _ "go.etcd.io/etcd/api/v3/versionpb" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type AlarmType int32 - -const ( - AlarmType_NONE AlarmType = 0 - AlarmType_NOSPACE AlarmType = 1 - AlarmType_CORRUPT AlarmType = 2 -) - -var AlarmType_name = map[int32]string{ - 0: "NONE", - 1: "NOSPACE", - 2: "CORRUPT", -} - -var AlarmType_value = map[string]int32{ - "NONE": 0, - "NOSPACE": 1, - "CORRUPT": 2, -} - -func (x AlarmType) String() string { - return proto.EnumName(AlarmType_name, int32(x)) -} - -func (AlarmType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} - -type RangeRequest_SortOrder int32 - -const ( - RangeRequest_NONE RangeRequest_SortOrder = 0 - RangeRequest_ASCEND RangeRequest_SortOrder = 1 - RangeRequest_DESCEND RangeRequest_SortOrder = 2 -) - -var RangeRequest_SortOrder_name = map[int32]string{ - 0: "NONE", - 1: "ASCEND", - 2: "DESCEND", -} - -var RangeRequest_SortOrder_value = map[string]int32{ - "NONE": 0, - "ASCEND": 1, - "DESCEND": 2, -} - -func (x RangeRequest_SortOrder) String() string { - return 
proto.EnumName(RangeRequest_SortOrder_name, int32(x)) -} - -func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1, 0} -} - -type RangeRequest_SortTarget int32 - -const ( - RangeRequest_KEY RangeRequest_SortTarget = 0 - RangeRequest_VERSION RangeRequest_SortTarget = 1 - RangeRequest_CREATE RangeRequest_SortTarget = 2 - RangeRequest_MOD RangeRequest_SortTarget = 3 - RangeRequest_VALUE RangeRequest_SortTarget = 4 -) - -var RangeRequest_SortTarget_name = map[int32]string{ - 0: "KEY", - 1: "VERSION", - 2: "CREATE", - 3: "MOD", - 4: "VALUE", -} - -var RangeRequest_SortTarget_value = map[string]int32{ - "KEY": 0, - "VERSION": 1, - "CREATE": 2, - "MOD": 3, - "VALUE": 4, -} - -func (x RangeRequest_SortTarget) String() string { - return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) -} - -func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1, 1} -} - -type Compare_CompareResult int32 - -const ( - Compare_EQUAL Compare_CompareResult = 0 - Compare_GREATER Compare_CompareResult = 1 - Compare_LESS Compare_CompareResult = 2 - Compare_NOT_EQUAL Compare_CompareResult = 3 -) - -var Compare_CompareResult_name = map[int32]string{ - 0: "EQUAL", - 1: "GREATER", - 2: "LESS", - 3: "NOT_EQUAL", -} - -var Compare_CompareResult_value = map[string]int32{ - "EQUAL": 0, - "GREATER": 1, - "LESS": 2, - "NOT_EQUAL": 3, -} - -func (x Compare_CompareResult) String() string { - return proto.EnumName(Compare_CompareResult_name, int32(x)) -} - -func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9, 0} -} - -type Compare_CompareTarget int32 - -const ( - Compare_VERSION Compare_CompareTarget = 0 - Compare_CREATE Compare_CompareTarget = 1 - Compare_MOD Compare_CompareTarget = 2 - Compare_VALUE Compare_CompareTarget = 3 - Compare_LEASE Compare_CompareTarget = 4 -) - -var Compare_CompareTarget_name = map[int32]string{ - 
0: "VERSION", - 1: "CREATE", - 2: "MOD", - 3: "VALUE", - 4: "LEASE", -} - -var Compare_CompareTarget_value = map[string]int32{ - "VERSION": 0, - "CREATE": 1, - "MOD": 2, - "VALUE": 3, - "LEASE": 4, -} - -func (x Compare_CompareTarget) String() string { - return proto.EnumName(Compare_CompareTarget_name, int32(x)) -} - -func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9, 1} -} - -type WatchCreateRequest_FilterType int32 - -const ( - // filter out put event. - WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 - // filter out delete event. - WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 -) - -var WatchCreateRequest_FilterType_name = map[int32]string{ - 0: "NOPUT", - 1: "NODELETE", -} - -var WatchCreateRequest_FilterType_value = map[string]int32{ - "NOPUT": 0, - "NODELETE": 1, -} - -func (x WatchCreateRequest_FilterType) String() string { - return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) -} - -func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{21, 0} -} - -type AlarmRequest_AlarmAction int32 - -const ( - AlarmRequest_GET AlarmRequest_AlarmAction = 0 - AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 - AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 -) - -var AlarmRequest_AlarmAction_name = map[int32]string{ - 0: "GET", - 1: "ACTIVATE", - 2: "DEACTIVATE", -} - -var AlarmRequest_AlarmAction_value = map[string]int32{ - "GET": 0, - "ACTIVATE": 1, - "DEACTIVATE": 2, -} - -func (x AlarmRequest_AlarmAction) String() string { - return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) -} - -func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{54, 0} -} - -type DowngradeRequest_DowngradeAction int32 - -const ( - DowngradeRequest_VALIDATE DowngradeRequest_DowngradeAction = 0 - DowngradeRequest_ENABLE DowngradeRequest_DowngradeAction 
= 1 - DowngradeRequest_CANCEL DowngradeRequest_DowngradeAction = 2 -) - -var DowngradeRequest_DowngradeAction_name = map[int32]string{ - 0: "VALIDATE", - 1: "ENABLE", - 2: "CANCEL", -} - -var DowngradeRequest_DowngradeAction_value = map[string]int32{ - "VALIDATE": 0, - "ENABLE": 1, - "CANCEL": 2, -} - -func (x DowngradeRequest_DowngradeAction) String() string { - return proto.EnumName(DowngradeRequest_DowngradeAction_name, int32(x)) -} - -func (DowngradeRequest_DowngradeAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{57, 0} -} - -type ResponseHeader struct { - // cluster_id is the ID of the cluster which sent the response. - ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // member_id is the ID of the member which sent the response. - MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` - // revision is the key-value store revision when the request was applied, and it's - // unset (so 0) in case of calls not interacting with key-value store. - // For watch progress responses, the header.revision indicates progress. All future events - // received in this stream are guaranteed to have a higher revision number than the - // header.revision number. - Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` - // raft_term is the raft term when the request was applied. 
- RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } -func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } -func (*ResponseHeader) ProtoMessage() {} -func (*ResponseHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} -func (m *ResponseHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseHeader.Merge(m, src) -} -func (m *ResponseHeader) XXX_Size() int { - return m.Size() -} -func (m *ResponseHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo - -func (m *ResponseHeader) GetClusterId() uint64 { - if m != nil { - return m.ClusterId - } - return 0 -} - -func (m *ResponseHeader) GetMemberId() uint64 { - if m != nil { - return m.MemberId - } - return 0 -} - -func (m *ResponseHeader) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *ResponseHeader) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - -type RangeRequest struct { - // key is the first key for the range. If range_end is not given, the request only looks up key. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the upper bound on the requested range [key, range_end). 
- // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. - // If both key and range_end are '\0', then the range request returns all keys. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. - Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` - // revision is the point-in-time of the key-value store to use for the range. - // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` - // sort_order is the order for returned sorted results. - SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` - // sort_target is the key-value field to use for sorting. - SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` - // serializable sets the range request to use serializable member-local reads. - // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. 
- Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` - // keys_only when set returns only the keys and not the values. - KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` - // count_only when set returns only the count of the keys in the range. - CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` - // min_mod_revision is the lower bound for returned key mod revisions; all keys with - // lesser mod revisions will be filtered away. - MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` - // max_mod_revision is the upper bound for returned key mod revisions; all keys with - // greater mod revisions will be filtered away. - MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` - // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create revisions will be filtered away. - MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` - // max_create_revision is the upper bound for returned key create revisions; all keys with - // greater create revisions will be filtered away. 
- MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RangeRequest) Reset() { *m = RangeRequest{} } -func (m *RangeRequest) String() string { return proto.CompactTextString(m) } -func (*RangeRequest) ProtoMessage() {} -func (*RangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1} -} -func (m *RangeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RangeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RangeRequest.Merge(m, src) -} -func (m *RangeRequest) XXX_Size() int { - return m.Size() -} -func (m *RangeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RangeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RangeRequest proto.InternalMessageInfo - -func (m *RangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *RangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *RangeRequest) GetLimit() int64 { - if m != nil { - return m.Limit - } - return 0 -} - -func (m *RangeRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { - if m != nil { - return m.SortOrder - } - return RangeRequest_NONE -} - -func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { - if m != nil { - return m.SortTarget - } - return RangeRequest_KEY -} - -func (m *RangeRequest) GetSerializable() bool { - if m != nil { 
- return m.Serializable - } - return false -} - -func (m *RangeRequest) GetKeysOnly() bool { - if m != nil { - return m.KeysOnly - } - return false -} - -func (m *RangeRequest) GetCountOnly() bool { - if m != nil { - return m.CountOnly - } - return false -} - -func (m *RangeRequest) GetMinModRevision() int64 { - if m != nil { - return m.MinModRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxModRevision() int64 { - if m != nil { - return m.MaxModRevision - } - return 0 -} - -func (m *RangeRequest) GetMinCreateRevision() int64 { - if m != nil { - return m.MinCreateRevision - } - return 0 -} - -func (m *RangeRequest) GetMaxCreateRevision() int64 { - if m != nil { - return m.MaxCreateRevision - } - return 0 -} - -type RangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"` - // more indicates if there are more keys to return in the requested range. - More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` - // count is set to the number of keys within the range when requested. 
- Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RangeResponse) Reset() { *m = RangeResponse{} } -func (m *RangeResponse) String() string { return proto.CompactTextString(m) } -func (*RangeResponse) ProtoMessage() {} -func (*RangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{2} -} -func (m *RangeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RangeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RangeResponse.Merge(m, src) -} -func (m *RangeResponse) XXX_Size() int { - return m.Size() -} -func (m *RangeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RangeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RangeResponse proto.InternalMessageInfo - -func (m *RangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { - if m != nil { - return m.Kvs - } - return nil -} - -func (m *RangeResponse) GetMore() bool { - if m != nil { - return m.More - } - return false -} - -func (m *RangeResponse) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -type PutRequest struct { - // key is the key, in bytes, to put into the key-value store. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // value is the value, in bytes, to associate with the key in the key-value store. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pair before changing it. - // The previous key-value pair will be returned in the put response. - PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. - IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{3} -} -func (m *PutRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PutRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutRequest.Merge(m, src) -} -func (m *PutRequest) XXX_Size() int { - return m.Size() -} -func (m *PutRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_PutRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PutRequest proto.InternalMessageInfo - -func (m *PutRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *PutRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -func (m *PutRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -func (m *PutRequest) GetIgnoreValue() bool { - if m != nil { - return m.IgnoreValue - } - return false -} - -func (m *PutRequest) GetIgnoreLease() bool { - if m != nil { - return m.IgnoreLease - } - return false -} - -type PutResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // if prev_kv is set in the request, the previous key-value pair will be returned. - PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{4} -} -func (m *PutResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PutResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutResponse.Merge(m, src) -} -func (m *PutResponse) XXX_Size() int { - return m.Size() -} -func (m *PutResponse) 
XXX_DiscardUnknown() { - xxx_messageInfo_PutResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PutResponse proto.InternalMessageInfo - -func (m *PutResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { - if m != nil { - return m.PrevKv - } - return nil -} - -type DeleteRangeRequest struct { - // key is the first key to delete in the range. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delete response. 
- PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } -func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeRequest) ProtoMessage() {} -func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{5} -} -func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRangeRequest.Merge(m, src) -} -func (m *DeleteRangeRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteRangeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo - -func (m *DeleteRangeRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRangeRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *DeleteRangeRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -type DeleteRangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // deleted is the number of keys deleted by the delete range request. - Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` - // if prev_kv is set in the request, the previous key-value pairs will be returned. 
- PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } -func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeResponse) ProtoMessage() {} -func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{6} -} -func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRangeResponse.Merge(m, src) -} -func (m *DeleteRangeResponse) XXX_Size() int { - return m.Size() -} -func (m *DeleteRangeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo - -func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRangeResponse) GetDeleted() int64 { - if m != nil { - return m.Deleted - } - return 0 -} - -func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { - if m != nil { - return m.PrevKvs - } - return nil -} - -type RequestOp struct { - // request is a union of request types accepted by a transaction. 
- // - // Types that are valid to be assigned to Request: - // *RequestOp_RequestRange - // *RequestOp_RequestPut - // *RequestOp_RequestDeleteRange - // *RequestOp_RequestTxn - Request isRequestOp_Request `protobuf_oneof:"request"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestOp) Reset() { *m = RequestOp{} } -func (m *RequestOp) String() string { return proto.CompactTextString(m) } -func (*RequestOp) ProtoMessage() {} -func (*RequestOp) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} -} -func (m *RequestOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestOp.Merge(m, src) -} -func (m *RequestOp) XXX_Size() int { - return m.Size() -} -func (m *RequestOp) XXX_DiscardUnknown() { - xxx_messageInfo_RequestOp.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestOp proto.InternalMessageInfo - -type isRequestOp_Request interface { - isRequestOp_Request() - MarshalTo([]byte) (int, error) - Size() int -} - -type RequestOp_RequestRange struct { - RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof" json:"request_range,omitempty"` -} -type RequestOp_RequestPut struct { - RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof" json:"request_put,omitempty"` -} -type RequestOp_RequestDeleteRange struct { - RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof" json:"request_delete_range,omitempty"` -} -type RequestOp_RequestTxn 
struct { - RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof" json:"request_txn,omitempty"` -} - -func (*RequestOp_RequestRange) isRequestOp_Request() {} -func (*RequestOp_RequestPut) isRequestOp_Request() {} -func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} -func (*RequestOp_RequestTxn) isRequestOp_Request() {} - -func (m *RequestOp) GetRequest() isRequestOp_Request { - if m != nil { - return m.Request - } - return nil -} - -func (m *RequestOp) GetRequestRange() *RangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { - return x.RequestRange - } - return nil -} - -func (m *RequestOp) GetRequestPut() *PutRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { - return x.RequestPut - } - return nil -} - -func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { - return x.RequestDeleteRange - } - return nil -} - -func (m *RequestOp) GetRequestTxn() *TxnRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { - return x.RequestTxn - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*RequestOp) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*RequestOp_RequestRange)(nil), - (*RequestOp_RequestPut)(nil), - (*RequestOp_RequestDeleteRange)(nil), - (*RequestOp_RequestTxn)(nil), - } -} - -type ResponseOp struct { - // response is a union of response types returned by a transaction. 
- // - // Types that are valid to be assigned to Response: - // *ResponseOp_ResponseRange - // *ResponseOp_ResponsePut - // *ResponseOp_ResponseDeleteRange - // *ResponseOp_ResponseTxn - Response isResponseOp_Response `protobuf_oneof:"response"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseOp) Reset() { *m = ResponseOp{} } -func (m *ResponseOp) String() string { return proto.CompactTextString(m) } -func (*ResponseOp) ProtoMessage() {} -func (*ResponseOp) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{8} -} -func (m *ResponseOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseOp.Merge(m, src) -} -func (m *ResponseOp) XXX_Size() int { - return m.Size() -} -func (m *ResponseOp) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseOp.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseOp proto.InternalMessageInfo - -type isResponseOp_Response interface { - isResponseOp_Response() - MarshalTo([]byte) (int, error) - Size() int -} - -type ResponseOp_ResponseRange struct { - ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof" json:"response_range,omitempty"` -} -type ResponseOp_ResponsePut struct { - ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof" json:"response_put,omitempty"` -} -type ResponseOp_ResponseDeleteRange struct { - ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof" 
json:"response_delete_range,omitempty"` -} -type ResponseOp_ResponseTxn struct { - ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof" json:"response_txn,omitempty"` -} - -func (*ResponseOp_ResponseRange) isResponseOp_Response() {} -func (*ResponseOp_ResponsePut) isResponseOp_Response() {} -func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} -func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} - -func (m *ResponseOp) GetResponse() isResponseOp_Response { - if m != nil { - return m.Response - } - return nil -} - -func (m *ResponseOp) GetResponseRange() *RangeResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { - return x.ResponseRange - } - return nil -} - -func (m *ResponseOp) GetResponsePut() *PutResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { - return x.ResponsePut - } - return nil -} - -func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { - return x.ResponseDeleteRange - } - return nil -} - -func (m *ResponseOp) GetResponseTxn() *TxnResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { - return x.ResponseTxn - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ResponseOp) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ResponseOp_ResponseRange)(nil), - (*ResponseOp_ResponsePut)(nil), - (*ResponseOp_ResponseDeleteRange)(nil), - (*ResponseOp_ResponseTxn)(nil), - } -} - -type Compare struct { - // result is logical comparison operation for this comparison. - Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` - // target is the key-value field to inspect for the comparison. 
- Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` - // key is the subject key for the comparison operation. - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - // Types that are valid to be assigned to TargetUnion: - // *Compare_Version - // *Compare_CreateRevision - // *Compare_ModRevision - // *Compare_Value - // *Compare_Lease - TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` - // range_end compares the given target to all keys in the range [key, range_end). - // See RangeRequest for more details on key ranges. - RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Compare) Reset() { *m = Compare{} } -func (m *Compare) String() string { return proto.CompactTextString(m) } -func (*Compare) ProtoMessage() {} -func (*Compare) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9} -} -func (m *Compare) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Compare.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Compare) XXX_Merge(src proto.Message) { - xxx_messageInfo_Compare.Merge(m, src) -} -func (m *Compare) XXX_Size() int { - return m.Size() -} -func (m *Compare) XXX_DiscardUnknown() { - xxx_messageInfo_Compare.DiscardUnknown(m) -} - -var xxx_messageInfo_Compare proto.InternalMessageInfo - -type isCompare_TargetUnion interface { - isCompare_TargetUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type Compare_Version struct { - Version int64 
`protobuf:"varint,4,opt,name=version,proto3,oneof" json:"version,omitempty"` -} -type Compare_CreateRevision struct { - CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof" json:"create_revision,omitempty"` -} -type Compare_ModRevision struct { - ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof" json:"mod_revision,omitempty"` -} -type Compare_Value struct { - Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof" json:"value,omitempty"` -} -type Compare_Lease struct { - Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof" json:"lease,omitempty"` -} - -func (*Compare_Version) isCompare_TargetUnion() {} -func (*Compare_CreateRevision) isCompare_TargetUnion() {} -func (*Compare_ModRevision) isCompare_TargetUnion() {} -func (*Compare_Value) isCompare_TargetUnion() {} -func (*Compare_Lease) isCompare_TargetUnion() {} - -func (m *Compare) GetTargetUnion() isCompare_TargetUnion { - if m != nil { - return m.TargetUnion - } - return nil -} - -func (m *Compare) GetResult() Compare_CompareResult { - if m != nil { - return m.Result - } - return Compare_EQUAL -} - -func (m *Compare) GetTarget() Compare_CompareTarget { - if m != nil { - return m.Target - } - return Compare_VERSION -} - -func (m *Compare) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *Compare) GetVersion() int64 { - if x, ok := m.GetTargetUnion().(*Compare_Version); ok { - return x.Version - } - return 0 -} - -func (m *Compare) GetCreateRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { - return x.CreateRevision - } - return 0 -} - -func (m *Compare) GetModRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { - return x.ModRevision - } - return 0 -} - -func (m *Compare) GetValue() []byte { - if x, ok := m.GetTargetUnion().(*Compare_Value); ok { - return x.Value - } - return nil -} - -func (m *Compare) GetLease() int64 { 
- if x, ok := m.GetTargetUnion().(*Compare_Lease); ok { - return x.Lease - } - return 0 -} - -func (m *Compare) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Compare) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Compare_Version)(nil), - (*Compare_CreateRevision)(nil), - (*Compare_ModRevision)(nil), - (*Compare_Value)(nil), - (*Compare_Lease)(nil), - } -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. All tests in the guard are applied and -// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. -type TxnRequest struct { - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. 
- // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. - Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"` - // success is a list of requests which will be applied when compare evaluates to true. - Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"` - // failure is a list of requests which will be applied when compare evaluates to false. - Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxnRequest) Reset() { *m = TxnRequest{} } -func (m *TxnRequest) String() string { return proto.CompactTextString(m) } -func (*TxnRequest) ProtoMessage() {} -func (*TxnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{10} -} -func (m *TxnRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TxnRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxnRequest.Merge(m, src) -} -func (m *TxnRequest) XXX_Size() int { - return m.Size() -} -func (m *TxnRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TxnRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_TxnRequest proto.InternalMessageInfo - -func (m *TxnRequest) GetCompare() []*Compare { - if m != nil { - return m.Compare - } - return nil -} - -func (m *TxnRequest) GetSuccess() []*RequestOp { - if m != nil { - return m.Success - } - return nil -} - -func (m *TxnRequest) GetFailure() []*RequestOp { - if m != nil { - return m.Failure - 
} - return nil -} - -type TxnResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // succeeded is set to true if the compare evaluated to true or false otherwise. - Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. - Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxnResponse) Reset() { *m = TxnResponse{} } -func (m *TxnResponse) String() string { return proto.CompactTextString(m) } -func (*TxnResponse) ProtoMessage() {} -func (*TxnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{11} -} -func (m *TxnResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TxnResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxnResponse.Merge(m, src) -} -func (m *TxnResponse) XXX_Size() int { - return m.Size() -} -func (m *TxnResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TxnResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TxnResponse proto.InternalMessageInfo - -func (m *TxnResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TxnResponse) GetSucceeded() bool { - if m != nil { - return m.Succeeded - } - return false -} - -func (m *TxnResponse) GetResponses() []*ResponseOp { - if m != nil { - return m.Responses - } - return nil -} - -// 
CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. -type CompactionRequest struct { - // revision is the key-value store revision for the compaction operation. - Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. - Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } -func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } -func (*CompactionRequest) ProtoMessage() {} -func (*CompactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{12} -} -func (m *CompactionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CompactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompactionRequest.Merge(m, src) -} -func (m *CompactionRequest) XXX_Size() int { - return m.Size() -} -func (m *CompactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CompactionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo - -func (m *CompactionRequest) GetRevision() int64 { - if m != nil { - return m.Revision - } - return 0 -} - -func (m *CompactionRequest) GetPhysical() bool { - if m != nil 
{ - return m.Physical - } - return false -} - -type CompactionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } -func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } -func (*CompactionResponse) ProtoMessage() {} -func (*CompactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{13} -} -func (m *CompactionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CompactionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompactionResponse.Merge(m, src) -} -func (m *CompactionResponse) XXX_Size() int { - return m.Size() -} -func (m *CompactionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CompactionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo - -func (m *CompactionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type HashRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashRequest) Reset() { *m = HashRequest{} } -func (m *HashRequest) String() string { return proto.CompactTextString(m) } -func (*HashRequest) ProtoMessage() {} -func (*HashRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{14} -} -func (m *HashRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func 
(m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashRequest.Merge(m, src) -} -func (m *HashRequest) XXX_Size() int { - return m.Size() -} -func (m *HashRequest) XXX_DiscardUnknown() { - xxx_messageInfo_HashRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_HashRequest proto.InternalMessageInfo - -type HashKVRequest struct { - // revision is the key-value store revision for the hash operation. - Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } -func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } -func (*HashKVRequest) ProtoMessage() {} -func (*HashKVRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{15} -} -func (m *HashKVRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashKVRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashKVRequest.Merge(m, src) -} -func (m *HashKVRequest) XXX_Size() int { - return m.Size() -} -func (m *HashKVRequest) XXX_DiscardUnknown() { - xxx_messageInfo_HashKVRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo - -func (m *HashKVRequest) GetRevision() int64 { - if m != nil 
{ - return m.Revision - } - return 0 -} - -type HashKVResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // hash is the hash value computed from the responding member's MVCC keys up to a given revision. - Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` - // compact_revision is the compacted revision of key-value store when hash begins. - CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - // hash_revision is the revision up to which the hash is calculated. - HashRevision int64 `protobuf:"varint,4,opt,name=hash_revision,json=hashRevision,proto3" json:"hash_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } -func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } -func (*HashKVResponse) ProtoMessage() {} -func (*HashKVResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{16} -} -func (m *HashKVResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashKVResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashKVResponse.Merge(m, src) -} -func (m *HashKVResponse) XXX_Size() int { - return m.Size() -} -func (m *HashKVResponse) XXX_DiscardUnknown() { - xxx_messageInfo_HashKVResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo - -func (m *HashKVResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - 
return nil -} - -func (m *HashKVResponse) GetHash() uint32 { - if m != nil { - return m.Hash - } - return 0 -} - -func (m *HashKVResponse) GetCompactRevision() int64 { - if m != nil { - return m.CompactRevision - } - return 0 -} - -func (m *HashKVResponse) GetHashRevision() int64 { - if m != nil { - return m.HashRevision - } - return 0 -} - -type HashResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // hash is the hash value computed from the responding member's KV's backend. - Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashResponse) Reset() { *m = HashResponse{} } -func (m *HashResponse) String() string { return proto.CompactTextString(m) } -func (*HashResponse) ProtoMessage() {} -func (*HashResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{17} -} -func (m *HashResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashResponse.Merge(m, src) -} -func (m *HashResponse) XXX_Size() int { - return m.Size() -} -func (m *HashResponse) XXX_DiscardUnknown() { - xxx_messageInfo_HashResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_HashResponse proto.InternalMessageInfo - -func (m *HashResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *HashResponse) GetHash() uint32 { - if m != nil { - return m.Hash - } - return 0 -} - -type SnapshotRequest struct { - XXX_NoUnkeyedLiteral 
struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } -func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*SnapshotRequest) ProtoMessage() {} -func (*SnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{18} -} -func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotRequest.Merge(m, src) -} -func (m *SnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *SnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo - -type SnapshotResponse struct { - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // remaining_bytes is the number of blob bytes to be sent after this message - RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` - // blob contains the next chunk of the snapshot in the snapshot stream. - Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` - // local version of server that created the snapshot. - // In cluster with binaries with different version, each cluster can return different result. 
- // Informs which etcd server version should be used when restoring the snapshot. - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } -func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*SnapshotResponse) ProtoMessage() {} -func (*SnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{19} -} -func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotResponse.Merge(m, src) -} -func (m *SnapshotResponse) XXX_Size() int { - return m.Size() -} -func (m *SnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo - -func (m *SnapshotResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *SnapshotResponse) GetRemainingBytes() uint64 { - if m != nil { - return m.RemainingBytes - } - return 0 -} - -func (m *SnapshotResponse) GetBlob() []byte { - if m != nil { - return m.Blob - } - return nil -} - -func (m *SnapshotResponse) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -type WatchRequest struct { - // request_union is a request to either create a new watcher or cancel an existing watcher. 
- // - // Types that are valid to be assigned to RequestUnion: - // *WatchRequest_CreateRequest - // *WatchRequest_CancelRequest - // *WatchRequest_ProgressRequest - RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{20} -} -func (m *WatchRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchRequest.Merge(m, src) -} -func (m *WatchRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchRequest proto.InternalMessageInfo - -type isWatchRequest_RequestUnion interface { - isWatchRequest_RequestUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type WatchRequest_CreateRequest struct { - CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof" json:"create_request,omitempty"` -} -type WatchRequest_CancelRequest struct { - CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof" json:"cancel_request,omitempty"` -} -type WatchRequest_ProgressRequest struct { - ProgressRequest *WatchProgressRequest 
`protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof" json:"progress_request,omitempty"` -} - -func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} -func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} -func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {} - -func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { - if m != nil { - return m.RequestUnion - } - return nil -} - -func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { - return x.CreateRequest - } - return nil -} - -func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { - return x.CancelRequest - } - return nil -} - -func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok { - return x.ProgressRequest - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*WatchRequest) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*WatchRequest_CreateRequest)(nil), - (*WatchRequest_CancelRequest)(nil), - (*WatchRequest_ProgressRequest)(nil), - } -} - -type WatchCreateRequest struct { - // key is the key to register for watching. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - // If the range_end is one bit larger than the given key, - // then all keys with the prefix (the given key) will be watched. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // start_revision is an optional revision to watch from (inclusive). 
No start_revision is "now". - StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. - ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` - // filters filter the events at server side before it sends back to the watcher. - Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` - // If prev_kv is set, created watcher gets the previous KV before the event happens. - // If the previous KV is already compacted, nothing will be returned. - PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - // If watch_id is provided and non-zero, it will be assigned to this watcher. - // Since creating a watcher in etcd is not a synchronous operation, - // this can be used ensure that ordering is correct when creating multiple - // watchers on the same stream. Creating a watcher with an ID already in - // use on the stream will cause an error to be returned. - WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - // fragment enables splitting large revisions into multiple watch responses. 
- Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } -func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCreateRequest) ProtoMessage() {} -func (*WatchCreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{21} -} -func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchCreateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchCreateRequest.Merge(m, src) -} -func (m *WatchCreateRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchCreateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo - -func (m *WatchCreateRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *WatchCreateRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -func (m *WatchCreateRequest) GetStartRevision() int64 { - if m != nil { - return m.StartRevision - } - return 0 -} - -func (m *WatchCreateRequest) GetProgressNotify() bool { - if m != nil { - return m.ProgressNotify - } - return false -} - -func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { - if m != nil { - return m.Filters - } - return nil -} - -func (m *WatchCreateRequest) GetPrevKv() bool { - if m != nil { - return m.PrevKv - } - return false -} - -func (m 
*WatchCreateRequest) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -func (m *WatchCreateRequest) GetFragment() bool { - if m != nil { - return m.Fragment - } - return false -} - -type WatchCancelRequest struct { - // watch_id is the watcher id to cancel so that no more events are transmitted. - WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } -func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCancelRequest) ProtoMessage() {} -func (*WatchCancelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{22} -} -func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchCancelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchCancelRequest.Merge(m, src) -} -func (m *WatchCancelRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchCancelRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo - -func (m *WatchCancelRequest) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -// Requests the a watch stream progress status be sent in the watch response stream as soon as -// possible. 
-type WatchProgressRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } -func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } -func (*WatchProgressRequest) ProtoMessage() {} -func (*WatchProgressRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{23} -} -func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchProgressRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchProgressRequest.Merge(m, src) -} -func (m *WatchProgressRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchProgressRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo - -type WatchResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // watch_id is the ID of the watcher that corresponds to the response. - WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. 
- Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. - // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - // cancel_reason indicates the reason for canceling the watcher. - CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` - // framgment is true if large watch response was split over multiple responses. 
- Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{24} -} -func (m *WatchResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchResponse.Merge(m, src) -} -func (m *WatchResponse) XXX_Size() int { - return m.Size() -} -func (m *WatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchResponse proto.InternalMessageInfo - -func (m *WatchResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *WatchResponse) GetWatchId() int64 { - if m != nil { - return m.WatchId - } - return 0 -} - -func (m *WatchResponse) GetCreated() bool { - if m != nil { - return m.Created - } - return false -} - -func (m *WatchResponse) GetCanceled() bool { - if m != nil { - return m.Canceled - } - return false -} - -func (m *WatchResponse) GetCompactRevision() int64 { - if m != nil { - return m.CompactRevision - } - return 0 -} - -func (m *WatchResponse) GetCancelReason() string { - if m != nil { - return m.CancelReason - } - return "" -} - -func (m *WatchResponse) 
GetFragment() bool { - if m != nil { - return m.Fragment - } - return false -} - -func (m *WatchResponse) GetEvents() []*mvccpb.Event { - if m != nil { - return m.Events - } - return nil -} - -type LeaseGrantRequest struct { - // TTL is the advisory time-to-live in seconds. Expired lease will return -1. - TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } -func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantRequest) ProtoMessage() {} -func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{25} -} -func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseGrantRequest.Merge(m, src) -} -func (m *LeaseGrantRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseGrantRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo - -func (m *LeaseGrantRequest) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseGrantResponse struct { - Header *ResponseHeader 
`protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // ID is the lease ID for the granted lease. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the server chosen lease time-to-live in seconds. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } -func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantResponse) ProtoMessage() {} -func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{26} -} -func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseGrantResponse.Merge(m, src) -} -func (m *LeaseGrantResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseGrantResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo - -func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseGrantResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseGrantResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseGrantResponse) GetError() string { - if m != nil { - return m.Error - 
} - return "" -} - -type LeaseRevokeRequest struct { - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } -func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeRequest) ProtoMessage() {} -func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{27} -} -func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseRevokeRequest.Merge(m, src) -} -func (m *LeaseRevokeRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseRevokeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo - -func (m *LeaseRevokeRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseRevokeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } -func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeResponse) ProtoMessage() {} -func (*LeaseRevokeResponse) 
Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{28} -} -func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseRevokeResponse.Merge(m, src) -} -func (m *LeaseRevokeResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseRevokeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo - -func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type LeaseCheckpoint struct { - // ID is the lease ID to checkpoint. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // Remaining_TTL is the remaining time until expiry of the lease. 
- Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} } -func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) } -func (*LeaseCheckpoint) ProtoMessage() {} -func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{29} -} -func (m *LeaseCheckpoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseCheckpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseCheckpoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseCheckpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseCheckpoint.Merge(m, src) -} -func (m *LeaseCheckpoint) XXX_Size() int { - return m.Size() -} -func (m *LeaseCheckpoint) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseCheckpoint.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseCheckpoint proto.InternalMessageInfo - -func (m *LeaseCheckpoint) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseCheckpoint) GetRemaining_TTL() int64 { - if m != nil { - return m.Remaining_TTL - } - return 0 -} - -type LeaseCheckpointRequest struct { - Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints,proto3" json:"checkpoints,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} } -func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseCheckpointRequest) ProtoMessage() {} -func 
(*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{30} -} -func (m *LeaseCheckpointRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseCheckpointRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseCheckpointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseCheckpointRequest.Merge(m, src) -} -func (m *LeaseCheckpointRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseCheckpointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseCheckpointRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseCheckpointRequest proto.InternalMessageInfo - -func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint { - if m != nil { - return m.Checkpoints - } - return nil -} - -type LeaseCheckpointResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} } -func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseCheckpointResponse) ProtoMessage() {} -func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{31} -} -func (m *LeaseCheckpointResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseCheckpointResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if 
err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseCheckpointResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseCheckpointResponse.Merge(m, src) -} -func (m *LeaseCheckpointResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseCheckpointResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseCheckpointResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseCheckpointResponse proto.InternalMessageInfo - -func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type LeaseKeepAliveRequest struct { - // ID is the lease ID for the lease to keep alive. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } -func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveRequest) ProtoMessage() {} -func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{32} -} -func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src) -} -func (m *LeaseKeepAliveRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo - -func (m 
*LeaseKeepAliveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseKeepAliveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the new time-to-live for the lease. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } -func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveResponse) ProtoMessage() {} -func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{33} -} -func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src) -} -func (m *LeaseKeepAliveResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo - -func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseKeepAliveResponse) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseKeepAliveResponse) GetTTL() int64 { - 
if m != nil { - return m.TTL - } - return 0 -} - -type LeaseTimeToLiveRequest struct { - // ID is the lease ID for the lease. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // keys is true to query all the keys attached to this lease. - Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } -func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveRequest) ProtoMessage() {} -func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{34} -} -func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src) -} -func (m *LeaseTimeToLiveRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo - -func (m *LeaseTimeToLiveRequest) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveRequest) GetKeys() bool { - if m != nil { - return m.Keys - } - return false -} - -type LeaseTimeToLiveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. 
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` - // Keys is the list of keys attached to this lease. - Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } -func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveResponse) ProtoMessage() {} -func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{35} -} -func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src) -} -func (m *LeaseTimeToLiveResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo - -func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseTimeToLiveResponse) GetID() 
int64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetTTL() int64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { - if m != nil { - return m.GrantedTTL - } - return 0 -} - -func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { - if m != nil { - return m.Keys - } - return nil -} - -type LeaseLeasesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } -func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesRequest) ProtoMessage() {} -func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{36} -} -func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseLeasesRequest.Merge(m, src) -} -func (m *LeaseLeasesRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseLeasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo - -type LeaseStatus struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } -func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } -func (*LeaseStatus) 
ProtoMessage() {} -func (*LeaseStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{37} -} -func (m *LeaseStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseStatus.Merge(m, src) -} -func (m *LeaseStatus) XXX_Size() int { - return m.Size() -} -func (m *LeaseStatus) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo - -func (m *LeaseStatus) GetID() int64 { - if m != nil { - return m.ID - } - return 0 -} - -type LeaseLeasesResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } -func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesResponse) ProtoMessage() {} -func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{38} -} -func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m 
*LeaseLeasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseLeasesResponse.Merge(m, src) -} -func (m *LeaseLeasesResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseLeasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo - -func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { - if m != nil { - return m.Leases - } - return nil -} - -type Member struct { - // ID is the member ID for this member. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // peerURLs is the list of URLs the member exposes to the cluster for communication. - PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. - ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"` - // isLearner indicates if the member is raft learner. 
- IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{39} -} -func (m *Member) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Member.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Member) XXX_Merge(src proto.Message) { - xxx_messageInfo_Member.Merge(m, src) -} -func (m *Member) XXX_Size() int { - return m.Size() -} -func (m *Member) XXX_DiscardUnknown() { - xxx_messageInfo_Member.DiscardUnknown(m) -} - -var xxx_messageInfo_Member proto.InternalMessageInfo - -func (m *Member) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Member) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Member) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -func (m *Member) GetClientURLs() []string { - if m != nil { - return m.ClientURLs - } - return nil -} - -func (m *Member) GetIsLearner() bool { - if m != nil { - return m.IsLearner - } - return false -} - -type MemberAddRequest struct { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. - PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` - // isLearner indicates if the added member is raft learner. 
- IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } -func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } -func (*MemberAddRequest) ProtoMessage() {} -func (*MemberAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{40} -} -func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberAddRequest.Merge(m, src) -} -func (m *MemberAddRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberAddRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo - -func (m *MemberAddRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -func (m *MemberAddRequest) GetIsLearner() bool { - if m != nil { - return m.IsLearner - } - return false -} - -type MemberAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // member is the member information for the added member. - Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"` - // members is a list of all members after adding the new member. 
- Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } -func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } -func (*MemberAddResponse) ProtoMessage() {} -func (*MemberAddResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{41} -} -func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberAddResponse.Merge(m, src) -} -func (m *MemberAddResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberAddResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo - -func (m *MemberAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberAddResponse) GetMember() *Member { - if m != nil { - return m.Member - } - return nil -} - -func (m *MemberAddResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberRemoveRequest struct { - // ID is the member ID of the member to remove. 
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } -func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveRequest) ProtoMessage() {} -func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{42} -} -func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberRemoveRequest.Merge(m, src) -} -func (m *MemberRemoveRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberRemoveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo - -func (m *MemberRemoveRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -type MemberRemoveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // members is a list of all members after removing the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } -func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveResponse) ProtoMessage() {} -func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{43} -} -func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberRemoveResponse.Merge(m, src) -} -func (m *MemberRemoveResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberRemoveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo - -func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberRemoveResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberUpdateRequest struct { - // ID is the member ID of the member to update. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // peerURLs is the new list of URLs the member will use to communicate with the cluster. 
- PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } -func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateRequest) ProtoMessage() {} -func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{44} -} -func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberUpdateRequest.Merge(m, src) -} -func (m *MemberUpdateRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberUpdateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo - -func (m *MemberUpdateRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *MemberUpdateRequest) GetPeerURLs() []string { - if m != nil { - return m.PeerURLs - } - return nil -} - -type MemberUpdateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // members is a list of all members after updating the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } -func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateResponse) ProtoMessage() {} -func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{45} -} -func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberUpdateResponse.Merge(m, src) -} -func (m *MemberUpdateResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberUpdateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo - -func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberUpdateResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberListRequest struct { - Linearizable bool `protobuf:"varint,1,opt,name=linearizable,proto3" json:"linearizable,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } -func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } -func (*MemberListRequest) ProtoMessage() {} 
-func (*MemberListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{46} -} -func (m *MemberListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberListRequest.Merge(m, src) -} -func (m *MemberListRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberListRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo - -func (m *MemberListRequest) GetLinearizable() bool { - if m != nil { - return m.Linearizable - } - return false -} - -type MemberListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // members is a list of all members associated with the cluster. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } -func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } -func (*MemberListResponse) ProtoMessage() {} -func (*MemberListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{47} -} -func (m *MemberListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberListResponse.Merge(m, src) -} -func (m *MemberListResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberListResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo - -func (m *MemberListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberListResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type MemberPromoteRequest struct { - // ID is the member ID of the member to promote. 
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} } -func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) } -func (*MemberPromoteRequest) ProtoMessage() {} -func (*MemberPromoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{48} -} -func (m *MemberPromoteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberPromoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberPromoteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberPromoteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberPromoteRequest.Merge(m, src) -} -func (m *MemberPromoteRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberPromoteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberPromoteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberPromoteRequest proto.InternalMessageInfo - -func (m *MemberPromoteRequest) GetID() uint64 { - if m != nil { - return m.ID - } - return 0 -} - -type MemberPromoteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // members is a list of all members after promoting the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} } -func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) } -func (*MemberPromoteResponse) ProtoMessage() {} -func (*MemberPromoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{49} -} -func (m *MemberPromoteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberPromoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberPromoteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberPromoteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberPromoteResponse.Merge(m, src) -} -func (m *MemberPromoteResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberPromoteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberPromoteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MemberPromoteResponse proto.InternalMessageInfo - -func (m *MemberPromoteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberPromoteResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type DefragmentRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } -func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } -func (*DefragmentRequest) ProtoMessage() {} -func (*DefragmentRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_77a6da22d6a3feb1, []int{50} -} -func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DefragmentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DefragmentRequest.Merge(m, src) -} -func (m *DefragmentRequest) XXX_Size() int { - return m.Size() -} -func (m *DefragmentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DefragmentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo - -type DefragmentResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } -func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } -func (*DefragmentResponse) ProtoMessage() {} -func (*DefragmentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{51} -} -func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DefragmentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DefragmentResponse.Merge(m, src) -} -func (m *DefragmentResponse) XXX_Size() int { - return m.Size() -} -func (m *DefragmentResponse) 
XXX_DiscardUnknown() { - xxx_messageInfo_DefragmentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo - -func (m *DefragmentResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type MoveLeaderRequest struct { - // targetID is the node ID for the new leader. - TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } -func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderRequest) ProtoMessage() {} -func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{52} -} -func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MoveLeaderRequest.Merge(m, src) -} -func (m *MoveLeaderRequest) XXX_Size() int { - return m.Size() -} -func (m *MoveLeaderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo - -func (m *MoveLeaderRequest) GetTargetID() uint64 { - if m != nil { - return m.TargetID - } - return 0 -} - -type MoveLeaderResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } -func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderResponse) ProtoMessage() {} -func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{53} -} -func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MoveLeaderResponse.Merge(m, src) -} -func (m *MoveLeaderResponse) XXX_Size() int { - return m.Size() -} -func (m *MoveLeaderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo - -func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AlarmRequest struct { - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. - Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm to consider for this request. 
- Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } -func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } -func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{54} -} -func (m *AlarmRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmRequest.Merge(m, src) -} -func (m *AlarmRequest) XXX_Size() int { - return m.Size() -} -func (m *AlarmRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo - -func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { - if m != nil { - return m.Action - } - return AlarmRequest_GET -} - -func (m *AlarmRequest) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmRequest) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - -type AlarmMember struct { - // memberID is the ID of the member associated with the raised alarm. - MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm which has been raised. 
- Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmMember) Reset() { *m = AlarmMember{} } -func (m *AlarmMember) String() string { return proto.CompactTextString(m) } -func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{55} -} -func (m *AlarmMember) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmMember) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmMember.Merge(m, src) -} -func (m *AlarmMember) XXX_Size() int { - return m.Size() -} -func (m *AlarmMember) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmMember.DiscardUnknown(m) -} - -var xxx_messageInfo_AlarmMember proto.InternalMessageInfo - -func (m *AlarmMember) GetMemberID() uint64 { - if m != nil { - return m.MemberID - } - return 0 -} - -func (m *AlarmMember) GetAlarm() AlarmType { - if m != nil { - return m.Alarm - } - return AlarmType_NONE -} - -type AlarmResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // alarms is a list of alarms associated with the alarm request. 
- Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } -func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } -func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{56} -} -func (m *AlarmResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmResponse.Merge(m, src) -} -func (m *AlarmResponse) XXX_Size() int { - return m.Size() -} -func (m *AlarmResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo - -func (m *AlarmResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AlarmResponse) GetAlarms() []*AlarmMember { - if m != nil { - return m.Alarms - } - return nil -} - -type DowngradeRequest struct { - // action is the kind of downgrade request to issue. The action may - // VALIDATE the target version, DOWNGRADE the cluster version, - // or CANCEL the current downgrading job. - Action DowngradeRequest_DowngradeAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.DowngradeRequest_DowngradeAction" json:"action,omitempty"` - // version is the target version to downgrade. 
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DowngradeRequest) Reset() { *m = DowngradeRequest{} } -func (m *DowngradeRequest) String() string { return proto.CompactTextString(m) } -func (*DowngradeRequest) ProtoMessage() {} -func (*DowngradeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{57} -} -func (m *DowngradeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DowngradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DowngradeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DowngradeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DowngradeRequest.Merge(m, src) -} -func (m *DowngradeRequest) XXX_Size() int { - return m.Size() -} -func (m *DowngradeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DowngradeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DowngradeRequest proto.InternalMessageInfo - -func (m *DowngradeRequest) GetAction() DowngradeRequest_DowngradeAction { - if m != nil { - return m.Action - } - return DowngradeRequest_VALIDATE -} - -func (m *DowngradeRequest) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -type DowngradeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // version is the current cluster version. 
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DowngradeResponse) Reset() { *m = DowngradeResponse{} } -func (m *DowngradeResponse) String() string { return proto.CompactTextString(m) } -func (*DowngradeResponse) ProtoMessage() {} -func (*DowngradeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{58} -} -func (m *DowngradeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DowngradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DowngradeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DowngradeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DowngradeResponse.Merge(m, src) -} -func (m *DowngradeResponse) XXX_Size() int { - return m.Size() -} -func (m *DowngradeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DowngradeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DowngradeResponse proto.InternalMessageInfo - -func (m *DowngradeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DowngradeResponse) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -type StatusRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{59} -} -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) -} -func (m *StatusRequest) XXX_Size() int { - return m.Size() -} -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo - -type StatusResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // version is the cluster protocol version used by the responding member. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. - DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` - // leader is the member ID which the responding member believes is the current leader. - Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` - // raftIndex is the current raft committed index of the responding member. - RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` - // raftTerm is the current raft term of the responding member. - RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` - // raftAppliedIndex is the current raft applied index of the responding member. - RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` - // errors contains alarm/health information and status. 
- Errors []string `protobuf:"bytes,8,rep,name=errors,proto3" json:"errors,omitempty"` - // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. - DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` - // isLearner indicates if the member is raft learner. - IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` - // storageVersion is the version of the db file. It might be get updated with delay in relationship to the target cluster version. - StorageVersion string `protobuf:"bytes,11,opt,name=storageVersion,proto3" json:"storageVersion,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{60} -} -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) -} -func (m *StatusResponse) XXX_Size() int { - return m.Size() -} -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo - -func (m *StatusResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *StatusResponse) GetVersion() string { - if m != 
nil { - return m.Version - } - return "" -} - -func (m *StatusResponse) GetDbSize() int64 { - if m != nil { - return m.DbSize - } - return 0 -} - -func (m *StatusResponse) GetLeader() uint64 { - if m != nil { - return m.Leader - } - return 0 -} - -func (m *StatusResponse) GetRaftIndex() uint64 { - if m != nil { - return m.RaftIndex - } - return 0 -} - -func (m *StatusResponse) GetRaftTerm() uint64 { - if m != nil { - return m.RaftTerm - } - return 0 -} - -func (m *StatusResponse) GetRaftAppliedIndex() uint64 { - if m != nil { - return m.RaftAppliedIndex - } - return 0 -} - -func (m *StatusResponse) GetErrors() []string { - if m != nil { - return m.Errors - } - return nil -} - -func (m *StatusResponse) GetDbSizeInUse() int64 { - if m != nil { - return m.DbSizeInUse - } - return 0 -} - -func (m *StatusResponse) GetIsLearner() bool { - if m != nil { - return m.IsLearner - } - return false -} - -func (m *StatusResponse) GetStorageVersion() string { - if m != nil { - return m.StorageVersion - } - return "" -} - -type AuthEnableRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } -func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{61} -} -func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthEnableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthEnableRequest.Merge(m, src) -} 
-func (m *AuthEnableRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthEnableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo - -type AuthDisableRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } -func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62} -} -func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthDisableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthDisableRequest.Merge(m, src) -} -func (m *AuthDisableRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthDisableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo - -type AuthStatusRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthStatusRequest) Reset() { *m = AuthStatusRequest{} } -func (m *AuthStatusRequest) String() string { return proto.CompactTextString(m) } -func (*AuthStatusRequest) ProtoMessage() {} -func (*AuthStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{63} -} -func (m *AuthStatusRequest) XXX_Unmarshal(b []byte) error { - 
return m.Unmarshal(b) -} -func (m *AuthStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthStatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthStatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthStatusRequest.Merge(m, src) -} -func (m *AuthStatusRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthStatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthStatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthStatusRequest proto.InternalMessageInfo - -type AuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } -func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{64} -} -func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthenticateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthenticateRequest.Merge(m, src) -} -func (m *AuthenticateRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthenticateRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo - -func (m *AuthenticateRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthenticateRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type AuthUserAddRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - HashedPassword string `protobuf:"bytes,4,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } -func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{65} -} -func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserAddRequest.Merge(m, src) -} -func (m *AuthUserAddRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo - -func (m *AuthUserAddRequest) GetName() 
string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserAddRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *AuthUserAddRequest) GetHashedPassword() string { - if m != nil { - return m.HashedPassword - } - return "" -} - -type AuthUserGetRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } -func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{66} -} -func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGetRequest.Merge(m, src) -} -func (m *AuthUserGetRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo - -func (m *AuthUserGetRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthUserDeleteRequest struct { - // name is the name of the user to delete. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } -func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{67} -} -func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src) -} -func (m *AuthUserDeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo - -func (m *AuthUserDeleteRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthUserChangePasswordRequest struct { - // name is the name of the user whose password is being changed. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // password is the new password for the user. Note that this field will be removed in the API layer. - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. 
- HashedPassword string `protobuf:"bytes,3,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } -func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordRequest) ProtoMessage() {} -func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{68} -} -func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src) -} -func (m *AuthUserChangePasswordRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo - -func (m *AuthUserChangePasswordRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserChangePasswordRequest) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -func (m *AuthUserChangePasswordRequest) GetHashedPassword() string { - if m != nil { - return m.HashedPassword - } - return "" -} - -type AuthUserGrantRoleRequest struct { - // user is the name of the user which should be granted a given role. 
- User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` - // role is the name of the role to grant to the user. - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } -func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{69} -} -func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src) -} -func (m *AuthUserGrantRoleRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo - -func (m *AuthUserGrantRoleRequest) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func (m *AuthUserGrantRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthUserRevokeRoleRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } -func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{70} -} -func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src) -} -func (m *AuthUserRevokeRoleRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo - -func (m *AuthUserRevokeRoleRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthUserRevokeRoleRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthRoleAddRequest struct { - // name is the name of the role to add to the authentication system. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } -func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{71} -} -func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleAddRequest.Merge(m, src) -} -func (m *AuthRoleAddRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo - -func (m *AuthRoleAddRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type AuthRoleGetRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } -func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72} -} -func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGetRequest.Merge(m, src) -} -func (m *AuthRoleGetRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo - -func (m *AuthRoleGetRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthUserListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } -func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{73} -} -func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserListRequest.Merge(m, src) -} -func (m *AuthUserListRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m) -} - -var 
xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo - -type AuthRoleListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } -func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{74} -} -func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleListRequest.Merge(m, src) -} -func (m *AuthRoleListRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo - -type AuthRoleDeleteRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } -func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteRequest) ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{75} -} -func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleDeleteRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src) -} -func (m *AuthRoleDeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo - -func (m *AuthRoleDeleteRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -type AuthRoleGrantPermissionRequest struct { - // name is the name of the role which will be granted the permission. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // perm is the permission to grant to the role. 
- Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } -func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} -func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{76} -} -func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src) -} -func (m *AuthRoleGrantPermissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo - -func (m *AuthRoleGrantPermissionRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleRevokePermissionRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" 
json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } -func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} -func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{77} -} -func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src) -} -func (m *AuthRoleRevokePermissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo - -func (m *AuthRoleRevokePermissionRequest) GetRole() string { - if m != nil { - return m.Role - } - return "" -} - -func (m *AuthRoleRevokePermissionRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { - if m != nil { - return m.RangeEnd - } - return nil -} - -type AuthEnableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } -func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{78} -} -func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthEnableResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthEnableResponse.Merge(m, src) -} -func (m *AuthEnableResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthEnableResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo - -func (m *AuthEnableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthDisableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } -func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{79} -} -func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthDisableResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthDisableResponse.Merge(m, src) -} -func (m *AuthDisableResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthDisableResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo - -func (m *AuthDisableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthStatusResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` - // authRevision is the current revision of auth store - AuthRevision uint64 `protobuf:"varint,3,opt,name=authRevision,proto3" json:"authRevision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthStatusResponse) Reset() { *m = AuthStatusResponse{} } -func (m *AuthStatusResponse) String() string { return proto.CompactTextString(m) } -func (*AuthStatusResponse) ProtoMessage() {} -func (*AuthStatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{80} -} -func (m *AuthStatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthStatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthStatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthStatusResponse.Merge(m, 
src) -} -func (m *AuthStatusResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthStatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthStatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthStatusResponse proto.InternalMessageInfo - -func (m *AuthStatusResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthStatusResponse) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *AuthStatusResponse) GetAuthRevision() uint64 { - if m != nil { - return m.AuthRevision - } - return 0 -} - -type AuthenticateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // token is an authorized token that can be used in succeeding RPCs - Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } -func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } -func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{81} -} -func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthenticateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthenticateResponse.Merge(m, src) -} -func (m *AuthenticateResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthenticateResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo - -func (m *AuthenticateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthenticateResponse) GetToken() string { - if m != nil { - return m.Token - } - return "" -} - -type AuthUserAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } -func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{82} -} -func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserAddResponse.Merge(m, src) -} -func (m *AuthUserAddResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo - -func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" 
json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } -func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{83} -} -func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGetResponse.Merge(m, src) -} -func (m *AuthUserGetResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo - -func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthUserGetResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - -type AuthUserDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } -func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_77a6da22d6a3feb1, []int{84} -} -func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src) -} -func (m *AuthUserDeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo - -func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserChangePasswordResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } -func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordResponse) ProtoMessage() {} -func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{85} -} -func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - 
return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src) -} -func (m *AuthUserChangePasswordResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo - -func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGrantRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } -func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{86} -} -func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src) -} -func (m *AuthUserGrantRoleResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo - -func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserRevokeRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } -func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{87} -} -func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src) -} -func (m *AuthUserRevokeRoleResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo - -func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } -func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{88} -} -func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleAddResponse.Merge(m, src) -} -func (m *AuthRoleAddResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo - -func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } -func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{89} -} -func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGetResponse.Merge(m, src) -} -func (m *AuthRoleGetResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo - -func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } -func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{90} -} -func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m 
*AuthRoleListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleListResponse.Merge(m, src) -} -func (m *AuthRoleListResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo - -func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthRoleListResponse) GetRoles() []string { - if m != nil { - return m.Roles - } - return nil -} - -type AuthUserListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } -func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{91} -} -func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserListResponse.Merge(m, src) -} -func (m *AuthUserListResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo - -func (m *AuthUserListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthUserListResponse) GetUsers() []string { - if m != nil { - return m.Users - } - return nil -} - -type AuthRoleDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } -func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{92} -} -func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src) -} -func (m *AuthRoleDeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo - -func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGrantPermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - 
XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } -func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} -func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{93} -} -func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src) -} -func (m *AuthRoleGrantPermissionResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo - -func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleRevokePermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } -func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} -func 
(*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{94} -} -func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src) -} -func (m *AuthRoleRevokePermissionResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo - -func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) - proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) - proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) - 
proto.RegisterEnum("etcdserverpb.DowngradeRequest_DowngradeAction", DowngradeRequest_DowngradeAction_name, DowngradeRequest_DowngradeAction_value) - proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") - proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") - proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") - proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") - proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") - proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") - proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") - proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") - proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") - proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") - proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") - proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") - proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") - proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") - proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") - proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest") - proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse") - proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") - proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") - proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") - proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") - proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") - proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") - proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest") - proto.RegisterType((*WatchResponse)(nil), 
"etcdserverpb.WatchResponse") - proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") - proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") - proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") - proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") - proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint") - proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest") - proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse") - proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") - proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") - proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") - proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") - proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest") - proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus") - proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse") - proto.RegisterType((*Member)(nil), "etcdserverpb.Member") - proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") - proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") - proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") - proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") - proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") - proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") - proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") - proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") - 
proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest") - proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse") - proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") - proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") - proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") - proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") - proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") - proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") - proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") - proto.RegisterType((*DowngradeRequest)(nil), "etcdserverpb.DowngradeRequest") - proto.RegisterType((*DowngradeResponse)(nil), "etcdserverpb.DowngradeResponse") - proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") - proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") - proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") - proto.RegisterType((*AuthStatusRequest)(nil), "etcdserverpb.AuthStatusRequest") - proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") - proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") - proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") - proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") - proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest") - proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") - proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") - proto.RegisterType((*AuthRoleAddRequest)(nil), 
"etcdserverpb.AuthRoleAddRequest") - proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") - proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") - proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") - proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") - proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") - proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") - proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") - proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") - proto.RegisterType((*AuthStatusResponse)(nil), "etcdserverpb.AuthStatusResponse") - proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") - proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") - proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") - proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") - proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") - proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") - proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse") - proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") - proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") - proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") - proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") - proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") - 
proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") - proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") -} - -func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } - -var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 4424 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x3c, 0xdf, 0x6f, 0x1c, 0x49, - 0x5a, 0xee, 0x19, 0xcf, 0x8c, 0xe7, 0x9b, 0xf1, 0x78, 0x5c, 0x71, 0xb2, 0x93, 0xd9, 0xc4, 0xf1, - 0x76, 0x36, 0xbb, 0xd9, 0xec, 0xae, 0x9d, 0xd8, 0xc9, 0x2d, 0x04, 0xed, 0x72, 0x13, 0x7b, 0x36, - 0x31, 0x71, 0x6c, 0x5f, 0x7b, 0x92, 0xbd, 0x5d, 0xa4, 0x1b, 0xda, 0x33, 0x15, 0xbb, 0xcf, 0x33, - 0xdd, 0x73, 0xdd, 0x3d, 0x8e, 0x7d, 0x3c, 0xdc, 0x71, 0x70, 0x9c, 0x0e, 0xa4, 0x93, 0x38, 0x24, - 0x74, 0x42, 0xf0, 0x82, 0x90, 0xe0, 0xe1, 0x40, 0xf0, 0xc0, 0x03, 0x02, 0x89, 0x07, 0x78, 0x80, - 0x07, 0x24, 0x24, 0xfe, 0x01, 0x58, 0xee, 0x89, 0x3f, 0x02, 0xa1, 0xfa, 0xd5, 0x55, 0xdd, 0x5d, - 0x6d, 0x7b, 0xcf, 0x5e, 0xdd, 0xcb, 0x66, 0xba, 0xea, 0xfb, 0x55, 0xdf, 0x57, 0xdf, 0xf7, 0x55, - 0x7d, 0x5f, 0x79, 0xa1, 0xec, 0x8f, 0x7a, 0x8b, 0x23, 0xdf, 0x0b, 0x3d, 0x54, 0xc5, 0x61, 0xaf, - 0x1f, 0x60, 0xff, 0x10, 0xfb, 0xa3, 0xdd, 0xe6, 0xdc, 0x9e, 0xb7, 0xe7, 0xd1, 0x89, 0x25, 0xf2, - 0x8b, 0xc1, 0x34, 0x1b, 0x04, 0x66, 0xc9, 0x1e, 0x39, 0x4b, 0xc3, 0xc3, 0x5e, 0x6f, 0xb4, 0xbb, - 0x74, 0x70, 0xc8, 0x67, 0x9a, 0xd1, 0x8c, 0x3d, 0x0e, 0xf7, 0x47, 0xbb, 0xf4, 0x1f, 0x3e, 0xb7, - 0x10, 0xcd, 0x1d, 0x62, 0x3f, 0x70, 0x3c, 0x77, 0xb4, 0x2b, 0x7e, 0x71, 0x88, 0x6b, 0x7b, 0x9e, - 0xb7, 0x37, 0xc0, 0x0c, 0xdf, 0x75, 0xbd, 0xd0, 0x0e, 0x1d, 0xcf, 0x0d, 0xd8, 0xac, 0xf9, 0x23, - 0x03, 0x6a, 0x16, 0x0e, 0x46, 0x9e, 0x1b, 0xe0, 0x27, 0xd8, 0xee, 0x63, 0x1f, 0x5d, 0x07, 0xe8, - 0x0d, 0xc6, 0x41, 0x88, 0xfd, 0xae, 0xd3, 0x6f, 0x18, 0x0b, 0xc6, 0xed, 0x49, 0xab, 0xcc, 0x47, - 0xd6, 0xfb, 0xe8, 0x75, 
0x28, 0x0f, 0xf1, 0x70, 0x97, 0xcd, 0xe6, 0xe8, 0xec, 0x14, 0x1b, 0x58, - 0xef, 0xa3, 0x26, 0x4c, 0xf9, 0xf8, 0xd0, 0x21, 0xec, 0x1b, 0xf9, 0x05, 0xe3, 0x76, 0xde, 0x8a, - 0xbe, 0x09, 0xa2, 0x6f, 0xbf, 0x0c, 0xbb, 0x21, 0xf6, 0x87, 0x8d, 0x49, 0x86, 0x48, 0x06, 0x3a, - 0xd8, 0x1f, 0x3e, 0x2c, 0x7d, 0xef, 0xef, 0x1a, 0xf9, 0x95, 0xc5, 0xbb, 0xe6, 0x3f, 0x17, 0xa0, - 0x6a, 0xd9, 0xee, 0x1e, 0xb6, 0xf0, 0xb7, 0xc6, 0x38, 0x08, 0x51, 0x1d, 0xf2, 0x07, 0xf8, 0x98, - 0xca, 0x51, 0xb5, 0xc8, 0x4f, 0x46, 0xc8, 0xdd, 0xc3, 0x5d, 0xec, 0x32, 0x09, 0xaa, 0x84, 0x90, - 0xbb, 0x87, 0xdb, 0x6e, 0x1f, 0xcd, 0x41, 0x61, 0xe0, 0x0c, 0x9d, 0x90, 0xb3, 0x67, 0x1f, 0x31, - 0xb9, 0x26, 0x13, 0x72, 0xad, 0x02, 0x04, 0x9e, 0x1f, 0x76, 0x3d, 0xbf, 0x8f, 0xfd, 0x46, 0x61, - 0xc1, 0xb8, 0x5d, 0x5b, 0x7e, 0x73, 0x51, 0xb5, 0xd8, 0xa2, 0x2a, 0xd0, 0xe2, 0x8e, 0xe7, 0x87, - 0x5b, 0x04, 0xd6, 0x2a, 0x07, 0xe2, 0x27, 0xfa, 0x18, 0x2a, 0x94, 0x48, 0x68, 0xfb, 0x7b, 0x38, - 0x6c, 0x14, 0x29, 0x95, 0x5b, 0xa7, 0x50, 0xe9, 0x50, 0x60, 0x8b, 0xb2, 0x67, 0xbf, 0x91, 0x09, - 0xd5, 0x00, 0xfb, 0x8e, 0x3d, 0x70, 0xbe, 0x6d, 0xef, 0x0e, 0x70, 0xa3, 0xb4, 0x60, 0xdc, 0x9e, - 0xb2, 0x62, 0x63, 0x64, 0xfd, 0x07, 0xf8, 0x38, 0xe8, 0x7a, 0xee, 0xe0, 0xb8, 0x31, 0x45, 0x01, - 0xa6, 0xc8, 0xc0, 0x96, 0x3b, 0x38, 0xa6, 0xd6, 0xf3, 0xc6, 0x6e, 0xc8, 0x66, 0xcb, 0x74, 0xb6, - 0x4c, 0x47, 0xe8, 0xf4, 0x3d, 0xa8, 0x0f, 0x1d, 0xb7, 0x3b, 0xf4, 0xfa, 0xdd, 0x48, 0x21, 0x40, - 0x14, 0xf2, 0xa8, 0xf4, 0x7b, 0xd4, 0x02, 0xf7, 0xac, 0xda, 0xd0, 0x71, 0x9f, 0x79, 0x7d, 0x4b, - 0xe8, 0x87, 0xa0, 0xd8, 0x47, 0x71, 0x94, 0x4a, 0x12, 0xc5, 0x3e, 0x52, 0x51, 0x3e, 0x80, 0x4b, - 0x84, 0x4b, 0xcf, 0xc7, 0x76, 0x88, 0x25, 0x56, 0x35, 0x8e, 0x35, 0x3b, 0x74, 0xdc, 0x55, 0x0a, - 0x12, 0x43, 0xb4, 0x8f, 0x52, 0x88, 0xd3, 0x49, 0x44, 0xfb, 0x28, 0x8e, 0x68, 0x7e, 0x00, 0xe5, - 0xc8, 0x2e, 0x68, 0x0a, 0x26, 0x37, 0xb7, 0x36, 0xdb, 0xf5, 0x09, 0x04, 0x50, 0x6c, 0xed, 0xac, - 0xb6, 0x37, 0xd7, 0xea, 0x06, 0xaa, 0x40, 0x69, 0xad, 0xcd, 
0x3e, 0x72, 0xcd, 0xd2, 0x8f, 0xf9, - 0x7e, 0x7b, 0x0a, 0x20, 0x4d, 0x81, 0x4a, 0x90, 0x7f, 0xda, 0xfe, 0xb4, 0x3e, 0x41, 0x80, 0x5f, - 0xb4, 0xad, 0x9d, 0xf5, 0xad, 0xcd, 0xba, 0x41, 0xa8, 0xac, 0x5a, 0xed, 0x56, 0xa7, 0x5d, 0xcf, - 0x11, 0x88, 0x67, 0x5b, 0x6b, 0xf5, 0x3c, 0x2a, 0x43, 0xe1, 0x45, 0x6b, 0xe3, 0x79, 0xbb, 0x3e, - 0x19, 0x11, 0x93, 0xbb, 0xf8, 0x4f, 0x0c, 0x98, 0xe6, 0xe6, 0x66, 0xbe, 0x85, 0xee, 0x43, 0x71, - 0x9f, 0xfa, 0x17, 0xdd, 0xc9, 0x95, 0xe5, 0x6b, 0x89, 0xbd, 0x11, 0xf3, 0x41, 0x8b, 0xc3, 0x22, - 0x13, 0xf2, 0x07, 0x87, 0x41, 0x23, 0xb7, 0x90, 0xbf, 0x5d, 0x59, 0xae, 0x2f, 0xb2, 0xc8, 0xb0, - 0xf8, 0x14, 0x1f, 0xbf, 0xb0, 0x07, 0x63, 0x6c, 0x91, 0x49, 0x84, 0x60, 0x72, 0xe8, 0xf9, 0x98, - 0x6e, 0xf8, 0x29, 0x8b, 0xfe, 0x26, 0x5e, 0x40, 0x6d, 0xce, 0x37, 0x3b, 0xfb, 0x90, 0xe2, 0xfd, - 0xbb, 0x01, 0xb0, 0x3d, 0x0e, 0xb3, 0x5d, 0x6c, 0x0e, 0x0a, 0x87, 0x84, 0x03, 0x77, 0x2f, 0xf6, - 0x41, 0x7d, 0x0b, 0xdb, 0x01, 0x8e, 0x7c, 0x8b, 0x7c, 0xa0, 0x05, 0x28, 0x8d, 0x7c, 0x7c, 0xd8, - 0x3d, 0x38, 0xa4, 0xdc, 0xa6, 0xa4, 0x9d, 0x8a, 0x64, 0xfc, 0xe9, 0x21, 0xba, 0x03, 0x55, 0x67, - 0xcf, 0xf5, 0x7c, 0xdc, 0x65, 0x44, 0x0b, 0x2a, 0xd8, 0xb2, 0x55, 0x61, 0x93, 0x74, 0x49, 0x0a, - 0x2c, 0x63, 0x55, 0xd4, 0xc2, 0x6e, 0x90, 0x39, 0xb9, 0x9e, 0xef, 0x1a, 0x50, 0xa1, 0xeb, 0x39, - 0x97, 0xb2, 0x97, 0xe5, 0x42, 0x72, 0x14, 0x2d, 0xa5, 0xf0, 0xd4, 0xd2, 0xa4, 0x08, 0x2e, 0xa0, - 0x35, 0x3c, 0xc0, 0x21, 0x3e, 0x4f, 0xf0, 0x52, 0x54, 0x99, 0xd7, 0xaa, 0x52, 0xf2, 0xfb, 0x73, - 0x03, 0x2e, 0xc5, 0x18, 0x9e, 0x6b, 0xe9, 0x0d, 0x28, 0xf5, 0x29, 0x31, 0x26, 0x53, 0xde, 0x12, - 0x9f, 0xe8, 0x3e, 0x4c, 0x71, 0x91, 0x82, 0x46, 0x5e, 0xbf, 0x0d, 0xa5, 0x94, 0x25, 0x26, 0x65, - 0x20, 0xc5, 0xfc, 0x87, 0x1c, 0x94, 0xb9, 0x32, 0xb6, 0x46, 0xa8, 0x05, 0xd3, 0x3e, 0xfb, 0xe8, - 0xd2, 0x35, 0x73, 0x19, 0x9b, 0xd9, 0x71, 0xf2, 0xc9, 0x84, 0x55, 0xe5, 0x28, 0x74, 0x18, 0xfd, - 0x0a, 0x54, 0x04, 0x89, 0xd1, 0x38, 0xe4, 0x86, 0x6a, 0xc4, 0x09, 0xc8, 0xad, 0xfd, 0x64, 0xc2, - 
0x02, 0x0e, 0xbe, 0x3d, 0x0e, 0x51, 0x07, 0xe6, 0x04, 0x32, 0x5b, 0x1f, 0x17, 0x23, 0x4f, 0xa9, - 0x2c, 0xc4, 0xa9, 0xa4, 0xcd, 0xf9, 0x64, 0xc2, 0x42, 0x1c, 0x5f, 0x99, 0x44, 0x6b, 0x52, 0xa4, - 0xf0, 0x88, 0xe5, 0x97, 0x94, 0x48, 0x9d, 0x23, 0x97, 0x13, 0x11, 0xda, 0x5a, 0x51, 0x64, 0xeb, - 0x1c, 0xb9, 0x91, 0xca, 0x1e, 0x95, 0xa1, 0xc4, 0x87, 0xcd, 0x7f, 0xcb, 0x01, 0x08, 0x8b, 0x6d, - 0x8d, 0xd0, 0x1a, 0xd4, 0x7c, 0xfe, 0x15, 0xd3, 0xdf, 0xeb, 0x5a, 0xfd, 0x71, 0x43, 0x4f, 0x58, - 0xd3, 0x02, 0x89, 0x89, 0xfb, 0x11, 0x54, 0x23, 0x2a, 0x52, 0x85, 0x57, 0x35, 0x2a, 0x8c, 0x28, - 0x54, 0x04, 0x02, 0x51, 0xe2, 0x27, 0x70, 0x39, 0xc2, 0xd7, 0x68, 0xf1, 0x8d, 0x13, 0xb4, 0x18, - 0x11, 0xbc, 0x24, 0x28, 0xa8, 0x7a, 0x7c, 0xac, 0x08, 0x26, 0x15, 0x79, 0x55, 0xa3, 0x48, 0x06, - 0xa4, 0x6a, 0x32, 0x92, 0x30, 0xa6, 0x4a, 0x20, 0x69, 0x9f, 0x8d, 0x9b, 0x7f, 0x39, 0x09, 0xa5, - 0x55, 0x6f, 0x38, 0xb2, 0x7d, 0xb2, 0x89, 0x8a, 0x3e, 0x0e, 0xc6, 0x83, 0x90, 0x2a, 0xb0, 0xb6, - 0x7c, 0x33, 0xce, 0x83, 0x83, 0x89, 0x7f, 0x2d, 0x0a, 0x6a, 0x71, 0x14, 0x82, 0xcc, 0xb3, 0x7c, - 0xee, 0x0c, 0xc8, 0x3c, 0xc7, 0x73, 0x14, 0x11, 0x10, 0xf2, 0x32, 0x20, 0x34, 0xa1, 0xc4, 0x0f, - 0x6c, 0x2c, 0x58, 0x3f, 0x99, 0xb0, 0xc4, 0x00, 0x7a, 0x07, 0x66, 0x92, 0xa9, 0xb0, 0xc0, 0x61, - 0x6a, 0xbd, 0x78, 0xe6, 0xbc, 0x09, 0xd5, 0x58, 0x86, 0x2e, 0x72, 0xb8, 0xca, 0x50, 0xc9, 0xcb, - 0x57, 0x44, 0x58, 0x27, 0xc7, 0x8a, 0xea, 0x93, 0x09, 0x11, 0xd8, 0x6f, 0x88, 0xc0, 0x3e, 0xa5, - 0x26, 0x5a, 0xa2, 0x57, 0x1e, 0xe3, 0xdf, 0x54, 0xa3, 0xd6, 0x57, 0x09, 0x72, 0x04, 0x24, 0xc3, - 0x97, 0x69, 0xc1, 0x74, 0x4c, 0x65, 0x24, 0x47, 0xb6, 0xbf, 0xf6, 0xbc, 0xb5, 0xc1, 0x12, 0xea, - 0x63, 0x9a, 0x43, 0xad, 0xba, 0x41, 0x12, 0xf4, 0x46, 0x7b, 0x67, 0xa7, 0x9e, 0x43, 0x57, 0xa0, - 0xbc, 0xb9, 0xd5, 0xe9, 0x32, 0xa8, 0x7c, 0xb3, 0xf4, 0xc7, 0x2c, 0x92, 0xc8, 0xfc, 0xfc, 0x69, - 0x44, 0x93, 0xa7, 0x68, 0x25, 0x33, 0x4f, 0x28, 0x99, 0xd9, 0x10, 0x99, 0x39, 0x27, 0x33, 0x73, - 0x1e, 0x21, 0x28, 0x6c, 0xb4, 0x5b, 
0x3b, 0x34, 0x49, 0x33, 0xd2, 0x2b, 0xe9, 0x6c, 0xfd, 0xa8, - 0x06, 0x55, 0x66, 0x9e, 0xee, 0xd8, 0x25, 0x87, 0x89, 0x9f, 0x1a, 0x00, 0xd2, 0x61, 0xd1, 0x12, - 0x94, 0x7a, 0x4c, 0x84, 0x86, 0x41, 0x23, 0xe0, 0x65, 0xad, 0xc5, 0x2d, 0x01, 0x85, 0xee, 0x41, - 0x29, 0x18, 0xf7, 0x7a, 0x38, 0x10, 0x99, 0xfb, 0xb5, 0x64, 0x10, 0xe6, 0x01, 0xd1, 0x12, 0x70, - 0x04, 0xe5, 0xa5, 0xed, 0x0c, 0xc6, 0x34, 0x8f, 0x9f, 0x8c, 0xc2, 0xe1, 0x64, 0x8c, 0xfd, 0x33, - 0x03, 0x2a, 0x8a, 0x5b, 0xfc, 0x9c, 0x29, 0xe0, 0x1a, 0x94, 0xa9, 0x30, 0xb8, 0xcf, 0x93, 0xc0, - 0x94, 0x25, 0x07, 0xd0, 0x57, 0xa0, 0x2c, 0x3c, 0x49, 0xe4, 0x81, 0x86, 0x9e, 0xec, 0xd6, 0xc8, - 0x92, 0xa0, 0x52, 0xc8, 0x0e, 0xcc, 0x52, 0x3d, 0xf5, 0xc8, 0xed, 0x43, 0x68, 0x56, 0x3d, 0x96, - 0x1b, 0x89, 0x63, 0x79, 0x13, 0xa6, 0x46, 0xfb, 0xc7, 0x81, 0xd3, 0xb3, 0x07, 0x5c, 0x9c, 0xe8, - 0x5b, 0x52, 0xdd, 0x01, 0xa4, 0x52, 0x3d, 0x8f, 0x02, 0x24, 0xd1, 0x2b, 0x50, 0x79, 0x62, 0x07, - 0xfb, 0x5c, 0x48, 0x39, 0x7e, 0x1f, 0xa6, 0xc9, 0xf8, 0xd3, 0x17, 0x67, 0x10, 0x5f, 0x60, 0xad, - 0x98, 0xff, 0x68, 0x40, 0x4d, 0xa0, 0x9d, 0xcb, 0x40, 0x08, 0x26, 0xf7, 0xed, 0x60, 0x9f, 0x2a, - 0x63, 0xda, 0xa2, 0xbf, 0xd1, 0x3b, 0x50, 0xef, 0xb1, 0xf5, 0x77, 0x13, 0xf7, 0xae, 0x19, 0x3e, - 0x1e, 0xf9, 0xfe, 0x7b, 0x30, 0x4d, 0x50, 0xba, 0xf1, 0x7b, 0x90, 0x70, 0xe3, 0xaf, 0x58, 0xd5, - 0x7d, 0xba, 0xe6, 0xa4, 0xf8, 0x36, 0x54, 0x99, 0x32, 0x2e, 0x5a, 0x76, 0xa9, 0xd7, 0x26, 0xcc, - 0xec, 0xb8, 0xf6, 0x28, 0xd8, 0xf7, 0xc2, 0x84, 0xce, 0x57, 0xcc, 0xbf, 0x35, 0xa0, 0x2e, 0x27, - 0xcf, 0x25, 0xc3, 0xdb, 0x30, 0xe3, 0xe3, 0xa1, 0xed, 0xb8, 0x8e, 0xbb, 0xd7, 0xdd, 0x3d, 0x0e, - 0x71, 0xc0, 0xaf, 0xaf, 0xb5, 0x68, 0xf8, 0x11, 0x19, 0x25, 0xc2, 0xee, 0x0e, 0xbc, 0x5d, 0x1e, - 0xa4, 0xe9, 0x6f, 0xf4, 0x46, 0x3c, 0x4a, 0x97, 0xa5, 0xde, 0xc4, 0xb8, 0x94, 0xf9, 0x27, 0x39, - 0xa8, 0x7e, 0x62, 0x87, 0x3d, 0xb1, 0x83, 0xd0, 0x3a, 0xd4, 0xa2, 0x30, 0x4e, 0x47, 0xb8, 0xdc, - 0x89, 0x03, 0x07, 0xc5, 0x11, 0xf7, 0x1a, 0x71, 0xe0, 0x98, 0xee, 0xa9, 
0x03, 0x94, 0x94, 0xed, - 0xf6, 0xf0, 0x20, 0x22, 0x95, 0xcb, 0x26, 0x45, 0x01, 0x55, 0x52, 0xea, 0x00, 0xfa, 0x3a, 0xd4, - 0x47, 0xbe, 0xb7, 0xe7, 0xe3, 0x20, 0x88, 0x88, 0xb1, 0x14, 0x6e, 0x6a, 0x88, 0x6d, 0x73, 0xd0, - 0xc4, 0x29, 0xe6, 0xfe, 0x93, 0x09, 0x6b, 0x66, 0x14, 0x9f, 0x93, 0x81, 0x75, 0x46, 0x9e, 0xf7, - 0x58, 0x64, 0xfd, 0x41, 0x1e, 0x50, 0x7a, 0x99, 0x5f, 0xf4, 0x98, 0x7c, 0x0b, 0x6a, 0x41, 0x68, - 0xfb, 0xa9, 0x3d, 0x3f, 0x4d, 0x47, 0xa3, 0x1d, 0xff, 0x36, 0x44, 0x92, 0x75, 0x5d, 0x2f, 0x74, - 0x5e, 0x1e, 0xb3, 0x0b, 0x8a, 0x55, 0x13, 0xc3, 0x9b, 0x74, 0x14, 0x6d, 0x42, 0xe9, 0xa5, 0x33, - 0x08, 0xb1, 0x1f, 0x34, 0x0a, 0x0b, 0xf9, 0xdb, 0xb5, 0xe5, 0x77, 0x4f, 0x33, 0xcc, 0xe2, 0xc7, - 0x14, 0xbe, 0x73, 0x3c, 0x52, 0x4f, 0xbf, 0x9c, 0x88, 0x7a, 0x8c, 0x2f, 0xea, 0x6f, 0x44, 0x26, - 0x4c, 0xbd, 0x22, 0x44, 0xbb, 0x4e, 0x9f, 0xe6, 0xe2, 0xc8, 0x0f, 0xef, 0x5b, 0x25, 0x3a, 0xb1, - 0xde, 0x47, 0x37, 0x61, 0xea, 0xa5, 0x6f, 0xef, 0x0d, 0xb1, 0x1b, 0xb2, 0x5b, 0xbe, 0x84, 0x89, - 0x26, 0xcc, 0x45, 0x00, 0x29, 0x0a, 0xc9, 0x7c, 0x9b, 0x5b, 0xdb, 0xcf, 0x3b, 0xf5, 0x09, 0x54, - 0x85, 0xa9, 0xcd, 0xad, 0xb5, 0xf6, 0x46, 0x9b, 0xe4, 0x46, 0x91, 0xf3, 0xee, 0x49, 0xa7, 0x6b, - 0x09, 0x43, 0xc4, 0xf6, 0x84, 0x2a, 0x97, 0x11, 0xbf, 0x74, 0x0b, 0xb9, 0x04, 0x89, 0x7b, 0xe6, - 0x0d, 0x98, 0xd3, 0x6d, 0x0d, 0x01, 0x70, 0xdf, 0xfc, 0x97, 0x1c, 0x4c, 0x73, 0x47, 0x38, 0x97, - 0xe7, 0x5e, 0x55, 0xa4, 0xe2, 0xd7, 0x13, 0xa1, 0xa4, 0x06, 0x94, 0x98, 0x83, 0xf4, 0xf9, 0xfd, - 0x57, 0x7c, 0x92, 0xe0, 0xcc, 0xf6, 0x3b, 0xee, 0x73, 0xb3, 0x47, 0xdf, 0xda, 0xb0, 0x59, 0xc8, - 0x0c, 0x9b, 0x91, 0xc3, 0xd9, 0x01, 0x3f, 0x58, 0x95, 0xa5, 0x29, 0xaa, 0xc2, 0xa9, 0xc8, 0x64, - 0xcc, 0x66, 0xa5, 0x0c, 0x9b, 0xa1, 0x5b, 0x50, 0xc4, 0x87, 0xd8, 0x0d, 0x83, 0x46, 0x85, 0x26, - 0xd2, 0x69, 0x71, 0xa1, 0x6a, 0x93, 0x51, 0x8b, 0x4f, 0x4a, 0x53, 0x7d, 0x04, 0xb3, 0xf4, 0xbe, - 0xfb, 0xd8, 0xb7, 0x5d, 0xf5, 0xce, 0xde, 0xe9, 0x6c, 0xf0, 0xb4, 0x43, 0x7e, 0xa2, 0x1a, 0xe4, - 0xd6, 0xd7, 
0xb8, 0x7e, 0x72, 0xeb, 0x6b, 0x12, 0xff, 0xf7, 0x0d, 0x40, 0x2a, 0x81, 0x73, 0xd9, - 0x22, 0xc1, 0x45, 0xc8, 0x91, 0x97, 0x72, 0xcc, 0x41, 0x01, 0xfb, 0xbe, 0xe7, 0xb3, 0x40, 0x69, - 0xb1, 0x0f, 0x29, 0xcd, 0xfb, 0x5c, 0x18, 0x0b, 0x1f, 0x7a, 0x07, 0x51, 0x04, 0x60, 0x64, 0x8d, - 0xb4, 0xf0, 0x1d, 0xb8, 0x14, 0x03, 0xbf, 0x98, 0x14, 0xbf, 0x05, 0x33, 0x94, 0xea, 0xea, 0x3e, - 0xee, 0x1d, 0x8c, 0x3c, 0xc7, 0x4d, 0x49, 0x80, 0x6e, 0x92, 0xd8, 0x25, 0xd2, 0x05, 0x59, 0x22, - 0x5b, 0x73, 0x35, 0x1a, 0xec, 0x74, 0x36, 0xe4, 0x56, 0xdf, 0x85, 0x2b, 0x09, 0x82, 0x62, 0x65, - 0xbf, 0x0a, 0x95, 0x5e, 0x34, 0x18, 0xf0, 0x13, 0xe4, 0xf5, 0xb8, 0xb8, 0x49, 0x54, 0x15, 0x43, - 0xf2, 0xf8, 0x3a, 0xbc, 0x96, 0xe2, 0x71, 0x11, 0xea, 0xb8, 0x6f, 0xde, 0x85, 0xcb, 0x94, 0xf2, - 0x53, 0x8c, 0x47, 0xad, 0x81, 0x73, 0x78, 0xba, 0x59, 0x8e, 0xf9, 0x7a, 0x15, 0x8c, 0x2f, 0x77, - 0x5b, 0x49, 0xd6, 0x6d, 0xce, 0xba, 0xe3, 0x0c, 0x71, 0xc7, 0xdb, 0xc8, 0x96, 0x96, 0x24, 0xf2, - 0x03, 0x7c, 0x1c, 0xf0, 0xe3, 0x23, 0xfd, 0x2d, 0xa3, 0xd7, 0x5f, 0x1b, 0x5c, 0x9d, 0x2a, 0x9d, - 0x2f, 0xd9, 0x35, 0xe6, 0x01, 0xf6, 0x88, 0x0f, 0xe2, 0x3e, 0x99, 0x60, 0xb5, 0x39, 0x65, 0x24, - 0x12, 0x98, 0x64, 0xa1, 0x6a, 0x52, 0xe0, 0xeb, 0xdc, 0x71, 0xe8, 0x7f, 0x82, 0xd4, 0x49, 0xe9, - 0x2d, 0xa8, 0xd0, 0x99, 0x9d, 0xd0, 0x0e, 0xc7, 0x41, 0x96, 0xe5, 0x56, 0xcc, 0x1f, 0x18, 0xdc, - 0xa3, 0x04, 0x9d, 0x73, 0xad, 0xf9, 0x1e, 0x14, 0xe9, 0x0d, 0x51, 0xdc, 0x74, 0xae, 0x6a, 0x36, - 0x36, 0x93, 0xc8, 0xe2, 0x80, 0xca, 0x39, 0xc9, 0x80, 0xe2, 0x33, 0xda, 0x39, 0x50, 0xa4, 0x9d, - 0x14, 0x96, 0x73, 0xed, 0x21, 0x2b, 0x3f, 0x96, 0x2d, 0xfa, 0x9b, 0x5e, 0x08, 0x30, 0xf6, 0x9f, - 0x5b, 0x1b, 0xec, 0x06, 0x52, 0xb6, 0xa2, 0x6f, 0xa2, 0xd8, 0xde, 0xc0, 0xc1, 0x6e, 0x48, 0x67, - 0x27, 0xe9, 0xac, 0x32, 0x82, 0x6e, 0x41, 0xd9, 0x09, 0x36, 0xb0, 0xed, 0xbb, 0xbc, 0xc4, 0xaf, - 0x04, 0x66, 0x39, 0x23, 0xf7, 0xd8, 0x37, 0xa0, 0xce, 0x24, 0x6b, 0xf5, 0xfb, 0xca, 0x69, 0x3f, - 0xe2, 0x6f, 0x24, 0xf8, 0xc7, 0xe8, 0xe7, 0x4e, 
0xa7, 0xff, 0x37, 0x06, 0xcc, 0x2a, 0x0c, 0xce, - 0x65, 0x82, 0xf7, 0xa0, 0xc8, 0xfa, 0x2f, 0xfc, 0x28, 0x38, 0x17, 0xc7, 0x62, 0x6c, 0x2c, 0x0e, - 0x83, 0x16, 0xa1, 0xc4, 0x7e, 0x89, 0x6b, 0x9c, 0x1e, 0x5c, 0x00, 0x49, 0x91, 0x17, 0xe1, 0x12, - 0x9f, 0xc3, 0x43, 0x4f, 0xe7, 0x73, 0x93, 0xf1, 0x08, 0xf1, 0x7d, 0x03, 0xe6, 0xe2, 0x08, 0xe7, - 0x5a, 0xa5, 0x22, 0x77, 0xee, 0x0b, 0xc9, 0xfd, 0x6b, 0x42, 0xee, 0xe7, 0xa3, 0xbe, 0x72, 0xe4, - 0x4c, 0xee, 0x38, 0xd5, 0xba, 0xb9, 0xb8, 0x75, 0x25, 0xad, 0x1f, 0x45, 0x6b, 0x12, 0xc4, 0xce, - 0xb5, 0xa6, 0x0f, 0xce, 0xb4, 0x26, 0xe5, 0x08, 0x96, 0x5a, 0xdc, 0xba, 0xd8, 0x46, 0x1b, 0x4e, - 0x10, 0x65, 0x9c, 0x77, 0xa1, 0x3a, 0x70, 0x5c, 0x6c, 0xfb, 0xbc, 0x87, 0x64, 0xa8, 0xfb, 0xf1, - 0x81, 0x15, 0x9b, 0x94, 0xa4, 0x7e, 0xdb, 0x00, 0xa4, 0xd2, 0xfa, 0xc5, 0x58, 0x6b, 0x49, 0x28, - 0x78, 0xdb, 0xf7, 0x86, 0x5e, 0x78, 0xda, 0x36, 0xbb, 0x6f, 0xfe, 0xae, 0x01, 0x97, 0x13, 0x18, - 0xbf, 0x08, 0xc9, 0xef, 0x9b, 0xd7, 0x60, 0x76, 0x0d, 0x8b, 0x33, 0x5e, 0xaa, 0x76, 0xb0, 0x03, - 0x48, 0x9d, 0xbd, 0x98, 0x53, 0xcc, 0x2f, 0xc1, 0xec, 0x33, 0xef, 0x90, 0x04, 0x72, 0x32, 0x2d, - 0xc3, 0x14, 0x2b, 0x66, 0x45, 0xfa, 0x8a, 0xbe, 0x65, 0xe8, 0xdd, 0x01, 0xa4, 0x62, 0x5e, 0x84, - 0x38, 0x2b, 0xe6, 0x7f, 0x1b, 0x50, 0x6d, 0x0d, 0x6c, 0x7f, 0x28, 0x44, 0xf9, 0x08, 0x8a, 0xac, - 0x32, 0xc3, 0xcb, 0xac, 0x6f, 0xc5, 0xe9, 0xa9, 0xb0, 0xec, 0xa3, 0xc5, 0xea, 0x38, 0x1c, 0x8b, - 0x2c, 0x85, 0x77, 0x96, 0xd7, 0x12, 0x9d, 0xe6, 0x35, 0xf4, 0x3e, 0x14, 0x6c, 0x82, 0x42, 0xd3, - 0x6b, 0x2d, 0x59, 0x2e, 0xa3, 0xd4, 0xc8, 0x95, 0xc8, 0x62, 0x50, 0xe6, 0x87, 0x50, 0x51, 0x38, - 0xa0, 0x12, 0xe4, 0x1f, 0xb7, 0xf9, 0x35, 0xa9, 0xb5, 0xda, 0x59, 0x7f, 0xc1, 0x4a, 0x88, 0x35, - 0x80, 0xb5, 0x76, 0xf4, 0x9d, 0xd3, 0x34, 0xf6, 0x6c, 0x4e, 0x87, 0xe7, 0x2d, 0x55, 0x42, 0x23, - 0x4b, 0xc2, 0xdc, 0x59, 0x24, 0x94, 0x2c, 0x7e, 0xcb, 0x80, 0x69, 0xae, 0x9a, 0xf3, 0xa6, 0x66, - 0x4a, 0x39, 0x23, 0x35, 0x2b, 0xcb, 0xb0, 0x38, 0xa0, 0x94, 0xe1, 0x9f, 0x0c, 0xa8, 
0xaf, 0x79, - 0xaf, 0xdc, 0x3d, 0xdf, 0xee, 0x47, 0x3e, 0xf8, 0x71, 0xc2, 0x9c, 0x8b, 0x89, 0x4a, 0x7f, 0x02, - 0x5e, 0x0e, 0x24, 0xcc, 0xda, 0x90, 0xb5, 0x14, 0x96, 0xdf, 0xc5, 0xa7, 0xf9, 0x55, 0x98, 0x49, - 0x20, 0x11, 0x03, 0xbd, 0x68, 0x6d, 0xac, 0xaf, 0x11, 0x83, 0xd0, 0x7a, 0x6f, 0x7b, 0xb3, 0xf5, - 0x68, 0xa3, 0xcd, 0xbb, 0xb2, 0xad, 0xcd, 0xd5, 0xf6, 0x86, 0x34, 0xd4, 0x03, 0xb1, 0x82, 0x07, - 0xe6, 0x00, 0x66, 0x15, 0x81, 0xce, 0xdb, 0x1c, 0xd3, 0xcb, 0x2b, 0xb9, 0x35, 0x60, 0x9a, 0x9f, - 0x72, 0x92, 0x8e, 0xff, 0xd3, 0x3c, 0xd4, 0xc4, 0xd4, 0x97, 0x23, 0x05, 0xba, 0x02, 0xc5, 0xfe, - 0xee, 0x8e, 0xf3, 0x6d, 0xd1, 0x97, 0xe5, 0x5f, 0x64, 0x7c, 0xc0, 0xf8, 0xb0, 0xd7, 0x16, 0xfc, - 0x0b, 0x5d, 0x63, 0x0f, 0x31, 0xd6, 0xdd, 0x3e, 0x3e, 0xa2, 0x87, 0xa1, 0x49, 0x4b, 0x0e, 0xd0, - 0xa2, 0x26, 0x7f, 0x95, 0x41, 0xef, 0xba, 0xca, 0x2b, 0x0d, 0xb4, 0x02, 0x75, 0xf2, 0xbb, 0x35, - 0x1a, 0x0d, 0x1c, 0xdc, 0x67, 0x04, 0xc8, 0x35, 0x77, 0x52, 0x9e, 0x76, 0x52, 0x00, 0xe8, 0x06, - 0x14, 0xe9, 0x15, 0x30, 0x68, 0x4c, 0x91, 0xbc, 0x2a, 0x41, 0xf9, 0x30, 0x7a, 0x07, 0x2a, 0x4c, - 0xe2, 0x75, 0xf7, 0x79, 0x80, 0xe9, 0x9b, 0x05, 0xa5, 0x1e, 0xa2, 0xce, 0xc5, 0xcf, 0x59, 0x90, - 0x75, 0xce, 0x42, 0x4b, 0x50, 0x0b, 0x42, 0xcf, 0xb7, 0xf7, 0xf0, 0x0b, 0xae, 0xb2, 0x4a, 0xbc, - 0x68, 0x97, 0x98, 0x96, 0xe6, 0xba, 0x06, 0xb3, 0xad, 0x71, 0xb8, 0xdf, 0x76, 0x49, 0x72, 0x4c, - 0x19, 0xf3, 0x3a, 0x20, 0x32, 0xbb, 0xe6, 0x04, 0xda, 0x69, 0x8e, 0xac, 0xdd, 0x09, 0x0f, 0xcc, - 0x4d, 0xb8, 0x44, 0x66, 0xb1, 0x1b, 0x3a, 0x3d, 0xe5, 0x20, 0x22, 0x8e, 0xba, 0x46, 0xe2, 0xa8, - 0x6b, 0x07, 0xc1, 0x2b, 0xcf, 0xef, 0x73, 0x63, 0x47, 0xdf, 0x92, 0xdb, 0xdf, 0x1b, 0x4c, 0x9a, - 0xe7, 0x41, 0xec, 0x98, 0xfa, 0x05, 0xe9, 0xa1, 0x5f, 0x86, 0x92, 0x37, 0xa2, 0x4f, 0x82, 0x78, - 0xf5, 0xef, 0xca, 0x22, 0x7b, 0x66, 0xb4, 0xc8, 0x09, 0x6f, 0xb1, 0x59, 0xa5, 0x42, 0xc5, 0xe1, - 0x89, 0x9a, 0xf7, 0xed, 0x60, 0x1f, 0xf7, 0xb7, 0x05, 0xf1, 0x58, 0x6d, 0xf4, 0x81, 0x95, 0x98, - 0x96, 0xb2, 0xdf, 0x93, 
0xa2, 0x3f, 0xc6, 0xe1, 0x09, 0xa2, 0xab, 0xd5, 0xf7, 0xcb, 0x02, 0x85, - 0x37, 0x0d, 0xcf, 0x82, 0xf5, 0x43, 0x03, 0xae, 0x0b, 0xb4, 0xd5, 0x7d, 0xdb, 0xdd, 0xc3, 0x42, - 0x98, 0x9f, 0x57, 0x5f, 0xe9, 0x45, 0xe7, 0xcf, 0xb8, 0xe8, 0xa7, 0xd0, 0x88, 0x16, 0x4d, 0x2b, - 0x31, 0xde, 0x40, 0x5d, 0xc4, 0x38, 0xe0, 0x11, 0xa1, 0x6c, 0xd1, 0xdf, 0x64, 0xcc, 0xf7, 0x06, - 0xd1, 0x25, 0x88, 0xfc, 0x96, 0xc4, 0x36, 0xe0, 0xaa, 0x20, 0xc6, 0x4b, 0x23, 0x71, 0x6a, 0xa9, - 0x35, 0x9d, 0x48, 0x8d, 0xdb, 0x83, 0xd0, 0x38, 0x79, 0x2b, 0x69, 0x51, 0xe2, 0x26, 0xa4, 0x5c, - 0x0c, 0x1d, 0x97, 0x79, 0xe6, 0x01, 0x44, 0x66, 0xe5, 0xbc, 0x9a, 0x9a, 0x27, 0x24, 0xb5, 0xf3, - 0x7c, 0x0b, 0x90, 0xf9, 0xd4, 0x16, 0xc8, 0xe6, 0x8a, 0x61, 0x3e, 0x12, 0x94, 0xa8, 0x7d, 0x1b, - 0xfb, 0x43, 0x27, 0x08, 0x94, 0x36, 0x94, 0x4e, 0x5d, 0x6f, 0xc1, 0xe4, 0x08, 0xf3, 0xe4, 0x5d, - 0x59, 0x46, 0xc2, 0x27, 0x14, 0x64, 0x3a, 0x2f, 0xd9, 0x0c, 0xe1, 0x86, 0x60, 0xc3, 0x0c, 0xa2, - 0xe5, 0x93, 0x14, 0x53, 0x94, 0xbe, 0x73, 0x19, 0xa5, 0xef, 0x7c, 0xbc, 0xf4, 0x1d, 0x3b, 0x50, - 0xaa, 0x81, 0xea, 0x62, 0x0e, 0x94, 0x1d, 0x66, 0x80, 0x28, 0xbe, 0x5d, 0x0c, 0xd5, 0x3f, 0xe0, - 0x81, 0xea, 0xa2, 0xd2, 0x20, 0xa6, 0x6b, 0x16, 0x4d, 0x4a, 0xf1, 0x89, 0x4c, 0xa8, 0x12, 0x23, - 0x59, 0x6a, 0x4f, 0x60, 0xd2, 0x8a, 0x8d, 0xc9, 0x60, 0x7c, 0x00, 0x73, 0xf1, 0x60, 0x7c, 0x2e, - 0xa1, 0xe6, 0xa0, 0x10, 0x7a, 0x07, 0x58, 0x64, 0x66, 0xf6, 0x91, 0x52, 0x6b, 0x14, 0xa8, 0x2f, - 0x46, 0xad, 0xdf, 0x94, 0x54, 0xa9, 0x03, 0x9e, 0x77, 0x05, 0x64, 0x3b, 0x8a, 0xbb, 0x2f, 0xfb, - 0x90, 0xbc, 0x3e, 0x81, 0x2b, 0xc9, 0xe0, 0x7b, 0x31, 0x8b, 0xe8, 0x32, 0xe7, 0xd4, 0x85, 0xe7, - 0x8b, 0x61, 0xf0, 0x99, 0x8c, 0x93, 0x4a, 0xd0, 0xbd, 0x18, 0xda, 0xbf, 0x0e, 0x4d, 0x5d, 0x0c, - 0xbe, 0x50, 0x5f, 0x8c, 0x42, 0xf2, 0xc5, 0x50, 0xfd, 0xbe, 0x21, 0xc9, 0xaa, 0xbb, 0xe6, 0xc3, - 0x2f, 0x42, 0x56, 0xe4, 0xba, 0xbb, 0xd1, 0xf6, 0x59, 0x8a, 0xa2, 0x65, 0x5e, 0x1f, 0x2d, 0x25, - 0x0a, 0x05, 0x14, 0xfe, 0x27, 0x43, 0xfd, 0x97, 0xb9, 0x7b, 
0x39, 0x33, 0x99, 0x77, 0xce, 0xcb, - 0x8c, 0xa4, 0xe7, 0x88, 0x19, 0xfd, 0x48, 0xb9, 0x8a, 0x9a, 0xa4, 0x2e, 0xc6, 0x74, 0xbf, 0x21, - 0x13, 0x4c, 0x2a, 0x8f, 0x5d, 0x0c, 0x07, 0x1b, 0x16, 0xb2, 0x53, 0xd8, 0x85, 0xb0, 0xb8, 0xd3, - 0x82, 0x72, 0x74, 0xf3, 0x55, 0xde, 0xe9, 0x56, 0xa0, 0xb4, 0xb9, 0xb5, 0xb3, 0xdd, 0x5a, 0x25, - 0x17, 0xbb, 0x39, 0x28, 0xad, 0x6e, 0x59, 0xd6, 0xf3, 0xed, 0x0e, 0xb9, 0xd9, 0x25, 0x9f, 0xed, - 0x2c, 0xff, 0x2c, 0x0f, 0xb9, 0xa7, 0x2f, 0xd0, 0xa7, 0x50, 0x60, 0xcf, 0xc6, 0x4e, 0x78, 0x3d, - 0xd8, 0x3c, 0xe9, 0x65, 0x9c, 0xf9, 0xda, 0xf7, 0xfe, 0xf3, 0x67, 0x7f, 0x98, 0x9b, 0x35, 0xab, - 0x4b, 0x87, 0x2b, 0x4b, 0x07, 0x87, 0x4b, 0x34, 0xc9, 0x3e, 0x34, 0xee, 0xa0, 0xaf, 0x41, 0x7e, - 0x7b, 0x1c, 0xa2, 0xcc, 0x57, 0x85, 0xcd, 0xec, 0xc7, 0x72, 0xe6, 0x65, 0x4a, 0x74, 0xc6, 0x04, - 0x4e, 0x74, 0x34, 0x0e, 0x09, 0xc9, 0x6f, 0x41, 0x45, 0x7d, 0xea, 0x76, 0xea, 0x53, 0xc3, 0xe6, - 0xe9, 0xcf, 0xe8, 0xcc, 0xeb, 0x94, 0xd5, 0x6b, 0x26, 0xe2, 0xac, 0xd8, 0x63, 0x3c, 0x75, 0x15, - 0x9d, 0x23, 0x17, 0x65, 0x3e, 0x44, 0x6c, 0x66, 0xbf, 0xac, 0x4b, 0xad, 0x22, 0x3c, 0x72, 0x09, - 0xc9, 0x6f, 0xf2, 0x27, 0x74, 0xbd, 0x10, 0xdd, 0xd0, 0xbc, 0x81, 0x52, 0xdf, 0xf6, 0x34, 0x17, - 0xb2, 0x01, 0x38, 0x93, 0x6b, 0x94, 0xc9, 0x15, 0x73, 0x96, 0x33, 0xe9, 0x45, 0x20, 0x0f, 0x8d, - 0x3b, 0xcb, 0x3d, 0x28, 0xd0, 0xde, 0x31, 0xfa, 0x4c, 0xfc, 0x68, 0x6a, 0xba, 0xf2, 0x19, 0x86, - 0x8e, 0x75, 0x9d, 0xcd, 0x39, 0xca, 0xa8, 0x66, 0x96, 0x09, 0x23, 0xda, 0x39, 0x7e, 0x68, 0xdc, - 0xb9, 0x6d, 0xdc, 0x35, 0x96, 0xff, 0xaa, 0x00, 0x05, 0xda, 0xa3, 0x40, 0x07, 0x00, 0xb2, 0x47, - 0x9a, 0x5c, 0x5d, 0xaa, 0xfd, 0x9a, 0x5c, 0x5d, 0xba, 0xbd, 0x6a, 0x36, 0x29, 0xd3, 0x39, 0x73, - 0x86, 0x30, 0xa5, 0xad, 0x8f, 0x25, 0xda, 0xe9, 0x21, 0x7a, 0xfc, 0xa1, 0xc1, 0x9b, 0x35, 0xcc, - 0xcd, 0x90, 0x8e, 0x5a, 0xac, 0x3f, 0x9a, 0xdc, 0x0e, 0x9a, 0x96, 0xa8, 0xf9, 0x80, 0x32, 0x5c, - 0x32, 0xeb, 0x92, 0xa1, 0x4f, 0x21, 0x1e, 0x1a, 0x77, 0x3e, 0x6b, 0x98, 0x97, 0xb8, 0x96, 0x13, - 
0x33, 0xe8, 0x3b, 0x50, 0x8b, 0x77, 0xf2, 0xd0, 0x4d, 0x0d, 0xaf, 0x64, 0x67, 0xb0, 0xf9, 0xe6, - 0xc9, 0x40, 0x5c, 0xa6, 0x79, 0x2a, 0x13, 0x67, 0xce, 0x38, 0x1f, 0x60, 0x3c, 0xb2, 0x09, 0x10, - 0xb7, 0x01, 0xfa, 0x53, 0x83, 0x37, 0x63, 0x65, 0x23, 0x0e, 0xe9, 0xa8, 0xa7, 0xfa, 0x7d, 0xcd, - 0x5b, 0xa7, 0x40, 0x71, 0x21, 0x3e, 0xa4, 0x42, 0x7c, 0x60, 0xce, 0x49, 0x21, 0x42, 0x67, 0x88, - 0x43, 0x8f, 0x4b, 0xf1, 0xd9, 0x35, 0xf3, 0xb5, 0x98, 0x72, 0x62, 0xb3, 0xd2, 0x58, 0xac, 0x61, - 0xa6, 0x35, 0x56, 0xac, 0x27, 0xa7, 0x35, 0x56, 0xbc, 0xdb, 0xa6, 0x33, 0x16, 0x6f, 0x8f, 0x69, - 0x8c, 0x15, 0xcd, 0x2c, 0xff, 0xef, 0x24, 0x94, 0x56, 0xd9, 0x9f, 0xe2, 0x20, 0x0f, 0xca, 0x51, - 0x0b, 0x09, 0xcd, 0xeb, 0xaa, 0xd4, 0xf2, 0x2a, 0xd7, 0xbc, 0x91, 0x39, 0xcf, 0x05, 0x7a, 0x83, - 0x0a, 0xf4, 0xba, 0x79, 0x85, 0x70, 0xe6, 0x7f, 0xed, 0xb3, 0xc4, 0x6a, 0x99, 0x4b, 0x76, 0xbf, - 0x4f, 0x14, 0xf1, 0x9b, 0x50, 0x55, 0x1b, 0x3a, 0xe8, 0x0d, 0x6d, 0x65, 0x5c, 0xed, 0x0e, 0x35, - 0xcd, 0x93, 0x40, 0x38, 0xe7, 0x37, 0x29, 0xe7, 0x79, 0xf3, 0xaa, 0x86, 0xb3, 0x4f, 0x41, 0x63, - 0xcc, 0x59, 0xe7, 0x45, 0xcf, 0x3c, 0xd6, 0xe2, 0xd1, 0x33, 0x8f, 0x37, 0x6e, 0x4e, 0x64, 0x3e, - 0xa6, 0xa0, 0x84, 0x79, 0x00, 0x20, 0x5b, 0x23, 0x48, 0xab, 0x4b, 0xe5, 0xc2, 0x9a, 0x0c, 0x0e, - 0xe9, 0xae, 0x8a, 0x69, 0x52, 0xb6, 0x7c, 0xdf, 0x25, 0xd8, 0x0e, 0x9c, 0x20, 0x64, 0x8e, 0x39, - 0x1d, 0x6b, 0x6c, 0x20, 0xed, 0x7a, 0xe2, 0x7d, 0x92, 0xe6, 0xcd, 0x13, 0x61, 0x38, 0xf7, 0x5b, - 0x94, 0xfb, 0x0d, 0xb3, 0xa9, 0xe1, 0x3e, 0x62, 0xb0, 0x64, 0xb3, 0xfd, 0x5f, 0x11, 0x2a, 0xcf, - 0x6c, 0xc7, 0x0d, 0xb1, 0x6b, 0xbb, 0x3d, 0x8c, 0x76, 0xa1, 0x40, 0x73, 0x77, 0x32, 0x10, 0xab, - 0x75, 0xfc, 0x64, 0x20, 0x8e, 0x15, 0xb2, 0xcd, 0x05, 0xca, 0xb8, 0x69, 0x5e, 0x26, 0x8c, 0x87, - 0x92, 0xf4, 0x12, 0x2b, 0x81, 0x1b, 0x77, 0xd0, 0x4b, 0x28, 0xf2, 0x06, 0x76, 0x82, 0x50, 0xac, - 0xa8, 0xd6, 0xbc, 0xa6, 0x9f, 0xd4, 0xed, 0x65, 0x95, 0x4d, 0x40, 0xe1, 0x08, 0x9f, 0x43, 0x00, - 0xd9, 0x8f, 0x49, 0x5a, 0x34, 0xd5, 
0xc7, 0x69, 0x2e, 0x64, 0x03, 0xe8, 0x74, 0xaa, 0xf2, 0xec, - 0x47, 0xb0, 0x84, 0xef, 0x37, 0x60, 0xf2, 0x89, 0x1d, 0xec, 0xa3, 0x44, 0xee, 0x55, 0xde, 0x9b, - 0x36, 0x9b, 0xba, 0x29, 0xce, 0xe5, 0x06, 0xe5, 0x72, 0x95, 0x85, 0x32, 0x95, 0x0b, 0x7d, 0x51, - 0x69, 0xdc, 0x41, 0x7d, 0x28, 0xb2, 0xc7, 0xa6, 0x49, 0xfd, 0xc5, 0x5e, 0xae, 0x26, 0xf5, 0x17, - 0x7f, 0x9f, 0x7a, 0x3a, 0x97, 0x11, 0x4c, 0x89, 0x47, 0x99, 0x28, 0xf1, 0x94, 0x25, 0xf1, 0x92, - 0xb3, 0x39, 0x9f, 0x35, 0xcd, 0x79, 0xdd, 0xa4, 0xbc, 0xae, 0x9b, 0x8d, 0x94, 0xad, 0x38, 0xe4, - 0x43, 0xe3, 0xce, 0x5d, 0x03, 0x7d, 0x07, 0x40, 0x36, 0xac, 0x52, 0x1e, 0x98, 0x6c, 0x82, 0xa5, - 0x3c, 0x30, 0xd5, 0xeb, 0x32, 0x17, 0x29, 0xdf, 0xdb, 0xe6, 0xcd, 0x24, 0xdf, 0xd0, 0xb7, 0xdd, - 0xe0, 0x25, 0xf6, 0xdf, 0x67, 0xd5, 0xf2, 0x60, 0xdf, 0x19, 0x91, 0x25, 0xfb, 0x50, 0x8e, 0xfa, - 0x09, 0xc9, 0x68, 0x9b, 0xec, 0x7c, 0x24, 0xa3, 0x6d, 0xaa, 0x11, 0x11, 0x0f, 0x3b, 0xb1, 0xdd, - 0x22, 0x40, 0x89, 0x03, 0xfe, 0x45, 0x1d, 0x26, 0xc9, 0x81, 0x9c, 0x1c, 0x4e, 0x64, 0xb1, 0x27, - 0xb9, 0xfa, 0x54, 0xbd, 0x3a, 0xb9, 0xfa, 0x74, 0x9d, 0x28, 0x7e, 0x38, 0x21, 0x97, 0xb5, 0x25, - 0x56, 0x45, 0x21, 0x2b, 0xf5, 0xa0, 0xa2, 0x14, 0x81, 0x90, 0x86, 0x58, 0xbc, 0xfe, 0x9d, 0x4c, - 0x77, 0x9a, 0x0a, 0x92, 0xf9, 0x3a, 0xe5, 0x77, 0x99, 0xa5, 0x3b, 0xca, 0xaf, 0xcf, 0x20, 0x08, - 0x43, 0xbe, 0x3a, 0xee, 0xf7, 0x9a, 0xd5, 0xc5, 0x7d, 0x7f, 0x21, 0x1b, 0x20, 0x73, 0x75, 0xd2, - 0xf1, 0x5f, 0x41, 0x55, 0x2d, 0xfc, 0x20, 0x8d, 0xf0, 0x89, 0x0a, 0x7d, 0x32, 0x8f, 0xe8, 0xea, - 0x46, 0xf1, 0xc8, 0x46, 0x59, 0xda, 0x0a, 0x18, 0x61, 0x3c, 0x80, 0x12, 0x2f, 0x00, 0xe9, 0x54, - 0x1a, 0x2f, 0xe2, 0xeb, 0x54, 0x9a, 0xa8, 0x1e, 0xc5, 0x4f, 0xcf, 0x94, 0x23, 0xb9, 0x88, 0x8a, - 0x5c, 0xcd, 0xb9, 0x3d, 0xc6, 0x61, 0x16, 0x37, 0x59, 0xb4, 0xcd, 0xe2, 0xa6, 0xd4, 0x07, 0xb2, - 0xb8, 0xed, 0xe1, 0x90, 0xc7, 0x03, 0x71, 0xb9, 0x46, 0x19, 0xc4, 0xd4, 0xfc, 0x68, 0x9e, 0x04, - 0xa2, 0xbb, 0xdc, 0x48, 0x86, 0x22, 0x39, 0x1e, 0x01, 0xc8, 0x62, 0x54, 
0xf2, 0xc4, 0xaa, 0xed, - 0x13, 0x24, 0x4f, 0xac, 0xfa, 0x7a, 0x56, 0x3c, 0xf6, 0x49, 0xbe, 0xec, 0x6e, 0x45, 0x38, 0xff, - 0xd8, 0x00, 0x94, 0x2e, 0x57, 0xa1, 0x77, 0xf5, 0xd4, 0xb5, 0x3d, 0x87, 0xe6, 0x7b, 0x67, 0x03, - 0xd6, 0xa5, 0x33, 0x29, 0x52, 0x8f, 0x42, 0x8f, 0x5e, 0x11, 0xa1, 0xbe, 0x6b, 0xc0, 0x74, 0xac, - 0xc4, 0x85, 0xde, 0xca, 0xb0, 0x69, 0xa2, 0xf1, 0xd0, 0x7c, 0xfb, 0x54, 0x38, 0xdd, 0x51, 0x5e, - 0xd9, 0x01, 0xe2, 0x4e, 0xf3, 0x3b, 0x06, 0xd4, 0xe2, 0x95, 0x30, 0x94, 0x41, 0x3b, 0xd5, 0xaf, - 0x68, 0xde, 0x3e, 0x1d, 0xf0, 0x64, 0xf3, 0xc8, 0xeb, 0xcc, 0x00, 0x4a, 0xbc, 0x64, 0xa6, 0xdb, - 0xf8, 0xf1, 0x06, 0x87, 0x6e, 0xe3, 0x27, 0xea, 0x6d, 0x9a, 0x8d, 0xef, 0x7b, 0x03, 0xac, 0xb8, - 0x19, 0xaf, 0xa4, 0x65, 0x71, 0x3b, 0xd9, 0xcd, 0x12, 0x65, 0xb8, 0x2c, 0x6e, 0xd2, 0xcd, 0x44, - 0xc1, 0x0c, 0x65, 0x10, 0x3b, 0xc5, 0xcd, 0x92, 0xf5, 0x36, 0x8d, 0x9b, 0x51, 0x86, 0x8a, 0x9b, - 0xc9, 0x42, 0x96, 0xce, 0xcd, 0x52, 0xbd, 0x18, 0x9d, 0x9b, 0xa5, 0x6b, 0x61, 0x1a, 0x3b, 0x52, - 0xbe, 0x31, 0x37, 0xbb, 0xa4, 0x29, 0x75, 0xa1, 0xf7, 0x32, 0x94, 0xa8, 0xed, 0xec, 0x34, 0xdf, - 0x3f, 0x23, 0x74, 0xe6, 0x1e, 0x67, 0xea, 0x17, 0x7b, 0xfc, 0x8f, 0x0c, 0x98, 0xd3, 0x55, 0xc7, - 0x50, 0x06, 0x9f, 0x8c, 0x46, 0x50, 0x73, 0xf1, 0xac, 0xe0, 0x27, 0x6b, 0x2b, 0xda, 0xf5, 0x8f, - 0xea, 0xff, 0xfa, 0xf9, 0xbc, 0xf1, 0x1f, 0x9f, 0xcf, 0x1b, 0xff, 0xf5, 0xf9, 0xbc, 0xf1, 0x93, - 0xff, 0x99, 0x9f, 0xd8, 0x2d, 0xd2, 0xff, 0xbf, 0xc3, 0xca, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, - 0x48, 0x49, 0x02, 0x7c, 0x86, 0x42, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// KVClient is the client API for KV service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type KVClient interface { - // Range gets the keys in the range from the key-value store. - Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. - Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) -} - -type kVClient struct { - cc *grpc.ClientConn -} - -func NewKVClient(cc *grpc.ClientConn) KVClient { - return &kVClient{cc} -} - -func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { - out := new(RangeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { - out := new(PutResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { - out := new(DeleteRangeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { - out := new(TxnResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { - out := new(CompactionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// KVServer is the server API for KV service. -type KVServer interface { - // Range gets the keys in the range from the key-value store. - Range(context.Context, *RangeRequest) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(context.Context, *PutRequest) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. 
- // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - Txn(context.Context, *TxnRequest) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. - Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) -} - -// UnimplementedKVServer can be embedded to have forward compatible implementations. -type UnimplementedKVServer struct { -} - -func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Range not implemented") -} -func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") -} -func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented") -} -func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented") -} -func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented") -} - -func RegisterKVServer(s *grpc.Server, srv KVServer) { - s.RegisterService(&_KV_serviceDesc, srv) -} - -func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil 
{ - return srv.(KVServer).Range(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Range", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Range(ctx, req.(*RangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PutRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Put(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Put", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Put(ctx, req.(*PutRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).DeleteRange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/DeleteRange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TxnRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Txn(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Txn", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - 
return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CompactionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Compact(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Compact", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _KV_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.KV", - HandlerType: (*KVServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Range", - Handler: _KV_Range_Handler, - }, - { - MethodName: "Put", - Handler: _KV_Put_Handler, - }, - { - MethodName: "DeleteRange", - Handler: _KV_DeleteRange_Handler, - }, - { - MethodName: "Txn", - Handler: _KV_Txn_Handler, - }, - { - MethodName: "Compact", - Handler: _KV_Compact_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// WatchClient is the client API for Watch service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type WatchClient interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. 
- Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) -} - -type watchClient struct { - cc *grpc.ClientConn -} - -func NewWatchClient(cc *grpc.ClientConn) WatchClient { - return &watchClient{cc} -} - -func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...) - if err != nil { - return nil, err - } - x := &watchWatchClient{stream} - return x, nil -} - -type Watch_WatchClient interface { - Send(*WatchRequest) error - Recv() (*WatchResponse, error) - grpc.ClientStream -} - -type watchWatchClient struct { - grpc.ClientStream -} - -func (x *watchWatchClient) Send(m *WatchRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *watchWatchClient) Recv() (*WatchResponse, error) { - m := new(WatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// WatchServer is the server API for Watch service. -type WatchServer interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - Watch(Watch_WatchServer) error -} - -// UnimplementedWatchServer can be embedded to have forward compatible implementations. 
-type UnimplementedWatchServer struct { -} - -func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") -} - -func RegisterWatchServer(s *grpc.Server, srv WatchServer) { - s.RegisterService(&_Watch_serviceDesc, srv) -} - -func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WatchServer).Watch(&watchWatchServer{stream}) -} - -type Watch_WatchServer interface { - Send(*WatchResponse) error - Recv() (*WatchRequest, error) - grpc.ServerStream -} - -type watchWatchServer struct { - grpc.ServerStream -} - -func (x *watchWatchServer) Send(m *WatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *watchWatchServer) Recv() (*WatchRequest, error) { - m := new(WatchRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Watch_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Watch", - HandlerType: (*WatchServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Watch_Watch_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// LeaseClient is the client API for Lease service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LeaseClient interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. 
- LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) - // LeaseLeases lists all existing leases. - LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) -} - -type leaseClient struct { - cc *grpc.ClientConn -} - -func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { - return &leaseClient{cc} -} - -func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { - out := new(LeaseGrantResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { - out := new(LeaseRevokeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...) 
- if err != nil { - return nil, err - } - x := &leaseLeaseKeepAliveClient{stream} - return x, nil -} - -type Lease_LeaseKeepAliveClient interface { - Send(*LeaseKeepAliveRequest) error - Recv() (*LeaseKeepAliveResponse, error) - grpc.ClientStream -} - -type leaseLeaseKeepAliveClient struct { - grpc.ClientStream -} - -func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { - m := new(LeaseKeepAliveResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { - out := new(LeaseTimeToLiveResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { - out := new(LeaseLeasesResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// LeaseServer is the server API for Lease service. -type LeaseServer interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. 
- LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - LeaseKeepAlive(Lease_LeaseKeepAliveServer) error - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) - // LeaseLeases lists all existing leases. - LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) -} - -// UnimplementedLeaseServer can be embedded to have forward compatible implementations. -type UnimplementedLeaseServer struct { -} - -func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented") -} -func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented") -} -func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error { - return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented") -} -func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented") -} -func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented") -} - -func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { - s.RegisterService(&_Lease_serviceDesc, srv) -} - -func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseGrantRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseGrant(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseGrant", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseRevokeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseRevoke(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseRevoke", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) -} - -type Lease_LeaseKeepAliveServer interface { - Send(*LeaseKeepAliveResponse) error - Recv() (*LeaseKeepAliveRequest, error) - grpc.ServerStream -} - -type leaseLeaseKeepAliveServer struct { - grpc.ServerStream -} - -func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { - m := new(LeaseKeepAliveRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseTimeToLiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseTimeToLive(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseLeasesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseLeases(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseLeases", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Lease_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Lease", - HandlerType: (*LeaseServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LeaseGrant", - Handler: _Lease_LeaseGrant_Handler, - }, - { - MethodName: "LeaseRevoke", - Handler: _Lease_LeaseRevoke_Handler, - }, - { - MethodName: "LeaseTimeToLive", - Handler: _Lease_LeaseTimeToLive_Handler, - }, - { - MethodName: "LeaseLeases", - Handler: _Lease_LeaseLeases_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "LeaseKeepAlive", - Handler: _Lease_LeaseKeepAlive_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// ClusterClient is the client API for Cluster service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ClusterClient interface { - // MemberAdd adds a member into the cluster. - MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. - MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) - // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. - MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) -} - -type clusterClient struct { - cc *grpc.ClientConn -} - -func NewClusterClient(cc *grpc.ClientConn) ClusterClient { - return &clusterClient{cc} -} - -func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { - out := new(MemberAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { - out := new(MemberRemoveResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { - out := new(MemberUpdateResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { - out := new(MemberListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) { - out := new(MemberPromoteResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ClusterServer is the server API for Cluster service. -type ClusterServer interface { - // MemberAdd adds a member into the cluster. - MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. - MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) - // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. - MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error) -} - -// UnimplementedClusterServer can be embedded to have forward compatible implementations. 
-type UnimplementedClusterServer struct { -} - -func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented") -} -func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented") -} -func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented") -} -func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented") -} -func (*UnimplementedClusterServer) MemberPromote(ctx context.Context, req *MemberPromoteRequest) (*MemberPromoteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberPromote not implemented") -} - -func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { - s.RegisterService(&_Cluster_serviceDesc, srv) -} - -func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberRemoveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberRemove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberRemove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberUpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberUpdate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberUpdate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(MemberPromoteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberPromote(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberPromote", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Cluster_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Cluster", - HandlerType: (*ClusterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "MemberAdd", - Handler: _Cluster_MemberAdd_Handler, - }, - { - MethodName: "MemberRemove", - Handler: _Cluster_MemberRemove_Handler, - }, - { - MethodName: "MemberUpdate", - Handler: _Cluster_MemberUpdate_Handler, - }, - { - MethodName: "MemberList", - Handler: _Cluster_MemberList_Handler, - }, - { - MethodName: "MemberPromote", - Handler: _Cluster_MemberPromote_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// MaintenanceClient is the client API for Maintenance service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MaintenanceClient interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) - // Status gets the status of the member. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) - // Hash computes the hash of whole backend keyspace, - // including key, lease, and other buckets in storage. 
- // This is designed for testing ONLY! - // Do not rely on this in production with ongoing transactions, - // since Hash operation does not hold MVCC locks. - // Use "HashKV" API instead for "key" bucket consistency checks. - Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) - // HashKV computes the hash of all MVCC keys up to a given revision. - // It only iterates "key" bucket in backend storage. - HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) - // MoveLeader requests current leader node to transfer its leadership to transferee. - MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) - // Downgrade requests downgrades, verifies feasibility or cancels downgrade - // on the cluster version. - // Supported since etcd 3.5. - Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) -} - -type maintenanceClient struct { - cc *grpc.ClientConn -} - -func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { - return &maintenanceClient{cc} -} - -func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { - out := new(AlarmResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { - out := new(DefragmentResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { - out := new(HashResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { - out := new(HashKVResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { - stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...) 
- if err != nil { - return nil, err - } - x := &maintenanceSnapshotClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Maintenance_SnapshotClient interface { - Recv() (*SnapshotResponse, error) - grpc.ClientStream -} - -type maintenanceSnapshotClient struct { - grpc.ClientStream -} - -func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { - m := new(SnapshotResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { - out := new(MoveLeaderResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) { - out := new(DowngradeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Downgrade", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MaintenanceServer is the server API for Maintenance service. -type MaintenanceServer interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) - // Status gets the status of the member. - Status(context.Context, *StatusRequest) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) - // Hash computes the hash of whole backend keyspace, - // including key, lease, and other buckets in storage. - // This is designed for testing ONLY! 
- // Do not rely on this in production with ongoing transactions, - // since Hash operation does not hold MVCC locks. - // Use "HashKV" API instead for "key" bucket consistency checks. - Hash(context.Context, *HashRequest) (*HashResponse, error) - // HashKV computes the hash of all MVCC keys up to a given revision. - // It only iterates "key" bucket in backend storage. - HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error - // MoveLeader requests current leader node to transfer its leadership to transferee. - MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) - // Downgrade requests downgrades, verifies feasibility or cancels downgrade - // on the cluster version. - // Supported since etcd 3.5. - Downgrade(context.Context, *DowngradeRequest) (*DowngradeResponse, error) -} - -// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMaintenanceServer struct { -} - -func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented") -} -func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented") -} -func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented") -} -func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented") -} -func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error { - return status.Errorf(codes.Unimplemented, "method Snapshot not implemented") -} -func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented") -} -func (*UnimplementedMaintenanceServer) Downgrade(ctx context.Context, req *DowngradeRequest) (*DowngradeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Downgrade not implemented") -} - -func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { - s.RegisterService(&_Maintenance_serviceDesc, srv) -} - -func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AlarmRequest) - 
if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Alarm(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Alarm", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DefragmentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Defragment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Defragment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HashRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(MaintenanceServer).Hash(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Hash", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HashKVRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).HashKV(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/HashKV", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SnapshotRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) -} - -type Maintenance_SnapshotServer interface { - Send(*SnapshotResponse) error - grpc.ServerStream -} - -type maintenanceSnapshotServer struct { - grpc.ServerStream -} - -func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MoveLeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).MoveLeader(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/MoveLeader", - } - handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Downgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DowngradeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Downgrade(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Downgrade", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Downgrade(ctx, req.(*DowngradeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Maintenance_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Maintenance", - HandlerType: (*MaintenanceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Alarm", - Handler: _Maintenance_Alarm_Handler, - }, - { - MethodName: "Status", - Handler: _Maintenance_Status_Handler, - }, - { - MethodName: "Defragment", - Handler: _Maintenance_Defragment_Handler, - }, - { - MethodName: "Hash", - Handler: _Maintenance_Hash_Handler, - }, - { - MethodName: "HashKV", - Handler: _Maintenance_HashKV_Handler, - }, - { - MethodName: "MoveLeader", - Handler: _Maintenance_MoveLeader_Handler, - }, - { - MethodName: "Downgrade", - Handler: _Maintenance_Downgrade_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Snapshot", - Handler: _Maintenance_Snapshot_Handler, - ServerStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// AuthClient is the client API for Auth service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type AuthClient interface { - // AuthEnable enables authentication. 
- AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) - // AuthStatus displays authentication status. - AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) - // Authenticate processes an authenticate request. - Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) - // UserAdd adds a new user. User name cannot be empty. - UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) - // UserList gets a list of all users. - UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. - UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. Role name cannot be empty. 
- RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. - RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. - RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) -} - -type authClient struct { - cc *grpc.ClientConn -} - -func NewAuthClient(cc *grpc.ClientConn) AuthClient { - return &authClient{cc} -} - -func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { - out := new(AuthEnableResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { - out := new(AuthDisableResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) { - out := new(AuthStatusResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { - out := new(AuthenticateResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { - out := new(AuthUserAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { - out := new(AuthUserGetResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { - out := new(AuthUserListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { - out := new(AuthUserDeleteResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { - out := new(AuthUserChangePasswordResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { - out := new(AuthUserGrantRoleResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { - out := new(AuthUserRevokeRoleResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { - out := new(AuthRoleAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { - out := new(AuthRoleGetResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { - out := new(AuthRoleListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { - out := new(AuthRoleDeleteResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { - out := new(AuthRoleGrantPermissionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { - out := new(AuthRoleRevokePermissionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AuthServer is the server API for Auth service. -type AuthServer interface { - // AuthEnable enables authentication. - AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) - // AuthStatus displays authentication status. - AuthStatus(context.Context, *AuthStatusRequest) (*AuthStatusResponse, error) - // Authenticate processes an authenticate request. - Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) - // UserAdd adds a new user. User name cannot be empty. - UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) - // UserList gets a list of all users. 
- UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. - UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. Role name cannot be empty. - RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. - RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. - RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) -} - -// UnimplementedAuthServer can be embedded to have forward compatible implementations. 
-type UnimplementedAuthServer struct { -} - -func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented") -} -func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented") -} -func (*UnimplementedAuthServer) AuthStatus(ctx context.Context, req *AuthStatusRequest) (*AuthStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuthStatus not implemented") -} -func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented") -} -func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented") -} -func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented") -} -func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented") -} -func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented") -} -func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented") -} -func 
(*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented") -} -func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented") -} -func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented") -} -func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented") -} -func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented") -} -func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented") -} -func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented") -} -func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented") -} - -func RegisterAuthServer(s *grpc.Server, srv AuthServer) { - s.RegisterService(&_Auth_serviceDesc, srv) -} - -func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthEnableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthEnable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthEnable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthDisableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthDisable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthDisable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_AuthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthStatus(ctx, req.(*AuthStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(AuthenticateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).Authenticate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/Authenticate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(AuthServer).UserList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserChangePasswordRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserChangePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserChangePassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGrantRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserGrantRole(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGrantRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserRevokeRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserRevokeRole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserRevokeRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleGet", - } - 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGrantPermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGrantPermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - 
return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleRevokePermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleRevokePermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Auth_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Auth", - HandlerType: (*AuthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AuthEnable", - Handler: _Auth_AuthEnable_Handler, - }, - { - MethodName: "AuthDisable", - Handler: _Auth_AuthDisable_Handler, - }, - { - MethodName: "AuthStatus", - Handler: _Auth_AuthStatus_Handler, - }, - { - MethodName: "Authenticate", - Handler: _Auth_Authenticate_Handler, - }, - { - MethodName: "UserAdd", - Handler: _Auth_UserAdd_Handler, - }, - { - MethodName: "UserGet", - Handler: _Auth_UserGet_Handler, - }, - { - MethodName: "UserList", - Handler: _Auth_UserList_Handler, - }, - { - MethodName: "UserDelete", - Handler: _Auth_UserDelete_Handler, - }, - { - MethodName: "UserChangePassword", - Handler: _Auth_UserChangePassword_Handler, - }, - { - MethodName: "UserGrantRole", - Handler: _Auth_UserGrantRole_Handler, - }, - { - MethodName: "UserRevokeRole", - Handler: _Auth_UserRevokeRole_Handler, - }, - { - MethodName: "RoleAdd", - Handler: _Auth_RoleAdd_Handler, - }, - { - MethodName: "RoleGet", - Handler: _Auth_RoleGet_Handler, - }, - { - MethodName: "RoleList", - Handler: 
_Auth_RoleList_Handler, - }, - { - MethodName: "RoleDelete", - Handler: _Auth_RoleDelete_Handler, - }, - { - MethodName: "RoleGrantPermission", - Handler: _Auth_RoleGrantPermission_Handler, - }, - { - MethodName: "RoleRevokePermission", - Handler: _Auth_RoleRevokePermission_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RaftTerm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - i-- - dAtA[i] = 0x20 - } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x18 - } - if m.MemberId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) - i-- - dAtA[i] = 0x10 - } - if m.ClusterId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *RangeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MaxCreateRevision != 0 { - i = 
encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) - i-- - dAtA[i] = 0x68 - } - if m.MinCreateRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) - i-- - dAtA[i] = 0x60 - } - if m.MaxModRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) - i-- - dAtA[i] = 0x58 - } - if m.MinModRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) - i-- - dAtA[i] = 0x50 - } - if m.CountOnly { - i-- - if m.CountOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if m.KeysOnly { - i-- - if m.KeysOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if m.Serializable { - i-- - if m.Serializable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.SortTarget != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) - i-- - dAtA[i] = 0x30 - } - if m.SortOrder != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) - i-- - dAtA[i] = 0x28 - } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x20 - } - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x18 - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RangeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Count != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x20 - } - if m.More { - i-- - if m.More { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Kvs) > 0 { - for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PutRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.IgnoreLease { - i-- - if m.IgnoreLease { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.IgnoreValue { - i-- - if m.IgnoreValue { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.PrevKv { - i-- - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.Lease != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x18 - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= 
len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PutResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.PrevKv != nil { - { - size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.PrevKv { - i-- - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, 
uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PrevKvs) > 0 { - for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Deleted != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RequestOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if m.Request != nil { - { - size := m.Request.Size() - i -= size - if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RequestRange != nil { - { - size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RequestPut != nil { - { - size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RequestDeleteRange != nil { - { - size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RequestTxn != nil { - { - size, err := 
m.RequestTxn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *ResponseOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Response != nil { - { - size := m.Response.Size() - i -= size - if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ResponseRange != nil { - { - size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ResponsePut != nil { - { - size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *ResponseOp_ResponseDeleteRange) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ResponseDeleteRange != nil { - { - size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ResponseTxn != nil { - { - size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *Compare) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Compare) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x4 - i-- - dAtA[i] = 0x82 - } - if m.TargetUnion != nil { - { - size := m.TargetUnion.Size() - i -= size - if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - 
i-- - dAtA[i] = 0x1a - } - if m.Target != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Target)) - i-- - dAtA[i] = 0x10 - } - if m.Result != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Result)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x20 - return len(dAtA) - i, nil -} -func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) - i-- - dAtA[i] = 0x28 - return len(dAtA) - i, nil -} -func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) - i-- - dAtA[i] = 0x30 - return len(dAtA) - i, nil -} -func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Value != nil { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} -func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x40 - return len(dAtA) - i, 
nil -} -func (m *TxnRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Failure) > 0 { - for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Success) > 0 { - for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Compare) > 0 { - for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *TxnResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - 
copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Responses) > 0 { - for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Succeeded { - i-- - if m.Succeeded { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Physical { - i-- - if m.Physical { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int 
- _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HashRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.HashRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.HashRevision)) - i-- - dAtA[i] = 0x20 - } - if m.CompactRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - i-- - dAtA[i] = 0x18 - } - if m.Hash != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HashResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Hash != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotRequest) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x22 - } - if len(m.Blob) > 0 { - i -= len(m.Blob) - copy(dAtA[i:], m.Blob) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) - i-- - dAtA[i] = 0x1a - } - if m.RemainingBytes != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WatchRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RequestUnion != nil { - { - size := m.RequestUnion.Size() - i -= size - if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.CreateRequest != nil { - { - size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.CancelRequest != nil { - { - size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ProgressRequest != nil { - { - size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() 
- dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Fragment { - i-- - if m.Fragment { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- - dAtA[i] = 0x38 - } - if m.PrevKv { - i-- - if m.PrevKv { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if len(m.Filters) > 0 { - dAtA22 := make([]byte, len(m.Filters)*10) - var j21 int - for _, num := range m.Filters { - for num >= 1<<7 { - dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j21++ - } - dAtA22[j21] = uint8(num) - j21++ - } - i -= j21 - copy(dAtA[i:], dAtA22[:j21]) - i = encodeVarintRpc(dAtA, i, uint64(j21)) - i-- - dAtA[i] = 0x2a - } - if m.ProgressNotify { - i-- - if m.ProgressNotify { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.StartRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) - i-- - dAtA[i] = 0x18 - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *WatchResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - } - if m.Fragment { 
- i-- - if m.Fragment { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if len(m.CancelReason) > 0 { - i -= len(m.CancelReason) - copy(dAtA[i:], m.CancelReason) - i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) - i-- - dAtA[i] = 0x32 - } - if m.CompactRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - i-- - dAtA[i] = 0x28 - } - if m.Canceled { - i-- - if m.Canceled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.Created { - i-- - if m.Created { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 - } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x18 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseCheckpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Remaining_TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL)) - i-- - dAtA[i] = 0x10 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Checkpoints) > 0 { - for iNdEx := 
len(m.Checkpoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Checkpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x18 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Keys { - i-- - if m.Keys { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Keys) > 0 { - for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Keys[iNdEx]) - copy(dAtA[i:], m.Keys[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.GrantedTTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) - i-- - dAtA[i] = 0x20 - } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x18 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseStatus) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Member) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Member) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = 
i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.IsLearner { - i-- - if m.IsLearner { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.ClientURLs) > 0 { - for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ClientURLs[iNdEx]) - copy(dAtA[i:], m.ClientURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.IsLearner { - i-- - if m.IsLearner { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m 
*MemberAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Member != nil { - { - size, err := m.Member.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } 
- return len(dAtA) - i, nil -} - -func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } 
- if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Linearizable { - i-- - if m.Linearizable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m 
*MemberListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberPromoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if 
err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberPromoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DefragmentResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TargetID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} 
- -func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Alarm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - i-- - dAtA[i] = 0x18 - } - if m.MemberID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - i-- - dAtA[i] = 0x10 - } - if m.Action != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *AlarmMember) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Alarm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - i-- - dAtA[i] = 0x10 - } - if m.MemberID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { - 
size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Alarms) > 0 { - for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DowngradeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DowngradeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DowngradeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - } - if m.Action != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DowngradeResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DowngradeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() 
- return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DowngradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.StorageVersion) > 0 { - i -= len(m.StorageVersion) - copy(dAtA[i:], m.StorageVersion) - i = encodeVarintRpc(dAtA, 
i, uint64(len(m.StorageVersion))) - i-- - dAtA[i] = 0x5a - } - if m.IsLearner { - i-- - if m.IsLearner { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - if m.DbSizeInUse != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse)) - i-- - dAtA[i] = 0x48 - } - if len(m.Errors) > 0 { - for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Errors[iNdEx]) - copy(dAtA[i:], m.Errors[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Errors[iNdEx]))) - i-- - dAtA[i] = 0x42 - } - } - if m.RaftAppliedIndex != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex)) - i-- - dAtA[i] = 0x38 - } - if m.RaftTerm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - i-- - dAtA[i] = 0x30 - } - if m.RaftIndex != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) - i-- - dAtA[i] = 0x28 - } - if m.Leader != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) - i-- - dAtA[i] = 0x20 - } - if m.DbSize != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) - i-- - dAtA[i] = 0x18 - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *AuthStatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthStatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = 
encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.HashedPassword) > 0 { - i -= len(m.HashedPassword) - copy(dAtA[i:], m.HashedPassword) - i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword))) - i-- - dAtA[i] = 0x22 - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGetRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.HashedPassword) > 0 { - i -= len(m.HashedPassword) - copy(dAtA[i:], m.HashedPassword) - i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword))) - i-- - dAtA[i] = 0x1a - } - if len(m.Password) > 0 
{ - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0x12 - } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - 
dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { 
- return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Perm != nil { - { - size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthEnableResponse) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthStatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthStatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*AuthStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.AuthRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.AuthRevision)) - i-- - dAtA[i] = 0x18 - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserDeleteResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := 
m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Perm) > 0 { - for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Users) > 0 { - for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Users[iNdEx]) - copy(dAtA[i:], m.Users[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return 
len(dAtA) - i, nil -} - -func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { - offset -= sovRpc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base 
-} -func (m *ResponseHeader) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ClusterId != 0 { - n += 1 + sovRpc(uint64(m.ClusterId)) - } - if m.MemberId != 0 { - n += 1 + sovRpc(uint64(m.MemberId)) - } - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.RaftTerm != 0 { - n += 1 + sovRpc(uint64(m.RaftTerm)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RangeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Limit != 0 { - n += 1 + sovRpc(uint64(m.Limit)) - } - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.SortOrder != 0 { - n += 1 + sovRpc(uint64(m.SortOrder)) - } - if m.SortTarget != 0 { - n += 1 + sovRpc(uint64(m.SortTarget)) - } - if m.Serializable { - n += 2 - } - if m.KeysOnly { - n += 2 - } - if m.CountOnly { - n += 2 - } - if m.MinModRevision != 0 { - n += 1 + sovRpc(uint64(m.MinModRevision)) - } - if m.MaxModRevision != 0 { - n += 1 + sovRpc(uint64(m.MaxModRevision)) - } - if m.MinCreateRevision != 0 { - n += 1 + sovRpc(uint64(m.MinCreateRevision)) - } - if m.MaxCreateRevision != 0 { - n += 1 + sovRpc(uint64(m.MaxCreateRevision)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RangeResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Kvs) > 0 { - for _, e := range m.Kvs { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.More { - n += 2 - } - if m.Count != 0 { - n += 1 + sovRpc(uint64(m.Count)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PutRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if 
l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovRpc(uint64(m.Lease)) - } - if m.PrevKv { - n += 2 - } - if m.IgnoreValue { - n += 2 - } - if m.IgnoreLease { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PutResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.PrevKv != nil { - l = m.PrevKv.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteRangeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.PrevKv { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteRangeResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Deleted != 0 { - n += 1 + sovRpc(uint64(m.Deleted)) - } - if len(m.PrevKvs) > 0 { - for _, e := range m.PrevKvs { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RequestOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Request != nil { - n += m.Request.Size() - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RequestOp_RequestRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RequestRange != nil { - l = m.RequestRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestPut) Size() (n int) { - if m == nil { - return 0 - } 
- var l int - _ = l - if m.RequestPut != nil { - l = m.RequestPut.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestDeleteRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RequestDeleteRange != nil { - l = m.RequestDeleteRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *RequestOp_RequestTxn) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RequestTxn != nil { - l = m.RequestTxn.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Response != nil { - n += m.Response.Size() - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResponseOp_ResponseRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResponseRange != nil { - l = m.ResponseRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponsePut) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResponsePut != nil { - l = m.ResponsePut.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResponseDeleteRange != nil { - l = m.ResponseDeleteRange.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *ResponseOp_ResponseTxn) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResponseTxn != nil { - l = m.ResponseTxn.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *Compare) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Result != 0 { - n += 1 + sovRpc(uint64(m.Result)) - } - if m.Target != 0 { - n += 1 + sovRpc(uint64(m.Target)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.TargetUnion != nil { - n += m.TargetUnion.Size() - } - l = 
len(m.RangeEnd) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Compare_Version) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovRpc(uint64(m.Version)) - return n -} -func (m *Compare_CreateRevision) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovRpc(uint64(m.CreateRevision)) - return n -} -func (m *Compare_ModRevision) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovRpc(uint64(m.ModRevision)) - return n -} -func (m *Compare_Value) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - l = len(m.Value) - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *Compare_Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovRpc(uint64(m.Lease)) - return n -} -func (m *TxnRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Compare) > 0 { - for _, e := range m.Compare { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.Success) > 0 { - for _, e := range m.Success { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.Failure) > 0 { - for _, e := range m.Failure { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TxnResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Succeeded { - n += 2 - } - if len(m.Responses) > 0 { - for _, e := range m.Responses { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CompactionRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Revision != 0 { - n += 1 + 
sovRpc(uint64(m.Revision)) - } - if m.Physical { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CompactionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HashRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HashKVRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Revision != 0 { - n += 1 + sovRpc(uint64(m.Revision)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HashKVResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Hash != 0 { - n += 1 + sovRpc(uint64(m.Hash)) - } - if m.CompactRevision != 0 { - n += 1 + sovRpc(uint64(m.CompactRevision)) - } - if m.HashRevision != 0 { - n += 1 + sovRpc(uint64(m.HashRevision)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HashResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Hash != 0 { - n += 1 + sovRpc(uint64(m.Hash)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.RemainingBytes != 0 { - 
n += 1 + sovRpc(uint64(m.RemainingBytes)) - } - l = len(m.Blob) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WatchRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RequestUnion != nil { - n += m.RequestUnion.Size() - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WatchRequest_CreateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CreateRequest != nil { - l = m.CreateRequest.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *WatchRequest_CancelRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CancelRequest != nil { - l = m.CancelRequest.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *WatchRequest_ProgressRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ProgressRequest != nil { - l = m.ProgressRequest.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} -func (m *WatchCreateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.StartRevision != 0 { - n += 1 + sovRpc(uint64(m.StartRevision)) - } - if m.ProgressNotify { - n += 2 - } - if len(m.Filters) > 0 { - l = 0 - for _, e := range m.Filters { - l += sovRpc(uint64(e)) - } - n += 1 + sovRpc(uint64(l)) + l - } - if m.PrevKv { - n += 2 - } - if m.WatchId != 0 { - n += 1 + sovRpc(uint64(m.WatchId)) - } - if m.Fragment { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WatchCancelRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.WatchId != 0 { - n += 1 + 
sovRpc(uint64(m.WatchId)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WatchProgressRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WatchResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.WatchId != 0 { - n += 1 + sovRpc(uint64(m.WatchId)) - } - if m.Created { - n += 2 - } - if m.Canceled { - n += 2 - } - if m.CompactRevision != 0 { - n += 1 + sovRpc(uint64(m.CompactRevision)) - } - l = len(m.CancelReason) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Fragment { - n += 2 - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseGrantRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseGrantResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseRevokeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseRevokeResponse) Size() (n int) { - if m 
== nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseCheckpoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.Remaining_TTL != 0 { - n += 1 + sovRpc(uint64(m.Remaining_TTL)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseCheckpointRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Checkpoints) > 0 { - for _, e := range m.Checkpoints { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseCheckpointResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseKeepAliveRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseKeepAliveResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseTimeToLiveRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.Keys { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m 
*LeaseTimeToLiveResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovRpc(uint64(m.TTL)) - } - if m.GrantedTTL != 0 { - n += 1 + sovRpc(uint64(m.GrantedTTL)) - } - if len(m.Keys) > 0 { - for _, b := range m.Keys { - l = len(b) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseLeasesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseLeasesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Member) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if len(m.ClientURLs) > 0 { - for _, s := range m.ClientURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.IsLearner { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberAddRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if 
len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.IsLearner { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberAddResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Member != nil { - l = m.Member.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberRemoveRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberRemoveResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberUpdateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberUpdateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += 
len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Linearizable { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberListResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberPromoteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemberPromoteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DefragmentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DefragmentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MoveLeaderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TargetID != 0 { - n += 1 + sovRpc(uint64(m.TargetID)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MoveLeaderResponse) Size() (n int) { - if 
m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AlarmRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovRpc(uint64(m.Action)) - } - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AlarmMember) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AlarmResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Alarms) > 0 { - for _, e := range m.Alarms { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DowngradeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovRpc(uint64(m.Action)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DowngradeResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.DbSize != 0 { - n += 1 + sovRpc(uint64(m.DbSize)) - } - if m.Leader != 0 { - n += 1 + sovRpc(uint64(m.Leader)) - } - if m.RaftIndex != 0 { - n += 1 + sovRpc(uint64(m.RaftIndex)) - } - if m.RaftTerm != 0 { - n += 1 + sovRpc(uint64(m.RaftTerm)) - } - if m.RaftAppliedIndex != 0 { - n += 1 + sovRpc(uint64(m.RaftAppliedIndex)) - } - if len(m.Errors) > 0 { - for _, s := range m.Errors { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.DbSizeInUse != 0 { - n += 1 + sovRpc(uint64(m.DbSizeInUse)) - } - if m.IsLearner { - n += 2 - } - l = len(m.StorageVersion) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthEnableRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthDisableRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthStatusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthenticateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserAddRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = 
l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.HashedPassword) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserGetRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserDeleteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserChangePasswordRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.HashedPassword) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserGrantRoleRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.User) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserRevokeRoleRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleAddRequest) Size() (n int) { - 
if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleGetRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleDeleteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleGrantPermissionRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Perm != nil { - l = m.Perm.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleRevokePermissionRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthEnableResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthDisableResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthStatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.Enabled { - n += 2 - } - if m.AuthRevision != 0 { - n += 1 + sovRpc(uint64(m.AuthRevision)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthenticateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Token) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserAddResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserGetResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserDeleteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserChangePasswordResponse) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserGrantRoleResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserRevokeRoleResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleAddResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleGetResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Perm) > 0 { - for _, e := range m.Perm { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleListResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthUserListResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Users) > 0 { - 
for _, s := range m.Users { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleDeleteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleGrantPermissionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AuthRoleRevokePermissionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRpc(x uint64) (n int) { - return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResponseHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) - } - m.ClusterId = 0 - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterId |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) - } - m.MemberId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberId |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) - } - m.SortOrder = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) - } - 
m.SortTarget = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Serializable = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.KeysOnly = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CountOnly = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) - } - m.MinModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinModRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) - } - m.MaxModRevision = 0 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxModRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) - } - m.MinCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinCreateRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) - } - m.MaxCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxCreateRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l 
{ - return io.ErrUnexpectedEOF - } - m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) - if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.More = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PutRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreValue = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) - } - var v 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreLease = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PutResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := 
m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrevKv == nil { - m.PrevKv = &mvccpb.KeyValue{} - } - if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) - } - m.Deleted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Deleted |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) - if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RangeRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestRange{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PutRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestPut{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DeleteRangeRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestDeleteRange{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) - } - var msglen int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &TxnRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestTxn{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if 
b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RangeResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseRange{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PutResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponsePut{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DeleteRangeResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseDeleteRange{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &TxnResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseTxn{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Compare) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Compare: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - m.Result = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Result |= Compare_CompareResult(b&0x7F) << shift - if b < 
0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - m.Target = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Target |= Compare_CompareTarget(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_Version{v} - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_CreateRevision{v} - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_ModRevision{v} - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.TargetUnion = &Compare_Value{v} - iNdEx = postIndex - case 8: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_Lease{v} - case 64: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TxnRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Compare = append(m.Compare, &Compare{}) - if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > 
l { - return io.ErrUnexpectedEOF - } - m.Success = append(m.Success, &RequestOp{}) - if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Failure = append(m.Failure, &RequestOp{}) - if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TxnResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Succeeded = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) - } - var msglen int - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Responses = append(m.Responses, &ResponseOp{}) - if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CompactionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - m.Revision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Physical = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CompactionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashKVRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashKVResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - m.Hash = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Hash |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) - } - m.CompactRevision = 0 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HashRevision", wireType) - } - m.HashRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HashRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - m.Hash = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Hash |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) - } - m.RemainingBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RemainingBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) - } - 
var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...) - if m.Blob == nil { - m.Blob = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WatchCreateRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.RequestUnion = &WatchRequest_CreateRequest{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WatchCancelRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.RequestUnion = &WatchRequest_CancelRequest{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WatchProgressRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.RequestUnion = &WatchRequest_ProgressRequest{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) - } - m.StartRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressNotify = bool(v != 0) - case 5: - if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= WatchCreateRequest_FilterType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.Filters) == 0 { - m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount) - } - for iNdEx < postIndex { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= WatchCreateRequest_FilterType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) - } - m.WatchId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WatchId |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Fragment = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) - } - m.WatchId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WatchId |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) - } - m.WatchId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WatchId |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var v int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Created = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Canceled = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) - } - m.CompactRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CancelReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v 
|= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Fragment = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &mvccpb.Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType) - } - m.Remaining_TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Remaining_TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{}) - if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Keys = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for 
field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) - } - m.GrantedTTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GrantedTTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) - copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, &LeaseStatus{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Member) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Member: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsLearner = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
IsLearner", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsLearner = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = 
&ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Member == nil { - m.Member = &Member{} - } - if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Linearizable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Linearizable = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } 
- if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex 
< 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) - } - m.TargetID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TargetID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= AlarmType(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
(skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmMember) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= AlarmType(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Alarms = append(m.Alarms, &AlarmMember{}) - if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DowngradeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DowngradeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DowngradeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= DowngradeRequest_DowngradeAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DowngradeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DowngradeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DowngradeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex 
< 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) - } - m.DbSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DbSize |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - m.Leader = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Leader |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - m.RaftIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType) - } - m.RaftAppliedIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftAppliedIndex |= uint64(b&0x7F) 
<< shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType) - } - m.DbSizeInUse = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DbSizeInUse |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsLearner = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StorageVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthStatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthStatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &authpb.UserAddOptions{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HashedPassword = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HashedPassword = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Perm == nil { - m.Perm = &authpb.Permission{} - } - if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthStatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthStatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) - } 
- m.AuthRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AuthRevision |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := 
m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
(skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } 
- if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Perm = append(m.Perm, &authpb.Permission{}) - if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - 
if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRpc - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupRpc - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthRpc - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group") -) diff --git a/api/etcdserverpb/rpc.proto b/api/etcdserverpb/rpc.proto deleted file mode 100644 index 9cdc0b37f6e..00000000000 --- a/api/etcdserverpb/rpc.proto +++ /dev/null @@ -1,1390 +0,0 @@ -syntax = "proto3"; -package etcdserverpb; - -import 
"gogoproto/gogo.proto"; -import "etcd/api/mvccpb/kv.proto"; -import "etcd/api/authpb/auth.proto"; -import "etcd/api/versionpb/version.proto"; - -// for grpc-gateway -import "google/api/annotations.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service KV { - // Range gets the keys in the range from the key-value store. - rpc Range(RangeRequest) returns (RangeResponse) { - option (google.api.http) = { - post: "/v3/kv/range" - body: "*" - }; - } - - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - rpc Put(PutRequest) returns (PutResponse) { - option (google.api.http) = { - post: "/v3/kv/put" - body: "*" - }; - } - - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { - option (google.api.http) = { - post: "/v3/kv/deleterange" - body: "*" - }; - } - - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - rpc Txn(TxnRequest) returns (TxnResponse) { - option (google.api.http) = { - post: "/v3/kv/txn" - body: "*" - }; - } - - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. - rpc Compact(CompactionRequest) returns (CompactionResponse) { - option (google.api.http) = { - post: "/v3/kv/compaction" - body: "*" - }; - } -} - -service Watch { - // Watch watches for events happening or that have happened. 
Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - rpc Watch(stream WatchRequest) returns (stream WatchResponse) { - option (google.api.http) = { - post: "/v3/watch" - body: "*" - }; - } -} - -service Lease { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { - option (google.api.http) = { - post: "/v3/lease/grant" - body: "*" - }; - } - - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { - option (google.api.http) = { - post: "/v3/lease/revoke" - body: "*" - additional_bindings { - post: "/v3/kv/lease/revoke" - body: "*" - } - }; - } - - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { - option (google.api.http) = { - post: "/v3/lease/keepalive" - body: "*" - }; - } - - // LeaseTimeToLive retrieves lease information. - rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { - option (google.api.http) = { - post: "/v3/lease/timetolive" - body: "*" - additional_bindings { - post: "/v3/kv/lease/timetolive" - body: "*" - } - }; - } - - // LeaseLeases lists all existing leases. 
- rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { - option (google.api.http) = { - post: "/v3/lease/leases" - body: "*" - additional_bindings { - post: "/v3/kv/lease/leases" - body: "*" - } - }; - } -} - -service Cluster { - // MemberAdd adds a member into the cluster. - rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { - option (google.api.http) = { - post: "/v3/cluster/member/add" - body: "*" - }; - } - - // MemberRemove removes an existing member from the cluster. - rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { - option (google.api.http) = { - post: "/v3/cluster/member/remove" - body: "*" - }; - } - - // MemberUpdate updates the member configuration. - rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { - option (google.api.http) = { - post: "/v3/cluster/member/update" - body: "*" - }; - } - - // MemberList lists all the members in the cluster. - rpc MemberList(MemberListRequest) returns (MemberListResponse) { - option (google.api.http) = { - post: "/v3/cluster/member/list" - body: "*" - }; - } - - // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. - rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) { - option (google.api.http) = { - post: "/v3/cluster/member/promote" - body: "*" - }; - } -} - -service Maintenance { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - rpc Alarm(AlarmRequest) returns (AlarmResponse) { - option (google.api.http) = { - post: "/v3/maintenance/alarm" - body: "*" - }; - } - - // Status gets the status of the member. - rpc Status(StatusRequest) returns (StatusResponse) { - option (google.api.http) = { - post: "/v3/maintenance/status" - body: "*" - }; - } - - // Defragment defragments a member's backend database to recover storage space. 
- rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { - option (google.api.http) = { - post: "/v3/maintenance/defragment" - body: "*" - }; - } - - // Hash computes the hash of whole backend keyspace, - // including key, lease, and other buckets in storage. - // This is designed for testing ONLY! - // Do not rely on this in production with ongoing transactions, - // since Hash operation does not hold MVCC locks. - // Use "HashKV" API instead for "key" bucket consistency checks. - rpc Hash(HashRequest) returns (HashResponse) { - option (google.api.http) = { - post: "/v3/maintenance/hash" - body: "*" - }; - } - - // HashKV computes the hash of all MVCC keys up to a given revision. - // It only iterates "key" bucket in backend storage. - rpc HashKV(HashKVRequest) returns (HashKVResponse) { - option (google.api.http) = { - post: "/v3/maintenance/hash" - body: "*" - }; - } - - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { - option (google.api.http) = { - post: "/v3/maintenance/snapshot" - body: "*" - }; - } - - // MoveLeader requests current leader node to transfer its leadership to transferee. - rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { - option (google.api.http) = { - post: "/v3/maintenance/transfer-leadership" - body: "*" - }; - } - - // Downgrade requests downgrades, verifies feasibility or cancels downgrade - // on the cluster version. - // Supported since etcd 3.5. - rpc Downgrade(DowngradeRequest) returns (DowngradeResponse) { - option (google.api.http) = { - post: "/v3/maintenance/downgrade" - body: "*" - }; - } -} - -service Auth { - // AuthEnable enables authentication. - rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { - option (google.api.http) = { - post: "/v3/auth/enable" - body: "*" - }; - } - - // AuthDisable disables authentication. 
- rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { - option (google.api.http) = { - post: "/v3/auth/disable" - body: "*" - }; - } - - // AuthStatus displays authentication status. - rpc AuthStatus(AuthStatusRequest) returns (AuthStatusResponse) { - option (google.api.http) = { - post: "/v3/auth/status" - body: "*" - }; - } - - // Authenticate processes an authenticate request. - rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { - option (google.api.http) = { - post: "/v3/auth/authenticate" - body: "*" - }; - } - - // UserAdd adds a new user. User name cannot be empty. - rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { - option (google.api.http) = { - post: "/v3/auth/user/add" - body: "*" - }; - } - - // UserGet gets detailed user information. - rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { - option (google.api.http) = { - post: "/v3/auth/user/get" - body: "*" - }; - } - - // UserList gets a list of all users. - rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { - option (google.api.http) = { - post: "/v3/auth/user/list" - body: "*" - }; - } - - // UserDelete deletes a specified user. - rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { - option (google.api.http) = { - post: "/v3/auth/user/delete" - body: "*" - }; - } - - // UserChangePassword changes the password of a specified user. - rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { - option (google.api.http) = { - post: "/v3/auth/user/changepw" - body: "*" - }; - } - - // UserGrant grants a role to a specified user. - rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { - option (google.api.http) = { - post: "/v3/auth/user/grant" - body: "*" - }; - } - - // UserRevokeRole revokes a role of specified user. 
- rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { - option (google.api.http) = { - post: "/v3/auth/user/revoke" - body: "*" - }; - } - - // RoleAdd adds a new role. Role name cannot be empty. - rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { - option (google.api.http) = { - post: "/v3/auth/role/add" - body: "*" - }; - } - - // RoleGet gets detailed role information. - rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { - option (google.api.http) = { - post: "/v3/auth/role/get" - body: "*" - }; - } - - // RoleList gets lists of all roles. - rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { - option (google.api.http) = { - post: "/v3/auth/role/list" - body: "*" - }; - } - - // RoleDelete deletes a specified role. - rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { - option (google.api.http) = { - post: "/v3/auth/role/delete" - body: "*" - }; - } - - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { - option (google.api.http) = { - post: "/v3/auth/role/grant" - body: "*" - }; - } - - // RoleRevokePermission revokes a key or range permission of a specified role. - rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { - option (google.api.http) = { - post: "/v3/auth/role/revoke" - body: "*" - }; - } -} - -message ResponseHeader { - option (versionpb.etcd_version_msg) = "3.0"; - - // cluster_id is the ID of the cluster which sent the response. - uint64 cluster_id = 1; - // member_id is the ID of the member which sent the response. - uint64 member_id = 2; - // revision is the key-value store revision when the request was applied, and it's - // unset (so 0) in case of calls not interacting with key-value store. 
- // For watch progress responses, the header.revision indicates progress. All future events - // received in this stream are guaranteed to have a higher revision number than the - // header.revision number. - int64 revision = 3; - // raft_term is the raft term when the request was applied. - uint64 raft_term = 4; -} - -message RangeRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - enum SortOrder { - option (versionpb.etcd_version_enum) = "3.0"; - NONE = 0; // default, no sorting - ASCEND = 1; // lowest target value first - DESCEND = 2; // highest target value first - } - enum SortTarget { - option (versionpb.etcd_version_enum) = "3.0"; - KEY = 0; - VERSION = 1; - CREATE = 2; - MOD = 3; - VALUE = 4; - } - - // key is the first key for the range. If range_end is not given, the request only looks up key. - bytes key = 1; - // range_end is the upper bound on the requested range [key, range_end). - // If range_end is '\0', the range is all keys >= key. - // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), - // then the range request gets all keys prefixed with key. - // If both key and range_end are '\0', then the range request returns all keys. - bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. When limit is set to 0, - // it is treated as no limit. - int64 limit = 3; - // revision is the point-in-time of the key-value store to use for the range. - // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - int64 revision = 4; - - // sort_order is the order for returned sorted results. - SortOrder sort_order = 5; - - // sort_target is the key-value field to use for sorting. - SortTarget sort_target = 6; - - // serializable sets the range request to use serializable member-local reads. 
- // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. - bool serializable = 7; - - // keys_only when set returns only the keys and not the values. - bool keys_only = 8; - - // count_only when set returns only the count of the keys in the range. - bool count_only = 9; - - // min_mod_revision is the lower bound for returned key mod revisions; all keys with - // lesser mod revisions will be filtered away. - int64 min_mod_revision = 10 [(versionpb.etcd_version_field)="3.1"]; - - // max_mod_revision is the upper bound for returned key mod revisions; all keys with - // greater mod revisions will be filtered away. - int64 max_mod_revision = 11 [(versionpb.etcd_version_field)="3.1"]; - - // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create revisions will be filtered away. - int64 min_create_revision = 12 [(versionpb.etcd_version_field)="3.1"]; - - // max_create_revision is the upper bound for returned key create revisions; all keys with - // greater create revisions will be filtered away. - int64 max_create_revision = 13 [(versionpb.etcd_version_field)="3.1"]; -} - -message RangeResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - repeated mvccpb.KeyValue kvs = 2; - // more indicates if there are more keys to return in the requested range. - bool more = 3; - // count is set to the number of keys within the range when requested. 
- int64 count = 4; -} - -message PutRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // key is the key, in bytes, to put into the key-value store. - bytes key = 1; - // value is the value, in bytes, to associate with the key in the key-value store. - bytes value = 2; - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - int64 lease = 3; - - // If prev_kv is set, etcd gets the previous key-value pair before changing it. - // The previous key-value pair will be returned in the put response. - bool prev_kv = 4 [(versionpb.etcd_version_field)="3.1"]; - - // If ignore_value is set, etcd updates the key using its current value. - // Returns an error if the key does not exist. - bool ignore_value = 5 [(versionpb.etcd_version_field)="3.2"]; - - // If ignore_lease is set, etcd updates the key using its current lease. - // Returns an error if the key does not exist. - bool ignore_lease = 6 [(versionpb.etcd_version_field)="3.2"]; -} - -message PutResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // if prev_kv is set in the request, the previous key-value pair will be returned. - mvccpb.KeyValue prev_kv = 2 [(versionpb.etcd_version_field)="3.1"]; -} - -message DeleteRangeRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // key is the first key to delete in the range. - bytes key = 1; - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all the keys - // with the prefix (the given key). - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - bytes range_end = 2; - - // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. 
- // The previous key-value pairs will be returned in the delete response. - bool prev_kv = 3 [(versionpb.etcd_version_field)="3.1"]; -} - -message DeleteRangeResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // deleted is the number of keys deleted by the delete range request. - int64 deleted = 2; - // if prev_kv is set in the request, the previous key-value pairs will be returned. - repeated mvccpb.KeyValue prev_kvs = 3 [(versionpb.etcd_version_field)="3.1"]; -} - -message RequestOp { - option (versionpb.etcd_version_msg) = "3.0"; - // request is a union of request types accepted by a transaction. - oneof request { - RangeRequest request_range = 1; - PutRequest request_put = 2; - DeleteRangeRequest request_delete_range = 3; - TxnRequest request_txn = 4 [(versionpb.etcd_version_field)="3.3"]; - } -} - -message ResponseOp { - option (versionpb.etcd_version_msg) = "3.0"; - - // response is a union of response types returned by a transaction. - oneof response { - RangeResponse response_range = 1; - PutResponse response_put = 2; - DeleteRangeResponse response_delete_range = 3; - TxnResponse response_txn = 4 [(versionpb.etcd_version_field)="3.3"]; - } -} - -message Compare { - option (versionpb.etcd_version_msg) = "3.0"; - - enum CompareResult { - option (versionpb.etcd_version_enum) = "3.0"; - - EQUAL = 0; - GREATER = 1; - LESS = 2; - NOT_EQUAL = 3 [(versionpb.etcd_version_enum_value)="3.1"]; - } - enum CompareTarget { - option (versionpb.etcd_version_enum) = "3.0"; - - VERSION = 0; - CREATE = 1; - MOD = 2; - VALUE = 3; - LEASE = 4 [(versionpb.etcd_version_enum_value)="3.3"]; - } - // result is logical comparison operation for this comparison. - CompareResult result = 1; - // target is the key-value field to inspect for the comparison. - CompareTarget target = 2; - // key is the subject key for the comparison operation. 
- bytes key = 3; - oneof target_union { - // version is the version of the given key - int64 version = 4; - // create_revision is the creation revision of the given key - int64 create_revision = 5; - // mod_revision is the last modified revision of the given key. - int64 mod_revision = 6; - // value is the value of the given key, in bytes. - bytes value = 7; - // lease is the lease id of the given key. - int64 lease = 8 [(versionpb.etcd_version_field)="3.3"]; - // leave room for more target_union field tags, jump to 64 - } - - // range_end compares the given target to all keys in the range [key, range_end). - // See RangeRequest for more details on key ranges. - bytes range_end = 64 [(versionpb.etcd_version_field)="3.3"]; - // TODO: fill out with most of the rest of RangeRequest fields when needed. -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. All tests in the guard are applied and -// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. 
Like t op, but executed if guard evaluates to false. -message TxnRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. - // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. - repeated Compare compare = 1; - // success is a list of requests which will be applied when compare evaluates to true. - repeated RequestOp success = 2; - // failure is a list of requests which will be applied when compare evaluates to false. - repeated RequestOp failure = 3; -} - -message TxnResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // succeeded is set to true if the compare evaluated to true or false otherwise. - bool succeeded = 2; - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. - repeated ResponseOp responses = 3; -} - -// CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. -message CompactionRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // revision is the key-value store revision for the compaction operation. - int64 revision = 1; - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. 
- bool physical = 2; -} - -message CompactionResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message HashRequest { - option (versionpb.etcd_version_msg) = "3.0"; -} - -message HashKVRequest { - option (versionpb.etcd_version_msg) = "3.3"; - // revision is the key-value store revision for the hash operation. - int64 revision = 1; -} - -message HashKVResponse { - option (versionpb.etcd_version_msg) = "3.3"; - - ResponseHeader header = 1; - // hash is the hash value computed from the responding member's MVCC keys up to a given revision. - uint32 hash = 2; - // compact_revision is the compacted revision of key-value store when hash begins. - int64 compact_revision = 3; - // hash_revision is the revision up to which the hash is calculated. - int64 hash_revision = 4 [(versionpb.etcd_version_field)="3.6"]; -} - -message HashResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // hash is the hash value computed from the responding member's KV's backend. - uint32 hash = 2; -} - -message SnapshotRequest { - option (versionpb.etcd_version_msg) = "3.3"; -} - -message SnapshotResponse { - option (versionpb.etcd_version_msg) = "3.3"; - - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - ResponseHeader header = 1; - - // remaining_bytes is the number of blob bytes to be sent after this message - uint64 remaining_bytes = 2; - - // blob contains the next chunk of the snapshot in the snapshot stream. - bytes blob = 3; - - // local version of server that created the snapshot. - // In cluster with binaries with different version, each cluster can return different result. - // Informs which etcd server version should be used when restoring the snapshot. 
- string version = 4 [(versionpb.etcd_version_field)="3.6"]; -} - -message WatchRequest { - option (versionpb.etcd_version_msg) = "3.0"; - // request_union is a request to either create a new watcher or cancel an existing watcher. - oneof request_union { - WatchCreateRequest create_request = 1; - WatchCancelRequest cancel_request = 2; - WatchProgressRequest progress_request = 3 [(versionpb.etcd_version_field)="3.4"]; - } -} - -message WatchCreateRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // key is the key to register for watching. - bytes key = 1; - - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - // If the range_end is one bit larger than the given key, - // then all keys with the prefix (the given key) will be watched. - bytes range_end = 2; - - // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". - int64 start_revision = 3; - - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. - bool progress_notify = 4; - - enum FilterType { - option (versionpb.etcd_version_enum) = "3.1"; - - // filter out put event. - NOPUT = 0; - // filter out delete event. - NODELETE = 1; - } - - // filters filter the events at server side before it sends back to the watcher. - repeated FilterType filters = 5 [(versionpb.etcd_version_field)="3.1"]; - - // If prev_kv is set, created watcher gets the previous KV before the event happens. - // If the previous KV is already compacted, nothing will be returned. 
- bool prev_kv = 6 [(versionpb.etcd_version_field)="3.1"]; - - // If watch_id is provided and non-zero, it will be assigned to this watcher. - // Since creating a watcher in etcd is not a synchronous operation, - // this can be used ensure that ordering is correct when creating multiple - // watchers on the same stream. Creating a watcher with an ID already in - // use on the stream will cause an error to be returned. - int64 watch_id = 7 [(versionpb.etcd_version_field)="3.4"]; - - // fragment enables splitting large revisions into multiple watch responses. - bool fragment = 8 [(versionpb.etcd_version_field)="3.4"]; -} - -message WatchCancelRequest { - option (versionpb.etcd_version_msg) = "3.1"; - // watch_id is the watcher id to cancel so that no more events are transmitted. - int64 watch_id = 1 [(versionpb.etcd_version_field)="3.1"]; -} - -// Requests the a watch stream progress status be sent in the watch response stream as soon as -// possible. -message WatchProgressRequest { - option (versionpb.etcd_version_msg) = "3.4"; -} - -message WatchResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // watch_id is the ID of the watcher that corresponds to the response. - int64 watch_id = 2; - - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. - bool created = 3; - - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - bool canceled = 4; - - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. 
- // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - int64 compact_revision = 5; - - // cancel_reason indicates the reason for canceling the watcher. - string cancel_reason = 6 [(versionpb.etcd_version_field)="3.4"]; - - // framgment is true if large watch response was split over multiple responses. - bool fragment = 7 [(versionpb.etcd_version_field)="3.4"]; - - repeated mvccpb.Event events = 11; -} - -message LeaseGrantRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // TTL is the advisory time-to-live in seconds. Expired lease will return -1. - int64 TTL = 1; - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. - int64 ID = 2; -} - -message LeaseGrantResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // ID is the lease ID for the granted lease. - int64 ID = 2; - // TTL is the server chosen lease time-to-live in seconds. - int64 TTL = 3; - string error = 4; -} - -message LeaseRevokeRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. - int64 ID = 1; -} - -message LeaseRevokeResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message LeaseCheckpoint { - option (versionpb.etcd_version_msg) = "3.4"; - - // ID is the lease ID to checkpoint. - int64 ID = 1; - - // Remaining_TTL is the remaining time until expiry of the lease. - int64 remaining_TTL = 2; -} - -message LeaseCheckpointRequest { - option (versionpb.etcd_version_msg) = "3.4"; - - repeated LeaseCheckpoint checkpoints = 1; -} - -message LeaseCheckpointResponse { - option (versionpb.etcd_version_msg) = "3.4"; - - ResponseHeader header = 1; -} - -message LeaseKeepAliveRequest { - option (versionpb.etcd_version_msg) = "3.0"; - // ID is the lease ID for the lease to keep alive. 
- int64 ID = 1; -} - -message LeaseKeepAliveResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // ID is the lease ID from the keep alive request. - int64 ID = 2; - // TTL is the new time-to-live for the lease. - int64 TTL = 3; -} - -message LeaseTimeToLiveRequest { - option (versionpb.etcd_version_msg) = "3.1"; - // ID is the lease ID for the lease. - int64 ID = 1; - // keys is true to query all the keys attached to this lease. - bool keys = 2; -} - -message LeaseTimeToLiveResponse { - option (versionpb.etcd_version_msg) = "3.1"; - - ResponseHeader header = 1; - // ID is the lease ID from the keep alive request. - int64 ID = 2; - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - int64 TTL = 3; - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - int64 grantedTTL = 4; - // Keys is the list of keys attached to this lease. - repeated bytes keys = 5; -} - -message LeaseLeasesRequest { - option (versionpb.etcd_version_msg) = "3.3"; -} - -message LeaseStatus { - option (versionpb.etcd_version_msg) = "3.3"; - - int64 ID = 1; - // TODO: int64 TTL = 2; -} - -message LeaseLeasesResponse { - option (versionpb.etcd_version_msg) = "3.3"; - - ResponseHeader header = 1; - repeated LeaseStatus leases = 2; -} - -message Member { - option (versionpb.etcd_version_msg) = "3.0"; - - // ID is the member ID for this member. - uint64 ID = 1; - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - string name = 2; - // peerURLs is the list of URLs the member exposes to the cluster for communication. - repeated string peerURLs = 3; - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. - repeated string clientURLs = 4; - // isLearner indicates if the member is raft learner. 
- bool isLearner = 5 [(versionpb.etcd_version_field)="3.4"]; -} - -message MemberAddRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // peerURLs is the list of URLs the added member will use to communicate with the cluster. - repeated string peerURLs = 1; - // isLearner indicates if the added member is raft learner. - bool isLearner = 2 [(versionpb.etcd_version_field)="3.4"]; -} - -message MemberAddResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // member is the member information for the added member. - Member member = 2; - // members is a list of all members after adding the new member. - repeated Member members = 3; -} - -message MemberRemoveRequest { - option (versionpb.etcd_version_msg) = "3.0"; - // ID is the member ID of the member to remove. - uint64 ID = 1; -} - -message MemberRemoveResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // members is a list of all members after removing the member. - repeated Member members = 2; -} - -message MemberUpdateRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // ID is the member ID of the member to update. - uint64 ID = 1; - // peerURLs is the new list of URLs the member will use to communicate with the cluster. - repeated string peerURLs = 2; -} - -message MemberUpdateResponse{ - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // members is a list of all members after updating the member. - repeated Member members = 2 [(versionpb.etcd_version_field)="3.1"]; -} - -message MemberListRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - bool linearizable = 1 [(versionpb.etcd_version_field)="3.5"]; -} - -message MemberListResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // members is a list of all members associated with the cluster. 
- repeated Member members = 2; -} - -message MemberPromoteRequest { - option (versionpb.etcd_version_msg) = "3.4"; - // ID is the member ID of the member to promote. - uint64 ID = 1; -} - -message MemberPromoteResponse { - option (versionpb.etcd_version_msg) = "3.4"; - - ResponseHeader header = 1; - // members is a list of all members after promoting the member. - repeated Member members = 2; -} - -message DefragmentRequest { - option (versionpb.etcd_version_msg) = "3.0"; -} - -message DefragmentResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message MoveLeaderRequest { - option (versionpb.etcd_version_msg) = "3.3"; - // targetID is the node ID for the new leader. - uint64 targetID = 1; -} - -message MoveLeaderResponse { - option (versionpb.etcd_version_msg) = "3.3"; - - ResponseHeader header = 1; -} - -enum AlarmType { - option (versionpb.etcd_version_enum) = "3.0"; - - NONE = 0; // default, used to query if any alarm is active - NOSPACE = 1; // space quota is exhausted - CORRUPT = 2 [(versionpb.etcd_version_enum_value)="3.3"]; // kv store corruption detected -} - -message AlarmRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - enum AlarmAction { - option (versionpb.etcd_version_enum) = "3.0"; - - GET = 0; - ACTIVATE = 1; - DEACTIVATE = 2; - } - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. - AlarmAction action = 1; - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - uint64 memberID = 2; - // alarm is the type of alarm to consider for this request. - AlarmType alarm = 3; -} - -message AlarmMember { - option (versionpb.etcd_version_msg) = "3.0"; - // memberID is the ID of the member associated with the raised alarm. - uint64 memberID = 1; - // alarm is the type of alarm which has been raised. 
- AlarmType alarm = 2; -} - -message AlarmResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // alarms is a list of alarms associated with the alarm request. - repeated AlarmMember alarms = 2; -} - -message DowngradeRequest { - option (versionpb.etcd_version_msg) = "3.5"; - - enum DowngradeAction { - option (versionpb.etcd_version_enum) = "3.5"; - - VALIDATE = 0; - ENABLE = 1; - CANCEL = 2; - } - - // action is the kind of downgrade request to issue. The action may - // VALIDATE the target version, DOWNGRADE the cluster version, - // or CANCEL the current downgrading job. - DowngradeAction action = 1; - // version is the target version to downgrade. - string version = 2; -} - -message DowngradeResponse { - option (versionpb.etcd_version_msg) = "3.5"; - - ResponseHeader header = 1; - // version is the current cluster version. - string version = 2; -} - -message StatusRequest { - option (versionpb.etcd_version_msg) = "3.0"; -} - -message StatusResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // version is the cluster protocol version used by the responding member. - string version = 2; - // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. - int64 dbSize = 3; - // leader is the member ID which the responding member believes is the current leader. - uint64 leader = 4; - // raftIndex is the current raft committed index of the responding member. - uint64 raftIndex = 5; - // raftTerm is the current raft term of the responding member. - uint64 raftTerm = 6; - // raftAppliedIndex is the current raft applied index of the responding member. - uint64 raftAppliedIndex = 7 [(versionpb.etcd_version_field)="3.4"]; - // errors contains alarm/health information and status. - repeated string errors = 8 [(versionpb.etcd_version_field)="3.4"]; - // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. 
- int64 dbSizeInUse = 9 [(versionpb.etcd_version_field)="3.4"]; - // isLearner indicates if the member is raft learner. - bool isLearner = 10 [(versionpb.etcd_version_field)="3.4"]; - // storageVersion is the version of the db file. It might be get updated with delay in relationship to the target cluster version. - string storageVersion = 11 [(versionpb.etcd_version_field)="3.6"]; -} - -message AuthEnableRequest { - option (versionpb.etcd_version_msg) = "3.0"; -} - -message AuthDisableRequest { - option (versionpb.etcd_version_msg) = "3.0"; -} - -message AuthStatusRequest { - option (versionpb.etcd_version_msg) = "3.5"; -} - -message AuthenticateRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - string name = 1; - string password = 2; -} - -message AuthUserAddRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - string name = 1; - string password = 2; - authpb.UserAddOptions options = 3 [(versionpb.etcd_version_field)="3.4"]; - string hashedPassword = 4 [(versionpb.etcd_version_field)="3.5"]; -} - -message AuthUserGetRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - string name = 1; -} - -message AuthUserDeleteRequest { - option (versionpb.etcd_version_msg) = "3.0"; - // name is the name of the user to delete. - string name = 1; -} - -message AuthUserChangePasswordRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // name is the name of the user whose password is being changed. - string name = 1; - // password is the new password for the user. Note that this field will be removed in the API layer. - string password = 2; - // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. - string hashedPassword = 3 [(versionpb.etcd_version_field)="3.5"]; -} - -message AuthUserGrantRoleRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // user is the name of the user which should be granted a given role. 
- string user = 1; - // role is the name of the role to grant to the user. - string role = 2; -} - -message AuthUserRevokeRoleRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - string name = 1; - string role = 2; -} - -message AuthRoleAddRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // name is the name of the role to add to the authentication system. - string name = 1; -} - -message AuthRoleGetRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - string role = 1; -} - -message AuthUserListRequest { - option (versionpb.etcd_version_msg) = "3.0"; -} - -message AuthRoleListRequest { - option (versionpb.etcd_version_msg) = "3.0"; -} - -message AuthRoleDeleteRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - string role = 1; -} - -message AuthRoleGrantPermissionRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - // name is the name of the role which will be granted the permission. - string name = 1; - // perm is the permission to grant to the role. 
- authpb.Permission perm = 2; -} - -message AuthRoleRevokePermissionRequest { - option (versionpb.etcd_version_msg) = "3.0"; - - string role = 1; - bytes key = 2; - bytes range_end = 3; -} - -message AuthEnableResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthDisableResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthStatusResponse { - option (versionpb.etcd_version_msg) = "3.5"; - - ResponseHeader header = 1; - bool enabled = 2; - // authRevision is the current revision of auth store - uint64 authRevision = 3; -} - -message AuthenticateResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - // token is an authorized token that can be used in succeeding RPCs - string token = 2; -} - -message AuthUserAddResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthUserGetResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - - repeated string roles = 2; -} - -message AuthUserDeleteResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthUserChangePasswordResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthUserGrantRoleResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthUserRevokeRoleResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthRoleAddResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthRoleGetResponse { - ResponseHeader header = 1 [(versionpb.etcd_version_field)="3.0"]; - - repeated authpb.Permission perm = 2 [(versionpb.etcd_version_field)="3.0"]; -} - -message AuthRoleListResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - - 
repeated string roles = 2; -} - -message AuthUserListResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; - - repeated string users = 2; -} - -message AuthRoleDeleteResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthRoleGrantPermissionResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} - -message AuthRoleRevokePermissionResponse { - option (versionpb.etcd_version_msg) = "3.0"; - - ResponseHeader header = 1; -} diff --git a/api/go.mod b/api/go.mod deleted file mode 100644 index b2ee57fbb37..00000000000 --- a/api/go.mod +++ /dev/null @@ -1,33 +0,0 @@ -module go.etcd.io/etcd/api/v3 - -go 1.19 - -require ( - github.com/coreos/go-semver v0.3.1 - github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.5.2 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/stretchr/testify v1.8.1 - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 - google.golang.org/grpc v1.51.0 -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -// Bad imports are sometimes causing attempts to pull that code. -// This makes the error more explicit. 
-replace ( - go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/api/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/pkg/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY -) diff --git a/api/go.sum b/api/go.sum deleted file mode 100644 index 00c60e35229..00000000000 --- a/api/go.sum +++ /dev/null @@ -1,168 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto 
v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/api/membershippb/membership.pb.go b/api/membershippb/membership.pb.go deleted file mode 100644 index 386185f0f8d..00000000000 --- a/api/membershippb/membership.pb.go +++ /dev/null @@ -1,1458 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: membership.proto - -package membershippb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - _ "go.etcd.io/etcd/api/v3/versionpb" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// RaftAttributes represents the raft related attributes of an etcd member. -type RaftAttributes struct { - // peerURLs is the list of peers in the raft cluster. - PeerUrls []string `protobuf:"bytes,1,rep,name=peer_urls,json=peerUrls,proto3" json:"peer_urls,omitempty"` - // isLearner indicates if the member is raft learner. - IsLearner bool `protobuf:"varint,2,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RaftAttributes) Reset() { *m = RaftAttributes{} } -func (m *RaftAttributes) String() string { return proto.CompactTextString(m) } -func (*RaftAttributes) ProtoMessage() {} -func (*RaftAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_949fe0d019050ef5, []int{0} -} -func (m *RaftAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RaftAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RaftAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RaftAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_RaftAttributes.Merge(m, src) -} -func (m *RaftAttributes) XXX_Size() int { - return m.Size() -} -func (m *RaftAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_RaftAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_RaftAttributes proto.InternalMessageInfo - -// Attributes represents all the non-raft related attributes of an etcd member. 
-type Attributes struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - ClientUrls []string `protobuf:"bytes,2,rep,name=client_urls,json=clientUrls,proto3" json:"client_urls,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Attributes) Reset() { *m = Attributes{} } -func (m *Attributes) String() string { return proto.CompactTextString(m) } -func (*Attributes) ProtoMessage() {} -func (*Attributes) Descriptor() ([]byte, []int) { - return fileDescriptor_949fe0d019050ef5, []int{1} -} -func (m *Attributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Attributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Attributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Attributes.Merge(m, src) -} -func (m *Attributes) XXX_Size() int { - return m.Size() -} -func (m *Attributes) XXX_DiscardUnknown() { - xxx_messageInfo_Attributes.DiscardUnknown(m) -} - -var xxx_messageInfo_Attributes proto.InternalMessageInfo - -type Member struct { - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - RaftAttributes *RaftAttributes `protobuf:"bytes,2,opt,name=raft_attributes,json=raftAttributes,proto3" json:"raft_attributes,omitempty"` - MemberAttributes *Attributes `protobuf:"bytes,3,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { 
- return fileDescriptor_949fe0d019050ef5, []int{2} -} -func (m *Member) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Member.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Member) XXX_Merge(src proto.Message) { - xxx_messageInfo_Member.Merge(m, src) -} -func (m *Member) XXX_Size() int { - return m.Size() -} -func (m *Member) XXX_DiscardUnknown() { - xxx_messageInfo_Member.DiscardUnknown(m) -} - -var xxx_messageInfo_Member proto.InternalMessageInfo - -type ClusterVersionSetRequest struct { - Ver string `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterVersionSetRequest) Reset() { *m = ClusterVersionSetRequest{} } -func (m *ClusterVersionSetRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterVersionSetRequest) ProtoMessage() {} -func (*ClusterVersionSetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_949fe0d019050ef5, []int{3} -} -func (m *ClusterVersionSetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterVersionSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClusterVersionSetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClusterVersionSetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterVersionSetRequest.Merge(m, src) -} -func (m *ClusterVersionSetRequest) XXX_Size() int { - return m.Size() -} -func (m *ClusterVersionSetRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_ClusterVersionSetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterVersionSetRequest proto.InternalMessageInfo - -type ClusterMemberAttrSetRequest struct { - Member_ID uint64 `protobuf:"varint,1,opt,name=member_ID,json=memberID,proto3" json:"member_ID,omitempty"` - MemberAttributes *Attributes `protobuf:"bytes,2,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClusterMemberAttrSetRequest) Reset() { *m = ClusterMemberAttrSetRequest{} } -func (m *ClusterMemberAttrSetRequest) String() string { return proto.CompactTextString(m) } -func (*ClusterMemberAttrSetRequest) ProtoMessage() {} -func (*ClusterMemberAttrSetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_949fe0d019050ef5, []int{4} -} -func (m *ClusterMemberAttrSetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterMemberAttrSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClusterMemberAttrSetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClusterMemberAttrSetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterMemberAttrSetRequest.Merge(m, src) -} -func (m *ClusterMemberAttrSetRequest) XXX_Size() int { - return m.Size() -} -func (m *ClusterMemberAttrSetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterMemberAttrSetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterMemberAttrSetRequest proto.InternalMessageInfo - -type DowngradeInfoSetRequest struct { - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - Ver string `protobuf:"bytes,2,opt,name=ver,proto3" json:"ver,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` 
- XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DowngradeInfoSetRequest) Reset() { *m = DowngradeInfoSetRequest{} } -func (m *DowngradeInfoSetRequest) String() string { return proto.CompactTextString(m) } -func (*DowngradeInfoSetRequest) ProtoMessage() {} -func (*DowngradeInfoSetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_949fe0d019050ef5, []int{5} -} -func (m *DowngradeInfoSetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DowngradeInfoSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DowngradeInfoSetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DowngradeInfoSetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DowngradeInfoSetRequest.Merge(m, src) -} -func (m *DowngradeInfoSetRequest) XXX_Size() int { - return m.Size() -} -func (m *DowngradeInfoSetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DowngradeInfoSetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DowngradeInfoSetRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*RaftAttributes)(nil), "membershippb.RaftAttributes") - proto.RegisterType((*Attributes)(nil), "membershippb.Attributes") - proto.RegisterType((*Member)(nil), "membershippb.Member") - proto.RegisterType((*ClusterVersionSetRequest)(nil), "membershippb.ClusterVersionSetRequest") - proto.RegisterType((*ClusterMemberAttrSetRequest)(nil), "membershippb.ClusterMemberAttrSetRequest") - proto.RegisterType((*DowngradeInfoSetRequest)(nil), "membershippb.DowngradeInfoSetRequest") -} - -func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) } - -var fileDescriptor_949fe0d019050ef5 = []byte{ - // 401 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 
0x52, 0xcd, 0xae, 0xd2, 0x40, - 0x14, 0xbe, 0xd3, 0xde, 0xdc, 0xdb, 0x9e, 0x6b, 0x10, 0x27, 0x24, 0x36, 0xa0, 0xb5, 0x61, 0xc5, - 0xaa, 0x24, 0x12, 0x36, 0xee, 0x54, 0x58, 0x60, 0xc4, 0xc5, 0x18, 0xdc, 0x92, 0x29, 0x1c, 0xb0, - 0x49, 0x69, 0xeb, 0xcc, 0x14, 0xf7, 0x2e, 0x7d, 0x02, 0xdf, 0xc2, 0x95, 0xef, 0xc0, 0xd2, 0x47, - 0x50, 0x7c, 0x11, 0xd3, 0x99, 0x42, 0x4b, 0x74, 0x75, 0x77, 0xa7, 0x5f, 0xcf, 0xf9, 0xfe, 0x5a, - 0x68, 0xef, 0x70, 0x17, 0xa1, 0x90, 0x1f, 0xe3, 0x3c, 0xcc, 0x45, 0xa6, 0x32, 0xfa, 0xa0, 0x46, - 0xf2, 0xa8, 0xdb, 0xd9, 0x66, 0xdb, 0x4c, 0xbf, 0x18, 0x96, 0x93, 0xd9, 0xe9, 0x06, 0xa8, 0x56, - 0xeb, 0x21, 0xcf, 0xe3, 0xe1, 0x1e, 0x85, 0x8c, 0xb3, 0x34, 0x8f, 0x4e, 0x93, 0xd9, 0xe8, 0x2f, - 0xa0, 0xc5, 0xf8, 0x46, 0xbd, 0x54, 0x4a, 0xc4, 0x51, 0xa1, 0x50, 0xd2, 0x1e, 0xb8, 0x39, 0xa2, - 0x58, 0x16, 0x22, 0x91, 0x1e, 0x09, 0xec, 0x81, 0xcb, 0x9c, 0x12, 0x58, 0x88, 0x44, 0xd2, 0xa7, - 0x00, 0xb1, 0x5c, 0x26, 0xc8, 0x45, 0x8a, 0xc2, 0xb3, 0x02, 0x32, 0x70, 0x98, 0x1b, 0xcb, 0xb7, - 0x06, 0x78, 0x71, 0xfb, 0xe5, 0x87, 0x67, 0x8f, 0xc2, 0x71, 0xff, 0x0d, 0x40, 0x83, 0x92, 0xc2, - 0x75, 0xca, 0x77, 0xe8, 0x91, 0x80, 0x0c, 0x5c, 0xa6, 0x67, 0xfa, 0x0c, 0xee, 0x56, 0x49, 0x8c, - 0xa9, 0x32, 0x42, 0x96, 0x16, 0x02, 0x03, 0x95, 0x52, 0x35, 0xd7, 0x77, 0x02, 0x37, 0x73, 0x9d, - 0x95, 0xb6, 0xc0, 0x9a, 0x4d, 0x34, 0xcd, 0x35, 0xb3, 0x66, 0x13, 0x3a, 0x85, 0x87, 0x82, 0x6f, - 0xd4, 0x92, 0x9f, 0xb5, 0xb4, 0xa7, 0xbb, 0xe7, 0x4f, 0xc2, 0x66, 0x3b, 0xe1, 0x65, 0x44, 0xd6, - 0x12, 0x97, 0x91, 0xa7, 0xf0, 0xc8, 0xac, 0x37, 0x89, 0x6c, 0x4d, 0xe4, 0x5d, 0x12, 0x35, 0x48, - 0xaa, 0x2f, 0x52, 0x23, 0xb5, 0xe3, 0x31, 0x78, 0xaf, 0x93, 0x42, 0x2a, 0x14, 0x1f, 0x4c, 0xd9, - 0xef, 0x51, 0x31, 0xfc, 0x54, 0xa0, 0x54, 0xb4, 0x0d, 0xf6, 0x1e, 0x45, 0x55, 0x45, 0x39, 0xd6, - 0x67, 0x5f, 0x09, 0xf4, 0xaa, 0xbb, 0xf9, 0x99, 0xbb, 0x71, 0xda, 0x03, 0xb7, 0xb2, 0x79, 0x2e, - 0xc1, 0x31, 0x80, 0xae, 0xe2, 0x3f, 0x19, 0xac, 0xfb, 0x67, 0x78, 0x07, 0x8f, 0x27, 0xd9, 0xe7, - 0x74, 
0x2b, 0xf8, 0x1a, 0x67, 0xe9, 0x26, 0x6b, 0xf8, 0xf0, 0xe0, 0x16, 0x53, 0x1e, 0x25, 0xb8, - 0xd6, 0x2e, 0x1c, 0x76, 0x7a, 0x3c, 0x85, 0xb3, 0xfe, 0x0d, 0xf7, 0xaa, 0x73, 0xf8, 0xed, 0x5f, - 0x1d, 0x8e, 0x3e, 0xf9, 0x79, 0xf4, 0xc9, 0xaf, 0xa3, 0x4f, 0xbe, 0xfd, 0xf1, 0xaf, 0xa2, 0x1b, - 0xfd, 0x17, 0x8e, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xa3, 0xbd, 0xee, 0xdf, 0x02, 0x00, - 0x00, -} - -func (m *RaftAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RaftAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RaftAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.IsLearner { - i-- - if m.IsLearner { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.PeerUrls) > 0 { - for iNdEx := len(m.PeerUrls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerUrls[iNdEx]) - copy(dAtA[i:], m.PeerUrls[iNdEx]) - i = encodeVarintMembership(dAtA, i, uint64(len(m.PeerUrls[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Attributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Attributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Attributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ClientUrls) > 0 { - for iNdEx := 
len(m.ClientUrls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ClientUrls[iNdEx]) - copy(dAtA[i:], m.ClientUrls[iNdEx]) - i = encodeVarintMembership(dAtA, i, uint64(len(m.ClientUrls[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintMembership(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Member) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Member) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MemberAttributes != nil { - { - size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMembership(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.RaftAttributes != nil { - { - size, err := m.RaftAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMembership(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ID != 0 { - i = encodeVarintMembership(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ClusterVersionSetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterVersionSetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterVersionSetRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ver) > 0 { - i -= len(m.Ver) - copy(dAtA[i:], m.Ver) - i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ClusterMemberAttrSetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterMemberAttrSetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterMemberAttrSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MemberAttributes != nil { - { - size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMembership(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Member_ID != 0 { - i = encodeVarintMembership(dAtA, i, uint64(m.Member_ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DowngradeInfoSetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DowngradeInfoSetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DowngradeInfoSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ver) > 0 { - i -= 
len(m.Ver) - copy(dAtA[i:], m.Ver) - i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver))) - i-- - dAtA[i] = 0x12 - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintMembership(dAtA []byte, offset int, v uint64) int { - offset -= sovMembership(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RaftAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.PeerUrls) > 0 { - for _, s := range m.PeerUrls { - l = len(s) - n += 1 + l + sovMembership(uint64(l)) - } - } - if m.IsLearner { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Attributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovMembership(uint64(l)) - } - if len(m.ClientUrls) > 0 { - for _, s := range m.ClientUrls { - l = len(s) - n += 1 + l + sovMembership(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Member) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovMembership(uint64(m.ID)) - } - if m.RaftAttributes != nil { - l = m.RaftAttributes.Size() - n += 1 + l + sovMembership(uint64(l)) - } - if m.MemberAttributes != nil { - l = m.MemberAttributes.Size() - n += 1 + l + sovMembership(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ClusterVersionSetRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ver) - if l > 0 { - n += 1 + l + sovMembership(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ClusterMemberAttrSetRequest) Size() (n int) { - if m == nil { - return 0 - } 
- var l int - _ = l - if m.Member_ID != 0 { - n += 1 + sovMembership(uint64(m.Member_ID)) - } - if m.MemberAttributes != nil { - l = m.MemberAttributes.Size() - n += 1 + l + sovMembership(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DowngradeInfoSetRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Enabled { - n += 2 - } - l = len(m.Ver) - if l > 0 { - n += 1 + l + sovMembership(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMembership(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMembership(x uint64) (n int) { - return sovMembership(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RaftAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RaftAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RaftAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerUrls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerUrls = append(m.PeerUrls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsLearner = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipMembership(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMembership - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Attributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Attributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Attributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientUrls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientUrls = append(m.ClientUrls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMembership(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMembership - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Member) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Member: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RaftAttributes == nil { - m.RaftAttributes = &RaftAttributes{} - } - if err := m.RaftAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MemberAttributes == nil { - m.MemberAttributes = &Attributes{} - } - if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMembership(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMembership - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterVersionSetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterVersionSetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterVersionSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMembership(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMembership - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterMemberAttrSetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterMemberAttrSetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterMemberAttrSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Member_ID", wireType) - } - m.Member_ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Member_ID |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MemberAttributes == nil { - m.MemberAttributes = &Attributes{} - } - if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMembership(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMembership - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DowngradeInfoSetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DowngradeInfoSetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DowngradeInfoSetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMembership - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMembership - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMembership - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMembership(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
(skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMembership - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMembership(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMembership - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMembership - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMembership - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMembership - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMembership - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMembership - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMembership = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMembership = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMembership = fmt.Errorf("proto: unexpected end of group") -) diff --git a/api/mvccpb/kv.pb.go 
b/api/mvccpb/kv.pb.go deleted file mode 100644 index fc258d6c206..00000000000 --- a/api/mvccpb/kv.pb.go +++ /dev/null @@ -1,798 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: kv.proto - -package mvccpb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Event_EventType int32 - -const ( - PUT Event_EventType = 0 - DELETE Event_EventType = 1 -) - -var Event_EventType_name = map[int32]string{ - 0: "PUT", - 1: "DELETE", -} - -var Event_EventType_value = map[string]int32{ - "PUT": 0, - "DELETE": 1, -} - -func (x Event_EventType) String() string { - return proto.EnumName(Event_EventType_name, int32(x)) -} - -func (Event_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{1, 0} -} - -type KeyValue struct { - // key is the key in bytes. An empty key is not allowed. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // create_revision is the revision of last creation on this key. - CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` - // mod_revision is the revision of last modification on this key. - ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` - // version is the version of the key. 
A deletion resets - // the version to zero and any modification of the key - // increases its version. - Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - // value is the value held by the key, in bytes. - Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` - // lease is the ID of the lease that attached to key. - // When the attached lease expires, the key will be deleted. - // If lease is 0, then no lease is attached to the key. - Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{0} -} -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return m.Size() -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyValue proto.InternalMessageInfo - -type Event struct { - // type is the kind of event. If type is a PUT, it indicates - // new data has been stored to the key. If type is a DELETE, - // it indicates the key was deleted. - Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` - // kv holds the KeyValue for the event. 
- // A PUT event contains current kv pair. - // A PUT event with kv.Version=1 indicates the creation of a key. - // A DELETE/EXPIRE event contains the deleted key with - // its modification revision set to the revision of deletion. - Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` - // prev_kv holds the key-value pair before the event happens. - PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{1} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Event proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) - proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") - proto.RegisterType((*Event)(nil), "mvccpb.Event") -} - -func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } - -var fileDescriptor_2216fe83c9c12408 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, - 0x14, 
0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, - 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, - 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, - 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, - 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, - 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, - 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, - 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, - 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, - 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, - 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, - 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, - 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, - 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, - 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, - 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, - 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, - 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, -} - -func (m *KeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValue) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Lease != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x30 - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x2a - } - if m.Version != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x20 - } - if m.ModRevision != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) - i-- - dAtA[i] = 0x18 - } - if m.CreateRevision != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) - i-- - dAtA[i] = 0x10 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.PrevKv != nil { - { - size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Kv != nil { - { - size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- 
- dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - offset -= sovKv(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *KeyValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.CreateRevision != 0 { - n += 1 + sovKv(uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - n += 1 + sovKv(uint64(m.ModRevision)) - } - if m.Version != 0 { - n += 1 + sovKv(uint64(m.Version)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovKv(uint64(m.Lease)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovKv(uint64(m.Type)) - } - if m.Kv != nil { - l = m.Kv.Size() - n += 1 + l + sovKv(uint64(l)) - } - if m.PrevKv != nil { - l = m.PrevKv.Size() - n += 1 + l + sovKv(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovKv(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozKv(x uint64) (n int) { - return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *KeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValue: wiretype end group 
for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) - } - m.CreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreateRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) - } - m.ModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Event_EventType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kv == nil { - m.Kv = &KeyValue{} - } - if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrevKv == nil { - m.PrevKv = &KeyValue{} - } - if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipKv(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthKv - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupKv 
- } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthKv - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group") -) diff --git a/api/v3rpc/rpctypes/error.go b/api/v3rpc/rpctypes/error.go deleted file mode 100644 index 50a859282b3..00000000000 --- a/api/v3rpc/rpctypes/error.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpctypes - -import ( - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// server-side error -var ( - ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() - ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() - ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() - ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() - ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() - ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() - ErrGRPCInvalidClientAPIVersion = status.New(codes.InvalidArgument, "etcdserver: invalid client api version").Err() - ErrGRPCInvalidSortOption = status.New(codes.InvalidArgument, "etcdserver: invalid sort option").Err() - ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() - ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() - ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() - - ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() - ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() - ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() - - ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err() - - ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() - ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() - ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: 
re-configuration failed due to not enough started members").Err() - ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() - ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() - ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() - ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() - ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() - - ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() - ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() - - ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err() - ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err() - ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err() - ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err() - ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() - ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() - ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() - ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err() - ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() - ErrGRPCPermissionNotGiven = status.New(codes.InvalidArgument, "etcdserver: permission not given").Err() - ErrGRPCPermissionDenied = 
status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() - ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() - ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() - ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() - ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() - ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() - ErrGRPCAuthOldRevision = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err() - - ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() - ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() - ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err() - ErrGRPCNotCapable = status.New(codes.FailedPrecondition, "etcdserver: not capable").Err() - ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() - ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() - ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() - ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() - ErrGRPCTimeoutWaitAppliedIndex = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err() - ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() - ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() - ErrGRPCNotSupportedForLearner = status.New(codes.FailedPrecondition, "etcdserver: rpc not supported for 
learner").Err() - ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err() - - ErrGRPCWrongDowngradeVersionFormat = status.New(codes.InvalidArgument, "etcdserver: wrong downgrade target version format").Err() - ErrGRPCInvalidDowngradeTargetVersion = status.New(codes.InvalidArgument, "etcdserver: invalid downgrade target version").Err() - ErrGRPCClusterVersionUnavailable = status.New(codes.FailedPrecondition, "etcdserver: cluster version not found during downgrade").Err() - ErrGRPCDowngradeInProcess = status.New(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress").Err() - ErrGRPCNoInflightDowngrade = status.New(codes.FailedPrecondition, "etcdserver: no inflight downgrade job").Err() - - ErrGRPCCanceled = status.New(codes.Canceled, "etcdserver: request canceled").Err() - ErrGRPCDeadlineExceeded = status.New(codes.DeadlineExceeded, "etcdserver: context deadline exceeded").Err() - - errStringToError = map[string]error{ - ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, - ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, - ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, - ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, - - ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, - ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, - ErrorDesc(ErrGRPCInvalidSortOption): ErrGRPCInvalidSortOption, - ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, - ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, - ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, - - ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, - ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, - ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge, - - ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, - ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, - ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, - ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, - ErrorDesc(ErrGRPCMemberNotFound): 
ErrGRPCMemberNotFound, - ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, - ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, - ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, - - ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, - ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, - - ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, - ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, - ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, - ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, - ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, - ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, - ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, - ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty, - ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, - ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, - ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, - ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, - ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, - ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, - ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, - ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision, - - ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, - ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, - ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged, - ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, - ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, - ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, - ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, - ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, - ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, - ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, - ErrorDesc(ErrGRPCNotSupportedForLearner): ErrGRPCNotSupportedForLearner, - ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, - - 
ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable, - ErrorDesc(ErrGRPCWrongDowngradeVersionFormat): ErrGRPCWrongDowngradeVersionFormat, - ErrorDesc(ErrGRPCInvalidDowngradeTargetVersion): ErrGRPCInvalidDowngradeTargetVersion, - ErrorDesc(ErrGRPCDowngradeInProcess): ErrGRPCDowngradeInProcess, - ErrorDesc(ErrGRPCNoInflightDowngrade): ErrGRPCNoInflightDowngrade, - } -) - -// client-side error -var ( - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrKeyNotFound = Error(ErrGRPCKeyNotFound) - ErrValueProvided = Error(ErrGRPCValueProvided) - ErrLeaseProvided = Error(ErrGRPCLeaseProvided) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrInvalidSortOption = Error(ErrGRPCInvalidSortOption) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) - - ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) - ErrLeaseExist = Error(ErrGRPCLeaseExist) - ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge) - - ErrMemberExist = Error(ErrGRPCMemberExist) - ErrPeerURLExist = Error(ErrGRPCPeerURLExist) - ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) - ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) - ErrMemberNotFound = Error(ErrGRPCMemberNotFound) - ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner) - ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady) - ErrTooManyLearners = Error(ErrGRPCTooManyLearners) - - ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) - ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) - - ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) - ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) - ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) - ErrUserEmpty = Error(ErrGRPCUserEmpty) - ErrUserNotFound = Error(ErrGRPCUserNotFound) - ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) - ErrRoleNotFound = Error(ErrGRPCRoleNotFound) - ErrRoleEmpty = Error(ErrGRPCRoleEmpty) - ErrAuthFailed = 
Error(ErrGRPCAuthFailed) - ErrPermissionDenied = Error(ErrGRPCPermissionDenied) - ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) - ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) - ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) - ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) - ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision) - ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) - - ErrNoLeader = Error(ErrGRPCNoLeader) - ErrNotLeader = Error(ErrGRPCNotLeader) - ErrLeaderChanged = Error(ErrGRPCLeaderChanged) - ErrNotCapable = Error(ErrGRPCNotCapable) - ErrStopped = Error(ErrGRPCStopped) - ErrTimeout = Error(ErrGRPCTimeout) - ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) - ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) - ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex) - ErrUnhealthy = Error(ErrGRPCUnhealthy) - ErrCorrupt = Error(ErrGRPCCorrupt) - ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) - - ErrClusterVersionUnavailable = Error(ErrGRPCClusterVersionUnavailable) - ErrWrongDowngradeVersionFormat = Error(ErrGRPCWrongDowngradeVersionFormat) - ErrInvalidDowngradeTargetVersion = Error(ErrGRPCInvalidDowngradeTargetVersion) - ErrDowngradeInProcess = Error(ErrGRPCDowngradeInProcess) - ErrNoInflightDowngrade = Error(ErrGRPCNoInflightDowngrade) -) - -// EtcdError defines gRPC server errors. -// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) -type EtcdError struct { - code codes.Code - desc string -} - -// Code returns grpc/codes.Code. -// TODO: define clientv3/codes.Code. 
-func (e EtcdError) Code() codes.Code { - return e.code -} - -func (e EtcdError) Error() string { - return e.desc -} - -func Error(err error) error { - if err == nil { - return nil - } - verr, ok := errStringToError[ErrorDesc(err)] - if !ok { // not gRPC error - return err - } - ev, ok := status.FromError(verr) - var desc string - if ok { - desc = ev.Message() - } else { - desc = verr.Error() - } - return EtcdError{code: ev.Code(), desc: desc} -} - -func ErrorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/api/v3rpc/rpctypes/error_test.go b/api/v3rpc/rpctypes/error_test.go deleted file mode 100644 index 525d9698311..00000000000 --- a/api/v3rpc/rpctypes/error_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpctypes - -import ( - "testing" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func TestConvert(t *testing.T) { - e1 := status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() - e2 := ErrGRPCEmptyKey - e3 := ErrEmptyKey - - if e1.Error() != e2.Error() { - t.Fatalf("expected %q == %q", e1.Error(), e2.Error()) - } - if ev1, ok := status.FromError(e1); ok && ev1.Code() != e3.(EtcdError).Code() { - t.Fatalf("expected them to be equal, got %v / %v", ev1.Code(), e3.(EtcdError).Code()) - } - - if e1.Error() == e3.Error() { - t.Fatalf("expected %q != %q", e1.Error(), e3.Error()) - } - if ev2, ok := status.FromError(e2); ok && ev2.Code() != e3.(EtcdError).Code() { - t.Fatalf("expected them to be equal, got %v / %v", ev2.Code(), e3.(EtcdError).Code()) - } -} diff --git a/api/version/version.go b/api/version/version.go deleted file mode 100644 index bd39791a929..00000000000 --- a/api/version/version.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package version implements etcd version parsing and contains latest version -// information. -package version - -import ( - "fmt" - "strings" - - "github.com/coreos/go-semver/semver" -) - -var ( - // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
- MinClusterVersion = "3.0.0" - Version = "3.6.0-alpha.0" - APIVersion = "unknown" - - // Git SHA Value will be set during build - GitSHA = "Not provided (use ./build instead of go build)" -) - -// Get all constant versions defined in a centralized place. -var ( - V3_0 = semver.Version{Major: 3, Minor: 0} - V3_1 = semver.Version{Major: 3, Minor: 1} - V3_2 = semver.Version{Major: 3, Minor: 2} - V3_3 = semver.Version{Major: 3, Minor: 3} - V3_4 = semver.Version{Major: 3, Minor: 4} - V3_5 = semver.Version{Major: 3, Minor: 5} - V3_6 = semver.Version{Major: 3, Minor: 6} - V3_7 = semver.Version{Major: 3, Minor: 7} - V4_0 = semver.Version{Major: 4, Minor: 0} -) - -func init() { - ver, err := semver.NewVersion(Version) - if err == nil { - APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) - } -} - -type Versions struct { - Server string `json:"etcdserver"` - Cluster string `json:"etcdcluster"` - Storage string `json:"storage"` - // TODO: raft state machine version -} - -// Cluster only keeps the major.minor. -func Cluster(v string) string { - vs := strings.Split(v, ".") - if len(vs) <= 2 { - return v - } - return fmt.Sprintf("%s.%s", vs[0], vs[1]) -} - -func Compare(ver1, ver2 semver.Version) int { - return ver1.Compare(ver2) -} - -func LessThan(ver1, ver2 semver.Version) bool { - return ver1.LessThan(ver2) -} - -func Equal(ver1, ver2 semver.Version) bool { - return ver1.Equal(ver2) -} diff --git a/api/version/version_test.go b/api/version/version_test.go deleted file mode 100644 index 532e7525a21..00000000000 --- a/api/version/version_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "testing" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" -) - -func TestVersionCompare(t *testing.T) { - cases := []struct { - name string - ver1 semver.Version - ver2 semver.Version - expectedCompareResult int - expectedLessThanResult bool - expectedEqualResult bool - }{ - { - name: "ver1 should be great than ver2", - ver1: V3_5, - ver2: V3_4, - expectedCompareResult: 1, - expectedLessThanResult: false, - expectedEqualResult: false, - }, - { - name: "ver1(4.0) should be great than ver2", - ver1: V4_0, - ver2: V3_7, - expectedCompareResult: 1, - expectedLessThanResult: false, - expectedEqualResult: false, - }, - { - name: "ver1 should be less than ver2", - ver1: V3_5, - ver2: V3_6, - expectedCompareResult: -1, - expectedLessThanResult: true, - expectedEqualResult: false, - }, - { - name: "ver1 should be less than ver2 (4.0)", - ver1: V3_5, - ver2: V4_0, - expectedCompareResult: -1, - expectedLessThanResult: true, - expectedEqualResult: false, - }, - { - name: "ver1 should be equal to ver2", - ver1: V3_5, - ver2: V3_5, - expectedCompareResult: 0, - expectedLessThanResult: false, - expectedEqualResult: true, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - compareResult := Compare(tc.ver1, tc.ver2) - lessThanResult := LessThan(tc.ver1, tc.ver2) - equalResult := Equal(tc.ver1, tc.ver2) - - assert.Equal(t, tc.expectedCompareResult, compareResult) - assert.Equal(t, tc.expectedLessThanResult, lessThanResult) - assert.Equal(t, tc.expectedEqualResult, 
equalResult) - }) - } -} diff --git a/api/versionpb/version.pb.go b/api/versionpb/version.pb.go deleted file mode 100644 index 8e5ce7ec2a6..00000000000 --- a/api/versionpb/version.pb.go +++ /dev/null @@ -1,90 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: version.proto - -package versionpb - -import ( - fmt "fmt" - math "math" - - _ "github.com/gogo/protobuf/gogoproto" - protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - proto "github.com/golang/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -var E_EtcdVersionMsg = &proto.ExtensionDesc{ - ExtendedType: (*protobuf.MessageOptions)(nil), - ExtensionType: (*string)(nil), - Field: 50000, - Name: "versionpb.etcd_version_msg", - Tag: "bytes,50000,opt,name=etcd_version_msg", - Filename: "version.proto", -} - -var E_EtcdVersionField = &proto.ExtensionDesc{ - ExtendedType: (*protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 50001, - Name: "versionpb.etcd_version_field", - Tag: "bytes,50001,opt,name=etcd_version_field", - Filename: "version.proto", -} - -var E_EtcdVersionEnum = &proto.ExtensionDesc{ - ExtendedType: (*protobuf.EnumOptions)(nil), - ExtensionType: (*string)(nil), - Field: 50002, - Name: "versionpb.etcd_version_enum", - Tag: "bytes,50002,opt,name=etcd_version_enum", - Filename: "version.proto", -} - -var E_EtcdVersionEnumValue = &proto.ExtensionDesc{ - ExtendedType: (*protobuf.EnumValueOptions)(nil), - ExtensionType: (*string)(nil), - Field: 50003, - Name: "versionpb.etcd_version_enum_value", - Tag: 
"bytes,50003,opt,name=etcd_version_enum_value", - Filename: "version.proto", -} - -func init() { - proto.RegisterExtension(E_EtcdVersionMsg) - proto.RegisterExtension(E_EtcdVersionField) - proto.RegisterExtension(E_EtcdVersionEnum) - proto.RegisterExtension(E_EtcdVersionEnumValue) -} - -func init() { proto.RegisterFile("version.proto", fileDescriptor_7d2c07d79758f814) } - -var fileDescriptor_7d2c07d79758f814 = []byte{ - // 261 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4b, 0x2d, 0x2a, - 0xce, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x72, 0x0b, 0x92, 0xa4, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xa2, 0xfa, 0x20, 0x16, 0x44, 0x81, 0x94, 0x42, 0x7a, 0x7e, - 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x92, 0x5a, 0x9c, 0x5c, 0x94, - 0x59, 0x50, 0x92, 0x5f, 0x04, 0x51, 0x61, 0xe5, 0xc7, 0x25, 0x90, 0x5a, 0x92, 0x9c, 0x12, 0x0f, - 0x35, 0x29, 0x3e, 0xb7, 0x38, 0x5d, 0x48, 0x5e, 0x0f, 0xa2, 0x4d, 0x0f, 0xa6, 0x4d, 0xcf, 0x37, - 0xb5, 0xb8, 0x38, 0x31, 0x3d, 0xd5, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0xe2, 0x42, 0x1b, - 0xb3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x1f, 0x48, 0x6b, 0x18, 0x44, 0xa7, 0x6f, 0x71, 0x7a, 0x07, - 0x23, 0xa3, 0x55, 0x00, 0x97, 0x10, 0x8a, 0x79, 0x69, 0x99, 0xa9, 0x39, 0x29, 0x42, 0xb2, 0x18, - 0x26, 0xba, 0x81, 0xc4, 0x61, 0xe6, 0x5d, 0x84, 0x9a, 0x27, 0x80, 0x64, 0x1e, 0x58, 0x01, 0xc8, - 0x44, 0x5f, 0x2e, 0x41, 0x14, 0x13, 0x53, 0xf3, 0x4a, 0x73, 0x85, 0x64, 0x30, 0x0c, 0x74, 0xcd, - 0x2b, 0xcd, 0x85, 0x99, 0x77, 0x09, 0x6a, 0x1e, 0x3f, 0x92, 0x79, 0x20, 0x79, 0x90, 0x71, 0xb1, - 0x5c, 0xe2, 0x18, 0xc6, 0xc5, 0x97, 0x25, 0xe6, 0x94, 0xa6, 0x0a, 0x29, 0x62, 0x35, 0x34, 0x0c, - 0x24, 0x07, 0x33, 0xf9, 0x32, 0xd4, 0x64, 0x11, 0x34, 0x93, 0xc1, 0x8a, 0x3a, 0x18, 0x19, 0x9d, - 0x04, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, - 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0xa6, 0x19, 0x03, 0x02, 
0x00, 0x00, 0xff, 0xff, 0x77, 0x44, 0xe2, - 0xa4, 0xbc, 0x01, 0x00, 0x00, -} diff --git a/api/versionpb/version.proto b/api/versionpb/version.proto deleted file mode 100644 index 27cfb5d40c4..00000000000 --- a/api/versionpb/version.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; -package versionpb; - -import "gogoproto/gogo.proto"; -import "google/protobuf/descriptor.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// Indicates etcd version that introduced the message, used to determine minimal etcd version required to interpret wal that includes this message. -extend google.protobuf.MessageOptions { - optional string etcd_version_msg = 50000; -} - -// Indicates etcd version that introduced the field, used to determine minimal etcd version required to interpret wal that sets this field. -extend google.protobuf.FieldOptions { - optional string etcd_version_field = 50001; -} - -// Indicates etcd version that introduced the enum, used to determine minimal etcd version required to interpret wal that uses this enum. -extend google.protobuf.EnumOptions { - optional string etcd_version_enum = 50002; -} - -// Indicates etcd version that introduced the enum value, used to determine minimal etcd version required to interpret wal that sets this enum value. 
-extend google.protobuf.EnumValueOptions { - optional string etcd_version_enum_value = 50003; -} diff --git a/bill-of-materials.json b/bill-of-materials.json deleted file mode 100644 index 9638e8e54aa..00000000000 --- a/bill-of-materials.json +++ /dev/null @@ -1,762 +0,0 @@ -[ - { - "project": "github.com/VividCortex/ewma", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/anishathalye/porcupine", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/benbjohnson/clock", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/beorn7/perks/quantile", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - ] - }, - { - "project": "github.com/bgentry/speakeasy", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9441624365482234 - } - ] - }, - { - "project": "github.com/cenkalti/backoff/v4", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/cespare/xxhash/v2", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/cheggaaa/pb/v3", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9916666666666667 - } - ] - }, - { - "project": "github.com/coreos/go-semver/semver", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/coreos/go-systemd/v22", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 0.9966703662597114 - } - ] - }, - { - "project": "github.com/creack/pty", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - ] - }, - { - "project": "github.com/davecgh/go-spew/spew", - "licenses": [ - { - "type": "ISC License", - "confidence": 0.9850746268656716 - } - ] - }, - { - "project": "github.com/dustin/go-humanize", - 
"licenses": [ - { - "type": "MIT License", - "confidence": 0.96875 - } - ] - }, - { - "project": "github.com/fatih/color", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/go-logr/logr", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/go-logr/stdr", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/gogo/protobuf", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9163346613545816 - } - ] - }, - { - "project": "github.com/golang-jwt/jwt/v4", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - ] - }, - { - "project": "github.com/golang/groupcache/lru", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 0.9966703662597114 - } - ] - }, - { - "project": "github.com/golang/protobuf", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "github.com/google/btree", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/google/go-cmp/cmp", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "github.com/gorilla/websocket", - "licenses": [ - { - "type": "BSD 2-clause \"Simplified\" License", - "confidence": 0.9852216748768473 - } - ] - }, - { - "project": "github.com/grpc-ecosystem/go-grpc-middleware", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/grpc-ecosystem/go-grpc-prometheus", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/grpc-ecosystem/grpc-gateway", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - 
"confidence": 0.979253112033195 - } - ] - }, - { - "project": "github.com/grpc-ecosystem/grpc-gateway/v2", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.979253112033195 - } - ] - }, - { - "project": "github.com/inconshreveable/mousetrap", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/jonboulle/clockwork", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/mattn/go-colorable", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/mattn/go-isatty", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9587628865979382 - } - ] - }, - { - "project": "github.com/mattn/go-runewidth", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/olekukonko/tablewriter", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - ] - }, - { - "project": "github.com/pmezard/go-difflib/difflib", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9830508474576272 - } - ] - }, - { - "project": "github.com/prometheus/client_golang/prometheus", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/prometheus/client_model/go", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/prometheus/common", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/prometheus/procfs", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/rivo/uniseg", - "licenses": [ 
- { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/sirupsen/logrus", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/soheilhy/cmux", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/spf13/cobra", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 0.9573241061130334 - } - ] - }, - { - "project": "github.com/spf13/pflag", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "github.com/stretchr/testify/assert", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "github.com/tmc/grpc-websocket-proxy/wsproxy", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - ] - }, - { - "project": "github.com/xiang90/probing", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/bbolt", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/api/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/client/pkg/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/client/v2", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/client/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/etcdctl/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/etcdutl/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/pkg/v3", - "licenses": [ - { - "type": 
"Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/server/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/tests/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/etcd/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.etcd.io/raft/v3", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel/exporters/otlp/internal/retry", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel/metric", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel/sdk", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel/trace", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/proto/otlp", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.uber.org/atomic", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - 
] - }, - { - "project": "go.uber.org/multierr", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - ] - }, - { - "project": "go.uber.org/zap", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.9891304347826086 - } - ] - }, - { - "project": "golang.org/x/crypto", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "golang.org/x/net", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "golang.org/x/sys/unix", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "golang.org/x/text", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "golang.org/x/time/rate", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "google.golang.org/genproto", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "google.golang.org/grpc", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "google.golang.org/protobuf", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 - } - ] - }, - { - "project": "gopkg.in/natefinch/lumberjack.v2", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, - { - "project": "gopkg.in/yaml.v2", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - }, - { - "type": "MIT License", - "confidence": 0.8975609756097561 - } - ] - }, - { - "project": "gopkg.in/yaml.v3", - "licenses": [ - { - "type": "MIT License", - "confidence": 0.7469879518072289 - } - ] - }, - { - "project": 
"sigs.k8s.io/json", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 0.9617021276595744 - } - ] - }, - { - "project": "sigs.k8s.io/yaml", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 1 - } - ] - } -] diff --git a/bill-of-materials.override.json b/bill-of-materials.override.json deleted file mode 100644 index 15afc56409d..00000000000 --- a/bill-of-materials.override.json +++ /dev/null @@ -1,18 +0,0 @@ -[ - { - "project": "sigs.k8s.io/yaml", - "licenses": [ - { - "type": "BSD 3-clause \"New\" or \"Revised\" License" - } - ] - }, - { - "project": "github.com/inconshreveable/mousetrap", - "licenses": [ - { - "type": "Apache License 2.0" - } - ] - } -] diff --git a/cert/ca.crt b/cert/ca.crt new file mode 100644 index 00000000000..d18562e5467 --- /dev/null +++ b/cert/ca.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC4zCCAcugAwIBAgIBADANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDEwdldGNk +LWNhMCAXDTIyMDMxMTAyMzY0M1oYDzIxMjIwMjE1MDIzNjQzWjASMRAwDgYDVQQD +EwdldGNkLWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtUMEfdwu +s4r9zw8LZDQWiAWzW/v8TAQ2GsO7s8MRtmcUbNL4aC26cwwlJhCkvY99UxyAjoUR +izWW0g3mP8AiLZgZP+SzsrYClrQI86OnmiNK8sHHU4mfasIYW1WXU3YRslyBomdg +a9Ytt+d3MoJNLi0Xg5pd3d4kyEWjhwCIX3QE5xGkME6MiEu6hrz7i25YaK2NsK9Y +oTwGm3TXhWc9Y7WJK0Y7+W6f5oodZPXCYzQnEYTIxZt8TtqWUgG7ybn8v0gBKPvm +yMHiSFkmsgfu8Gm3E1e4/dARAxXkoOoIKaDX1uLn6VXQ73zyeHSWkuyntzeHwF8K +CnPq75gf3NibUQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQU5/jC6+FWQA7i+pdzUI0ES5nQ2uUwDQYJKoZIhvcNAQEL +BQADggEBAAdN0ptUClmsRx2/MQsMRrQngJBzKWA0HIc72JiBtKblUffbYIso3u6E +pJERmIsnhKhmzLB9WTZ3Nc18k/+AoFzDYF/7nSMFwEII6ei27WUH4zEeg8zwTv6h +aSHUrVHZW5hoovT2JqI6wxsuLuUVHZqbRdA+55A5xGmpl8ASdvSklTL4iV+eS/Ly +nAZeIYI3WBLJF56SigyIncw+dbbQtqLk+F4sAGsW9PwoeAHgT4c6WaL1ODLKtZsW +YAFs4FENADaLobHXT3PPgCBOU30mBU2JiwkYiu50GXd23ukJpvyRNCJe57q377qw +nZrEOoRxco8iql+SfevQrxSEXbywzQU= +-----END CERTIFICATE----- diff --git a/cert/server.crt b/cert/server.crt new file 
mode 100644 index 00000000000..f9e5efae4bb --- /dev/null +++ b/cert/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIILzYehk9swfcwDQYJKoZIhvcNAQELBQAwEjEQMA4GA1UE +AxMHZXRjZC1jYTAgFw0yMjAzMTEwMjM2NDNaGA8yMTIyMDIxNTAyMzY0NFowFzEV +MBMGA1UEAxMMazhzLW1hc3RlcjAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAtVP1gFmU8Ojcn+dL+1YVxPMshM4MS14LcJCnRx2BJye/1CFJrN6gjllW +ljebFxrhF4HebOlknHgVI9LDZngtLkNGE4QiVW2mRnQPahEZhZE1t6kAQt6KeOxb +exG7tj2/dUY+w8Skk6BfT4jD/RXu64deX09MkwjMyVCalLOZ5ESAy3W9Iw13qVhT +aIbDZ35SR7Rah44wu8ZAtL18qvGSKhAtelX21R4ywij2p9AsI/TK4Js3HrDTsu96 +UjUpSuBmp2veYu/ju6H7YF1qmsXPsJu6u+dMF5e2T8sQj2vT9FoOb7Ifde+nkRBo +wQ7mZWOaoJsn/89+RMmOvChRigSiFQIDAQABo4GVMIGSMA4GA1UdDwEB/wQEAwIF +oDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHwYDVR0jBBgwFoAU5/jC +6+FWQA7i+pdzUI0ES5nQ2uUwQAYDVR0RBDkwN4IJbG9jYWxob3N0ggxrOHMtbWFz +dGVyMDGHBH8AAAGHBAoKCnmHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL +BQADggEBAIZzw5TgvYWx5wMacdsT00wxa4bWu6OBpc+wEmNRAxl0za9krcE3Xnxl +pxSnoLLW3F2KF5shLnsZ3cQh1GlzVeuo3pWIgKcXEZ+98SJC3lh04AF9JTJqDJTC +tM8LG8aF3PD+Y/r/5xihGKLnbfkdbI2rBdirntQJ0+P1ZoUbdpSbq8g+5IYQ1Va1 +sZKy04pKDv4wQ7EtXknF8Cz3EwkYv6imO+mfW6bf/VDV6LuVp/IQhapSwx4tgYao +QXUvnc9qlw4enDLMdqx+fldUdSAUNa+ye3+yvqD2hbgjeXfkm1o1FpT42dj9TxI3 +7TyJ89HsaL+z2QagHcmWD4O5LbX4+ik= +-----END CERTIFICATE----- diff --git a/cert/server.key b/cert/server.key new file mode 100644 index 00000000000..b5519b68ba5 --- /dev/null +++ b/cert/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAtVP1gFmU8Ojcn+dL+1YVxPMshM4MS14LcJCnRx2BJye/1CFJ +rN6gjllWljebFxrhF4HebOlknHgVI9LDZngtLkNGE4QiVW2mRnQPahEZhZE1t6kA +Qt6KeOxbexG7tj2/dUY+w8Skk6BfT4jD/RXu64deX09MkwjMyVCalLOZ5ESAy3W9 +Iw13qVhTaIbDZ35SR7Rah44wu8ZAtL18qvGSKhAtelX21R4ywij2p9AsI/TK4Js3 +HrDTsu96UjUpSuBmp2veYu/ju6H7YF1qmsXPsJu6u+dMF5e2T8sQj2vT9FoOb7If +de+nkRBowQ7mZWOaoJsn/89+RMmOvChRigSiFQIDAQABAoIBAAawrFQp+fbRgKJd +tE33pSH5HuFfgNCfDaj/jUxwaD17l/ZJrCA2rpHR9gHg11YI5dkqL3yxN8cWHyGN 
+OyxirrgQP1uk+mdQflwHGDJ/owNskiDOmXXeJBnGDrIBu31D1faLuvEaJkBUIHAc +ya5iysfh3LeDg33BS1z6HlnyLnFfja9x+qUcrJUOFg5c/jbd6t3Khc/vqninpYJo +sCiwyXqtUbJqX+sIxvveFzyweUr0ywrfdiuihptW72it6bW4Q5uZcr9Gn5bMU9PK +cmldrhbAi8ixrTuQTrZjijojrRY6OEwI7jrpRcXZ7t3s2G0suw0PoVI46ZN3vz7R +vRaT5IECgYEAw2KfSdP0kM/dfxY3zvOBIwwCzrXXn+ua7iaCTXzTRv/G0V4qqW+7 +m2DB9IM33pqj1AWudS0vEVN7TONxagK/s3o7172KrWk7AKss/GeyFK25Kx5fYWIF +pRsftXoFHjvdiOmO6GGBaMqLb0P2lngCWoaBHlmr9uzAQY0WPlLek8UCgYEA7ZTq +joIzMMb8uSuo4q5qr16yOTChWRcXZqpfVSX4y/aJRGwzhmsHU8P2PjItX5hXURgQ +2wZlXB053kfgFHE1hapt0aIS9qXWtu/QRtWwDmSk4ulkkCmn2QT//RpIFrXOn4qq +fgf9hjnzpCuaf5FcJQVHT4pgG/LkzknbqpRsqhECgYEAjV04tJjnVTMgJgg0PsbN +w0a4bUkCBpHX2cEA/AF5d+AtwGPqaAcQbP4XtsqNzMCEEi4+KEeVy2pkRqA0+aed +fcTNsW1Q/eCqMPSoqsJ4BSAgXkMubW4XeXrjeVEcjOBxi9K4dAfAMsqBEfLRYdLY +mRjCKOxmUFTBUWw8EMGyiqUCgYEAq5QaeWTqV1W9+nTfeSYBgjlfeRH31IFqswhj +5PiRX6vionmKFI+DMSma0nwmbJ12oehBdAyAcy/gNPmviNPhlXDp8rWcAGjwUhmL +TzzP8vUYZ4+qwrpyr7Z+sWmjmlMer/XS/0YCAEgl/vBGmc5+v3W6dGU417ZpK9oH +PIAIoiECgYAkhCGjGwRs3jLMxFCDoDyoXWR4kkTWuXhDszaoJsN0SJ0LJ4xOzsv4 +Ko7riFgzoHZ3nKOPCLQY/Rpv5J0wm1jErerKGtIgWGoG8iE/iVs6N9yv1iNHewDy +TSfmF7oeGXDSd4XdmSHOmM1jd+xfsvrYQC9LugEYc6M1mVB+MMyBrQ== +-----END RSA PRIVATE KEY----- diff --git a/client/pkg/LICENSE b/client/pkg/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/client/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/client/pkg/fileutil/filereader.go b/client/pkg/fileutil/filereader.go deleted file mode 100644 index 55248888c60..00000000000 --- a/client/pkg/fileutil/filereader.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "bufio" - "io" - "io/fs" - "os" -) - -// FileReader is a wrapper of io.Reader. It also provides file info. -type FileReader interface { - io.Reader - FileInfo() (fs.FileInfo, error) -} - -type fileReader struct { - *os.File -} - -func NewFileReader(f *os.File) FileReader { - return &fileReader{f} -} - -func (fr *fileReader) FileInfo() (fs.FileInfo, error) { - return fr.Stat() -} - -// FileBufReader is a wrapper of bufio.Reader. It also provides file info. 
-type FileBufReader struct { - *bufio.Reader - fi fs.FileInfo -} - -func NewFileBufReader(fr FileReader) *FileBufReader { - bufReader := bufio.NewReader(fr) - fi, err := fr.FileInfo() - if err != nil { - // This should never happen. - panic(err) - } - return &FileBufReader{bufReader, fi} -} - -func (fbr *FileBufReader) FileInfo() fs.FileInfo { - return fbr.fi -} diff --git a/client/pkg/fileutil/filereader_test.go b/client/pkg/fileutil/filereader_test.go deleted file mode 100644 index 2f863cdcef5..00000000000 --- a/client/pkg/fileutil/filereader_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package fileutil - -import ( - "os" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFileBufReader(t *testing.T) { - f, err := os.CreateTemp(t.TempDir(), "wal") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - fi, err := f.Stat() - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - fbr := NewFileBufReader(NewFileReader(f)) - - if !strings.HasPrefix(fbr.FileInfo().Name(), "wal") { - t.Errorf("Unexpected file name: %s", fbr.FileInfo().Name()) - } - assert.Equal(t, fi.Size(), fbr.FileInfo().Size()) - assert.Equal(t, fi.IsDir(), fbr.FileInfo().IsDir()) - assert.Equal(t, fi.Mode(), fbr.FileInfo().Mode()) - assert.Equal(t, fi.ModTime(), fbr.FileInfo().ModTime()) -} diff --git a/client/pkg/fileutil/fileutil.go b/client/pkg/fileutil/fileutil.go deleted file mode 100644 index 3bedee7d2b3..00000000000 --- a/client/pkg/fileutil/fileutil.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "fmt" - "io" - "io/fs" - "os" - "path/filepath" - - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/verify" -) - -const ( - // PrivateFileMode grants owner to read/write a file. - PrivateFileMode = 0600 -) - -// IsDirWriteable checks if dir is writable by writing and removing a file -// to dir. It returns nil if dir is writable. 
-func IsDirWriteable(dir string) error { - f, err := filepath.Abs(filepath.Join(dir, ".touch")) - if err != nil { - return err - } - if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil { - return err - } - return os.Remove(f) -} - -// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory -// does not exists. TouchDirAll also ensures the given directory is writable. -func TouchDirAll(lg *zap.Logger, dir string) error { - verify.Assert(lg != nil, "nil log isn't allowed") - // If path is already a directory, MkdirAll does nothing and returns nil, so, - // first check if dir exists with an expected permission mode. - if Exist(dir) { - err := CheckDirPermission(dir, PrivateDirMode) - if err != nil { - lg.Warn("check file permission", zap.Error(err)) - } - } else { - err := os.MkdirAll(dir, PrivateDirMode) - if err != nil { - // if mkdirAll("a/text") and "text" is not - // a directory, this will return syscall.ENOTDIR - return err - } - } - - return IsDirWriteable(dir) -} - -// CreateDirAll is similar to TouchDirAll but returns error -// if the deepest directory was not empty. -func CreateDirAll(lg *zap.Logger, dir string) error { - err := TouchDirAll(lg, dir) - if err == nil { - var ns []string - ns, err = ReadDir(dir) - if err != nil { - return err - } - if len(ns) != 0 { - err = fmt.Errorf("expected %q to be empty, got %q", dir, ns) - } - } - return err -} - -// Exist returns true if a file or directory exists. -func Exist(name string) bool { - _, err := os.Stat(name) - return err == nil -} - -// DirEmpty returns true if a directory empty and can access. -func DirEmpty(name string) bool { - ns, err := ReadDir(name) - return len(ns) == 0 && err == nil -} - -// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily -// shorten the length of the file. 
-func ZeroToEnd(f *os.File) error { - // TODO: support FALLOC_FL_ZERO_RANGE - off, err := f.Seek(0, io.SeekCurrent) - if err != nil { - return err - } - lenf, lerr := f.Seek(0, io.SeekEnd) - if lerr != nil { - return lerr - } - if err = f.Truncate(off); err != nil { - return err - } - // make sure blocks remain allocated - if err = Preallocate(f, lenf, true); err != nil { - return err - } - _, err = f.Seek(off, io.SeekStart) - return err -} - -// CheckDirPermission checks permission on an existing dir. -// Returns error if dir is empty or exist with a different permission than specified. -func CheckDirPermission(dir string, perm os.FileMode) error { - if !Exist(dir) { - return fmt.Errorf("directory %q empty, cannot check permission", dir) - } - //check the existing permission on the directory - dirInfo, err := os.Stat(dir) - if err != nil { - return err - } - dirMode := dirInfo.Mode().Perm() - if dirMode != perm { - err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode)) - return err - } - return nil -} - -// RemoveMatchFile deletes file if matchFunc is true on an existing dir -// Returns error if the dir does not exist or remove file fail -func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) bool) error { - if lg == nil { - lg = zap.NewNop() - } - if !Exist(dir) { - return fmt.Errorf("directory %s does not exist", dir) - } - fileNames, err := ReadDir(dir) - if err != nil { - return err - } - var removeFailedFiles []string - for _, fileName := range fileNames { - if matchFunc(fileName) { - file := filepath.Join(dir, fileName) - if err = os.Remove(file); err != nil { - removeFailedFiles = append(removeFailedFiles, fileName) - lg.Error("remove file failed", - zap.String("file", file), - zap.Error(err)) - continue - } - } - } - if len(removeFailedFiles) != 0 { - return fmt.Errorf("remove file(s) %v 
error", removeFailedFiles) - } - return nil -} - -// ListFiles lists files if matchFunc is true on an existing dir -// Returns error if the dir does not exist -func ListFiles(dir string, matchFunc func(fileName string) bool) ([]string, error) { - var files []string - err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { - if matchFunc(path) { - files = append(files, path) - } - return nil - }) - return files, err -} diff --git a/client/pkg/fileutil/fileutil_test.go b/client/pkg/fileutil/fileutil_test.go deleted file mode 100644 index f6b22e55de6..00000000000 --- a/client/pkg/fileutil/fileutil_test.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package fileutil - -import ( - "fmt" - "io" - "math/rand" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" -) - -func TestIsDirWriteable(t *testing.T) { - tmpdir := t.TempDir() - if err := IsDirWriteable(tmpdir); err != nil { - t.Fatalf("unexpected IsDirWriteable error: %v", err) - } - if err := os.Chmod(tmpdir, 0444); err != nil { - t.Fatalf("unexpected os.Chmod error: %v", err) - } - me, err := user.Current() - if err != nil { - // err can be non-nil when cross compiled - // http://stackoverflow.com/questions/20609415/cross-compiling-user-current-not-implemented-on-linux-amd64 - t.Skipf("failed to get current user: %v", err) - } - if me.Name == "root" || runtime.GOOS == "windows" { - // ideally we should check CAP_DAC_OVERRIDE. - // but it does not matter for tests. - // Chmod is not supported under windows. - t.Skipf("running as a superuser or in windows") - } - if err := IsDirWriteable(tmpdir); err == nil { - t.Fatalf("expected IsDirWriteable to error") - } -} - -func TestCreateDirAll(t *testing.T) { - tmpdir := t.TempDir() - - tmpdir2 := filepath.Join(tmpdir, "testdir") - if err := CreateDirAll(zaptest.NewLogger(t), tmpdir2); err != nil { - t.Fatal(err) - } - - if err := os.WriteFile(filepath.Join(tmpdir2, "text.txt"), []byte("test text"), PrivateFileMode); err != nil { - t.Fatal(err) - } - - if err := CreateDirAll(zaptest.NewLogger(t), tmpdir2); err == nil || !strings.Contains(err.Error(), "to be empty, got") { - t.Fatalf("unexpected error %v", err) - } -} - -func TestExist(t *testing.T) { - fdir := filepath.Join(os.TempDir(), fmt.Sprint(time.Now().UnixNano()+rand.Int63n(1000))) - os.RemoveAll(fdir) - if err := os.Mkdir(fdir, 0666); err != nil { - t.Skip(err) - } - defer os.RemoveAll(fdir) - if !Exist(fdir) { - t.Fatalf("expected Exist true, got %v", Exist(fdir)) - } - - f, err := os.CreateTemp(os.TempDir(), "fileutil") - if err != nil { - t.Fatal(err) - } 
- f.Close() - - if g := Exist(f.Name()); !g { - t.Errorf("exist = %v, want true", g) - } - - os.Remove(f.Name()) - if g := Exist(f.Name()); g { - t.Errorf("exist = %v, want false", g) - } -} - -func TestDirEmpty(t *testing.T) { - dir := t.TempDir() - - if !DirEmpty(dir) { - t.Fatalf("expected DirEmpty true, got %v", DirEmpty(dir)) - } - - file, err := os.CreateTemp(dir, "new_file") - if err != nil { - t.Fatal(err) - } - file.Close() - - if DirEmpty(dir) { - t.Fatalf("expected DirEmpty false, got %v", DirEmpty(dir)) - } - if DirEmpty(file.Name()) { - t.Fatalf("expected DirEmpty false, got %v", DirEmpty(file.Name())) - } -} - -func TestZeroToEnd(t *testing.T) { - f, err := os.CreateTemp(os.TempDir(), "fileutil") - if err != nil { - t.Fatal(err) - } - defer os.Remove(f.Name()) - defer f.Close() - - // Ensure 0 size is a nop so zero-to-end on an empty file won't give EINVAL. - if err = ZeroToEnd(f); err != nil { - t.Fatal(err) - } - - b := make([]byte, 1024) - for i := range b { - b[i] = 12 - } - if _, err = f.Write(b); err != nil { - t.Fatal(err) - } - if _, err = f.Seek(512, io.SeekStart); err != nil { - t.Fatal(err) - } - if err = ZeroToEnd(f); err != nil { - t.Fatal(err) - } - off, serr := f.Seek(0, io.SeekCurrent) - if serr != nil { - t.Fatal(serr) - } - if off != 512 { - t.Fatalf("expected offset 512, got %d", off) - } - - b = make([]byte, 512) - if _, err = f.Read(b); err != nil { - t.Fatal(err) - } - for i := range b { - if b[i] != 0 { - t.Errorf("expected b[%d] = 0, got %d", i, b[i]) - } - } -} - -func TestDirPermission(t *testing.T) { - tmpdir := t.TempDir() - - tmpdir2 := filepath.Join(tmpdir, "testpermission") - // create a new dir with 0700 - if err := CreateDirAll(zaptest.NewLogger(t), tmpdir2); err != nil { - t.Fatal(err) - } - // check dir permission with mode different than created dir - if err := CheckDirPermission(tmpdir2, 0600); err == nil { - t.Errorf("expected error, got nil") - } -} - -func TestRemoveMatchFile(t *testing.T) { - tmpdir := 
t.TempDir() - f, err := os.CreateTemp(tmpdir, "tmp") - if err != nil { - t.Fatal(err) - } - f.Close() - f, err = os.CreateTemp(tmpdir, "foo.tmp") - if err != nil { - t.Fatal(err) - } - f.Close() - - err = RemoveMatchFile(zaptest.NewLogger(t), tmpdir, func(fileName string) bool { - return strings.HasPrefix(fileName, "tmp") - }) - if err != nil { - t.Errorf("expected nil, got error") - } - fnames, err := ReadDir(tmpdir) - if err != nil { - t.Fatal(err) - } - if len(fnames) != 1 { - t.Errorf("expected exist 1 files, got %d", len(fnames)) - } - - f, err = os.CreateTemp(tmpdir, "tmp") - if err != nil { - t.Fatal(err) - } - f.Close() - err = RemoveMatchFile(zaptest.NewLogger(t), tmpdir, func(fileName string) bool { - os.Remove(filepath.Join(tmpdir, fileName)) - return strings.HasPrefix(fileName, "tmp") - }) - if err == nil { - t.Errorf("expected error, got nil") - } -} - -func TestTouchDirAll(t *testing.T) { - tmpdir := t.TempDir() - assert.Panics(t, func() { - TouchDirAll(nil, tmpdir) - }, "expected panic with nil log") - - if err := TouchDirAll(zaptest.NewLogger(t), tmpdir); err != nil { - t.Fatal(err) - } -} diff --git a/client/pkg/fileutil/lock.go b/client/pkg/fileutil/lock.go deleted file mode 100644 index 338627f43c8..00000000000 --- a/client/pkg/fileutil/lock.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package fileutil - -import ( - "errors" - "os" -) - -var ( - ErrLocked = errors.New("fileutil: file already locked") -) - -type LockedFile struct{ *os.File } diff --git a/client/pkg/fileutil/lock_linux_test.go b/client/pkg/fileutil/lock_linux_test.go deleted file mode 100644 index 65dd96b91f5..00000000000 --- a/client/pkg/fileutil/lock_linux_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux - -package fileutil - -import "testing" - -// TestLockAndUnlockSyscallFlock tests the fallback flock using the flock syscall. -func TestLockAndUnlockSyscallFlock(t *testing.T) { - oldTryLock, oldLock := linuxTryLockFile, linuxLockFile - defer func() { - linuxTryLockFile, linuxLockFile = oldTryLock, oldLock - }() - linuxTryLockFile, linuxLockFile = flockTryLockFile, flockLockFile - TestLockAndUnlock(t) -} diff --git a/client/pkg/fileutil/lock_test.go b/client/pkg/fileutil/lock_test.go deleted file mode 100644 index b7f6fd5ce57..00000000000 --- a/client/pkg/fileutil/lock_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "os" - "testing" - "time" -) - -func TestLockAndUnlock(t *testing.T) { - f, err := os.CreateTemp("", "lock") - if err != nil { - t.Fatal(err) - } - f.Close() - defer func() { - err = os.Remove(f.Name()) - if err != nil { - t.Fatal(err) - } - }() - - // lock the file - l, err := LockFile(f.Name(), os.O_WRONLY, PrivateFileMode) - if err != nil { - t.Fatal(err) - } - - // try lock a locked file - if _, err = TryLockFile(f.Name(), os.O_WRONLY, PrivateFileMode); err != ErrLocked { - t.Fatal(err) - } - - // unlock the file - if err = l.Close(); err != nil { - t.Fatal(err) - } - - // try lock the unlocked file - dupl, err := TryLockFile(f.Name(), os.O_WRONLY, PrivateFileMode) - if err != nil { - t.Errorf("err = %v, want %v", err, nil) - } - - // blocking on locked file - locked := make(chan struct{}, 1) - go func() { - bl, blerr := LockFile(f.Name(), os.O_WRONLY, PrivateFileMode) - if blerr != nil { - t.Error(blerr) - } - locked <- struct{}{} - if blerr = bl.Close(); blerr != nil { - t.Error(blerr) - } - }() - - select { - case <-locked: - t.Error("unexpected unblocking") - case <-time.After(100 * time.Millisecond): - } - - // unlock - if err = dupl.Close(); err != nil { - t.Fatal(err) - } - - // the previously blocked routine should be unblocked - select { - case <-locked: - case <-time.After(1 * time.Second): - t.Error("unexpected blocking") - } -} diff --git a/client/pkg/fileutil/lock_windows.go b/client/pkg/fileutil/lock_windows.go deleted file mode 100644 index 51010bdf81c..00000000000 --- 
a/client/pkg/fileutil/lock_windows.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build windows - -package fileutil - -import ( - "errors" - "fmt" - "os" - "syscall" - - "golang.org/x/sys/windows" -) - -var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") - -func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := open(path, flag, perm) - if err != nil { - return nil, err - } - if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil { - f.Close() - return nil, err - } - return &LockedFile{f}, nil -} - -func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := open(path, flag, perm) - if err != nil { - return nil, err - } - if err := lockFile(windows.Handle(f.Fd()), 0); err != nil { - f.Close() - return nil, err - } - return &LockedFile{f}, nil -} - -func open(path string, flag int, perm os.FileMode) (*os.File, error) { - if path == "" { - return nil, errors.New("cannot open empty filename") - } - var access uint32 - switch flag { - case syscall.O_RDONLY: - access = syscall.GENERIC_READ - case syscall.O_WRONLY: - access = syscall.GENERIC_WRITE - case syscall.O_RDWR: - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - case syscall.O_WRONLY | syscall.O_CREAT: - access = syscall.GENERIC_ALL - default: - 
panic(fmt.Errorf("flag %v is not supported", flag)) - } - fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), - access, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, - syscall.OPEN_ALWAYS, - syscall.FILE_ATTRIBUTE_NORMAL, - 0) - if err != nil { - return nil, err - } - return os.NewFile(uintptr(fd), path), nil -} - -func lockFile(fd windows.Handle, flags uint32) error { - if fd == windows.InvalidHandle { - return nil - } - err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{}) - if err == nil { - return nil - } else if err.Error() == errLocked.Error() { - return ErrLocked - } else if err != windows.ERROR_LOCK_VIOLATION { - return err - } - return nil -} diff --git a/client/pkg/fileutil/preallocate.go b/client/pkg/fileutil/preallocate.go deleted file mode 100644 index c747b7cf81f..00000000000 --- a/client/pkg/fileutil/preallocate.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "io" - "os" -) - -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). -// If the operation is unsupported, no error will be returned. -// Otherwise, the error encountered will be returned. 
-func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { - if sizeInBytes == 0 { - // fallocate will return EINVAL if length is 0; skip - return nil - } - if extendFile { - return preallocExtend(f, sizeInBytes) - } - return preallocFixed(f, sizeInBytes) -} - -func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { - curOff, err := f.Seek(0, io.SeekCurrent) - if err != nil { - return err - } - size, err := f.Seek(sizeInBytes, io.SeekEnd) - if err != nil { - return err - } - if _, err = f.Seek(curOff, io.SeekStart); err != nil { - return err - } - if sizeInBytes > size { - return nil - } - return f.Truncate(sizeInBytes) -} diff --git a/client/pkg/fileutil/preallocate_test.go b/client/pkg/fileutil/preallocate_test.go deleted file mode 100644 index 47a006704b2..00000000000 --- a/client/pkg/fileutil/preallocate_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package fileutil - -import ( - "os" - "testing" -) - -func TestPreallocateExtend(t *testing.T) { - pf := func(f *os.File, sz int64) error { return Preallocate(f, sz, true) } - tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, pf) } - runPreallocTest(t, tf) -} - -func TestPreallocateExtendTrunc(t *testing.T) { - tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, preallocExtendTrunc) } - runPreallocTest(t, tf) -} - -func testPreallocateExtend(t *testing.T, f *os.File, pf func(*os.File, int64) error) { - size := int64(64 * 1000) - if err := pf(f, size); err != nil { - t.Fatal(err) - } - - stat, err := f.Stat() - if err != nil { - t.Fatal(err) - } - if stat.Size() != size { - t.Errorf("size = %d, want %d", stat.Size(), size) - } -} - -func TestPreallocateFixed(t *testing.T) { runPreallocTest(t, testPreallocateFixed) } -func testPreallocateFixed(t *testing.T, f *os.File) { - size := int64(64 * 1000) - if err := Preallocate(f, size, false); err != nil { - t.Fatal(err) - } - - stat, err := f.Stat() - if err != nil { - t.Fatal(err) - } - if stat.Size() != 0 { - t.Errorf("size = %d, want %d", stat.Size(), 0) - } -} - -func runPreallocTest(t *testing.T, test func(*testing.T, *os.File)) { - p := t.TempDir() - - f, err := os.CreateTemp(p, "") - if err != nil { - t.Fatal(err) - } - test(t, f) -} diff --git a/client/pkg/fileutil/preallocate_unix.go b/client/pkg/fileutil/preallocate_unix.go deleted file mode 100644 index b02070b30b3..00000000000 --- a/client/pkg/fileutil/preallocate_unix.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux - -package fileutil - -import ( - "os" - "syscall" -) - -func preallocExtend(f *os.File, sizeInBytes int64) error { - // use mode = 0 to change size - err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) - if err != nil { - errno, ok := err.(syscall.Errno) - // not supported; fallback - // fallocate EINTRs frequently in some environments; fallback - if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { - return preallocExtendTrunc(f, sizeInBytes) - } - } - return err -} - -func preallocFixed(f *os.File, sizeInBytes int64) error { - // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE - err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) - if err != nil { - errno, ok := err.(syscall.Errno) - // treat not supported as nil error - if ok && errno == syscall.ENOTSUP { - return nil - } - } - return err -} diff --git a/client/pkg/fileutil/purge_test.go b/client/pkg/fileutil/purge_test.go deleted file mode 100644 index a10a3283be1..00000000000 --- a/client/pkg/fileutil/purge_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "go.uber.org/zap/zaptest" -) - -func TestPurgeFile(t *testing.T) { - dir := t.TempDir() - - // minimal file set - for i := 0; i < 3; i++ { - f, ferr := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", i))) - if ferr != nil { - t.Fatal(ferr) - } - f.Close() - } - - stop, purgec := make(chan struct{}), make(chan string, 10) - - // keep 3 most recent files - errch := purgeFile(zaptest.NewLogger(t), dir, "test", 3, time.Millisecond, stop, purgec, nil) - select { - case f := <-purgec: - t.Errorf("unexpected purge on %q", f) - case <-time.After(10 * time.Millisecond): - } - - // rest of the files - for i := 4; i < 10; i++ { - go func(n int) { - f, ferr := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", n))) - if ferr != nil { - t.Error(ferr) - } - f.Close() - }(i) - } - - // watch files purge away - for i := 4; i < 10; i++ { - select { - case <-purgec: - case <-time.After(time.Second): - t.Errorf("purge took too long") - } - } - - fnames, rerr := ReadDir(dir) - if rerr != nil { - t.Fatal(rerr) - } - wnames := []string{"7.test", "8.test", "9.test"} - if !reflect.DeepEqual(fnames, wnames) { - t.Errorf("filenames = %v, want %v", fnames, wnames) - } - - // no error should be reported from purge routine - select { - case f := <-purgec: - t.Errorf("unexpected purge on %q", f) - case err := <-errch: - t.Errorf("unexpected purge error %v", err) - case <-time.After(10 * time.Millisecond): - } - close(stop) -} - -func TestPurgeFileHoldingLockFile(t *testing.T) { - dir := t.TempDir() - - for i := 0; i < 10; i++ { - var f *os.File - f, err := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", i))) - if err != nil { - t.Fatal(err) - } - f.Close() - } - - // create a purge barrier at 5 - p := filepath.Join(dir, fmt.Sprintf("%d.test", 5)) - l, err := LockFile(p, 
os.O_WRONLY, PrivateFileMode) - if err != nil { - t.Fatal(err) - } - - stop, purgec := make(chan struct{}), make(chan string, 10) - errch := purgeFile(zaptest.NewLogger(t), dir, "test", 3, time.Millisecond, stop, purgec, nil) - - for i := 0; i < 5; i++ { - select { - case <-purgec: - case <-time.After(time.Second): - t.Fatalf("purge took too long") - } - } - - fnames, rerr := ReadDir(dir) - if rerr != nil { - t.Fatal(rerr) - } - - wnames := []string{"5.test", "6.test", "7.test", "8.test", "9.test"} - if !reflect.DeepEqual(fnames, wnames) { - t.Errorf("filenames = %v, want %v", fnames, wnames) - } - - select { - case s := <-purgec: - t.Errorf("unexpected purge %q", s) - case err = <-errch: - t.Errorf("unexpected purge error %v", err) - case <-time.After(10 * time.Millisecond): - } - - // remove the purge barrier - if err = l.Close(); err != nil { - t.Fatal(err) - } - - // wait for rest of purges (5, 6) - for i := 0; i < 2; i++ { - select { - case <-purgec: - case <-time.After(time.Second): - t.Fatalf("purge took too long") - } - } - - fnames, rerr = ReadDir(dir) - if rerr != nil { - t.Fatal(rerr) - } - wnames = []string{"7.test", "8.test", "9.test"} - if !reflect.DeepEqual(fnames, wnames) { - t.Errorf("filenames = %v, want %v", fnames, wnames) - } - - select { - case f := <-purgec: - t.Errorf("unexpected purge on %q", f) - case err := <-errch: - t.Errorf("unexpected purge error %v", err) - case <-time.After(10 * time.Millisecond): - } - - close(stop) -} diff --git a/client/pkg/fileutil/read_dir_test.go b/client/pkg/fileutil/read_dir_test.go deleted file mode 100644 index 79a37d886ca..00000000000 --- a/client/pkg/fileutil/read_dir_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "os" - "path/filepath" - "reflect" - "testing" -) - -func TestReadDir(t *testing.T) { - tmpdir := t.TempDir() - - files := []string{"def", "abc", "xyz", "ghi"} - for _, f := range files { - writeFunc(t, filepath.Join(tmpdir, f)) - } - fs, err := ReadDir(tmpdir) - if err != nil { - t.Fatalf("error calling ReadDir: %v", err) - } - wfs := []string{"abc", "def", "ghi", "xyz"} - if !reflect.DeepEqual(fs, wfs) { - t.Fatalf("ReadDir: got %v, want %v", fs, wfs) - } - - files = []string{"def.wal", "abc.wal", "xyz.wal", "ghi.wal"} - for _, f := range files { - writeFunc(t, filepath.Join(tmpdir, f)) - } - fs, err = ReadDir(tmpdir, WithExt(".wal")) - if err != nil { - t.Fatalf("error calling ReadDir: %v", err) - } - wfs = []string{"abc.wal", "def.wal", "ghi.wal", "xyz.wal"} - if !reflect.DeepEqual(fs, wfs) { - t.Fatalf("ReadDir: got %v, want %v", fs, wfs) - } -} - -func writeFunc(t *testing.T, path string) { - fh, err := os.Create(path) - if err != nil { - t.Fatalf("error creating file: %v", err) - } - if err = fh.Close(); err != nil { - t.Fatalf("error closing file: %v", err) - } -} diff --git a/client/pkg/go.mod b/client/pkg/go.mod deleted file mode 100644 index d961c419937..00000000000 --- a/client/pkg/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module go.etcd.io/etcd/client/pkg/v3 - -go 1.19 - -require ( - github.com/coreos/go-systemd/v22 v22.5.0 - github.com/stretchr/testify v1.8.1 - go.uber.org/zap v1.24.0 - golang.org/x/sys v0.0.0-20210603125802-9665404d3644 -) - -require ( - github.com/benbjohnson/clock v1.1.0 // indirect - 
github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/client/pkg/go.sum b/client/pkg/go.sum deleted file mode 100644 index bd0107f5d70..00000000000 --- a/client/pkg/go.sum +++ /dev/null @@ -1,33 +0,0 @@ -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/client/pkg/logutil/log_format.go b/client/pkg/logutil/log_format.go deleted file mode 100644 index 494ab33fb97..00000000000 --- a/client/pkg/logutil/log_format.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logutil - -import "fmt" - -const ( - JsonLogFormat = "json" - ConsoleLogFormat = "console" -) - -var DefaultLogFormat = JsonLogFormat - -// ConvertToZapFormat converts and validated log format string. -func ConvertToZapFormat(format string) (string, error) { - switch format { - case ConsoleLogFormat: - return ConsoleLogFormat, nil - case JsonLogFormat: - return JsonLogFormat, nil - case "": - return DefaultLogFormat, nil - default: - return "", fmt.Errorf("unknown log format: %s, supported values json, console", format) - } -} diff --git a/client/pkg/logutil/log_format_test.go b/client/pkg/logutil/log_format_test.go deleted file mode 100644 index 3c17061db7e..00000000000 --- a/client/pkg/logutil/log_format_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2019 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package logutil - -import ( - "testing" -) - -func TestLogFormat(t *testing.T) { - tests := []struct { - given string - want string - errExpected bool - }{ - {"json", JsonLogFormat, false}, - {"console", ConsoleLogFormat, false}, - {"", JsonLogFormat, false}, - {"konsole", "", true}, - } - - for i, tt := range tests { - got, err := ConvertToZapFormat(tt.given) - if got != tt.want { - t.Errorf("#%d: ConvertToZapFormat failure: want=%v, got=%v", i, tt.want, got) - } - - if err != nil { - if !tt.errExpected { - t.Errorf("#%d: ConvertToZapFormat unexpected error: %v", i, err) - } - } - } -} diff --git a/client/pkg/logutil/zap_journal_test.go b/client/pkg/logutil/zap_journal_test.go deleted file mode 100644 index be5efd5d3ec..00000000000 --- a/client/pkg/logutil/zap_journal_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !windows - -package logutil - -import ( - "bytes" - "testing" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func TestNewJournalWriter(t *testing.T) { - buf := bytes.NewBuffer(nil) - jw, err := NewJournalWriter(buf) - if err != nil { - t.Skip(err) - } - - syncer := zapcore.AddSync(jw) - - cr := zapcore.NewCore( - zapcore.NewJSONEncoder(DefaultZapLoggerConfig.EncoderConfig), - syncer, - zap.NewAtomicLevelAt(zap.InfoLevel), - ) - - lg := zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer)) - defer lg.Sync() - - lg.Info("TestNewJournalWriter") - if buf.String() == "" { - // check with "journalctl -f" - t.Log("sent logs successfully to journald") - } -} diff --git a/client/pkg/pathutil/path.go b/client/pkg/pathutil/path.go deleted file mode 100644 index f26254ba933..00000000000 --- a/client/pkg/pathutil/path.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pathutil implements utility functions for handling slash-separated -// paths. -package pathutil - -import "path" - -// CanonicalURLPath returns the canonical url path for p, which follows the rules: -// 1. the path always starts with "/" -// 2. replace multiple slashes with a single slash -// 3. replace each '.' '..' path name element with equivalent one -// 4. keep the trailing slash -// The function is borrowed from stdlib http.cleanPath in server.go. -func CanonicalURLPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root, - // put the trailing slash back if necessary. 
- if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} diff --git a/client/pkg/pathutil/path_test.go b/client/pkg/pathutil/path_test.go deleted file mode 100644 index 209fdc93c13..00000000000 --- a/client/pkg/pathutil/path_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pathutil - -import "testing" - -func TestCanonicalURLPath(t *testing.T) { - tests := []struct { - p string - wp string - }{ - {"/a", "/a"}, - {"", "/"}, - {"a", "/a"}, - {"//a", "/a"}, - {"/a/.", "/a"}, - {"/a/..", "/"}, - {"/a/", "/a/"}, - {"/a//", "/a/"}, - } - for i, tt := range tests { - if g := CanonicalURLPath(tt.p); g != tt.wp { - t.Errorf("#%d: canonical path = %s, want %s", i, g, tt.wp) - } - } -} diff --git a/client/pkg/srv/srv_test.go b/client/pkg/srv/srv_test.go deleted file mode 100644 index a61938fec89..00000000000 --- a/client/pkg/srv/srv_test.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package srv - -import ( - "errors" - "fmt" - "net" - "reflect" - "strings" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func notFoundErr(service, proto, domain string) error { - name := fmt.Sprintf("_%s._%s.%s", service, proto, domain) - return &net.DNSError{Err: "no such host", Name: name, Server: "10.0.0.53:53", IsTimeout: false, IsTemporary: false, IsNotFound: true} -} - -func TestSRVGetCluster(t *testing.T) { - defer func() { - lookupSRV = net.LookupSRV - resolveTCPAddr = net.ResolveTCPAddr - }() - - hasErr := func(err error) bool { - return err != nil - } - - name := "dnsClusterTest" - dns := map[string]string{ - "1.example.com.:2480": "10.0.0.1:2480", - "2.example.com.:2480": "10.0.0.2:2480", - "3.example.com.:2480": "10.0.0.3:2480", - "4.example.com.:2380": "10.0.0.3:2380", - } - srvAll := []*net.SRV{ - {Target: "1.example.com.", Port: 2480}, - {Target: "2.example.com.", Port: 2480}, - {Target: "3.example.com.", Port: 2480}, - } - var srvNone []*net.SRV - - tests := []struct { - service string - scheme string - withSSL []*net.SRV - withoutSSL []*net.SRV - urls []string - expected string - werr bool - }{ - { - "etcd-server-ssl", - "https", - srvNone, - srvNone, - nil, - "", - true, - }, - { - "etcd-server-ssl", - "https", - srvAll, - srvNone, - nil, - "0=https://1.example.com:2480,1=https://2.example.com:2480,2=https://3.example.com:2480", - false, - }, - { - "etcd-server", - "http", - srvNone, - srvAll, - nil, - "0=http://1.example.com:2480,1=http://2.example.com:2480,2=http://3.example.com:2480", - false, - }, - { - "etcd-server-ssl", - "https", - srvAll, - srvNone, - []string{"https://10.0.0.1:2480"}, - "dnsClusterTest=https://1.example.com:2480,0=https://2.example.com:2480,1=https://3.example.com:2480", - false, - }, - // matching local member with resolved addr and return unresolved hostnames - { - "etcd-server-ssl", - "https", 
- srvAll, - srvNone, - []string{"https://10.0.0.1:2480"}, - "dnsClusterTest=https://1.example.com:2480,0=https://2.example.com:2480,1=https://3.example.com:2480", - false, - }, - // reject if apurls are TLS but SRV is only http - { - "etcd-server", - "http", - srvNone, - srvAll, - []string{"https://10.0.0.1:2480"}, - "0=http://2.example.com:2480,1=http://3.example.com:2480", - false, - }, - } - - resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) { - if strings.Contains(addr, "10.0.0.") { - // accept IP addresses when resolving apurls - return net.ResolveTCPAddr(network, addr) - } - if dns[addr] == "" { - return nil, errors.New("missing dns record") - } - return net.ResolveTCPAddr(network, dns[addr]) - } - - for i, tt := range tests { - lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) { - if service == "etcd-server-ssl" { - if len(tt.withSSL) > 0 { - return "", tt.withSSL, nil - } - return "", nil, notFoundErr(service, proto, domain) - } - if service == "etcd-server" { - if len(tt.withoutSSL) > 0 { - return "", tt.withoutSSL, nil - } - return "", nil, notFoundErr(service, proto, domain) - } - return "", nil, errors.New("unknown service in mock") - } - - urls := testutil.MustNewURLs(t, tt.urls) - str, err := GetCluster(tt.scheme, tt.service, name, "example.com", urls) - - if hasErr(err) != tt.werr { - t.Fatalf("%d: err = %#v, want = %#v", i, err, tt.werr) - } - if strings.Join(str, ",") != tt.expected { - t.Errorf("#%d: cluster = %s, want %s", i, str, tt.expected) - } - } -} - -func TestSRVDiscover(t *testing.T) { - defer func() { lookupSRV = net.LookupSRV }() - - hasErr := func(err error) bool { - return err != nil - } - - tests := []struct { - withSSL []*net.SRV - withoutSSL []*net.SRV - expected []string - werr bool - }{ - { - []*net.SRV{}, - []*net.SRV{}, - []string{}, - true, - }, - { - []*net.SRV{}, - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: 
"10.0.0.3", Port: 2480}, - }, - []string{"http://10.0.0.1:2480", "http://10.0.0.2:2480", "http://10.0.0.3:2480"}, - false, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{}, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"}, - false, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{ - {Target: "10.0.0.1", Port: 7001}, - }, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, - false, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{ - {Target: "10.0.0.1", Port: 7001}, - }, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, - false, - }, - { - []*net.SRV{ - {Target: "a.example.com", Port: 2480}, - {Target: "b.example.com", Port: 2480}, - {Target: "c.example.com.", Port: 2480}, - }, - []*net.SRV{}, - []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com.:2480"}, - false, - }, - } - - for i, tt := range tests { - lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) { - if service == "etcd-client-ssl" { - if len(tt.withSSL) > 0 { - return "", tt.withSSL, nil - } - return "", nil, notFoundErr(service, proto, domain) - } - if service == "etcd-client" { - if len(tt.withoutSSL) > 0 { - return "", tt.withoutSSL, nil - } - return "", nil, notFoundErr(service, proto, domain) - } - return "", nil, errors.New("unknown service in mock") - } - - srvs, err := GetClient("etcd-client", "example.com", "") - - if hasErr(err) != tt.werr { - t.Fatalf("%d: err = %#v, want = %#v", i, err, tt.werr) - } - if srvs == nil { - if len(tt.expected) > 0 { - t.Errorf("#%d: srvs 
= nil, want non-nil", i) - } - } else { - if !reflect.DeepEqual(srvs.Endpoints, tt.expected) { - t.Errorf("#%d: endpoints = %v, want = %v", i, srvs.Endpoints, tt.expected) - } - } - } -} - -func TestGetSRVService(t *testing.T) { - tests := []struct { - scheme string - serviceName string - - expected string - }{ - { - "https", - "", - "etcd-client-ssl", - }, - { - "http", - "", - "etcd-client", - }, - { - "https", - "foo", - "etcd-client-ssl-foo", - }, - { - "http", - "bar", - "etcd-client-bar", - }, - } - - for i, tt := range tests { - service := GetSRVService("etcd-client", tt.serviceName, tt.scheme) - if strings.Compare(service, tt.expected) != 0 { - t.Errorf("#%d: service = %s, want %s", i, service, tt.expected) - } - } -} diff --git a/client/pkg/testutil/assert.go b/client/pkg/testutil/assert.go deleted file mode 100644 index ef820748e64..00000000000 --- a/client/pkg/testutil/assert.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package testutil - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/assert" -) - -func copyToInterface(msg ...string) []interface{} { - newMsg := make([]interface{}, len(msg)) - for i, v := range msg { - newMsg[i] = v - } - return newMsg -} - -func AssertNil(t *testing.T, v interface{}) { - t.Helper() - assert.Nil(t, v) -} - -func AssertNotNil(t *testing.T, v interface{}) { - t.Helper() - if v == nil { - t.Fatalf("expected non-nil, got %+v", v) - } -} - -func AssertTrue(t *testing.T, v bool, msg ...string) { - t.Helper() - newMsg := copyToInterface(msg...) - assert.Equal(t, true, v, newMsg) -} - -func AssertFalse(t *testing.T, v bool, msg ...string) { - t.Helper() - newMsg := copyToInterface(msg...) - assert.Equal(t, false, v, newMsg) -} - -func isNil(v interface{}) bool { - if v == nil { - return true - } - rv := reflect.ValueOf(v) - return rv.Kind() != reflect.Struct && rv.IsNil() -} diff --git a/client/pkg/testutil/before.go b/client/pkg/testutil/before.go deleted file mode 100644 index 1f8c1fa72a5..00000000000 --- a/client/pkg/testutil/before.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package testutil - -import ( - "log" - "os" - "testing" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/client/pkg/v3/verify" -) - -func BeforeTest(t testing.TB) { - RegisterLeakDetection(t) - - revertVerifyFunc := verify.EnableAllVerifications() - - path, err := os.Getwd() - assert.NoError(t, err) - tempDir := t.TempDir() - assert.NoError(t, os.Chdir(tempDir)) - t.Logf("Changing working directory to: %s", tempDir) - - t.Cleanup(func() { - revertVerifyFunc() - assert.NoError(t, os.Chdir(path)) - }) -} - -func BeforeIntegrationExamples(*testing.M) func() { - ExitInShortMode("Skipping: the tests require real cluster") - - tempDir, err := os.MkdirTemp(os.TempDir(), "etcd-integration") - if err != nil { - log.Printf("Failed to obtain tempDir: %v", tempDir) - os.Exit(1) - } - - err = os.Chdir(tempDir) - if err != nil { - log.Printf("Failed to change working dir to: %s: %v", tempDir, err) - os.Exit(1) - } - log.Printf("Running tests (examples) in dir(%v): ...", tempDir) - return func() { os.RemoveAll(tempDir) } -} diff --git a/client/pkg/testutil/leak_test.go b/client/pkg/testutil/leak_test.go deleted file mode 100644 index 71b1c7bf3e6..00000000000 --- a/client/pkg/testutil/leak_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package testutil - -import ( - "fmt" - "os" - "testing" -) - -// so tests pass if given a -run that doesn't include TestSample -var ranSample = false - -func TestMain(m *testing.M) { - m.Run() - isLeaked := CheckLeakedGoroutine() - if ranSample && !isLeaked { - fmt.Fprintln(os.Stderr, "expected leaky goroutines but none is detected") - os.Exit(1) - } - os.Exit(0) -} - -func TestSample(t *testing.T) { - SkipTestIfShortMode(t, "Counting leaked routines is disabled in --short tests") - defer afterTest(t) - ranSample = true - for range make([]struct{}, 100) { - go func() { - select {} - }() - } -} diff --git a/client/pkg/tlsutil/cipher_suites.go b/client/pkg/tlsutil/cipher_suites.go deleted file mode 100644 index e1f21755d4b..00000000000 --- a/client/pkg/tlsutil/cipher_suites.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tlsutil - -import ( - "crypto/tls" - "fmt" -) - -// GetCipherSuite returns the corresponding cipher suite, -// and boolean value if it is supported. 
-func GetCipherSuite(s string) (uint16, bool) { - for _, c := range tls.CipherSuites() { - if s == c.Name { - return c.ID, true - } - } - for _, c := range tls.InsecureCipherSuites() { - if s == c.Name { - return c.ID, true - } - } - switch s { - case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": - return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true - case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": - return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true - } - return 0, false -} - -// GetCipherSuites returns list of corresponding cipher suite IDs. -func GetCipherSuites(ss []string) ([]uint16, error) { - cs := make([]uint16, len(ss)) - for i, s := range ss { - var ok bool - cs[i], ok = GetCipherSuite(s) - if !ok { - return nil, fmt.Errorf("unexpected TLS cipher suite %q", s) - } - } - - return cs, nil -} diff --git a/client/pkg/tlsutil/cipher_suites_test.go b/client/pkg/tlsutil/cipher_suites_test.go deleted file mode 100644 index a17b46c2fee..00000000000 --- a/client/pkg/tlsutil/cipher_suites_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tlsutil - -import ( - "crypto/tls" - "testing" -) - -func TestGetCipherSuite_not_existing(t *testing.T) { - _, ok := GetCipherSuite("not_existing") - if ok { - t.Fatal("Expected not ok") - } -} - -func CipherSuiteExpectedToExist(tb testing.TB, cipher string, expectedId uint16) { - vid, ok := GetCipherSuite(cipher) - if !ok { - tb.Errorf("Expected %v cipher to exist", cipher) - } - if vid != expectedId { - tb.Errorf("For %v expected=%v found=%v", cipher, expectedId, vid) - } -} - -func TestGetCipherSuite_success(t *testing.T) { - CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA) - CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) - - // Explicit test for legacy names - CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256) - CipherSuiteExpectedToExist(t, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256) -} - -func TestGetCipherSuite_insecure(t *testing.T) { - CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA) -} diff --git a/client/pkg/tlsutil/versions.go b/client/pkg/tlsutil/versions.go deleted file mode 100644 index ffcecd8c670..00000000000 --- a/client/pkg/tlsutil/versions.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2023 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package tlsutil - -import ( - "crypto/tls" - "fmt" -) - -type TLSVersion string - -// Constants for TLS versions. -const ( - TLSVersionDefault TLSVersion = "" - TLSVersion12 TLSVersion = "TLS1.2" - TLSVersion13 TLSVersion = "TLS1.3" -) - -// GetTLSVersion returns the corresponding tls.Version or error. -func GetTLSVersion(version string) (uint16, error) { - var v uint16 - - switch version { - case string(TLSVersionDefault): - v = 0 // 0 means let Go decide. - case string(TLSVersion12): - v = tls.VersionTLS12 - case string(TLSVersion13): - v = tls.VersionTLS13 - default: - return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version) - } - - return v, nil -} diff --git a/client/pkg/tlsutil/versions_test.go b/client/pkg/tlsutil/versions_test.go deleted file mode 100644 index 89c7c3f64b7..00000000000 --- a/client/pkg/tlsutil/versions_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2023 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tlsutil - -import ( - "crypto/tls" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGetVersion(t *testing.T) { - tests := []struct { - name string - version string - want uint16 - expectError bool - }{ - { - name: "TLS1.2", - version: "TLS1.2", - want: tls.VersionTLS12, - }, - { - name: "TLS1.3", - version: "TLS1.3", - want: tls.VersionTLS13, - }, - { - name: "Empty version", - version: "", - want: 0, - }, - { - name: "Converting invalid version string to TLS version", - version: "not_existing", - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := GetTLSVersion(tt.version) - if err != nil { - assert.True(t, tt.expectError, "GetTLSVersion() returned error while expecting success: %v", err) - return - } - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/client/pkg/transport/keepalive_listener.go b/client/pkg/transport/keepalive_listener.go deleted file mode 100644 index 2006a56b7df..00000000000 --- a/client/pkg/transport/keepalive_listener.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "time" -) - -// NewKeepAliveListener returns a listener that listens on the given address. -// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. 
-// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. -// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html -// -// Note(ahrtr): -// only `net.TCPConn` supports `SetKeepAlive` and `SetKeepAlivePeriod` -// by default, so if you want to wrap multiple layers of net.Listener, -// the `keepaliveListener` should be the one which is closest to the -// original `net.Listener` implementation, namely `TCPListener`. -func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { - kal := &keepaliveListener{ - Listener: l, - } - - if scheme == "https" { - if tlscfg == nil { - return nil, errors.New("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") - } - return newTLSKeepaliveListener(kal, tlscfg), nil - } - - return kal, nil -} - -type keepaliveListener struct{ net.Listener } - -func (kln *keepaliveListener) Accept() (net.Conn, error) { - c, err := kln.Listener.Accept() - if err != nil { - return nil, err - } - - kac, err := createKeepaliveConn(c) - if err != nil { - return nil, fmt.Errorf("create keepalive connection failed, %w", err) - } - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - if err := kac.SetKeepAlive(true); err != nil { - return nil, fmt.Errorf("SetKeepAlive failed, %w", err) - } - if err := kac.SetKeepAlivePeriod(30 * time.Second); err != nil { - return nil, fmt.Errorf("SetKeepAlivePeriod failed, %w", err) - } - return kac, nil -} - -func createKeepaliveConn(c net.Conn) (*keepAliveConn, error) { - tcpc, ok := c.(*net.TCPConn) - if !ok { - return nil, ErrNotTCP - } - return &keepAliveConn{tcpc}, nil -} - -type keepAliveConn struct { - *net.TCPConn -} - -// SetKeepAlive sets keepalive -func (l *keepAliveConn) SetKeepAlive(doKeepAlive bool) error { - return l.TCPConn.SetKeepAlive(doKeepAlive) -} - -// SetKeepAlivePeriod sets keepalive period 
-func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error { - return l.TCPConn.SetKeepAlivePeriod(d) -} - -// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. -type tlsKeepaliveListener struct { - net.Listener - config *tls.Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection c is a *tls.Conn. -func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return - } - - c = tls.Server(c, l.config) - return c, nil -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must have -// at least one certificate. -func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { - l := &tlsKeepaliveListener{} - l.Listener = inner - l.config = config - return l -} diff --git a/client/pkg/transport/keepalive_listener_test.go b/client/pkg/transport/keepalive_listener_test.go deleted file mode 100644 index efe312d94a8..00000000000 --- a/client/pkg/transport/keepalive_listener_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "crypto/tls" - "net" - "net/http" - "testing" -) - -// TestNewKeepAliveListener tests NewKeepAliveListener returns a listener -// that accepts connections. -// TODO: verify the keepalive option is set correctly -func TestNewKeepAliveListener(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("unexpected listen error: %v", err) - } - - ln, err = NewKeepAliveListener(ln, "http", nil) - if err != nil { - t.Fatalf("unexpected NewKeepAliveListener error: %v", err) - } - - go http.Get("http://" + ln.Addr().String()) - conn, err := ln.Accept() - if err != nil { - t.Fatalf("unexpected Accept error: %v", err) - } - if _, ok := conn.(*keepAliveConn); !ok { - t.Fatalf("Unexpected conn type: %T, wanted *keepAliveConn", conn) - } - conn.Close() - ln.Close() - - ln, err = net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("unexpected Listen error: %v", err) - } - - // tls - tlsinfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create tmpfile: %v", err) - } - tlsInfo := TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile} - tlsInfo.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil) - tlscfg, err := tlsInfo.ServerConfig() - if err != nil { - t.Fatalf("unexpected serverConfig error: %v", err) - } - tlsln, err := NewKeepAliveListener(ln, "https", tlscfg) - if err != nil { - t.Fatalf("unexpected NewKeepAliveListener error: %v", err) - } - - go http.Get("https://" + tlsln.Addr().String()) - conn, err = tlsln.Accept() - if err != nil { - t.Fatalf("unexpected Accept error: %v", err) - } - if _, ok := conn.(*tls.Conn); !ok { - t.Errorf("failed to accept *tls.Conn") - } - conn.Close() - tlsln.Close() -} - -func TestNewKeepAliveListenerTLSEmptyConfig(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("unexpected listen error: %v", err) - } - - _, err = NewKeepAliveListener(ln, "https", nil) - if err == nil { - t.Errorf("err = 
nil, want not presented error") - } -} diff --git a/client/pkg/transport/listener.go b/client/pkg/transport/listener.go deleted file mode 100644 index 5e0e13e25a7..00000000000 --- a/client/pkg/transport/listener.go +++ /dev/null @@ -1,596 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net" - "os" - "path/filepath" - "strings" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/tlsutil" - "go.etcd.io/etcd/client/pkg/v3/verify" - - "go.uber.org/zap" -) - -// NewListener creates a new listner. -func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { - return newListener(addr, scheme, WithTLSInfo(tlsinfo)) -} - -// NewListenerWithOpts creates a new listener which accepts listener options. -func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) { - return newListener(addr, scheme, opts...) -} - -func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, error) { - if scheme == "unix" || scheme == "unixs" { - // unix sockets via unix://laddr - return NewUnixListener(addr) - } - - lnOpts := newListenOpts(opts...) - - switch { - case lnOpts.IsSocketOpts(): - // new ListenConfig with socket options. 
- config, err := newListenConfig(lnOpts.socketOpts) - if err != nil { - return nil, err - } - lnOpts.ListenConfig = config - // check for timeout - fallthrough - case lnOpts.IsTimeout(), lnOpts.IsSocketOpts(): - // timeout listener with socket options. - ln, err := newKeepAliveListener(&lnOpts.ListenConfig, addr) - if err != nil { - return nil, err - } - lnOpts.Listener = &rwTimeoutListener{ - Listener: ln, - readTimeout: lnOpts.readTimeout, - writeTimeout: lnOpts.writeTimeout, - } - case lnOpts.IsTimeout(): - ln, err := newKeepAliveListener(nil, addr) - if err != nil { - return nil, err - } - lnOpts.Listener = &rwTimeoutListener{ - Listener: ln, - readTimeout: lnOpts.readTimeout, - writeTimeout: lnOpts.writeTimeout, - } - default: - ln, err := newKeepAliveListener(nil, addr) - if err != nil { - return nil, err - } - lnOpts.Listener = ln - } - - // only skip if not passing TLSInfo - if lnOpts.skipTLSInfoCheck && !lnOpts.IsTLS() { - return lnOpts.Listener, nil - } - return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener) -} - -func newKeepAliveListener(cfg *net.ListenConfig, addr string) (ln net.Listener, err error) { - if cfg != nil { - ln, err = cfg.Listen(context.TODO(), "tcp", addr) - } else { - ln, err = net.Listen("tcp", addr) - } - if err != nil { - return - } - - return NewKeepAliveListener(ln, "tcp", nil) -} - -func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { - if scheme != "https" && scheme != "unixs" { - return l, nil - } - if tlsinfo != nil && tlsinfo.SkipClientSANVerify { - return NewTLSListener(l, tlsinfo) - } - return newTLSListener(l, tlsinfo, checkSAN) -} - -func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) { - lc := net.ListenConfig{} - if sopts != nil { - ctls := getControls(sopts) - if len(ctls) > 0 { - lc.Control = ctls.Control - } - } - return lc, nil -} - -type TLSInfo struct { - // CertFile is the _server_ cert, it will also be used as a _client_ certificate if ClientCertFile is empty - 
CertFile string - // KeyFile is the key for the CertFile - KeyFile string - // ClientCertFile is a _client_ cert for initiating connections when ClientCertAuth is defined. If ClientCertAuth - // is true but this value is empty, the CertFile will be used instead. - ClientCertFile string - // ClientKeyFile is the key for the ClientCertFile - ClientKeyFile string - - TrustedCAFile string - ClientCertAuth bool - CRLFile string - InsecureSkipVerify bool - SkipClientSANVerify bool - - // ServerName ensures the cert matches the given host in case of discovery / virtual hosting - ServerName string - - // HandshakeFailure is optionally called when a connection fails to handshake. The - // connection will be closed immediately afterwards. - HandshakeFailure func(*tls.Conn, error) - - // CipherSuites is a list of supported cipher suites. - // If empty, Go auto-populates it by default. - // Note that cipher suites are prioritized in the given order. - CipherSuites []uint16 - - // MinVersion is the minimum TLS version that is acceptable. - // If not set, the minimum version is TLS 1.2. - MinVersion uint16 - - // MaxVersion is the maximum TLS version that is acceptable. - // If not set, the default used by Go is selected (see tls.Config.MaxVersion). - MaxVersion uint16 - - selfCert bool - - // parseFunc exists to simplify testing. Typically, parseFunc - // should be left nil. In that case, tls.X509KeyPair will be used. - parseFunc func([]byte, []byte) (tls.Certificate, error) - - // AllowedCN is a CN which must be provided by a client. - AllowedCN string - - // AllowedHostname is an IP address or hostname that must match the TLS - // certificate provided by a client. - AllowedHostname string - - // Logger logs TLS errors. - // If nil, all logs are discarded. - Logger *zap.Logger - - // EmptyCN indicates that the cert must have empty CN. - // If true, ClientConfig() will return an error for a cert with non empty CN. 
- EmptyCN bool -} - -func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, client-cert=%s, client-key=%s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.ClientCertFile, info.ClientKeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) -} - -func (info TLSInfo) Empty() bool { - return info.CertFile == "" && info.KeyFile == "" -} - -func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { - verify.Assert(lg != nil, "nil log isn't allowed") - info.Logger = lg - if selfSignedCertValidity == 0 { - err = errors.New("selfSignedCertValidity is invalid,it should be greater than 0") - info.Logger.Warn( - "cannot generate cert", - zap.Error(err), - ) - return - } - err = fileutil.TouchDirAll(lg, dirpath) - if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot create cert directory", - zap.Error(err), - ) - } - return - } - - certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem")) - if err != nil { - return - } - keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem")) - if err != nil { - return - } - _, errcert := os.Stat(certPath) - _, errkey := os.Stat(keyPath) - if errcert == nil && errkey == nil { - info.CertFile = certPath - info.KeyFile = keyPath - info.ClientCertFile = certPath - info.ClientKeyFile = keyPath - info.selfCert = true - return - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate random number", - zap.Error(err), - ) - } - return - } - - tmpl := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{Organization: []string{"etcd"}}, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)), - - KeyUsage: 
x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), - BasicConstraintsValid: true, - } - - if info.Logger != nil { - info.Logger.Warn( - "automatically generate certificates", - zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter), - ) - } - - for _, host := range hosts { - h, _, _ := net.SplitHostPort(host) - if ip := net.ParseIP(h); ip != nil { - tmpl.IPAddresses = append(tmpl.IPAddresses, ip) - } else { - tmpl.DNSNames = append(tmpl.DNSNames, h) - } - } - - priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate ECDSA key", - zap.Error(err), - ) - } - return - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) - if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot generate x509 certificate", - zap.Error(err), - ) - } - return - } - - certOut, err := os.Create(certPath) - if err != nil { - info.Logger.Warn( - "cannot cert file", - zap.String("path", certPath), - zap.Error(err), - ) - return - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - if info.Logger != nil { - info.Logger.Info("created cert file", zap.String("path", certPath)) - } - - b, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return - } - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "cannot key file", - zap.String("path", keyPath), - zap.Error(err), - ) - } - return - } - pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) - keyOut.Close() - if info.Logger != nil { - info.Logger.Info("created key file", zap.String("path", keyPath)) - } - return SelfCert(lg, dirpath, hosts, selfSignedCertValidity) -} - -// baseConfig is called on initial TLS handshake start. 
-// -// Previously, -// 1. Server has non-empty (*tls.Config).Certificates on client hello -// 2. Server calls (*tls.Config).GetCertificate iff: -// - Server's (*tls.Config).Certificates is not empty, or -// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName -// -// When (*tls.Config).Certificates is always populated on initial handshake, -// client is expected to provide a valid matching SNI to pass the TLS -// verification, thus trigger server (*tls.Config).GetCertificate to reload -// TLS assets. However, a cert whose SAN field does not include domain names -// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus -// it was never able to trigger TLS reload on initial handshake; first -// ceritifcate object was being used, never being updated. -// -// Now, (*tls.Config).Certificates is created empty on initial TLS client -// handshake, in order to trigger (*tls.Config).GetCertificate and populate -// rest of the certificates on every new TLS connection, even when client -// SNI is empty (e.g. cert only includes IPs). -func (info TLSInfo) baseConfig() (*tls.Config, error) { - if info.KeyFile == "" || info.CertFile == "" { - return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) - } - if info.Logger == nil { - info.Logger = zap.NewNop() - } - - _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - if err != nil { - return nil, err - } - - // Perform prevalidation of client cert and key if either are provided. This makes sure we crash before accepting any connections. 
- if (info.ClientKeyFile == "") != (info.ClientCertFile == "") { - return nil, fmt.Errorf("ClientKeyFile and ClientCertFile must both be present or both absent: key: %v, cert: %v]", info.ClientKeyFile, info.ClientCertFile) - } - if info.ClientCertFile != "" { - _, err := tlsutil.NewCert(info.ClientCertFile, info.ClientKeyFile, info.parseFunc) - if err != nil { - return nil, err - } - } - - var minVersion uint16 - if info.MinVersion != 0 { - minVersion = info.MinVersion - } else { - // Default minimum version is TLS 1.2, previous versions are insecure and deprecated. - minVersion = tls.VersionTLS12 - } - - cfg := &tls.Config{ - MinVersion: minVersion, - MaxVersion: info.MaxVersion, - ServerName: info.ServerName, - } - - if len(info.CipherSuites) > 0 { - cfg.CipherSuites = info.CipherSuites - } - - // Client certificates may be verified by either an exact match on the CN, - // or a more general check of the CN and SANs. - var verifyCertificate func(*x509.Certificate) bool - if info.AllowedCN != "" { - if info.AllowedHostname != "" { - return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) - } - verifyCertificate = func(cert *x509.Certificate) bool { - return info.AllowedCN == cert.Subject.CommonName - } - } - if info.AllowedHostname != "" { - verifyCertificate = func(cert *x509.Certificate) bool { - return cert.VerifyHostname(info.AllowedHostname) == nil - } - } - if verifyCertificate != nil { - cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - for _, chains := range verifiedChains { - if len(chains) != 0 { - if verifyCertificate(chains[0]) { - return nil - } - } - } - return errors.New("client certificate authentication failed") - } - } - - // this only reloads certs when there's a client request - // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching - cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { - cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - if os.IsNotExist(err) { - if info.Logger != nil { - info.Logger.Warn( - "failed to find peer cert files", - zap.String("cert-file", info.CertFile), - zap.String("key-file", info.KeyFile), - zap.Error(err), - ) - } - } else if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "failed to create peer certificate", - zap.String("cert-file", info.CertFile), - zap.String("key-file", info.KeyFile), - zap.Error(err), - ) - } - } - return cert, err - } - cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) { - certfile, keyfile := info.CertFile, info.KeyFile - if info.ClientCertFile != "" { - certfile, keyfile = info.ClientCertFile, info.ClientKeyFile - } - cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc) - if os.IsNotExist(err) { - if info.Logger != nil { - info.Logger.Warn( - "failed to find client cert files", - zap.String("cert-file", certfile), - zap.String("key-file", keyfile), - zap.Error(err), - ) - } - } else if err != nil { - if info.Logger != nil { - info.Logger.Warn( - "failed to create client certificate", - zap.String("cert-file", certfile), - zap.String("key-file", keyfile), - zap.Error(err), - ) - } - } - return cert, err - } - return cfg, nil -} - -// cafiles returns a list of CA file paths. -func (info TLSInfo) cafiles() []string { - cs := make([]string, 0) - if info.TrustedCAFile != "" { - cs = append(cs, info.TrustedCAFile) - } - return cs -} - -// ServerConfig generates a tls.Config object for use by an HTTP server. 
-func (info TLSInfo) ServerConfig() (*tls.Config, error) { - cfg, err := info.baseConfig() - if err != nil { - return nil, err - } - - if info.Logger == nil { - info.Logger = zap.NewNop() - } - - cfg.ClientAuth = tls.NoClientCert - if info.TrustedCAFile != "" || info.ClientCertAuth { - cfg.ClientAuth = tls.RequireAndVerifyClientCert - } - - cs := info.cafiles() - if len(cs) > 0 { - info.Logger.Info("Loading cert pool", zap.Strings("cs", cs), - zap.Any("tlsinfo", info)) - cp, err := tlsutil.NewCertPool(cs) - if err != nil { - return nil, err - } - cfg.ClientCAs = cp - } - - // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server - cfg.NextProtos = []string{"h2"} - - return cfg, nil -} - -// ClientConfig generates a tls.Config object for use by an HTTP client. -func (info TLSInfo) ClientConfig() (*tls.Config, error) { - var cfg *tls.Config - var err error - - if !info.Empty() { - cfg, err = info.baseConfig() - if err != nil { - return nil, err - } - } else { - cfg = &tls.Config{ServerName: info.ServerName} - } - cfg.InsecureSkipVerify = info.InsecureSkipVerify - - cs := info.cafiles() - if len(cs) > 0 { - cfg.RootCAs, err = tlsutil.NewCertPool(cs) - if err != nil { - return nil, err - } - } - - if info.selfCert { - cfg.InsecureSkipVerify = true - } - - if info.EmptyCN { - hasNonEmptyCN := false - cn := "" - _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { - var block *pem.Block - block, _ = pem.Decode(certPEMBlock) - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return tls.Certificate{}, err - } - if len(cert.Subject.CommonName) != 0 { - hasNonEmptyCN = true - cn = cert.Subject.CommonName - } - return tls.X509KeyPair(certPEMBlock, keyPEMBlock) - }) - if err != nil { - return nil, err - } - if hasNonEmptyCN { - return nil, fmt.Errorf("cert has non empty Common Name (%s): %s", cn, info.CertFile) - } - } - - return cfg, nil -} - -// IsClosedConnError 
returns true if the error is from closing listener, cmux. -// copied from golang.org/x/net/http2/http2.go -func IsClosedConnError(err error) bool { - // 'use of closed network connection' (Go <=1.8) - // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) - // 'mux: listener closed' (cmux.ErrListenerClosed) - return err != nil && strings.Contains(err.Error(), "closed") -} diff --git a/client/pkg/transport/listener_opts.go b/client/pkg/transport/listener_opts.go deleted file mode 100644 index 7536f6aff46..00000000000 --- a/client/pkg/transport/listener_opts.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "time" -) - -type ListenerOptions struct { - Listener net.Listener - ListenConfig net.ListenConfig - - socketOpts *SocketOpts - tlsInfo *TLSInfo - skipTLSInfoCheck bool - writeTimeout time.Duration - readTimeout time.Duration -} - -func newListenOpts(opts ...ListenerOption) *ListenerOptions { - lnOpts := &ListenerOptions{} - lnOpts.applyOpts(opts) - return lnOpts -} - -func (lo *ListenerOptions) applyOpts(opts []ListenerOption) { - for _, opt := range opts { - opt(lo) - } -} - -// IsTimeout returns true if the listener has a read/write timeout defined. 
-func (lo *ListenerOptions) IsTimeout() bool { return lo.readTimeout != 0 || lo.writeTimeout != 0 } - -// IsSocketOpts returns true if the listener options includes socket options. -func (lo *ListenerOptions) IsSocketOpts() bool { - if lo.socketOpts == nil { - return false - } - return lo.socketOpts.ReusePort || lo.socketOpts.ReuseAddress -} - -// IsTLS returns true if listner options includes TLSInfo. -func (lo *ListenerOptions) IsTLS() bool { - if lo.tlsInfo == nil { - return false - } - return !lo.tlsInfo.Empty() -} - -// ListenerOption are options which can be applied to the listener. -type ListenerOption func(*ListenerOptions) - -// WithTimeout allows for a read or write timeout to be applied to the listener. -func WithTimeout(read, write time.Duration) ListenerOption { - return func(lo *ListenerOptions) { - lo.writeTimeout = write - lo.readTimeout = read - } -} - -// WithSocketOpts defines socket options that will be applied to the listener. -func WithSocketOpts(s *SocketOpts) ListenerOption { - return func(lo *ListenerOptions) { lo.socketOpts = s } -} - -// WithTLSInfo adds TLS credentials to the listener. -func WithTLSInfo(t *TLSInfo) ListenerOption { - return func(lo *ListenerOptions) { lo.tlsInfo = t } -} - -// WithSkipTLSInfoCheck when true a transport can be created with an https scheme -// without passing TLSInfo, circumventing not presented error. Skipping this check -// also requires that TLSInfo is not passed. -func WithSkipTLSInfoCheck(skip bool) ListenerOption { - return func(lo *ListenerOptions) { lo.skipTLSInfoCheck = skip } -} diff --git a/client/pkg/transport/listener_test.go b/client/pkg/transport/listener_test.go deleted file mode 100644 index 13277bcd0e0..00000000000 --- a/client/pkg/transport/listener_test.go +++ /dev/null @@ -1,575 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "net" - "net/http" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" -) - -func createSelfCert(t *testing.T, hosts ...string) (*TLSInfo, error) { - return createSelfCertEx(t, "127.0.0.1") -} - -func createSelfCertEx(t *testing.T, host string, additionalUsages ...x509.ExtKeyUsage) (*TLSInfo, error) { - d := t.TempDir() - info, err := SelfCert(zaptest.NewLogger(t), d, []string{host + ":0"}, 1, additionalUsages...) - if err != nil { - return nil, err - } - return &info, nil -} - -func fakeCertificateParserFunc(cert tls.Certificate, err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) { - return func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) { - return cert, err - } -} - -// TestNewListenerTLSInfo tests that NewListener with valid TLSInfo returns -// a TLS listener that accepts TLS connections. 
-func TestNewListenerTLSInfo(t *testing.T) { - tlsInfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - testNewListenerTLSInfoAccept(t, *tlsInfo) -} - -func TestNewListenerWithOpts(t *testing.T) { - tlsInfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - tests := map[string]struct { - opts []ListenerOption - scheme string - expectedErr bool - }{ - "https scheme no TLSInfo": { - opts: []ListenerOption{}, - expectedErr: true, - scheme: "https", - }, - "https scheme no TLSInfo with skip check": { - opts: []ListenerOption{WithSkipTLSInfoCheck(true)}, - expectedErr: false, - scheme: "https", - }, - "https scheme empty TLSInfo with skip check": { - opts: []ListenerOption{ - WithSkipTLSInfoCheck(true), - WithTLSInfo(&TLSInfo{}), - }, - expectedErr: false, - scheme: "https", - }, - "https scheme empty TLSInfo no skip check": { - opts: []ListenerOption{ - WithTLSInfo(&TLSInfo{}), - }, - expectedErr: true, - scheme: "https", - }, - "https scheme with TLSInfo and skip check": { - opts: []ListenerOption{ - WithSkipTLSInfoCheck(true), - WithTLSInfo(tlsInfo), - }, - expectedErr: false, - scheme: "https", - }, - } - for testName, test := range tests { - t.Run(testName, func(t *testing.T) { - ln, err := NewListenerWithOpts("127.0.0.1:0", test.scheme, test.opts...) 
- if ln != nil { - defer ln.Close() - } - if test.expectedErr && err == nil { - t.Fatalf("expected error") - } - if !test.expectedErr && err != nil { - t.Fatalf("unexpected error: %v", err) - } - }) - } -} - -func TestNewListenerWithSocketOpts(t *testing.T) { - tlsInfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - tests := map[string]struct { - opts []ListenerOption - scheme string - expectedErr bool - }{ - "nil socketopts": { - opts: []ListenerOption{WithSocketOpts(nil)}, - expectedErr: true, - scheme: "http", - }, - "empty socketopts": { - opts: []ListenerOption{WithSocketOpts(&SocketOpts{})}, - expectedErr: true, - scheme: "http", - }, - - "reuse address": { - opts: []ListenerOption{WithSocketOpts(&SocketOpts{ReuseAddress: true})}, - scheme: "http", - expectedErr: true, - }, - "reuse address with TLS": { - opts: []ListenerOption{ - WithSocketOpts(&SocketOpts{ReuseAddress: true}), - WithTLSInfo(tlsInfo), - }, - scheme: "https", - expectedErr: true, - }, - "reuse address and port": { - opts: []ListenerOption{WithSocketOpts(&SocketOpts{ReuseAddress: true, ReusePort: true})}, - scheme: "http", - expectedErr: false, - }, - "reuse address and port with TLS": { - opts: []ListenerOption{ - WithSocketOpts(&SocketOpts{ReuseAddress: true, ReusePort: true}), - WithTLSInfo(tlsInfo), - }, - scheme: "https", - expectedErr: false, - }, - "reuse port with TLS and timeout": { - opts: []ListenerOption{ - WithSocketOpts(&SocketOpts{ReusePort: true}), - WithTLSInfo(tlsInfo), - WithTimeout(5*time.Second, 5*time.Second), - }, - scheme: "https", - expectedErr: false, - }, - "reuse port with https scheme and no TLSInfo skip check": { - opts: []ListenerOption{ - WithSocketOpts(&SocketOpts{ReusePort: true}), - WithSkipTLSInfoCheck(true), - }, - scheme: "https", - expectedErr: false, - }, - "reuse port": { - opts: []ListenerOption{WithSocketOpts(&SocketOpts{ReusePort: true})}, - scheme: "http", - expectedErr: false, - }, - } - for 
testName, test := range tests { - t.Run(testName, func(t *testing.T) { - ln, err := NewListenerWithOpts("127.0.0.1:0", test.scheme, test.opts...) - if err != nil { - t.Fatalf("unexpected NewListenerWithSocketOpts error: %v", err) - } - defer ln.Close() - ln2, err := NewListenerWithOpts(ln.Addr().String(), test.scheme, test.opts...) - if ln2 != nil { - ln2.Close() - } - if test.expectedErr && err == nil { - t.Fatalf("expected error") - } - if !test.expectedErr && err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if test.scheme == "http" { - lnOpts := newListenOpts(test.opts...) - if !lnOpts.IsSocketOpts() && !lnOpts.IsTimeout() { - if _, ok := ln.(*keepaliveListener); !ok { - t.Fatalf("ln: unexpected listener type: %T, wanted *keepaliveListener", ln) - } - } - } - }) - } -} - -func testNewListenerTLSInfoAccept(t *testing.T, tlsInfo TLSInfo) { - ln, err := NewListener("127.0.0.1:0", "https", &tlsInfo) - if err != nil { - t.Fatalf("unexpected NewListener error: %v", err) - } - defer ln.Close() - - tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} - cli := &http.Client{Transport: tr} - go cli.Get("https://" + ln.Addr().String()) - - conn, err := ln.Accept() - if err != nil { - t.Fatalf("unexpected Accept error: %v", err) - } - defer conn.Close() - if _, ok := conn.(*tls.Conn); !ok { - t.Error("failed to accept *tls.Conn") - } -} - -// TestNewListenerTLSInfoSkipClientSANVerify tests that if client IP address mismatches -// with specified address in its certificate the connection is still accepted -// if the flag SkipClientSANVerify is set (i.e. 
checkSAN() is disabled for the client side) -func TestNewListenerTLSInfoSkipClientSANVerify(t *testing.T) { - tests := []struct { - skipClientSANVerify bool - goodClientHost bool - acceptExpected bool - }{ - {false, true, true}, - {false, false, false}, - {true, true, true}, - {true, false, true}, - } - for _, test := range tests { - testNewListenerTLSInfoClientCheck(t, test.skipClientSANVerify, test.goodClientHost, test.acceptExpected) - } -} - -func testNewListenerTLSInfoClientCheck(t *testing.T, skipClientSANVerify, goodClientHost, acceptExpected bool) { - tlsInfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - host := "127.0.0.222" - if goodClientHost { - host = "127.0.0.1" - } - clientTLSInfo, err := createSelfCertEx(t, host, x509.ExtKeyUsageClientAuth) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - tlsInfo.SkipClientSANVerify = skipClientSANVerify - tlsInfo.TrustedCAFile = clientTLSInfo.CertFile - - rootCAs := x509.NewCertPool() - loaded, err := os.ReadFile(tlsInfo.CertFile) - if err != nil { - t.Fatalf("unexpected missing certfile: %v", err) - } - rootCAs.AppendCertsFromPEM(loaded) - - clientCert, err := tls.LoadX509KeyPair(clientTLSInfo.CertFile, clientTLSInfo.KeyFile) - if err != nil { - t.Fatalf("unable to create peer cert: %v", err) - } - - tlsConfig := &tls.Config{} - tlsConfig.InsecureSkipVerify = false - tlsConfig.Certificates = []tls.Certificate{clientCert} - tlsConfig.RootCAs = rootCAs - - ln, err := NewListener("127.0.0.1:0", "https", tlsInfo) - if err != nil { - t.Fatalf("unexpected NewListener error: %v", err) - } - defer ln.Close() - - tr := &http.Transport{TLSClientConfig: tlsConfig} - cli := &http.Client{Transport: tr} - chClientErr := make(chan error, 1) - go func() { - _, err := cli.Get("https://" + ln.Addr().String()) - chClientErr <- err - }() - - chAcceptErr := make(chan error, 1) - chAcceptConn := make(chan net.Conn, 1) - go func() { - conn, err := ln.Accept() - if 
err != nil { - chAcceptErr <- err - } else { - chAcceptConn <- conn - } - }() - - select { - case <-chClientErr: - if acceptExpected { - t.Errorf("accepted for good client address: skipClientSANVerify=%t, goodClientHost=%t", skipClientSANVerify, goodClientHost) - } - case acceptErr := <-chAcceptErr: - t.Fatalf("unexpected Accept error: %v", acceptErr) - case conn := <-chAcceptConn: - defer conn.Close() - if _, ok := conn.(*tls.Conn); !ok { - t.Errorf("failed to accept *tls.Conn") - } - if !acceptExpected { - t.Errorf("accepted for bad client address: skipClientSANVerify=%t, goodClientHost=%t", skipClientSANVerify, goodClientHost) - } - } -} - -func TestNewListenerTLSEmptyInfo(t *testing.T) { - _, err := NewListener("127.0.0.1:0", "https", nil) - if err == nil { - t.Errorf("err = nil, want not presented error") - } -} - -func TestNewTransportTLSInfo(t *testing.T) { - tlsinfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - tests := []TLSInfo{ - {}, - { - CertFile: tlsinfo.CertFile, - KeyFile: tlsinfo.KeyFile, - }, - { - CertFile: tlsinfo.CertFile, - KeyFile: tlsinfo.KeyFile, - TrustedCAFile: tlsinfo.TrustedCAFile, - }, - { - TrustedCAFile: tlsinfo.TrustedCAFile, - }, - } - - for i, tt := range tests { - tt.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil) - trans, err := NewTransport(tt, time.Second) - if err != nil { - t.Fatalf("Received unexpected error from NewTransport: %v", err) - } - - if trans.TLSClientConfig == nil { - t.Fatalf("#%d: want non-nil TLSClientConfig", i) - } - } -} - -func TestTLSInfoNonexist(t *testing.T) { - tlsInfo := TLSInfo{CertFile: "@badname", KeyFile: "@badname"} - _, err := tlsInfo.ServerConfig() - werr := &os.PathError{ - Op: "open", - Path: "@badname", - Err: errors.New("no such file or directory"), - } - if err.Error() != werr.Error() { - t.Errorf("err = %v, want %v", err, werr) - } -} - -func TestTLSInfoEmpty(t *testing.T) { - tests := []struct { - info TLSInfo - want bool 
- }{ - {TLSInfo{}, true}, - {TLSInfo{TrustedCAFile: "baz"}, true}, - {TLSInfo{CertFile: "foo"}, false}, - {TLSInfo{KeyFile: "bar"}, false}, - {TLSInfo{CertFile: "foo", KeyFile: "bar"}, false}, - {TLSInfo{CertFile: "foo", TrustedCAFile: "baz"}, false}, - {TLSInfo{KeyFile: "bar", TrustedCAFile: "baz"}, false}, - {TLSInfo{CertFile: "foo", KeyFile: "bar", TrustedCAFile: "baz"}, false}, - } - - for i, tt := range tests { - got := tt.info.Empty() - if tt.want != got { - t.Errorf("#%d: result of Empty() incorrect: want=%t got=%t", i, tt.want, got) - } - } -} - -func TestTLSInfoMissingFields(t *testing.T) { - tlsinfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - tests := []TLSInfo{ - {CertFile: tlsinfo.CertFile}, - {KeyFile: tlsinfo.KeyFile}, - {CertFile: tlsinfo.CertFile, TrustedCAFile: tlsinfo.TrustedCAFile}, - {KeyFile: tlsinfo.KeyFile, TrustedCAFile: tlsinfo.TrustedCAFile}, - } - - for i, info := range tests { - if _, err = info.ServerConfig(); err == nil { - t.Errorf("#%d: expected non-nil error from ServerConfig()", i) - } - - if _, err = info.ClientConfig(); err == nil { - t.Errorf("#%d: expected non-nil error from ClientConfig()", i) - } - } -} - -func TestTLSInfoParseFuncError(t *testing.T) { - tlsinfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - tests := []struct { - info TLSInfo - }{ - { - info: *tlsinfo, - }, - - { - info: TLSInfo{CertFile: "", KeyFile: "", TrustedCAFile: tlsinfo.CertFile, EmptyCN: true}, - }, - } - - for i, tt := range tests { - tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, errors.New("fake")) - - if _, err = tt.info.ServerConfig(); err == nil { - t.Errorf("#%d: expected non-nil error from ServerConfig()", i) - } - - if _, err = tt.info.ClientConfig(); err == nil { - t.Errorf("#%d: expected non-nil error from ClientConfig()", i) - } - } -} - -func TestTLSInfoConfigFuncs(t *testing.T) { - ln := zaptest.NewLogger(t) - 
tlsinfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - tests := []struct { - info TLSInfo - clientAuth tls.ClientAuthType - wantCAs bool - }{ - { - info: TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile, Logger: ln}, - clientAuth: tls.NoClientCert, - wantCAs: false, - }, - - { - info: TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile, TrustedCAFile: tlsinfo.CertFile, Logger: ln}, - clientAuth: tls.RequireAndVerifyClientCert, - wantCAs: true, - }, - } - - for i, tt := range tests { - tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil) - - sCfg, err := tt.info.ServerConfig() - if err != nil { - t.Errorf("#%d: expected nil error from ServerConfig(), got non-nil: %v", i, err) - } - - if tt.wantCAs != (sCfg.ClientCAs != nil) { - t.Errorf("#%d: wantCAs=%t but ClientCAs=%v", i, tt.wantCAs, sCfg.ClientCAs) - } - - cCfg, err := tt.info.ClientConfig() - if err != nil { - t.Errorf("#%d: expected nil error from ClientConfig(), got non-nil: %v", i, err) - } - - if tt.wantCAs != (cCfg.RootCAs != nil) { - t.Errorf("#%d: wantCAs=%t but RootCAs=%v", i, tt.wantCAs, sCfg.RootCAs) - } - } -} - -func TestNewListenerUnixSocket(t *testing.T) { - l, err := NewListener("testsocket", "unix", nil) - if err != nil { - t.Errorf("error listening on unix socket (%v)", err) - } - l.Close() -} - -// TestNewListenerTLSInfoSelfCert tests that a new certificate accepts connections. 
-func TestNewListenerTLSInfoSelfCert(t *testing.T) { - tmpdir := t.TempDir() - - tlsinfo, err := SelfCert(zaptest.NewLogger(t), tmpdir, []string{"127.0.0.1"}, 1) - if err != nil { - t.Fatal(err) - } - if tlsinfo.Empty() { - t.Fatalf("tlsinfo should have certs (%+v)", tlsinfo) - } - testNewListenerTLSInfoAccept(t, tlsinfo) - - assert.Panics(t, func() { - SelfCert(nil, tmpdir, []string{"127.0.0.1"}, 1) - }, "expected panic with nil log") -} - -func TestIsClosedConnError(t *testing.T) { - l, err := NewListener("testsocket", "unix", nil) - if err != nil { - t.Errorf("error listening on unix socket (%v)", err) - } - l.Close() - _, err = l.Accept() - if !IsClosedConnError(err) { - t.Fatalf("expect true, got false (%v)", err) - } -} - -func TestSocktOptsEmpty(t *testing.T) { - tests := []struct { - sopts SocketOpts - want bool - }{ - {SocketOpts{}, true}, - {SocketOpts{ReuseAddress: true, ReusePort: false}, false}, - {SocketOpts{ReusePort: true}, false}, - } - - for i, tt := range tests { - got := tt.sopts.Empty() - if tt.want != got { - t.Errorf("#%d: result of Empty() incorrect: want=%t got=%t", i, tt.want, got) - } - } -} diff --git a/client/pkg/transport/sockopt.go b/client/pkg/transport/sockopt.go deleted file mode 100644 index 49b48dc8767..00000000000 --- a/client/pkg/transport/sockopt.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "syscall" -) - -type Controls []func(network, addr string, conn syscall.RawConn) error - -func (ctls Controls) Control(network, addr string, conn syscall.RawConn) error { - for _, s := range ctls { - if err := s(network, addr, conn); err != nil { - return err - } - } - return nil -} - -type SocketOpts struct { - // ReusePort enables socket option SO_REUSEPORT [1] which allows rebind of - // a port already in use. User should keep in mind that flock can fail - // in which case lock on data file could result in unexpected - // condition. User should take caution to protect against lock race. - // [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReusePort bool `json:"reuse-port"` - // ReuseAddress enables a socket option SO_REUSEADDR which allows - // binding to an address in `TIME_WAIT` state. Useful to improve MTTR - // in cases where etcd slow to restart due to excessive `TIME_WAIT`. - // [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReuseAddress bool `json:"reuse-address"` -} - -func getControls(sopts *SocketOpts) Controls { - ctls := Controls{} - if sopts.ReuseAddress { - ctls = append(ctls, setReuseAddress) - } - if sopts.ReusePort { - ctls = append(ctls, setReusePort) - } - return ctls -} - -func (sopts *SocketOpts) Empty() bool { - return !sopts.ReuseAddress && !sopts.ReusePort -} diff --git a/client/pkg/transport/sockopt_solaris.go b/client/pkg/transport/sockopt_solaris.go deleted file mode 100644 index 149ad510240..00000000000 --- a/client/pkg/transport/sockopt_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build solaris - -package transport - -import ( - "errors" - "syscall" - - "golang.org/x/sys/unix" -) - -func setReusePort(network, address string, c syscall.RawConn) error { - return errors.New("port reuse is not supported on Solaris") -} - -func setReuseAddress(network, address string, conn syscall.RawConn) error { - return conn.Control(func(fd uintptr) { - syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1) - }) -} diff --git a/client/pkg/transport/sockopt_unix.go b/client/pkg/transport/sockopt_unix.go deleted file mode 100644 index 4e76bf95be1..00000000000 --- a/client/pkg/transport/sockopt_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !windows && !solaris - -package transport - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -func setReusePort(network, address string, conn syscall.RawConn) error { - return conn.Control(func(fd uintptr) { - syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1) - }) -} - -func setReuseAddress(network, address string, conn syscall.RawConn) error { - return conn.Control(func(fd uintptr) { - syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1) - }) -} diff --git a/client/pkg/transport/sockopt_windows.go b/client/pkg/transport/sockopt_windows.go deleted file mode 100644 index 2670b4dc7b5..00000000000 --- a/client/pkg/transport/sockopt_windows.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build windows - -package transport - -import ( - "errors" - "syscall" -) - -func setReusePort(network, address string, c syscall.RawConn) error { - return errors.New("port reuse is not supported on Windows") -} - -// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as -// there is no protection against port hijacking. 
-func setReuseAddress(network, addr string, conn syscall.RawConn) error { - return errors.New("address reuse is not supported on Windows") -} diff --git a/client/pkg/transport/timeout_dialer_test.go b/client/pkg/transport/timeout_dialer_test.go deleted file mode 100644 index 854d68d1472..00000000000 --- a/client/pkg/transport/timeout_dialer_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "testing" - "time" -) - -func TestReadWriteTimeoutDialer(t *testing.T) { - stop := make(chan struct{}) - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("unexpected listen error: %v", err) - } - defer func() { - stop <- struct{}{} - }() - ts := testBlockingServer{ln, 2, stop} - go ts.Start(t) - - d := rwTimeoutDialer{ - wtimeoutd: 10 * time.Millisecond, - rdtimeoutd: 10 * time.Millisecond, - } - conn, err := d.Dial("tcp", ln.Addr().String()) - if err != nil { - t.Fatalf("unexpected dial error: %v", err) - } - defer conn.Close() - - // fill the socket buffer - data := make([]byte, 5*1024*1024) - done := make(chan struct{}, 1) - go func() { - _, err = conn.Write(data) - done <- struct{}{} - }() - - select { - case <-done: - // Wait 5s more than timeout to avoid delay in low-end systems; - // the slack was 1s extra, but that wasn't enough for CI. 
- case <-time.After(d.wtimeoutd*10 + 5*time.Second): - t.Fatal("wait timeout") - } - - if operr, ok := err.(*net.OpError); !ok || operr.Op != "write" || !operr.Timeout() { - t.Errorf("err = %v, want write i/o timeout error", err) - } - - conn, err = d.Dial("tcp", ln.Addr().String()) - if err != nil { - t.Fatalf("unexpected dial error: %v", err) - } - defer conn.Close() - - buf := make([]byte, 10) - go func() { - _, err = conn.Read(buf) - done <- struct{}{} - }() - - select { - case <-done: - case <-time.After(d.rdtimeoutd * 10): - t.Fatal("wait timeout") - } - - if operr, ok := err.(*net.OpError); !ok || operr.Op != "read" || !operr.Timeout() { - t.Errorf("err = %v, want read i/o timeout error", err) - } -} - -type testBlockingServer struct { - ln net.Listener - n int - stop chan struct{} -} - -func (ts *testBlockingServer) Start(t *testing.T) { - for i := 0; i < ts.n; i++ { - conn, err := ts.ln.Accept() - if err != nil { - t.Error(err) - } - defer conn.Close() - } - <-ts.stop -} diff --git a/client/pkg/transport/timeout_listener_test.go b/client/pkg/transport/timeout_listener_test.go deleted file mode 100644 index 828ddf8620f..00000000000 --- a/client/pkg/transport/timeout_listener_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "net" - "testing" - "time" -) - -// TestNewTimeoutListener tests that NewTimeoutListener returns a -// rwTimeoutListener struct with timeouts set. -func TestNewTimeoutListener(t *testing.T) { - l, err := NewTimeoutListener("127.0.0.1:0", "http", nil, time.Hour, time.Hour) - if err != nil { - t.Fatalf("unexpected NewTimeoutListener error: %v", err) - } - defer l.Close() - tln := l.(*rwTimeoutListener) - if tln.readTimeout != time.Hour { - t.Errorf("read timeout = %s, want %s", tln.readTimeout, time.Hour) - } - if tln.writeTimeout != time.Hour { - t.Errorf("write timeout = %s, want %s", tln.writeTimeout, time.Hour) - } -} - -func TestWriteReadTimeoutListener(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("unexpected listen error: %v", err) - } - wln := rwTimeoutListener{ - Listener: ln, - writeTimeout: 10 * time.Millisecond, - readTimeout: 10 * time.Millisecond, - } - - blocker := func(stopCh <-chan struct{}) { - conn, derr := net.Dial("tcp", ln.Addr().String()) - if derr != nil { - t.Errorf("unexpected dail error: %v", derr) - } - defer conn.Close() - // block the receiver until the writer timeout - <-stopCh - } - - writerStopCh := make(chan struct{}, 1) - go blocker(writerStopCh) - - conn, err := wln.Accept() - if err != nil { - writerStopCh <- struct{}{} - t.Fatalf("unexpected accept error: %v", err) - } - defer conn.Close() - - // fill the socket buffer - data := make([]byte, 5*1024*1024) - done := make(chan struct{}, 1) - go func() { - _, err = conn.Write(data) - done <- struct{}{} - }() - - select { - case <-done: - // It waits 1s more to avoid delay in low-end system. 
- case <-time.After(wln.writeTimeout*10 + time.Second): - writerStopCh <- struct{}{} - t.Fatal("wait timeout") - } - - if operr, ok := err.(*net.OpError); !ok || operr.Op != "write" || !operr.Timeout() { - t.Errorf("err = %v, want write i/o timeout error", err) - } - writerStopCh <- struct{}{} - - readerStopCh := make(chan struct{}, 1) - go blocker(readerStopCh) - - conn, err = wln.Accept() - if err != nil { - readerStopCh <- struct{}{} - t.Fatalf("unexpected accept error: %v", err) - } - buf := make([]byte, 10) - - go func() { - _, err = conn.Read(buf) - done <- struct{}{} - }() - - select { - case <-done: - case <-time.After(wln.readTimeout * 10): - readerStopCh <- struct{}{} - t.Fatal("wait timeout") - } - - if operr, ok := err.(*net.OpError); !ok || operr.Op != "read" || !operr.Timeout() { - t.Errorf("err = %v, want read i/o timeout error", err) - } - readerStopCh <- struct{}{} -} diff --git a/client/pkg/transport/timeout_transport.go b/client/pkg/transport/timeout_transport.go deleted file mode 100644 index ea16b4c0f86..00000000000 --- a/client/pkg/transport/timeout_transport.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "net/http" - "time" -) - -// NewTimeoutTransport returns a transport created using the given TLS info. -// If read/write on the created connection blocks longer than its time limit, -// it will return timeout error. 
-// If read/write timeout is set, transport will not be able to reuse connection. -func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { - tr, err := NewTransport(info, dialtimeoutd) - if err != nil { - return nil, err - } - - if rdtimeoutd != 0 || wtimeoutd != 0 { - // the timed out connection will timeout soon after it is idle. - // it should not be put back to http transport as an idle connection for future usage. - tr.MaxIdleConnsPerHost = -1 - } else { - // allow more idle connections between peers to avoid unnecessary port allocation. - tr.MaxIdleConnsPerHost = 1024 - } - - tr.Dial = (&rwTimeoutDialer{ - Dialer: net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - }, - rdtimeoutd: rdtimeoutd, - wtimeoutd: wtimeoutd, - }).Dial - return tr, nil -} diff --git a/client/pkg/transport/timeout_transport_test.go b/client/pkg/transport/timeout_transport_test.go deleted file mode 100644 index 95079f9b598..00000000000 --- a/client/pkg/transport/timeout_transport_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "bytes" - "io" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -// TestNewTimeoutTransport tests that NewTimeoutTransport returns a transport -// that can dial out timeout connections. 
-func TestNewTimeoutTransport(t *testing.T) { - tr, err := NewTimeoutTransport(TLSInfo{}, time.Hour, time.Hour, time.Hour) - if err != nil { - t.Fatalf("unexpected NewTimeoutTransport error: %v", err) - } - - remoteAddr := func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(r.RemoteAddr)) - } - srv := httptest.NewServer(http.HandlerFunc(remoteAddr)) - - defer srv.Close() - conn, err := tr.Dial("tcp", srv.Listener.Addr().String()) - if err != nil { - t.Fatalf("unexpected dial error: %v", err) - } - defer conn.Close() - - tconn, ok := conn.(*timeoutConn) - if !ok { - t.Fatalf("failed to dial out *timeoutConn") - } - if tconn.readTimeout != time.Hour { - t.Errorf("read timeout = %s, want %s", tconn.readTimeout, time.Hour) - } - if tconn.writeTimeout != time.Hour { - t.Errorf("write timeout = %s, want %s", tconn.writeTimeout, time.Hour) - } - - // ensure not reuse timeout connection - req, err := http.NewRequest("GET", srv.URL, nil) - if err != nil { - t.Fatalf("unexpected err %v", err) - } - resp, err := tr.RoundTrip(req) - if err != nil { - t.Fatalf("unexpected err %v", err) - } - addr0, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - t.Fatalf("unexpected err %v", err) - } - - resp, err = tr.RoundTrip(req) - if err != nil { - t.Fatalf("unexpected err %v", err) - } - addr1, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - t.Fatalf("unexpected err %v", err) - } - - if bytes.Equal(addr0, addr1) { - t.Errorf("addr0 = %s addr1= %s, want not equal", string(addr0), string(addr1)) - } -} diff --git a/client/pkg/transport/tls_test.go b/client/pkg/transport/tls_test.go deleted file mode 100644 index 46af1db6786..00000000000 --- a/client/pkg/transport/tls_test.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" -) - -func TestValidateSecureEndpoints(t *testing.T) { - tlsInfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - remoteAddr := func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(r.RemoteAddr)) - } - srv := httptest.NewServer(http.HandlerFunc(remoteAddr)) - defer srv.Close() - - tests := map[string]struct { - endPoints []string - expectedEndpoints []string - expectedErr bool - }{ - "invalidEndPoints": { - endPoints: []string{ - "invalid endpoint", - }, - expectedEndpoints: nil, - expectedErr: true, - }, - "insecureEndpoints": { - endPoints: []string{ - "http://127.0.0.1:8000", - "http://" + srv.Listener.Addr().String(), - }, - expectedEndpoints: nil, - expectedErr: true, - }, - "secureEndPoints": { - endPoints: []string{ - "https://" + srv.Listener.Addr().String(), - }, - expectedEndpoints: []string{ - "https://" + srv.Listener.Addr().String(), - }, - expectedErr: false, - }, - "mixEndPoints": { - endPoints: []string{ - "https://" + srv.Listener.Addr().String(), - "http://" + srv.Listener.Addr().String(), - "invalid end points", - }, - expectedEndpoints: []string{ - "https://" + srv.Listener.Addr().String(), - }, - expectedErr: true, - }, - } - for name, test := range tests { - t.Run(name, func(t *testing.T) { - secureEps, err := ValidateSecureEndpoints(*tlsInfo, test.endPoints) - if test.expectedErr != (err != nil) { - t.Errorf("Unexpected error, got: %v, want: %v", err, test.expectedErr) - } - - 
if !reflect.DeepEqual(test.expectedEndpoints, secureEps) { - t.Errorf("expected endpoints %v, got %v", test.expectedEndpoints, secureEps) - } - }) - } -} diff --git a/client/pkg/transport/transport.go b/client/pkg/transport/transport.go deleted file mode 100644 index 91462dcdb08..00000000000 --- a/client/pkg/transport/transport.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "context" - "net" - "net/http" - "strings" - "time" -) - -type unixTransport struct{ *http.Transport } - -func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { - cfg, err := info.ClientConfig() - if err != nil { - return nil, err - } - - t := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: dialtimeoutd, - // value taken from http.DefaultTransport - KeepAlive: 30 * time.Second, - }).DialContext, - // value taken from http.DefaultTransport - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - } - - dialer := &net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - } - - dialContext := func(ctx context.Context, net, addr string) (net.Conn, error) { - return dialer.DialContext(ctx, "unix", addr) - } - tu := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: dialContext, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - // Cost of reopening connection 
on sockets is low, and they are mostly used in testing. - // Long living unix-transport connections were leading to 'leak' test flakes. - // Alternatively the returned Transport (t) should override CloseIdleConnections to - // forward it to 'tu' as well. - IdleConnTimeout: time.Microsecond, - } - ut := &unixTransport{tu} - - t.RegisterProtocol("unix", ut) - t.RegisterProtocol("unixs", ut) - - return t, nil -} - -func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { - url := *req.URL - req.URL = &url - req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) - return urt.Transport.RoundTrip(req) -} diff --git a/client/pkg/transport/transport_test.go b/client/pkg/transport/transport_test.go deleted file mode 100644 index 315f32cf2dc..00000000000 --- a/client/pkg/transport/transport_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/tls" - "net/http" - "strings" - "testing" - "time" -) - -// TestNewTransportTLSInvalidCipherSuitesTLS12 expects a client with invalid -// cipher suites fail to handshake with the server. 
-func TestNewTransportTLSInvalidCipherSuitesTLS12(t *testing.T) { - tlsInfo, err := createSelfCert(t) - if err != nil { - t.Fatalf("unable to create cert: %v", err) - } - - cipherSuites := []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - } - - // make server and client have unmatched cipher suites - srvTLS, cliTLS := *tlsInfo, *tlsInfo - srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:] - - ln, err := NewListener("127.0.0.1:0", "https", &srvTLS) - if err != nil { - t.Fatalf("unexpected NewListener error: %v", err) - } - defer ln.Close() - - donec := make(chan struct{}) - go func() { - ln.Accept() - donec <- struct{}{} - }() - go func() { - tr, err := NewTransport(cliTLS, 3*time.Second) - tr.TLSClientConfig.MaxVersion = tls.VersionTLS12 - if err != nil { - t.Errorf("unexpected NewTransport error: %v", err) - } - cli := &http.Client{Transport: tr} - _, gerr := cli.Get("https://" + ln.Addr().String()) - if gerr == nil || !strings.Contains(gerr.Error(), "tls: handshake failure") { - t.Error("expected client TLS handshake error") - } - ln.Close() - donec <- struct{}{} - }() - <-donec - <-donec -} diff --git a/client/pkg/types/id.go b/client/pkg/types/id.go deleted file mode 100644 index 9a8429391ed..00000000000 --- a/client/pkg/types/id.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "bytes" - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. -func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p IDSlice) String() string { - var b bytes.Buffer - if p.Len() > 0 { - b.WriteString(p[0].String()) - } - - for i := 1; i < p.Len(); i++ { - b.WriteString(",") - b.WriteString(p[i].String()) - } - - return b.String() -} diff --git a/client/pkg/types/id_test.go b/client/pkg/types/id_test.go deleted file mode 100644 index bec2853432b..00000000000 --- a/client/pkg/types/id_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestIDString(t *testing.T) { - tests := []struct { - input ID - want string - }{ - { - input: 12, - want: "c", - }, - { - input: 4918257920282737594, - want: "444129853c343bba", - }, - } - - for i, tt := range tests { - got := tt.input.String() - if tt.want != got { - t.Errorf("#%d: ID.String failure: want=%v, got=%v", i, tt.want, got) - } - } -} - -func TestIDFromString(t *testing.T) { - tests := []struct { - input string - want ID - }{ - { - input: "17", - want: 23, - }, - { - input: "612840dae127353", - want: 437557308098245459, - }, - } - - for i, tt := range tests { - got, err := IDFromString(tt.input) - if err != nil { - t.Errorf("#%d: IDFromString failure: err=%v", i, err) - continue - } - if tt.want != got { - t.Errorf("#%d: IDFromString failure: want=%v, got=%v", i, tt.want, got) - } - } -} - -func TestIDFromStringFail(t *testing.T) { - tests := []string{ - "", - "XXX", - "612840dae127353612840dae127353", - } - - for i, tt := range tests { - _, err := IDFromString(tt) - if err == nil { - t.Fatalf("#%d: IDFromString expected error, but err=nil", i) - } - } -} - -func TestIDSlice(t *testing.T) { - g := []ID{10, 500, 5, 1, 100, 25} - w := []ID{1, 5, 10, 25, 100, 500} - sort.Sort(IDSlice(g)) - if !reflect.DeepEqual(g, w) { - t.Errorf("slice after sort = %#v, want %#v", g, w) - } -} diff --git a/client/pkg/types/set_test.go b/client/pkg/types/set_test.go deleted file mode 100644 index 73572028931..00000000000 --- a/client/pkg/types/set_test.go +++ /dev/null @@ -1,185 +0,0 
@@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestUnsafeSet(t *testing.T) { - driveSetTests(t, NewUnsafeSet()) -} - -func TestThreadsafeSet(t *testing.T) { - driveSetTests(t, NewThreadsafeSet()) -} - -// Check that two slices contents are equal; order is irrelevant -func equal(a, b []string) bool { - as := sort.StringSlice(a) - bs := sort.StringSlice(b) - as.Sort() - bs.Sort() - return reflect.DeepEqual(as, bs) -} - -func driveSetTests(t *testing.T, s Set) { - // Verify operations on an empty set - values := s.Values() - if len(values) != 0 { - t.Fatalf("Expect values=%v got %v", []string{}, values) - } - if l := s.Length(); l != 0 { - t.Fatalf("Expected length=0, got %d", l) - } - for _, v := range []string{"foo", "bar", "baz"} { - if s.Contains(v) { - t.Fatalf("Expect s.Contains(%q) to be fale, got true", v) - } - } - - // Add three items, ensure they show up - s.Add("foo") - s.Add("bar") - s.Add("baz") - - eValues := []string{"foo", "bar", "baz"} - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - - for _, v := range eValues { - if !s.Contains(v) { - t.Fatalf("Expect s.Contains(%q) to be true, got false", v) - } - } - - if l := s.Length(); l != 3 { - t.Fatalf("Expected length=3, got %d", l) - } - - // Add the same item a second time, ensuring it is not duplicated - s.Add("foo") - - 
values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - if l := s.Length(); l != 3 { - t.Fatalf("Expected length=3, got %d", l) - } - - // Remove all items, ensure they are gone - s.Remove("foo") - s.Remove("bar") - s.Remove("baz") - - eValues = []string{} - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - - if l := s.Length(); l != 0 { - t.Fatalf("Expected length=0, got %d", l) - } - - // Create new copies of the set, and ensure they are unlinked to the - // original Set by making modifications - s.Add("foo") - s.Add("bar") - cp1 := s.Copy() - cp2 := s.Copy() - s.Remove("foo") - cp3 := s.Copy() - cp1.Add("baz") - - for i, tt := range []struct { - want []string - got []string - }{ - {[]string{"bar"}, s.Values()}, - {[]string{"foo", "bar", "baz"}, cp1.Values()}, - {[]string{"foo", "bar"}, cp2.Values()}, - {[]string{"bar"}, cp3.Values()}, - } { - if !equal(tt.want, tt.got) { - t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got) - } - } - - for i, tt := range []struct { - want bool - got bool - }{ - {true, s.Equals(cp3)}, - {true, cp3.Equals(s)}, - {false, s.Equals(cp2)}, - {false, s.Equals(cp1)}, - {false, cp1.Equals(s)}, - {false, cp2.Equals(s)}, - {false, cp2.Equals(cp1)}, - } { - if tt.got != tt.want { - t.Fatalf("case %d: want %t, got %t", i, tt.want, tt.got) - - } - } - - // Subtract values from a Set, ensuring a new Set is created and - // the original Sets are unmodified - sub1 := cp1.Sub(s) - sub2 := cp2.Sub(cp1) - - for i, tt := range []struct { - want []string - got []string - }{ - {[]string{"foo", "bar", "baz"}, cp1.Values()}, - {[]string{"foo", "bar"}, cp2.Values()}, - {[]string{"bar"}, s.Values()}, - {[]string{"foo", "baz"}, sub1.Values()}, - {[]string{}, sub2.Values()}, - } { - if !equal(tt.want, tt.got) { - t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got) - } - } -} - -func TestUnsafeSetContainsAll(t 
*testing.T) { - vals := []string{"foo", "bar", "baz"} - s := NewUnsafeSet(vals...) - - tests := []struct { - strs []string - wcontain bool - }{ - {[]string{}, true}, - {vals[:1], true}, - {vals[:2], true}, - {vals, true}, - {[]string{"cuz"}, false}, - {[]string{vals[0], "cuz"}, false}, - } - for i, tt := range tests { - if g := s.ContainsAll(tt.strs); g != tt.wcontain { - t.Errorf("#%d: ok = %v, want %v", i, g, tt.wcontain) - } - } -} diff --git a/client/pkg/types/slice_test.go b/client/pkg/types/slice_test.go deleted file mode 100644 index 8d8a4d0ea70..00000000000 --- a/client/pkg/types/slice_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestUint64Slice(t *testing.T) { - g := Uint64Slice{10, 500, 5, 1, 100, 25} - w := Uint64Slice{1, 5, 10, 25, 100, 500} - sort.Sort(g) - if !reflect.DeepEqual(g, w) { - t.Errorf("slice after sort = %#v, want %#v", g, w) - } -} diff --git a/client/pkg/types/urls_test.go b/client/pkg/types/urls_test.go deleted file mode 100644 index fbb9068cdd5..00000000000 --- a/client/pkg/types/urls_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestNewURLs(t *testing.T) { - tests := []struct { - strs []string - wurls URLs - }{ - { - []string{"http://127.0.0.1:2379"}, - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - }, - // it can trim space - { - []string{" http://127.0.0.1:2379 "}, - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - }, - // it does sort - { - []string{ - "http://127.0.0.2:2379", - "http://127.0.0.1:2379", - }, - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - }), - }, - } - for i, tt := range tests { - urls, _ := NewURLs(tt.strs) - if !reflect.DeepEqual(urls, tt.wurls) { - t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls) - } - } -} - -func TestURLsString(t *testing.T) { - tests := []struct { - us URLs - wstr string - }{ - { - URLs{}, - "", - }, - { - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - "http://127.0.0.1:2379", - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - }), - "http://127.0.0.1:2379,http://127.0.0.2:2379", - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.2:2379", - "http://127.0.0.1:2379", - }), - "http://127.0.0.2:2379,http://127.0.0.1:2379", - }, - } - for i, tt := range tests { - g := tt.us.String() - if g != tt.wstr { - t.Errorf("#%d: string = %s, want %s", i, g, tt.wstr) - } - } -} - -func TestURLsSort(t *testing.T) { - g := testutil.MustNewURLs(t, []string{ - "http://127.0.0.4:2379", - 
"http://127.0.0.2:2379", - "http://127.0.0.1:2379", - "http://127.0.0.3:2379", - }) - w := testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - "http://127.0.0.3:2379", - "http://127.0.0.4:2379", - }) - gurls := URLs(g) - gurls.Sort() - if !reflect.DeepEqual(g, w) { - t.Errorf("URLs after sort = %#v, want %#v", g, w) - } -} - -func TestURLsStringSlice(t *testing.T) { - tests := []struct { - us URLs - wstr []string - }{ - { - URLs{}, - []string{}, - }, - { - testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - []string{"http://127.0.0.1:2379"}, - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:2379", - "http://127.0.0.2:2379", - }), - []string{"http://127.0.0.1:2379", "http://127.0.0.2:2379"}, - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.2:2379", - "http://127.0.0.1:2379", - }), - []string{"http://127.0.0.2:2379", "http://127.0.0.1:2379"}, - }, - } - for i, tt := range tests { - g := tt.us.StringSlice() - if !reflect.DeepEqual(g, tt.wstr) { - t.Errorf("#%d: string slice = %+v, want %+v", i, g, tt.wstr) - } - } -} - -func TestNewURLsFail(t *testing.T) { - tests := [][]string{ - // no urls given - {}, - // missing protocol scheme - {"://127.0.0.1:2379"}, - // unsupported scheme - {"mailto://127.0.0.1:2379"}, - // not conform to host:port - {"http://127.0.0.1"}, - // contain a path - {"http://127.0.0.1:2379/path"}, - } - for i, tt := range tests { - _, err := NewURLs(tt) - if err == nil { - t.Errorf("#%d: err = nil, but error", i) - } - } -} diff --git a/client/pkg/types/urlsmap_test.go b/client/pkg/types/urlsmap_test.go deleted file mode 100644 index da184282e79..00000000000 --- a/client/pkg/types/urlsmap_test.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestParseInitialCluster(t *testing.T) { - c, err := NewURLsMap("mem1=http://10.0.0.1:2379,mem1=http://128.193.4.20:2379,mem2=http://10.0.0.2:2379,default=http://127.0.0.1:2379") - if err != nil { - t.Fatalf("unexpected parse error: %v", err) - } - wc := URLsMap(map[string]URLs{ - "mem1": testutil.MustNewURLs(t, []string{"http://10.0.0.1:2379", "http://128.193.4.20:2379"}), - "mem2": testutil.MustNewURLs(t, []string{"http://10.0.0.2:2379"}), - "default": testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}), - }) - if !reflect.DeepEqual(c, wc) { - t.Errorf("cluster = %+v, want %+v", c, wc) - } -} - -func TestParseInitialClusterBad(t *testing.T) { - tests := []string{ - // invalid URL - "%^", - // no URL defined for member - "mem1=,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379", - "mem1,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379", - // bad URL for member - "default=http://localhost/", - } - for i, tt := range tests { - if _, err := NewURLsMap(tt); err == nil { - t.Errorf("#%d: unexpected successful parse, want err", i) - } - } -} - -func TestNameURLPairsString(t *testing.T) { - cls := URLsMap(map[string]URLs{ - "abc": testutil.MustNewURLs(t, []string{"http://1.1.1.1:1111", "http://0.0.0.0:0000"}), - "def": testutil.MustNewURLs(t, []string{"http://2.2.2.2:2222"}), - "ghi": testutil.MustNewURLs(t, []string{"http://3.3.3.3:1234", "http://127.0.0.1:2380"}), - // no PeerURLs = not included - "four": 
testutil.MustNewURLs(t, []string{}), - "five": testutil.MustNewURLs(t, nil), - }) - w := "abc=http://0.0.0.0:0000,abc=http://1.1.1.1:1111,def=http://2.2.2.2:2222,ghi=http://127.0.0.1:2380,ghi=http://3.3.3.3:1234" - if g := cls.String(); g != w { - t.Fatalf("NameURLPairs.String():\ngot %#v\nwant %#v", g, w) - } -} - -func TestParse(t *testing.T) { - tests := []struct { - s string - wm map[string][]string - }{ - { - "", - map[string][]string{}, - }, - { - "a=b", - map[string][]string{"a": {"b"}}, - }, - { - "a=b,a=c", - map[string][]string{"a": {"b", "c"}}, - }, - { - "a=b,a1=c", - map[string][]string{"a": {"b"}, "a1": {"c"}}, - }, - } - for i, tt := range tests { - m := parse(tt.s) - if !reflect.DeepEqual(m, tt.wm) { - t.Errorf("#%d: m = %+v, want %+v", i, m, tt.wm) - } - } -} - -// TestNewURLsMapIPV6 is only tested in Go1.5+ because Go1.4 doesn't support literal IPv6 address with zone in -// URI (https://github.com/golang/go/issues/6530). -func TestNewURLsMapIPV6(t *testing.T) { - c, err := NewURLsMap("mem1=http://[2001:db8::1]:2380,mem1=http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380,mem2=http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380") - if err != nil { - t.Fatalf("unexpected parse error: %v", err) - } - wc := URLsMap(map[string]URLs{ - "mem1": testutil.MustNewURLs(t, []string{"http://[2001:db8::1]:2380", "http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380"}), - "mem2": testutil.MustNewURLs(t, []string{"http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380"}), - }) - if !reflect.DeepEqual(c, wc) { - t.Errorf("cluster = %#v, want %#v", c, wc) - } -} - -func TestNewURLsMapFromStringMapEmpty(t *testing.T) { - mss := make(map[string]string) - urlsMap, err := NewURLsMapFromStringMap(mss, ",") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - s := "" - um, err := NewURLsMap(s) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if um.String() != urlsMap.String() { - t.Errorf("Expected:\n%+v\ngot:\n%+v", um, urlsMap) - } -} - -func 
TestNewURLsMapFromStringMapNormal(t *testing.T) { - mss := make(map[string]string) - mss["host0"] = "http://127.0.0.1:2379,http://127.0.0.1:2380" - mss["host1"] = "http://127.0.0.1:2381,http://127.0.0.1:2382" - mss["host2"] = "http://127.0.0.1:2383,http://127.0.0.1:2384" - mss["host3"] = "http://127.0.0.1:2385,http://127.0.0.1:2386" - urlsMap, err := NewURLsMapFromStringMap(mss, ",") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - s := "host0=http://127.0.0.1:2379,host0=http://127.0.0.1:2380," + - "host1=http://127.0.0.1:2381,host1=http://127.0.0.1:2382," + - "host2=http://127.0.0.1:2383,host2=http://127.0.0.1:2384," + - "host3=http://127.0.0.1:2385,host3=http://127.0.0.1:2386" - um, err := NewURLsMap(s) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if um.String() != urlsMap.String() { - t.Errorf("Expected:\n%+v\ngot:\n%+v", um, urlsMap) - } -} diff --git a/client/pkg/verify/verify.go b/client/pkg/verify/verify.go deleted file mode 100644 index 0cc1b48277f..00000000000 --- a/client/pkg/verify/verify.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package verify - -import ( - "fmt" - "os" - "strings" -) - -const ENV_VERIFY = "ETCD_VERIFY" - -type VerificationType string - -const ( - ENV_VERIFY_VALUE_ALL VerificationType = "all" - ENV_VERIFY_VALUE_ASSERT VerificationType = "assert" -) - -func getEnvVerify() string { - return strings.ToLower(os.Getenv(ENV_VERIFY)) -} - -func IsVerificationEnabled(verification VerificationType) bool { - env := getEnvVerify() - return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification)) -} - -// EnableVerifications sets `ENV_VERIFY` and returns a function that -// can be used to bring the original settings. -func EnableVerifications(verification VerificationType) func() { - previousEnv := getEnvVerify() - os.Setenv(ENV_VERIFY, string(verification)) - return func() { - os.Setenv(ENV_VERIFY, previousEnv) - } -} - -// EnableAllVerifications enables verification and returns a function -// that can be used to bring the original settings. -func EnableAllVerifications() func() { - return EnableVerifications(ENV_VERIFY_VALUE_ALL) -} - -// DisableVerifications unsets `ENV_VERIFY` and returns a function that -// can be used to bring the original settings. -func DisableVerifications() func() { - previousEnv := getEnvVerify() - os.Unsetenv(ENV_VERIFY) - return func() { - os.Setenv(ENV_VERIFY, previousEnv) - } -} - -// Verify performs verification if the assertions are enabled. -// In the default setup running in tests and skipped in the production code. -func Verify(f func()) { - if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) { - f() - } -} - -// Assert will panic with a given formatted message if the given condition is false. 
-func Assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/client/v2/LICENSE b/client/v2/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/client/v2/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/client/v2/README.md b/client/v2/README.md deleted file mode 100644 index 9ec7d86ecaa..00000000000 --- a/client/v2/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# etcd/client - -etcd/client is the Go client library for etcd. - -[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client) - -For full compatibility, it is recommended to install released versions of clients using go modules. 
- -## Install - -```bash -go get go.etcd.io/etcd/v3/client -``` - -## Usage - -```go -package main - -import ( - "context" - "log" - "time" - - "go.etcd.io/etcd/v3/client" -) - -func main() { - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: client.DefaultTransport, - // set timeout per request to fail fast when the target endpoint is unavailable - HeaderTimeoutPerRequest: time.Second, - } - c, err := client.New(cfg) - if err != nil { - log.Fatal(err) - } - kapi := client.NewKeysAPI(c) - // set "/foo" key with "bar" value - log.Print("Setting '/foo' key with 'bar' value") - resp, err := kapi.Set(context.Background(), "/foo", "bar", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Set is done. Metadata is %q\n", resp) - } - // get "/foo" key's value - log.Print("Getting '/foo' key value") - resp, err = kapi.Get(context.Background(), "/foo", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Get is done. Metadata is %q\n", resp) - // print value - log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value) - } -} -``` - -## Error Handling - -etcd client might return three types of errors. - -- context error - -Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the responding context error will be returned no matter what internal errors the API call has already encountered. - -- cluster error - -Each API call tries to send request to the cluster endpoints one by one until it successfully gets a response. If a requests to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned. 
- -- response error - -If the response gets from the cluster is invalid, a plain string error will be returned. For example, it might be a invalid JSON error. - -Here is the example code to handle client errors: - -```go -cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}} -c, err := client.New(cfg) -if err != nil { - log.Fatal(err) -} - -kapi := client.NewKeysAPI(c) -resp, err := kapi.Set(ctx, "test", "bar", nil) -if err != nil { - if err == context.Canceled { - // ctx is canceled by another routine - } else if err == context.DeadlineExceeded { - // ctx is attached with a deadline and it exceeded - } else if cerr, ok := err.(*client.ClusterError); ok { - // process (cerr.Errors) - } else { - // bad cluster endpoints, which are not etcd servers - } -} -``` - - -## Caveat - -1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process. - -2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all errors happened. - -3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. 
Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention. - -4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information. diff --git a/client/v2/auth_role.go b/client/v2/auth_role.go deleted file mode 100644 index b6ba7e150dc..00000000000 --- a/client/v2/auth_role.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "net/url" -) - -type Role struct { - Role string `json:"role"` - Permissions Permissions `json:"permissions"` - Grant *Permissions `json:"grant,omitempty"` - Revoke *Permissions `json:"revoke,omitempty"` -} - -type Permissions struct { - KV rwPermission `json:"kv"` -} - -type rwPermission struct { - Read []string `json:"read"` - Write []string `json:"write"` -} - -type PermissionType int - -const ( - ReadPermission PermissionType = iota - WritePermission - ReadWritePermission -) - -// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to -// interact with etcd's role creation and modification features. -func NewAuthRoleAPI(c Client) AuthRoleAPI { - return &httpAuthRoleAPI{ - client: c, - } -} - -type AuthRoleAPI interface { - // AddRole adds a role. - AddRole(ctx context.Context, role string) error - - // RemoveRole removes a role. - RemoveRole(ctx context.Context, role string) error - - // GetRole retrieves role details. - GetRole(ctx context.Context, role string) (*Role, error) - - // GrantRoleKV grants a role some permission prefixes for the KV store. - GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // RevokeRoleKV revokes some permission prefixes for a role on the KV store. - RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // ListRoles lists roles. 
- ListRoles(ctx context.Context) ([]string, error) -} - -type httpAuthRoleAPI struct { - client httpClient -} - -type authRoleAPIAction struct { - verb string - name string - role *Role -} - -type authRoleAPIList struct{} - -func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", l.name) - if l.role == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.role) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { - resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - var roleList struct { - Roles []Role `json:"roles"` - } - if err = json.Unmarshal(body, &roleList); err != nil { - return nil, err - } - ret := make([]string, 0, len(roleList.Roles)) - for _, r := range roleList.Roles { - ret = append(ret, r.Role) - } - return ret, nil -} - -func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { - role := &Role{ - Role: rolename, - } - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "DELETE", - name: rolename, - }) -} - -func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return err - } 
- if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err := json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { - return r.modRole(ctx, &authRoleAPIAction{ - verb: "GET", - name: rolename, - }) -} - -func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { - var out rwPermission - switch permType { - case ReadPermission: - out.Read = prefixes - case WritePermission: - out.Write = prefixes - case ReadWritePermission: - out.Read = prefixes - out.Write = prefixes - } - return out -} - -func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Grant: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Revoke: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var role Role - if err = json.Unmarshal(body, &role); err != nil { - return nil, err - } - return &role, nil -} diff --git a/client/v2/auth_user.go b/client/v2/auth_user.go deleted file 
mode 100644 index 8e7e2efe833..00000000000 --- a/client/v2/auth_user.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "net/url" - "path" -) - -var ( - defaultV2AuthPrefix = "/v2/auth" -) - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -// userListEntry is the user representation given by the server for ListUsers -type userListEntry struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type UserRoles struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -func v2AuthURL(ep url.URL, action string, name string) *url.URL { - if name != "" { - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) - return &ep - } - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) - return &ep -} - -// NewAuthAPI constructs a new AuthAPI that uses HTTP to -// interact with etcd's general auth features. -func NewAuthAPI(c Client) AuthAPI { - return &httpAuthAPI{ - client: c, - } -} - -type AuthAPI interface { - // Enable auth. - Enable(ctx context.Context) error - - // Disable auth. 
- Disable(ctx context.Context) error -} - -type httpAuthAPI struct { - client httpClient -} - -func (s *httpAuthAPI) Enable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"PUT"}) -} - -func (s *httpAuthAPI) Disable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"DELETE"}) -} - -func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { - resp, body, err := s.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -type authAPIAction struct { - verb string -} - -func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "enable", "") - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req -} - -type authError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e authError) Error() string { - return e.Message -} - -// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to -// interact with etcd's user creation and modification features. -func NewAuthUserAPI(c Client) AuthUserAPI { - return &httpAuthUserAPI{ - client: c, - } -} - -type AuthUserAPI interface { - // AddUser adds a user. - AddUser(ctx context.Context, username string, password string) error - - // RemoveUser removes a user. - RemoveUser(ctx context.Context, username string) error - - // GetUser retrieves user details. - GetUser(ctx context.Context, username string) (*User, error) - - // GrantUser grants a user some permission roles. - GrantUser(ctx context.Context, username string, roles []string) (*User, error) - - // RevokeUser revokes some permission roles from a user. - RevokeUser(ctx context.Context, username string, roles []string) (*User, error) - - // ChangePassword changes the user's password. 
- ChangePassword(ctx context.Context, username string, password string) (*User, error) - - // ListUsers lists the users. - ListUsers(ctx context.Context) ([]string, error) -} - -type httpAuthUserAPI struct { - client httpClient -} - -type authUserAPIAction struct { - verb string - username string - user *User -} - -type authUserAPIList struct{} - -func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", l.username) - if l.user == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.user) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { - resp, body, err := u.client.Do(ctx, &authUserAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - - var userList struct { - Users []userListEntry `json:"users"` - } - - if err = json.Unmarshal(body, &userList); err != nil { - return nil, err - } - - ret := make([]string, 0, len(userList.Users)) - for _, u := range userList.Users { - ret = append(ret, u.User) - } - return ret, nil -} - -func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { - user := &User{ - User: username, - Password: password, - } - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username 
string) error { - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "DELETE", - username: username, - }) -} - -func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { - return u.modUser(ctx, &authUserAPIAction{ - verb: "GET", - username: username, - }) -} - -func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Grant: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Revoke: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { - user := &User{ - User: username, - Password: password, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var user User - if err = json.Unmarshal(body, &user); err != nil { - var userR UserRoles - if urerr := 
json.Unmarshal(body, &userR); urerr != nil { - return nil, err - } - user.User = userR.User - for _, r := range userR.Roles { - user.Roles = append(user.Roles, r.Role) - } - } - return &user, nil -} diff --git a/client/v2/client.go b/client/v2/client.go deleted file mode 100644 index a93c528fb36..00000000000 --- a/client/v2/client.go +++ /dev/null @@ -1,719 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math/rand" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "sync" - "time" - - "go.etcd.io/etcd/api/v3/version" -) - -var ( - ErrNoEndpoints = errors.New("client: no endpoints available") - ErrTooManyRedirects = errors.New("client: too many redirects") - ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") - ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") - errTooManyRedirectChecks = errors.New("client: too many redirect checks") - - // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so - // that Do() will not retry a request - oneShotCtxValue interface{} -) - -var DefaultRequestTimeout = 5 * time.Second - -var DefaultTransport CancelableTransport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 10 * 
time.Second, -} - -type EndpointSelectionMode int - -const ( - // EndpointSelectionRandom is the default value of the 'SelectionMode'. - // As the name implies, the client object will pick a node from the members - // of the cluster in a random fashion. If the cluster has three members, A, B, - // and C, the client picks any node from its three members as its request - // destination. - EndpointSelectionRandom EndpointSelectionMode = iota - - // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', - // requests are sent directly to the cluster leader. This reduces - // forwarding roundtrips compared to making requests to etcd followers - // who then forward them to the cluster leader. In the event of a leader - // failure, however, clients configured this way cannot prioritize among - // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' - // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to - // maintain its knowledge of current cluster state. - // - // This mode should be used with Client.AutoSync(). - EndpointSelectionPrioritizeLeader -) - -type Config struct { - // Endpoints defines a set of URLs (schemes, hosts and ports only) - // that can be used to communicate with a logical etcd cluster. For - // example, a three-node cluster could be provided like so: - // - // Endpoints: []string{ - // "http://node1.example.com:2379", - // "http://node2.example.com:2379", - // "http://node3.example.com:2379", - // } - // - // If multiple endpoints are provided, the Client will attempt to - // use them all in the event that one or more of them are unusable. - // - // If Client.Sync is ever called, the Client may cache an alternate - // set of endpoints to continue operation. - Endpoints []string - - // Transport is used by the Client to drive HTTP requests. If not - // provided, DefaultTransport will be used. - Transport CancelableTransport - - // CheckRedirect specifies the policy for handling HTTP redirects. 
- // If CheckRedirect is not nil, the Client calls it before - // following an HTTP redirect. The sole argument is the number of - // requests that have already been made. If CheckRedirect returns - // an error, Client.Do will not make any further requests and return - // the error back it to the caller. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect CheckRedirectFunc - - // Username specifies the user credential to add as an authorization header - Username string - - // Password is the password for the specified user to add as an authorization header - // to the request. - Password string - - // HeaderTimeoutPerRequest specifies the time limit to wait for response - // header in a single request made by the Client. The timeout includes - // connection time, any redirects, and header wait time. - // - // For non-watch GET request, server returns the response body immediately. - // For PUT/POST/DELETE request, server will attempt to commit request - // before responding, which is expected to take `100ms + 2 * RTT`. - // For watch request, server returns the header immediately to notify Client - // watch start. But if server is behind some kind of proxy, the response - // header may be cached at proxy, and Client cannot rely on this behavior. - // - // Especially, wait request will ignore this timeout. - // - // One API call may send multiple requests to different etcd servers until it - // succeeds. Use context of the API to specify the overall timeout. - // - // A HeaderTimeoutPerRequest of zero means no timeout. - HeaderTimeoutPerRequest time.Duration - - // SelectionMode is an EndpointSelectionMode enum that specifies the - // policy for choosing the etcd cluster node to which requests are sent. 
- SelectionMode EndpointSelectionMode -} - -func (cfg *Config) transport() CancelableTransport { - if cfg.Transport == nil { - return DefaultTransport - } - return cfg.Transport -} - -func (cfg *Config) checkRedirect() CheckRedirectFunc { - if cfg.CheckRedirect == nil { - return DefaultCheckRedirect - } - return cfg.CheckRedirect -} - -// CancelableTransport mimics net/http.Transport, but requires that -// the object also support request cancellation. -type CancelableTransport interface { - http.RoundTripper - CancelRequest(req *http.Request) -} - -type CheckRedirectFunc func(via int) error - -// DefaultCheckRedirect follows up to 10 redirects, but no more. -var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { - if via > 10 { - return ErrTooManyRedirects - } - return nil -} - -type Client interface { - // Sync updates the internal cache of the etcd cluster's membership. - Sync(context.Context) error - - // AutoSync periodically calls Sync() every given interval. - // The recommended sync interval is 10 seconds to 1 minute, which does - // not bring too much overhead to server and makes client catch up the - // cluster change in time. - // - // The example to use it: - // - // for { - // err := client.AutoSync(ctx, 10*time.Second) - // if err == context.DeadlineExceeded || err == context.Canceled { - // break - // } - // log.Print(err) - // } - AutoSync(context.Context, time.Duration) error - - // Endpoints returns a copy of the current set of API endpoints used - // by Client to resolve HTTP requests. If Sync has ever been called, - // this may differ from the initial Endpoints provided in the Config. - Endpoints() []string - - // SetEndpoints sets the set of API endpoints used by Client to resolve - // HTTP requests. 
If the given endpoints are not valid, an error will be - // returned - SetEndpoints(eps []string) error - - // GetVersion retrieves the current etcd server and cluster version - GetVersion(ctx context.Context) (*version.Versions, error) - - httpClient -} - -func New(cfg Config) (Client, error) { - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), - rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - selectionMode: cfg.SelectionMode, - } - if cfg.Username != "" { - c.credentials = &credentials{ - username: cfg.Username, - password: cfg.Password, - } - } - if err := c.SetEndpoints(cfg.Endpoints); err != nil { - return nil, err - } - return c, nil -} - -type httpClient interface { - Do(context.Context, httpAction) (*http.Response, []byte, error) -} - -func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { - return func(ep url.URL) httpClient { - return &redirectFollowingHTTPClient{ - checkRedirect: cr, - client: &simpleHTTPClient{ - transport: tr, - endpoint: ep, - headerTimeout: headerTimeout, - }, - } - } -} - -type credentials struct { - username string - password string -} - -type httpClientFactory func(url.URL) httpClient - -type httpAction interface { - HTTPRequest(url.URL) *http.Request -} - -type httpClusterClient struct { - clientFactory httpClientFactory - endpoints []url.URL - pinned int - credentials *credentials - sync.RWMutex - rand *rand.Rand - selectionMode EndpointSelectionMode -} - -func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { - ceps := make([]url.URL, len(eps)) - copy(ceps, eps) - - // To perform a lookup on the new endpoint list without using the current - // client, we'll copy it - clientCopy := &httpClusterClient{ - clientFactory: c.clientFactory, - credentials: c.credentials, - rand: c.rand, - - pinned: 0, - endpoints: ceps, - } - - 
mAPI := NewMembersAPI(clientCopy) - leader, err := mAPI.Leader(ctx) - if err != nil { - return "", err - } - if len(leader.ClientURLs) == 0 { - return "", ErrNoLeaderEndpoint - } - - return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? -} - -func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { - if len(eps) == 0 { - return []url.URL{}, ErrNoEndpoints - } - - neps := make([]url.URL, len(eps)) - for i, ep := range eps { - u, err := url.Parse(ep) - if err != nil { - return []url.URL{}, err - } - neps[i] = *u - } - return neps, nil -} - -func (c *httpClusterClient) SetEndpoints(eps []string) error { - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - c.Lock() - defer c.Unlock() - - c.endpoints = shuffleEndpoints(c.rand, neps) - // We're not doing anything for PrioritizeLeader here. This is - // due to not having a context meaning we can't call getLeaderEndpoint - // However, if you're using PrioritizeLeader, you've already been told - // to regularly call sync, where we do have a ctx, and can figure the - // leader. 
PrioritizeLeader is also quite a loose guarantee, so deal - // with it - c.pinned = 0 - - return nil -} - -func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - action := act - c.RLock() - leps := len(c.endpoints) - eps := make([]url.URL, leps) - n := copy(eps, c.endpoints) - pinned := c.pinned - - if c.credentials != nil { - action = &authedAction{ - act: act, - credentials: *c.credentials, - } - } - c.RUnlock() - - if leps == 0 { - return nil, nil, ErrNoEndpoints - } - - if leps != n { - return nil, nil, errors.New("unable to pick endpoint: copy failed") - } - - var resp *http.Response - var body []byte - var err error - cerr := &ClusterError{} - isOneShot := ctx.Value(&oneShotCtxValue) != nil - - for i := pinned; i < leps+pinned; i++ { - k := i % leps - hc := c.clientFactory(eps[k]) - resp, body, err = hc.Do(ctx, action) - if err != nil { - cerr.Errors = append(cerr.Errors, err) - if err == ctx.Err() { - return nil, nil, ctx.Err() - } - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, nil, err - } - } else if resp.StatusCode/100 == 5 { - switch resp.StatusCode { - case http.StatusInternalServerError, http.StatusServiceUnavailable: - // TODO: make sure this is a no leader response - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) - default: - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) - } - err = cerr.Errors[0] - } - if err != nil { - if !isOneShot { - continue - } - c.Lock() - c.pinned = (k + 1) % leps - c.Unlock() - return nil, nil, err - } - if k != pinned { - c.Lock() - c.pinned = k - c.Unlock() - } - return resp, body, nil - } - - return nil, nil, cerr -} - -func (c *httpClusterClient) Endpoints() []string { - c.RLock() - defer c.RUnlock() - - eps := make([]string, len(c.endpoints)) - for i, ep := range c.endpoints { - 
eps[i] = ep.String() - } - - return eps -} - -func (c *httpClusterClient) Sync(ctx context.Context) error { - mAPI := NewMembersAPI(c) - ms, err := mAPI.List(ctx) - if err != nil { - return err - } - - var eps []string - for _, m := range ms { - eps = append(eps, m.ClientURLs...) - } - - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - npin := 0 - - switch c.selectionMode { - case EndpointSelectionRandom: - c.RLock() - eq := endpointsEqual(c.endpoints, neps) - c.RUnlock() - - if eq { - return nil - } - // When items in the endpoint list changes, we choose a new pin - neps = shuffleEndpoints(c.rand, neps) - case EndpointSelectionPrioritizeLeader: - nle, err := c.getLeaderEndpoint(ctx, neps) - if err != nil { - return ErrNoLeaderEndpoint - } - - for i, n := range neps { - if n.String() == nle { - npin = i - break - } - } - default: - return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) - } - - c.Lock() - defer c.Unlock() - c.endpoints = neps - c.pinned = npin - - return nil -} - -func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := c.Sync(ctx) - if err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { - act := &getAction{Prefix: "/version"} - - resp, body, err := c.Do(ctx, act) - if err != nil { - return nil, err - } - - switch resp.StatusCode { - case http.StatusOK: - if len(body) == 0 { - return nil, ErrEmptyBody - } - var vresp version.Versions - if err := json.Unmarshal(body, &vresp); err != nil { - return nil, ErrInvalidJSON - } - return &vresp, nil - default: - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return nil, ErrInvalidJSON - } - return nil, etcdErr - } -} - -type roundTripResponse struct { - resp *http.Response - err 
error -} - -type simpleHTTPClient struct { - transport CancelableTransport - endpoint url.URL - headerTimeout time.Duration -} - -// ErrNoRequest indicates that the HTTPRequest object could not be found -// or was nil. No processing could continue. -var ErrNoRequest = errors.New("no HTTPRequest was available") - -func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - req := act.HTTPRequest(c.endpoint) - if req == nil { - return nil, nil, ErrNoRequest - } - - if err := printcURL(req); err != nil { - return nil, nil, err - } - - isWait := false - if req.URL != nil { - ws := req.URL.Query().Get("wait") - if len(ws) != 0 { - var err error - isWait, err = strconv.ParseBool(ws) - if err != nil { - return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) - } - } - } - - var hctx context.Context - var hcancel context.CancelFunc - if !isWait && c.headerTimeout > 0 { - hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) - } else { - hctx, hcancel = context.WithCancel(ctx) - } - defer hcancel() - - reqcancel := requestCanceler(c.transport, req) - - rtchan := make(chan roundTripResponse, 1) - go func() { - resp, err := c.transport.RoundTrip(req) - rtchan <- roundTripResponse{resp: resp, err: err} - close(rtchan) - }() - - var resp *http.Response - var err error - - select { - case rtresp := <-rtchan: - resp, err = rtresp.resp, rtresp.err - case <-hctx.Done(): - // cancel and wait for request to actually exit before continuing - reqcancel() - rtresp := <-rtchan - resp = rtresp.resp - switch { - case ctx.Err() != nil: - err = ctx.Err() - case hctx.Err() != nil: - err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) - default: - panic("failed to get error from context") - } - } - - // always check for resp nil-ness to deal with possible - // race conditions between channels above - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - if err != nil { - return 
nil, nil, err - } - - var body []byte - done := make(chan struct{}) - go func() { - body, err = io.ReadAll(resp.Body) - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - if resp != nil { - resp.Body.Close() - } - <-done - return nil, nil, ctx.Err() - case <-done: - } - - return resp, body, err -} - -type authedAction struct { - act httpAction - credentials credentials -} - -func (a *authedAction) HTTPRequest(url url.URL) *http.Request { - r := a.act.HTTPRequest(url) - r.SetBasicAuth(a.credentials.username, a.credentials.password) - return r -} - -type redirectFollowingHTTPClient struct { - client httpClient - checkRedirect CheckRedirectFunc -} - -func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - next := act - for i := 0; i < 100; i++ { - if i > 0 { - if err := r.checkRedirect(i); err != nil { - return nil, nil, err - } - } - resp, body, err := r.client.Do(ctx, next) - if err != nil { - return nil, nil, err - } - if resp.StatusCode/100 == 3 { - hdr := resp.Header.Get("Location") - if hdr == "" { - return nil, nil, errors.New("location header not set") - } - loc, err := url.Parse(hdr) - if err != nil { - return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr) - } - next = &redirectedHTTPAction{ - action: act, - location: *loc, - } - continue - } - return resp, body, nil - } - - return nil, nil, errTooManyRedirectChecks -} - -type redirectedHTTPAction struct { - action httpAction - location url.URL -} - -func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { - orig := r.action.HTTPRequest(ep) - orig.URL = &r.location - return orig -} - -func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - // copied from Go 1.9<= rand.Rand.Perm - n := len(eps) - p := make([]int, n) - for i := 0; i < n; i++ { - j := r.Intn(i + 1) - p[i] = p[j] - p[j] = i - } - neps := make([]url.URL, n) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} - -func 
endpointsEqual(left, right []url.URL) bool { - if len(left) != len(right) { - return false - } - - sLeft := make([]string, len(left)) - sRight := make([]string, len(right)) - for i, l := range left { - sLeft[i] = l.String() - } - for i, r := range right { - sRight[i] = r.String() - } - - sort.Strings(sLeft) - sort.Strings(sRight) - for i := range sLeft { - if sLeft[i] != sRight[i] { - return false - } - } - return true -} diff --git a/client/v2/client_test.go b/client/v2/client_test.go deleted file mode 100644 index abfcff93d1b..00000000000 --- a/client/v2/client_test.go +++ /dev/null @@ -1,1096 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "context" - "errors" - "io" - "math/rand" - "net/http" - "net/url" - "reflect" - "sort" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -type actionAssertingHTTPClient struct { - t *testing.T - num int - act httpAction - - resp http.Response - body []byte - err error -} - -func (a *actionAssertingHTTPClient) Do(_ context.Context, act httpAction) (*http.Response, []byte, error) { - if !reflect.DeepEqual(a.act, act) { - a.t.Errorf("#%d: unexpected httpAction: want=%#v got=%#v", a.num, a.act, act) - } - - return &a.resp, a.body, a.err -} - -type staticHTTPClient struct { - resp http.Response - body []byte - err error -} - -func (s *staticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) { - return &s.resp, s.body, s.err -} - -type staticHTTPAction struct { - request http.Request -} - -func (s *staticHTTPAction) HTTPRequest(url.URL) *http.Request { - return &s.request -} - -type staticHTTPResponse struct { - resp http.Response - body []byte - err error -} - -type multiStaticHTTPClient struct { - responses []staticHTTPResponse - cur int -} - -func (s *multiStaticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) { - r := s.responses[s.cur] - s.cur++ - return &r.resp, r.body, r.err -} - -func newStaticHTTPClientFactory(responses []staticHTTPResponse) httpClientFactory { - var cur int - return func(url.URL) httpClient { - r := responses[cur] - cur++ - return &staticHTTPClient{resp: r.resp, body: r.body, err: r.err} - } -} - -type fakeTransport struct { - respchan chan *http.Response - errchan chan error - startCancel chan struct{} - finishCancel chan struct{} -} - -func newFakeTransport() *fakeTransport { - return &fakeTransport{ - respchan: make(chan *http.Response, 1), - errchan: make(chan error, 1), - startCancel: make(chan struct{}, 1), - finishCancel: make(chan struct{}, 1), - } -} - -func (t *fakeTransport) 
CancelRequest(*http.Request) { - t.startCancel <- struct{}{} -} - -type fakeAction struct{} - -func (a *fakeAction) HTTPRequest(url.URL) *http.Request { - return &http.Request{} -} - -func TestSimpleHTTPClientDoSuccess(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - tr.respchan <- &http.Response{ - StatusCode: http.StatusTeapot, - Body: io.NopCloser(strings.NewReader("foo")), - } - - resp, body, err := c.Do(context.Background(), &fakeAction{}) - if err != nil { - t.Fatalf("incorrect error value: want=nil got=%v", err) - } - - wantCode := http.StatusTeapot - if wantCode != resp.StatusCode { - t.Fatalf("invalid response code: want=%d got=%d", wantCode, resp.StatusCode) - } - - wantBody := []byte("foo") - if !reflect.DeepEqual(wantBody, body) { - t.Fatalf("invalid response body: want=%q got=%q", wantBody, body) - } -} - -func TestSimpleHTTPClientDoError(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - tr.errchan <- errors.New("fixture") - - _, _, err := c.Do(context.Background(), &fakeAction{}) - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } -} - -type nilAction struct{} - -func (a *nilAction) HTTPRequest(url.URL) *http.Request { - return nil -} - -func TestSimpleHTTPClientDoNilRequest(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - tr.errchan <- errors.New("fixture") - - _, _, err := c.Do(context.Background(), &nilAction{}) - if err != ErrNoRequest { - t.Fatalf("expected non-nil error, got nil") - } -} - -func TestSimpleHTTPClientDoCancelContext(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - tr.startCancel <- struct{}{} - tr.finishCancel <- struct{}{} - - _, _, err := c.Do(context.Background(), &fakeAction{}) - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } -} - -type checkableReadCloser struct { - io.ReadCloser - closed bool -} - -func (c *checkableReadCloser) Close() error 
{ - if !c.closed { - c.closed = true - return c.ReadCloser.Close() - } - return nil -} - -func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - // create an already-cancelled context - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - body := &checkableReadCloser{ReadCloser: io.NopCloser(strings.NewReader("foo"))} - go func() { - // wait that simpleHTTPClient knows the context is already timed out, - // and calls CancelRequest - testutil.WaitSchedule() - - // response is returned before cancel effects - tr.respchan <- &http.Response{Body: body} - }() - - _, _, err := c.Do(ctx, &fakeAction{}) - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } - - if !body.closed { - t.Fatalf("expected closed body") - } -} - -type blockingBody struct { - c chan struct{} -} - -func (bb *blockingBody) Read(p []byte) (n int, err error) { - <-bb.c - return 0, errors.New("closed") -} - -func (bb *blockingBody) Close() error { - close(bb.c) - return nil -} - -func TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - ctx, cancel := context.WithCancel(context.Background()) - body := &checkableReadCloser{ReadCloser: &blockingBody{c: make(chan struct{})}} - go func() { - tr.respchan <- &http.Response{Body: body} - time.Sleep(2 * time.Millisecond) - // cancel after the body is received - cancel() - }() - - _, _, err := c.Do(ctx, &fakeAction{}) - if err != context.Canceled { - t.Fatalf("expected %+v, got %+v", context.Canceled, err) - } - - if !body.closed { - t.Fatalf("expected closed body") - } -} - -func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) { - tr := newFakeTransport() - c := &simpleHTTPClient{transport: tr} - - donechan := make(chan struct{}) - ctx, cancel := context.WithCancel(context.Background()) - go func() { - c.Do(ctx, 
&fakeAction{}) - close(donechan) - }() - - // This should call CancelRequest and begin the cancellation process - cancel() - - select { - case <-donechan: - t.Fatalf("simpleHTTPClient.Do should not have exited yet") - default: - } - - tr.finishCancel <- struct{}{} - - select { - case <-donechan: - //expected behavior - return - case <-time.After(time.Second): - t.Fatalf("simpleHTTPClient.Do did not exit within 1s") - } -} - -func TestSimpleHTTPClientDoHeaderTimeout(t *testing.T) { - tr := newFakeTransport() - tr.finishCancel <- struct{}{} - c := &simpleHTTPClient{transport: tr, headerTimeout: time.Millisecond} - - errc := make(chan error, 1) - go func() { - _, _, err := c.Do(context.Background(), &fakeAction{}) - errc <- err - }() - - select { - case err := <-errc: - if err == nil { - t.Fatalf("expected non-nil error, got nil") - } - case <-time.After(time.Second): - t.Fatalf("unexpected timeout when waiting for the test to finish") - } -} - -func TestHTTPClusterClientDo(t *testing.T) { - fakeErr := errors.New("fake!") - fakeURL := url.URL{} - tests := []struct { - client *httpClusterClient - ctx context.Context - - wantCode int - wantErr error - wantPinned int - }{ - // first good response short-circuits Do - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {resp: http.Response{StatusCode: http.StatusTeapot}}, - {err: fakeErr}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantCode: http.StatusTeapot, - }, - - // fall through to good endpoint if err is arbitrary - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {err: fakeErr}, - {resp: http.Response{StatusCode: http.StatusTeapot}}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantCode: http.StatusTeapot, - wantPinned: 1, - }, - - // context.Canceled short-circuits Do - { - client: &httpClusterClient{ - 
endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {err: context.Canceled}, - {resp: http.Response{StatusCode: http.StatusTeapot}}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantErr: context.Canceled, - }, - - // return err if there are no endpoints - { - client: &httpClusterClient{ - endpoints: []url.URL{}, - clientFactory: newHTTPClientFactory(nil, nil, 0), - rand: rand.New(rand.NewSource(0)), - }, - wantErr: ErrNoEndpoints, - }, - - // return err if all endpoints return arbitrary errors - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {err: fakeErr}, - {err: fakeErr}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantErr: &ClusterError{Errors: []error{fakeErr, fakeErr}}, - }, - - // 500-level errors cause Do to fallthrough to next endpoint - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {resp: http.Response{StatusCode: http.StatusBadGateway}}, - {resp: http.Response{StatusCode: http.StatusTeapot}}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - wantCode: http.StatusTeapot, - wantPinned: 1, - }, - - // 500-level errors cause one shot Do to fallthrough to next endpoint - { - client: &httpClusterClient{ - endpoints: []url.URL{fakeURL, fakeURL}, - clientFactory: newStaticHTTPClientFactory( - []staticHTTPResponse{ - {resp: http.Response{StatusCode: http.StatusBadGateway}}, - {resp: http.Response{StatusCode: http.StatusTeapot}}, - }, - ), - rand: rand.New(rand.NewSource(0)), - }, - ctx: context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue), - wantErr: errors.New("client: etcd member returns server error [Bad Gateway]"), - wantPinned: 1, - }, - } - - for i, tt := range tests { - if tt.ctx == nil { - tt.ctx = context.Background() - } - resp, _, err := 
tt.client.Do(tt.ctx, nil) - if (tt.wantErr == nil && tt.wantErr != err) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) { - t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr) - continue - } - - if resp == nil { - if tt.wantCode != 0 { - t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode) - continue - } - } else if resp.StatusCode != tt.wantCode { - t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode) - continue - } - - if tt.client.pinned != tt.wantPinned { - t.Errorf("#%d: pinned=%d, want=%d", i, tt.client.pinned, tt.wantPinned) - } - } -} - -func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) { - fakeURL := url.URL{} - tr := newFakeTransport() - tr.finishCancel <- struct{}{} - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0), - endpoints: []url.URL{fakeURL}, - } - - errc := make(chan error, 1) - go func() { - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - _, _, err := c.Do(ctx, &fakeAction{}) - errc <- err - }() - - select { - case err := <-errc: - if err != context.DeadlineExceeded { - t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded) - } - case <-time.After(time.Second): - t.Fatalf("unexpected timeout when waiting for request to deadline exceed") - } -} - -type fakeCancelContext struct{} - -var errFakeCancelContext = errors.New("fake context canceled") - -func (f fakeCancelContext) Deadline() (time.Time, bool) { return time.Time{}, false } -func (f fakeCancelContext) Done() <-chan struct{} { - d := make(chan struct{}, 1) - d <- struct{}{} - return d -} -func (f fakeCancelContext) Err() error { return errFakeCancelContext } -func (f fakeCancelContext) Value(key interface{}) interface{} { return 1 } - -func withTimeout(parent context.Context, timeout time.Duration) ( - ctx context.Context, - cancel context.CancelFunc) { - ctx = parent - cancel = func() { - ctx = nil - } - return ctx, cancel -} - -func 
TestHTTPClusterClientDoCanceledContext(t *testing.T) { - fakeURL := url.URL{} - tr := newFakeTransport() - tr.finishCancel <- struct{}{} - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0), - endpoints: []url.URL{fakeURL}, - } - - errc := make(chan error, 1) - go func() { - ctx, cancel := withTimeout(fakeCancelContext{}, time.Millisecond) - cancel() - _, _, err := c.Do(ctx, &fakeAction{}) - errc <- err - }() - - select { - case err := <-errc: - if err != errFakeCancelContext { - t.Errorf("err = %+v, want %+v", err, errFakeCancelContext) - } - case <-time.After(time.Second): - t.Fatalf("unexpected timeout when waiting for request to fake context canceled") - } -} - -func TestRedirectedHTTPAction(t *testing.T) { - act := &redirectedHTTPAction{ - action: &staticHTTPAction{ - request: http.Request{ - Method: "DELETE", - URL: &url.URL{ - Scheme: "https", - Host: "foo.example.com", - Path: "/ping", - }, - }, - }, - location: url.URL{ - Scheme: "https", - Host: "bar.example.com", - Path: "/pong", - }, - } - - want := &http.Request{ - Method: "DELETE", - URL: &url.URL{ - Scheme: "https", - Host: "bar.example.com", - Path: "/pong", - }, - } - got := act.HTTPRequest(url.URL{Scheme: "http", Host: "baz.example.com", Path: "/pang"}) - - if !reflect.DeepEqual(want, got) { - t.Fatalf("HTTPRequest is %#v, want %#v", want, got) - } -} - -func TestRedirectFollowingHTTPClient(t *testing.T) { - tests := []struct { - checkRedirect CheckRedirectFunc - client httpClient - wantCode int - wantErr error - }{ - // errors bubbled up - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - err: errors.New("fail!"), - }, - }, - }, - wantErr: errors.New("fail!"), - }, - - // no need to follow redirect if none given - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: 
http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantCode: http.StatusTeapot, - }, - - // redirects if less than max - { - checkRedirect: func(via int) error { - if via >= 2 { - return ErrTooManyRedirects - } - return nil - }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantCode: http.StatusTeapot, - }, - - // succeed after reaching max redirects - { - checkRedirect: func(via int) error { - if via >= 3 { - return ErrTooManyRedirects - } - return nil - }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantCode: http.StatusTeapot, - }, - - // fail if too many redirects - { - checkRedirect: func(via int) error { - if via >= 2 { - return ErrTooManyRedirects - } - return nil - }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - { - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - }, - }, - wantErr: ErrTooManyRedirects, - }, - - // fail if Location header not set - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: 
[]staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - }, - }, - }, - }, - wantErr: errors.New("location header not set"), - }, - - // fail if Location header is invalid - { - checkRedirect: func(int) error { return ErrTooManyRedirects }, - client: &multiStaticHTTPClient{ - responses: []staticHTTPResponse{ - { - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{":"}}, - }, - }, - }, - }, - wantErr: errors.New("location header not valid URL: :"), - }, - - // fail if redirects checked way too many times - { - checkRedirect: func(int) error { return nil }, - client: &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTemporaryRedirect, - Header: http.Header{"Location": []string{"http://example.com"}}, - }, - }, - wantErr: errTooManyRedirectChecks, - }, - } - - for i, tt := range tests { - client := &redirectFollowingHTTPClient{client: tt.client, checkRedirect: tt.checkRedirect} - resp, _, err := client.Do(context.Background(), nil) - if (tt.wantErr == nil && tt.wantErr != err) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) { - t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr) - continue - } - - if resp == nil { - if tt.wantCode != 0 { - t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode) - } - continue - } - - if resp.StatusCode != tt.wantCode { - t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode) - continue - } - } -} - -func TestDefaultCheckRedirect(t *testing.T) { - tests := []struct { - num int - err error - }{ - {0, nil}, - {5, nil}, - {10, nil}, - {11, ErrTooManyRedirects}, - {29, ErrTooManyRedirects}, - } - - for i, tt := range tests { - err := DefaultCheckRedirect(tt.num) - if !reflect.DeepEqual(tt.err, err) { - t.Errorf("#%d: want=%#v got=%#v", i, tt.err, err) - } - } -} - -func TestHTTPClusterClientSync(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: 
http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - - want := []string{"http://127.0.0.1:2379"} - got := hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got) - } - - err = hc.Sync(context.Background()) - if err != nil { - t.Fatalf("unexpected error during Sync: %#v", err) - } - - want = []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"} - got = hc.Endpoints() - sort.Strings(got) - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints post-Sync: want=%#v got=%#v", want, got) - } - - err = hc.SetEndpoints([]string{"http://127.0.0.1:4009"}) - if err != nil { - t.Fatalf("unexpected error during reset: %#v", err) - } - - want = []string{"http://127.0.0.1:4009"} - got = hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints post-reset: want=%#v got=%#v", want, got) - } -} - -func TestHTTPClusterClientSyncFail(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - {err: errors.New("fail!")}, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) - if err != nil { - 
t.Fatalf("unexpected error during setup: %#v", err) - } - - want := []string{"http://127.0.0.1:2379"} - got := hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got) - } - - err = hc.Sync(context.Background()) - if err == nil { - t.Fatalf("got nil error during Sync") - } - - got = hc.Endpoints() - if !reflect.DeepEqual(want, got) { - t.Fatalf("incorrect endpoints after failed Sync: want=%#v got=%#v", want, got) - } -} - -func TestHTTPClusterClientAutoSyncCancelContext(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - err = hc.AutoSync(ctx, time.Hour) - if err != context.Canceled { - t.Fatalf("incorrect error value: want=%v got=%v", context.Canceled, err) - } -} - -func TestHTTPClusterClientAutoSyncFail(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - {err: errors.New("fail!")}, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - - 
err = hc.AutoSync(context.Background(), time.Hour) - if !strings.HasPrefix(err.Error(), ErrClusterUnavailable.Error()) { - t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err) - } -} - -func TestHTTPClusterClientGetVersion(t *testing.T) { - body := []byte(`{"etcdserver":"2.3.2","etcdcluster":"2.3.0"}`) - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Length": []string{"44"}}}, - body: body, - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - - actual, err := hc.GetVersion(context.Background()) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - expected := version.Versions{Server: "2.3.2", Cluster: "2.3.0"} - if !reflect.DeepEqual(&expected, actual) { - t.Errorf("incorrect Response: want=%#v got=%#v", expected, actual) - } -} - -// TestHTTPClusterClientSyncPinEndpoint tests that Sync() pins the endpoint when -// it gets the exactly same member list as before. 
-func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", 
"http://127.0.0.1:4002"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - pinnedEndpoint := hc.endpoints[hc.pinned] - - for i := 0; i < 3; i++ { - err = hc.Sync(context.Background()) - if err != nil { - t.Fatalf("#%d: unexpected error during Sync: %#v", i, err) - } - - if g := hc.endpoints[hc.pinned]; g != pinnedEndpoint { - t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, pinnedEndpoint) - } - } -} - -// TestHTTPClusterClientSyncUnpinEndpoint tests that Sync() unpins the endpoint when -// it gets a different member list than before. -func TestHTTPClusterClientSyncUnpinEndpoint(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: 
[]byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - } - err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}) - if err != nil { - t.Fatalf("unexpected error during setup: %#v", err) - } - wants := []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"} - - for i := 0; i < 3; i++ { - err = hc.Sync(context.Background()) - if err != nil { - t.Fatalf("#%d: unexpected error during Sync: %#v", i, err) - } - - if g := hc.endpoints[hc.pinned]; g.String() != wants[i] { - t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, wants[i]) - } - } -} - -// TestHTTPClusterClientSyncPinLeaderEndpoint tests that Sync() pins the leader -// when the selection mode is EndpointSelectionPrioritizeLeader -func TestHTTPClusterClientSyncPinLeaderEndpoint(t *testing.T) { - cf := newStaticHTTPClientFactory([]staticHTTPResponse{ - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, 
Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - { - resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}}, - body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}`), - }, - }) - - hc := &httpClusterClient{ - clientFactory: cf, - rand: rand.New(rand.NewSource(0)), - selectionMode: EndpointSelectionPrioritizeLeader, - endpoints: []url.URL{{}}, // Need somewhere to pretend to send to initially - } - - wants := []string{"http://127.0.0.1:4003", "http://127.0.0.1:4002"} - - for i, want := range wants { - err := hc.Sync(context.Background()) - if err != nil { - t.Fatalf("#%d: unexpected error during Sync: %#v", i, err) - } - - pinned := hc.endpoints[hc.pinned].String() - if pinned != want { - t.Errorf("#%d: pinned endpoint = %v, want %v", i, pinned, want) - } - } -} - -func TestHTTPClusterClientResetFail(t *testing.T) { - tests := [][]string{ - // need at least one endpoint - {}, - - // urls must be valid - {":"}, - } - - for i, tt := range tests { - hc := &httpClusterClient{rand: rand.New(rand.NewSource(0))} - err := hc.SetEndpoints(tt) - if err == nil { - t.Errorf("#%d: expected non-nil error", i) - } - } -} - -func 
TestHTTPClusterClientResetPinRandom(t *testing.T) { - round := 2000 - pinNum := 0 - for i := 0; i < round; i++ { - hc := &httpClusterClient{rand: rand.New(rand.NewSource(int64(i)))} - err := hc.SetEndpoints([]string{"http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"}) - if err != nil { - t.Fatalf("#%d: reset error (%v)", i, err) - } - if hc.endpoints[hc.pinned].String() == "http://127.0.0.1:4001" { - pinNum++ - } - } - - min := 1.0/3.0 - 0.05 - max := 1.0/3.0 + 0.05 - if ratio := float64(pinNum) / float64(round); ratio > max || ratio < min { - t.Errorf("pinned ratio = %v, want [%v, %v]", ratio, min, max) - } -} diff --git a/client/v2/curl.go b/client/v2/curl.go deleted file mode 100644 index 8d12367541e..00000000000 --- a/client/v2/curl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io" - "net/http" - "os" -) - -var ( - cURLDebug = false -) - -func EnablecURLDebug() { - cURLDebug = true -} - -func DisablecURLDebug() { - cURLDebug = false -} - -// printcURL prints the cURL equivalent request to stderr. -// It returns an error if the body of the request cannot -// be read. -// The caller MUST cancel the request if there is an error. 
-func printcURL(req *http.Request) error { - if !cURLDebug { - return nil - } - var ( - command string - b []byte - err error - ) - - if req.URL != nil { - command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) - } - - if req.Body != nil { - b, err = io.ReadAll(req.Body) - if err != nil { - return err - } - command += fmt.Sprintf(" -d %q", string(b)) - } - - fmt.Fprintf(os.Stderr, "cURL Command: %q\n", command) - - // reset body - body := bytes.NewBuffer(b) - req.Body = io.NopCloser(body) - - return nil -} diff --git a/client/v2/discover.go b/client/v2/discover.go deleted file mode 100644 index 646ba5dada7..00000000000 --- a/client/v2/discover.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "go.etcd.io/etcd/client/pkg/v3/srv" -) - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. - Discover(domain string, serviceName string) ([]string, error) -} - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
-func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -func (d *srvDiscover) Discover(domain string, serviceName string) ([]string, error) { - srvs, err := srv.GetClient("etcd-client", domain, serviceName) - if err != nil { - return nil, err - } - return srvs.Endpoints, nil -} diff --git a/client/v2/doc.go b/client/v2/doc.go deleted file mode 100644 index 68284c20a89..00000000000 --- a/client/v2/doc.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package client provides bindings for the etcd APIs. - -Create a Config and exchange it for a Client: - - import ( - "net/http" - "context" - - "go.etcd.io/etcd/client/v2" - ) - - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: DefaultTransport, - } - - c, err := client.New(cfg) - if err != nil { - // handle error - } - -Clients are safe for concurrent use by multiple goroutines. 
- -Create a KeysAPI using the Client, then use it to interact with etcd: - - kAPI := client.NewKeysAPI(c) - - // create a new key /foo with the value "bar" - _, err = kAPI.Create(context.Background(), "/foo", "bar") - if err != nil { - // handle error - } - - // delete the newly created key only if the value is still "bar" - _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) - if err != nil { - // handle error - } - -Use a custom context to set timeouts on your operations: - - import "time" - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // set a new key, ignoring its previous state - _, err := kAPI.Set(ctx, "/ping", "pong", nil) - if err != nil { - if err == context.DeadlineExceeded { - // request took longer than 5s - } else { - // handle error - } - } -*/ -package client diff --git a/client/v2/fake_transport_test.go b/client/v2/fake_transport_test.go deleted file mode 100644 index 7a725c17cb8..00000000000 --- a/client/v2/fake_transport_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "errors" - "net/http" -) - -func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { - select { - case resp := <-t.respchan: - return resp, nil - case err := <-t.errchan: - return nil, err - case <-t.startCancel: - case <-req.Cancel: - } - select { - // this simulates that the request is finished before cancel effects - case resp := <-t.respchan: - return resp, nil - // wait on finishCancel to simulate taking some amount of - // time while calling CancelRequest - case <-t.finishCancel: - return nil, errors.New("cancelled") - } -} diff --git a/client/v2/go.mod b/client/v2/go.mod deleted file mode 100644 index 4adaae9330f..00000000000 --- a/client/v2/go.mod +++ /dev/null @@ -1,32 +0,0 @@ -module go.etcd.io/etcd/client/v2 - -go 1.19 - -require ( - go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 -) - -require ( - github.com/coreos/go-semver v0.3.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.8.1 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace ( - go.etcd.io/etcd/api/v3 => ../../api - go.etcd.io/etcd/client/pkg/v3 => ../pkg -) - -// Bad imports are sometimes causing attempts to pull that code. -// This makes the error more explicit. 
-replace ( - go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/pkg/v3 => ./FORBIDDED_DEPENDENCY - go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY -) diff --git a/client/v2/go.sum b/client/v2/go.sum deleted file mode 100644 index e18a9825d55..00000000000 --- a/client/v2/go.sum +++ /dev/null @@ -1,27 +0,0 @@ -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= diff --git a/client/v2/keys_bench_test.go b/client/v2/keys_bench_test.go deleted file mode 100644 index ff136033452..00000000000 --- a/client/v2/keys_bench_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "encoding/json" - "net/http" - "reflect" - "strings" - "testing" -) - -func createTestNode(size int) *Node { - return &Node{ - Key: strings.Repeat("a", 30), - Value: strings.Repeat("a", size), - CreatedIndex: 123456, - ModifiedIndex: 123456, - TTL: 123456789, - } -} - -func createTestNodeWithChildren(children, size int) *Node { - node := createTestNode(size) - for i := 0; i < children; i++ { - node.Nodes = append(node.Nodes, createTestNode(size)) - } - return node -} - -func createTestResponse(children, size int) *Response { - return &Response{ - Action: "aaaaa", - Node: createTestNodeWithChildren(children, size), - PrevNode: nil, - } -} - -func benchmarkResponseUnmarshalling(b *testing.B, children, size int) { - header := http.Header{} - header.Add("X-Etcd-Index", "123456") - response := createTestResponse(children, size) - body, err := json.Marshal(response) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - newResponse := new(Response) - for i := 0; i < b.N; i++ { - if newResponse, err = unmarshalSuccessfulKeysResponse(header, body); err != nil { - b.Errorf("error unmarshalling response (%v)", err) - } - - } - if !reflect.DeepEqual(response.Node, newResponse.Node) { - b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse) - } -} - -func BenchmarkSmallResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 30, 20) -} - -func BenchmarkManySmallResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 3000, 20) -} - -func BenchmarkMediumResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 300, 200) -} - -func BenchmarkLargeResponseUnmarshal(b *testing.B) { - benchmarkResponseUnmarshalling(b, 3000, 2000) -} diff --git a/client/v2/keys_test.go b/client/v2/keys_test.go deleted file mode 100644 index 05aeb3f7e10..00000000000 --- a/client/v2/keys_test.go +++ /dev/null @@ -1,1429 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache 
// TestV2KeysURLHelper verifies that v2KeysURL joins endpoint path, prefix and
// key while preserving scheme, host (including port) and trailing slashes.
func TestV2KeysURLHelper(t *testing.T) {
	tests := []struct {
		endpoint url.URL
		prefix   string
		key      string
		want     url.URL
	}{
		// key is empty, no problem
		{
			endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"},
			prefix:   "",
			key:      "",
			want:     url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"},
		},

		// key is joined to path
		{
			endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"},
			prefix:   "",
			key:      "/foo/bar",
			want:     url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys/foo/bar"},
		},

		// key is joined to path when path is empty
		{
			endpoint: url.URL{Scheme: "http", Host: "example.com", Path: ""},
			prefix:   "",
			key:      "/foo/bar",
			want:     url.URL{Scheme: "http", Host: "example.com", Path: "/foo/bar"},
		},

		// Host field carries through with port
		{
			endpoint: url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"},
			prefix:   "",
			key:      "",
			want:     url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"},
		},

		// Scheme carries through
		{
			endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"},
			prefix:   "",
			key:      "",
			want:     url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"},
		},
		// Prefix is applied
		{
			endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"},
			prefix:   "/bar",
			key:      "/baz",
			want:     url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz"},
		},
		// Prefix is joined to path
		{
			endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"},
			prefix:   "/bar",
			key:      "",
			want:     url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar"},
		},
		// Keep trailing slash
		{
			endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"},
			prefix:   "/bar",
			key:      "/baz/",
			want:     url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz/"},
		},
	}

	for i, tt := range tests {
		got := v2KeysURL(tt.endpoint, tt.prefix, tt.key)
		if tt.want != *got {
			t.Errorf("#%d: want=%#v, got=%#v", i, tt.want, *got)
		}
	}
}

// TestGetAction verifies that getAction encodes the recursive/sorted/quorum
// flags into the GET request's query string.
func TestGetAction(t *testing.T) {
	ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}
	// NOTE: baseWantURL is a shared pointer that each iteration mutates via
	// RawQuery below.
	baseWantURL := &url.URL{
		Scheme: "http",
		Host:   "example.com",
		Path:   "/v2/keys/foo/bar",
	}
	wantHeader := http.Header{}

	tests := []struct {
		recursive bool
		sorted    bool
		quorum    bool
		wantQuery string
	}{
		{
			recursive: false,
			sorted:    false,
			quorum:    false,
			wantQuery: "quorum=false&recursive=false&sorted=false",
		},
		{
			recursive: true,
			sorted:    false,
			quorum:    false,
			wantQuery: "quorum=false&recursive=true&sorted=false",
		},
		{
			recursive: false,
			sorted:    true,
			quorum:    false,
			wantQuery: "quorum=false&recursive=false&sorted=true",
		},
		{
			recursive: true,
			sorted:    true,
			quorum:    false,
			wantQuery: "quorum=false&recursive=true&sorted=true",
		},
		{
			recursive: false,
			sorted:    false,
			quorum:    true,
			wantQuery: "quorum=true&recursive=false&sorted=false",
		},
	}

	for i, tt := range tests {
		f := getAction{
			Key:       "/foo/bar",
			Recursive: tt.recursive,
			Sorted:    tt.sorted,
			Quorum:    tt.quorum,
		}
		got := *f.HTTPRequest(ep)

		wantURL := baseWantURL
		wantURL.RawQuery = tt.wantQuery

		err := assertRequest(got, "GET", wantURL, wantHeader, nil)
		if err != nil {
			t.Errorf("#%d: %v", i, err)
		}
	}
}

// TestWaitAction verifies that waitAction encodes wait, waitIndex and
// recursive into the GET request's query string.
func TestWaitAction(t *testing.T) {
	ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}
	// NOTE: baseWantURL is a shared pointer that each iteration mutates via
	// RawQuery below.
	baseWantURL := &url.URL{
		Scheme: "http",
		Host:   "example.com",
		Path:   "/v2/keys/foo/bar",
	}
	wantHeader := http.Header{}

	tests := []struct {
		waitIndex uint64
		recursive bool
		wantQuery string
	}{
		{
			recursive: false,
			waitIndex: uint64(0),
			wantQuery: "recursive=false&wait=true&waitIndex=0",
		},
		{
			recursive: false,
			waitIndex: uint64(12),
			wantQuery: "recursive=false&wait=true&waitIndex=12",
		},
		{
			recursive: true,
			waitIndex: uint64(12),
			wantQuery: "recursive=true&wait=true&waitIndex=12",
		},
	}

	for i, tt := range tests {
		f := waitAction{
			Key:       "/foo/bar",
			WaitIndex: tt.waitIndex,
			Recursive: tt.recursive,
		}
		got := *f.HTTPRequest(ep)

		wantURL := baseWantURL
		wantURL.RawQuery = tt.wantQuery

		err := assertRequest(got, "GET", wantURL, wantHeader, nil)
		if err != nil {
			t.Errorf("#%d: unexpected error: %#v", i, err)
		}
	}
}
"/bar", - }, - wantURL: "http://example.com/foo/bar", - wantBody: "value=", - }, - - // Key with trailing slash - { - act: setAction{ - Key: "/foo/", - }, - wantURL: "http://example.com/foo/", - wantBody: "value=", - }, - - // Value is set - { - act: setAction{ - Key: "foo", - Value: "baz", - }, - wantURL: "http://example.com/foo", - wantBody: "value=baz", - }, - - // PrevExist set, but still ignored - { - act: setAction{ - Key: "foo", - PrevExist: PrevIgnore, - }, - wantURL: "http://example.com/foo", - wantBody: "value=", - }, - - // PrevExist set to true - { - act: setAction{ - Key: "foo", - PrevExist: PrevExist, - }, - wantURL: "http://example.com/foo?prevExist=true", - wantBody: "value=", - }, - - // PrevExist set to false - { - act: setAction{ - Key: "foo", - PrevExist: PrevNoExist, - }, - wantURL: "http://example.com/foo?prevExist=false", - wantBody: "value=", - }, - - // PrevValue is urlencoded - { - act: setAction{ - Key: "foo", - PrevValue: "bar baz", - }, - wantURL: "http://example.com/foo?prevValue=bar+baz", - wantBody: "value=", - }, - - // PrevIndex is set - { - act: setAction{ - Key: "foo", - PrevIndex: uint64(12), - }, - wantURL: "http://example.com/foo?prevIndex=12", - wantBody: "value=", - }, - - // TTL is set - { - act: setAction{ - Key: "foo", - TTL: 3 * time.Minute, - }, - wantURL: "http://example.com/foo", - wantBody: "ttl=180&value=", - }, - - // Refresh is set - { - act: setAction{ - Key: "foo", - TTL: 3 * time.Minute, - Refresh: true, - }, - wantURL: "http://example.com/foo", - wantBody: "refresh=true&ttl=180&value=", - }, - - // Dir is set - { - act: setAction{ - Key: "foo", - Dir: true, - }, - wantURL: "http://example.com/foo?dir=true", - wantBody: "", - }, - // Dir is set with a value - { - act: setAction{ - Key: "foo", - Value: "bar", - Dir: true, - }, - wantURL: "http://example.com/foo?dir=true", - wantBody: "", - }, - // Dir is set with PrevExist set to true - { - act: setAction{ - Key: "foo", - PrevExist: PrevExist, - Dir: true, - }, 
- wantURL: "http://example.com/foo?dir=true&prevExist=true", - wantBody: "", - }, - // Dir is set with PrevValue - { - act: setAction{ - Key: "foo", - PrevValue: "bar", - Dir: true, - }, - wantURL: "http://example.com/foo?dir=true", - wantBody: "", - }, - // NoValueOnSuccess is set - { - act: setAction{ - Key: "foo", - NoValueOnSuccess: true, - }, - wantURL: "http://example.com/foo?noValueOnSuccess=true", - wantBody: "value=", - }, - } - - for i, tt := range tests { - u, err := url.Parse(tt.wantURL) - if err != nil { - t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) - } - - got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) - if err := assertRequest(*got, "PUT", u, wantHeader, []byte(tt.wantBody)); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func TestCreateInOrderAction(t *testing.T) { - wantHeader := http.Header(map[string][]string{ - "Content-Type": {"application/x-www-form-urlencoded"}, - }) - - tests := []struct { - act createInOrderAction - wantURL string - wantBody string - }{ - // default prefix - { - act: createInOrderAction{ - Prefix: defaultV2KeysPrefix, - Dir: "foo", - }, - wantURL: "http://example.com/v2/keys/foo", - wantBody: "value=", - }, - - // non-default prefix - { - act: createInOrderAction{ - Prefix: "/pfx", - Dir: "foo", - }, - wantURL: "http://example.com/pfx/foo", - wantBody: "value=", - }, - - // no prefix - { - act: createInOrderAction{ - Dir: "foo", - }, - wantURL: "http://example.com/foo", - wantBody: "value=", - }, - - // Key with path separators - { - act: createInOrderAction{ - Prefix: defaultV2KeysPrefix, - Dir: "foo/bar/baz", - }, - wantURL: "http://example.com/v2/keys/foo/bar/baz", - wantBody: "value=", - }, - - // Key with leading slash, Prefix with trailing slash - { - act: createInOrderAction{ - Prefix: "/foo/", - Dir: "/bar", - }, - wantURL: "http://example.com/foo/bar", - wantBody: "value=", - }, - - // Key with trailing slash - { - act: createInOrderAction{ - Dir: "/foo/", - }, - 
wantURL: "http://example.com/foo/", - wantBody: "value=", - }, - - // Value is set - { - act: createInOrderAction{ - Dir: "foo", - Value: "baz", - }, - wantURL: "http://example.com/foo", - wantBody: "value=baz", - }, - // TTL is set - { - act: createInOrderAction{ - Dir: "foo", - TTL: 3 * time.Minute, - }, - wantURL: "http://example.com/foo", - wantBody: "ttl=180&value=", - }, - } - - for i, tt := range tests { - u, err := url.Parse(tt.wantURL) - if err != nil { - t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) - } - - got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) - if err := assertRequest(*got, "POST", u, wantHeader, []byte(tt.wantBody)); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func TestDeleteAction(t *testing.T) { - wantHeader := http.Header(map[string][]string{ - "Content-Type": {"application/x-www-form-urlencoded"}, - }) - - tests := []struct { - act deleteAction - wantURL string - }{ - // default prefix - { - act: deleteAction{ - Prefix: defaultV2KeysPrefix, - Key: "foo", - }, - wantURL: "http://example.com/v2/keys/foo", - }, - - // non-default prefix - { - act: deleteAction{ - Prefix: "/pfx", - Key: "foo", - }, - wantURL: "http://example.com/pfx/foo", - }, - - // no prefix - { - act: deleteAction{ - Key: "foo", - }, - wantURL: "http://example.com/foo", - }, - - // Key with path separators - { - act: deleteAction{ - Prefix: defaultV2KeysPrefix, - Key: "foo/bar/baz", - }, - wantURL: "http://example.com/v2/keys/foo/bar/baz", - }, - - // Key with leading slash, Prefix with trailing slash - { - act: deleteAction{ - Prefix: "/foo/", - Key: "/bar", - }, - wantURL: "http://example.com/foo/bar", - }, - - // Key with trailing slash - { - act: deleteAction{ - Key: "/foo/", - }, - wantURL: "http://example.com/foo/", - }, - - // Recursive set to true - { - act: deleteAction{ - Key: "foo", - Recursive: true, - }, - wantURL: "http://example.com/foo?recursive=true", - }, - - // PrevValue is urlencoded - { - act: 
deleteAction{ - Key: "foo", - PrevValue: "bar baz", - }, - wantURL: "http://example.com/foo?prevValue=bar+baz", - }, - - // PrevIndex is set - { - act: deleteAction{ - Key: "foo", - PrevIndex: uint64(12), - }, - wantURL: "http://example.com/foo?prevIndex=12", - }, - } - - for i, tt := range tests { - u, err := url.Parse(tt.wantURL) - if err != nil { - t.Errorf("#%d: unable to use wantURL fixture: %v", i, err) - } - - got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"}) - if err := assertRequest(*got, "DELETE", u, wantHeader, nil); err != nil { - t.Errorf("#%d: %v", i, err) - } - } -} - -func assertRequest(got http.Request, wantMethod string, wantURL *url.URL, wantHeader http.Header, wantBody []byte) error { - if wantMethod != got.Method { - return fmt.Errorf("want.Method=%#v got.Method=%#v", wantMethod, got.Method) - } - - if !reflect.DeepEqual(wantURL, got.URL) { - return fmt.Errorf("want.URL=%#v got.URL=%#v", wantURL, got.URL) - } - - if !reflect.DeepEqual(wantHeader, got.Header) { - return fmt.Errorf("want.Header=%#v got.Header=%#v", wantHeader, got.Header) - } - - if got.Body == nil { - if wantBody != nil { - return fmt.Errorf("want.Body=%v got.Body=%v", wantBody, got.Body) - } - } else { - if wantBody == nil { - return fmt.Errorf("want.Body=%v got.Body=%s", wantBody, got.Body) - } - gotBytes, err := io.ReadAll(got.Body) - if err != nil { - return err - } - - if !reflect.DeepEqual(wantBody, gotBytes) { - return fmt.Errorf("want.Body=%s got.Body=%s", wantBody, gotBytes) - } - } - - return nil -} - -func TestUnmarshalSuccessfulResponse(t *testing.T) { - var expiration time.Time - expiration.UnmarshalText([]byte("2015-04-07T04:40:23.044979686Z")) - - tests := []struct { - indexHdr string - clusterIDHdr string - body string - wantRes *Response - wantErr bool - }{ - // Neither PrevNode or Node - { - indexHdr: "1", - body: `{"action":"delete"}`, - wantRes: &Response{Action: "delete", Index: 1}, - wantErr: false, - }, - - // PrevNode - { - indexHdr: 
"15", - body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`, - wantRes: &Response{ - Action: "delete", - Index: 15, - Node: nil, - PrevNode: &Node{ - Key: "/foo", - Value: "bar", - ModifiedIndex: 12, - CreatedIndex: 10, - }, - }, - wantErr: false, - }, - - // Node - { - indexHdr: "15", - body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`, - wantRes: &Response{ - Action: "get", - Index: 15, - Node: &Node{ - Key: "/foo", - Value: "bar", - ModifiedIndex: 12, - CreatedIndex: 10, - TTL: 10, - Expiration: &expiration, - }, - PrevNode: nil, - }, - wantErr: false, - }, - - // Node Dir - { - indexHdr: "15", - clusterIDHdr: "abcdef", - body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`, - wantRes: &Response{ - Action: "get", - Index: 15, - Node: &Node{ - Key: "/foo", - Dir: true, - ModifiedIndex: 12, - CreatedIndex: 10, - }, - PrevNode: nil, - ClusterID: "abcdef", - }, - wantErr: false, - }, - - // PrevNode and Node - { - indexHdr: "15", - body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`, - wantRes: &Response{ - Action: "update", - Index: 15, - PrevNode: &Node{ - Key: "/foo", - Value: "baz", - ModifiedIndex: 10, - CreatedIndex: 10, - }, - Node: &Node{ - Key: "/foo", - Value: "bar", - ModifiedIndex: 12, - CreatedIndex: 10, - }, - }, - wantErr: false, - }, - - // Garbage in body - { - indexHdr: "", - body: `garbage`, - wantRes: nil, - wantErr: true, - }, - - // non-integer index - { - indexHdr: "poo", - body: `{}`, - wantRes: nil, - wantErr: true, - }, - } - - for i, tt := range tests { - h := make(http.Header) - h.Add("X-Etcd-Index", tt.indexHdr) - res, err := unmarshalSuccessfulKeysResponse(h, 
[]byte(tt.body)) - if tt.wantErr != (err != nil) { - t.Errorf("#%d: wantErr=%t, err=%v", i, tt.wantErr, err) - } - - if (res == nil) != (tt.wantRes == nil) { - t.Errorf("#%d: received res=%#v, but expected res=%#v", i, res, tt.wantRes) - continue - } else if tt.wantRes == nil { - // expected and successfully got nil response - continue - } - - if res.Action != tt.wantRes.Action { - t.Errorf("#%d: Action=%s, expected %s", i, res.Action, tt.wantRes.Action) - } - if res.Index != tt.wantRes.Index { - t.Errorf("#%d: Index=%d, expected %d", i, res.Index, tt.wantRes.Index) - } - if !reflect.DeepEqual(res.Node, tt.wantRes.Node) { - t.Errorf("#%d: Node=%v, expected %v", i, res.Node, tt.wantRes.Node) - } - } -} - -func TestUnmarshalFailedKeysResponse(t *testing.T) { - body := []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`) - - wantErr := Error{ - Code: 100, - Message: "Key not found", - Cause: "/foo", - Index: uint64(18), - } - - gotErr := unmarshalFailedKeysResponse(body) - if !reflect.DeepEqual(wantErr, gotErr) { - t.Errorf("unexpected error: want=%#v got=%#v", wantErr, gotErr) - } -} - -func TestUnmarshalFailedKeysResponseBadJSON(t *testing.T) { - err := unmarshalFailedKeysResponse([]byte(`{"er`)) - if err == nil { - t.Errorf("got nil error") - } else if _, ok := err.(Error); ok { - t.Errorf("error is of incorrect type *Error: %#v", err) - } -} - -func TestHTTPWatcherNextWaitAction(t *testing.T) { - initAction := waitAction{ - Prefix: "/pants", - Key: "/foo/bar", - Recursive: true, - WaitIndex: 19, - } - - client := &actionAssertingHTTPClient{ - t: t, - act: &initAction, - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"42"}}, - }, - body: []byte(`{"action":"update","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), - } - - wantResponse := &Response{ - 
Action: "update", - Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(21)}, - PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, - Index: uint64(42), - } - - wantNextWait := waitAction{ - Prefix: "/pants", - Key: "/foo/bar", - Recursive: true, - WaitIndex: 22, - } - - watcher := &httpWatcher{ - client: client, - nextWait: initAction, - } - - resp, err := watcher.Next(context.Background()) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("received incorrect Response: want=%#v got=%#v", wantResponse, resp) - } - - if !reflect.DeepEqual(wantNextWait, watcher.nextWait) { - t.Errorf("nextWait incorrect: want=%#v got=%#v", wantNextWait, watcher.nextWait) - } -} - -func TestHTTPWatcherNextFail(t *testing.T) { - tests := []httpClient{ - // generic HTTP client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusNotFound, - }, - body: []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - act := waitAction{ - Prefix: "/pants", - Key: "/foo/bar", - Recursive: true, - WaitIndex: 19, - } - - watcher := &httpWatcher{ - client: tt, - nextWait: act, - } - - resp, err := watcher.Next(context.Background()) - if err == nil { - t.Errorf("#%d: expected non-nil error", i) - } - if resp != nil { - t.Errorf("#%d: expected nil Response, got %#v", i, resp) - } - if !reflect.DeepEqual(act, watcher.nextWait) { - t.Errorf("#%d: nextWait changed: want=%#v got=%#v", i, act, watcher.nextWait) - } - } -} - -func TestHTTPKeysAPIWatcherAction(t *testing.T) { - tests := []struct { - key string - opts *WatcherOptions - want waitAction - 
}{ - { - key: "/foo", - opts: nil, - want: waitAction{ - Key: "/foo", - Recursive: false, - WaitIndex: 0, - }, - }, - - { - key: "/foo", - opts: &WatcherOptions{ - Recursive: false, - AfterIndex: 0, - }, - want: waitAction{ - Key: "/foo", - Recursive: false, - WaitIndex: 0, - }, - }, - - { - key: "/foo", - opts: &WatcherOptions{ - Recursive: true, - AfterIndex: 0, - }, - want: waitAction{ - Key: "/foo", - Recursive: true, - WaitIndex: 0, - }, - }, - - { - key: "/foo", - opts: &WatcherOptions{ - Recursive: false, - AfterIndex: 19, - }, - want: waitAction{ - Key: "/foo", - Recursive: false, - WaitIndex: 20, - }, - }, - } - - for i, tt := range tests { - testError := errors.New("fail!") - kAPI := &httpKeysAPI{ - client: &staticHTTPClient{err: testError}, - } - - want := &httpWatcher{ - client: &staticHTTPClient{err: testError}, - nextWait: tt.want, - } - - got := kAPI.Watcher(tt.key, tt.opts) - if !reflect.DeepEqual(want, got) { - t.Errorf("#%d: incorrect watcher: want=%#v got=%#v", i, want, got) - } - } -} - -func TestHTTPKeysAPISetAction(t *testing.T) { - tests := []struct { - key string - value string - opts *SetOptions - wantAction httpAction - }{ - // nil SetOptions - { - key: "/foo", - value: "bar", - opts: nil, - wantAction: &setAction{ - Key: "/foo", - Value: "bar", - PrevValue: "", - PrevIndex: 0, - PrevExist: PrevIgnore, - TTL: 0, - }, - }, - // empty SetOptions - { - key: "/foo", - value: "bar", - opts: &SetOptions{}, - wantAction: &setAction{ - Key: "/foo", - Value: "bar", - PrevValue: "", - PrevIndex: 0, - PrevExist: PrevIgnore, - TTL: 0, - }, - }, - // populated SetOptions - { - key: "/foo", - value: "bar", - opts: &SetOptions{ - PrevValue: "baz", - PrevIndex: 13, - PrevExist: PrevExist, - TTL: time.Minute, - Dir: true, - }, - wantAction: &setAction{ - Key: "/foo", - Value: "bar", - PrevValue: "baz", - PrevIndex: 13, - PrevExist: PrevExist, - TTL: time.Minute, - Dir: true, - }, - }, - } - - for i, tt := range tests { - client := 
&actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} - kAPI := httpKeysAPI{client: client} - kAPI.Set(context.Background(), tt.key, tt.value, tt.opts) - } -} - -func TestHTTPKeysAPISetError(t *testing.T) { - tests := []httpClient{ - // generic HTTP client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - kAPI := httpKeysAPI{client: tt} - resp, err := kAPI.Set(context.Background(), "/foo", "bar", nil) - if err == nil { - t.Errorf("#%d: received nil error", i) - } - if resp != nil { - t.Errorf("#%d: received non-nil Response: %#v", i, resp) - } - } -} - -func TestHTTPKeysAPISetResponse(t *testing.T) { - client := &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"21"}}, - }, - body: []byte(`{"action":"set","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":21},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), - } - - wantResponse := &Response{ - Action: "set", - Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(21), ModifiedIndex: uint64(21)}, - PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, - Index: uint64(21), - } - - kAPI := &httpKeysAPI{client: client, prefix: "/pants"} - resp, err := kAPI.Set(context.Background(), "/foo/bar/baz", "snarf", nil) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp) - } -} - -func 
TestHTTPKeysAPIGetAction(t *testing.T) { - tests := []struct { - key string - opts *GetOptions - wantAction httpAction - }{ - // nil GetOptions - { - key: "/foo", - opts: nil, - wantAction: &getAction{ - Key: "/foo", - Sorted: false, - Recursive: false, - }, - }, - // empty GetOptions - { - key: "/foo", - opts: &GetOptions{}, - wantAction: &getAction{ - Key: "/foo", - Sorted: false, - Recursive: false, - }, - }, - // populated GetOptions - { - key: "/foo", - opts: &GetOptions{ - Sort: true, - Recursive: true, - Quorum: true, - }, - wantAction: &getAction{ - Key: "/foo", - Sorted: true, - Recursive: true, - Quorum: true, - }, - }, - } - - for i, tt := range tests { - client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} - kAPI := httpKeysAPI{client: client} - kAPI.Get(context.Background(), tt.key, tt.opts) - } -} - -func TestHTTPKeysAPIGetError(t *testing.T) { - tests := []httpClient{ - // generic HTTP client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, - }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - kAPI := httpKeysAPI{client: tt} - resp, err := kAPI.Get(context.Background(), "/foo", nil) - if err == nil { - t.Errorf("#%d: received nil error", i) - } - if resp != nil { - t.Errorf("#%d: received non-nil Response: %#v", i, resp) - } - } -} - -func TestHTTPKeysAPIGetResponse(t *testing.T) { - client := &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"42"}}, - }, - body: 
[]byte(`{"action":"get","node":{"key":"/pants/foo/bar","modifiedIndex":25,"createdIndex":19,"nodes":[{"key":"/pants/foo/bar/baz","value":"snarf","createdIndex":21,"modifiedIndex":25}]}}`), - } - - wantResponse := &Response{ - Action: "get", - Node: &Node{ - Key: "/pants/foo/bar", - Nodes: []*Node{ - {Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: 21, ModifiedIndex: 25}, - }, - CreatedIndex: uint64(19), - ModifiedIndex: uint64(25), - }, - Index: uint64(42), - } - - kAPI := &httpKeysAPI{client: client, prefix: "/pants"} - resp, err := kAPI.Get(context.Background(), "/foo/bar", &GetOptions{Recursive: true}) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp) - } -} - -func TestHTTPKeysAPIDeleteAction(t *testing.T) { - tests := []struct { - key string - opts *DeleteOptions - wantAction httpAction - }{ - // nil DeleteOptions - { - key: "/foo", - opts: nil, - wantAction: &deleteAction{ - Key: "/foo", - PrevValue: "", - PrevIndex: 0, - Recursive: false, - }, - }, - // empty DeleteOptions - { - key: "/foo", - opts: &DeleteOptions{}, - wantAction: &deleteAction{ - Key: "/foo", - PrevValue: "", - PrevIndex: 0, - Recursive: false, - }, - }, - // populated DeleteOptions - { - key: "/foo", - opts: &DeleteOptions{ - PrevValue: "baz", - PrevIndex: 13, - Recursive: true, - }, - wantAction: &deleteAction{ - Key: "/foo", - PrevValue: "baz", - PrevIndex: 13, - Recursive: true, - }, - }, - } - - for i, tt := range tests { - client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction} - kAPI := httpKeysAPI{client: client} - kAPI.Delete(context.Background(), tt.key, tt.opts) - } -} - -func TestHTTPKeysAPIDeleteError(t *testing.T) { - tests := []httpClient{ - // generic HTTP client failure - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unusable status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusTeapot, 
- }, - }, - - // etcd Error response - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`), - }, - } - - for i, tt := range tests { - kAPI := httpKeysAPI{client: tt} - resp, err := kAPI.Delete(context.Background(), "/foo", nil) - if err == nil { - t.Errorf("#%d: received nil error", i) - } - if resp != nil { - t.Errorf("#%d: received non-nil Response: %#v", i, resp) - } - } -} - -func TestHTTPKeysAPIDeleteResponse(t *testing.T) { - client := &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - Header: http.Header{"X-Etcd-Index": []string{"22"}}, - }, - body: []byte(`{"action":"delete","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":22,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`), - } - - wantResponse := &Response{ - Action: "delete", - Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(22)}, - PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)}, - Index: uint64(22), - } - - kAPI := &httpKeysAPI{client: client, prefix: "/pants"} - resp, err := kAPI.Delete(context.Background(), "/foo/bar/baz", nil) - if err != nil { - t.Errorf("non-nil error: %#v", err) - } - if !reflect.DeepEqual(wantResponse, resp) { - t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp) - } -} - -func TestHTTPKeysAPICreateAction(t *testing.T) { - act := &setAction{ - Key: "/foo", - Value: "bar", - PrevExist: PrevNoExist, - PrevIndex: 0, - PrevValue: "", - TTL: 0, - } - - kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}} - kAPI.Create(context.Background(), "/foo", "bar") -} - -func TestHTTPKeysAPICreateInOrderAction(t *testing.T) { - act := &createInOrderAction{ - Dir: "/foo", - Value: "bar", - TTL: 0, - } - 
kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}} - kAPI.CreateInOrder(context.Background(), "/foo", "bar", nil) -} - -func TestHTTPKeysAPIUpdateAction(t *testing.T) { - act := &setAction{ - Key: "/foo", - Value: "bar", - PrevExist: PrevExist, - PrevIndex: 0, - PrevValue: "", - TTL: 0, - } - - kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}} - kAPI.Update(context.Background(), "/foo", "bar") -} - -func TestNodeTTLDuration(t *testing.T) { - tests := []struct { - node *Node - want time.Duration - }{ - { - node: &Node{TTL: 0}, - want: 0, - }, - { - node: &Node{TTL: 97}, - want: 97 * time.Second, - }, - } - - for i, tt := range tests { - got := tt.node.TTLDuration() - if tt.want != got { - t.Errorf("#%d: incorrect duration: want=%v got=%v", i, tt.want, got) - } - } -} diff --git a/client/v2/main_test.go b/client/v2/main_test.go deleted file mode 100644 index 2a0195aadd6..00000000000 --- a/client/v2/main_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client_test - -import ( - "net/http" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func exampleEndpoints() []string { return nil } -func exampleTransport() *http.Transport { return nil } - -func forUnitTestsRunInMockedContext(mocking func(), example func()) { - mocking() - // TODO: Call 'example' when mocking() provides realistic mocking of transport. 
- - // The real testing logic of examples gets executed - // as part of ./tests/integration/client/example/... -} - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/client/v2/members_test.go b/client/v2/members_test.go deleted file mode 100644 index ecea78096c8..00000000000 --- a/client/v2/members_test.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "context" - "encoding/json" - "errors" - "net/http" - "net/url" - "reflect" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/types" -) - -func TestMembersAPIActionList(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionList{} - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members", - } - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "GET", wantURL, http.Header{}, nil) - if err != nil { - t.Error(err.Error()) - } -} - -func TestMembersAPIActionAdd(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionAdd{ - peerURLs: types.URLs([]url.URL{ - {Scheme: "https", Host: "127.0.0.1:8081"}, - {Scheme: "http", Host: "127.0.0.1:8080"}, - }), - } - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members", - } - wantHeader := http.Header{ - "Content-Type": []string{"application/json"}, - } - wantBody := 
[]byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`) - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "POST", wantURL, wantHeader, wantBody) - if err != nil { - t.Error(err.Error()) - } -} - -func TestMembersAPIActionUpdate(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionUpdate{ - memberID: "0xabcd", - peerURLs: types.URLs([]url.URL{ - {Scheme: "https", Host: "127.0.0.1:8081"}, - {Scheme: "http", Host: "127.0.0.1:8080"}, - }), - } - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members/0xabcd", - } - wantHeader := http.Header{ - "Content-Type": []string{"application/json"}, - } - wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`) - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "PUT", wantURL, wantHeader, wantBody) - if err != nil { - t.Error(err.Error()) - } -} - -func TestMembersAPIActionRemove(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionRemove{memberID: "XXX"} - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members/XXX", - } - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "DELETE", wantURL, http.Header{}, nil) - if err != nil { - t.Error(err.Error()) - } -} - -func TestMembersAPIActionLeader(t *testing.T) { - ep := url.URL{Scheme: "http", Host: "example.com"} - act := &membersAPIActionLeader{} - - wantURL := &url.URL{ - Scheme: "http", - Host: "example.com", - Path: "/v2/members/leader", - } - - got := *act.HTTPRequest(ep) - err := assertRequest(got, "GET", wantURL, http.Header{}, nil) - if err != nil { - t.Error(err.Error()) - } -} - -func TestAssertStatusCode(t *testing.T) { - if err := assertStatusCode(404, 400); err == nil { - t.Errorf("assertStatusCode failed to detect conflict in 400 vs 404") - } - - if err := assertStatusCode(404, 400, 404); err != nil { - t.Errorf("assertStatusCode found conflict in (404,400) 
vs 400: %v", err) - } -} - -func TestV2MembersURL(t *testing.T) { - got := v2MembersURL(url.URL{ - Scheme: "http", - Host: "foo.example.com:4002", - Path: "/pants", - }) - want := &url.URL{ - Scheme: "http", - Host: "foo.example.com:4002", - Path: "/pants/v2/members", - } - - if !reflect.DeepEqual(want, got) { - t.Fatalf("v2MembersURL got %#v, want %#v", got, want) - } -} - -func TestMemberUnmarshal(t *testing.T) { - tests := []struct { - body []byte - wantMember Member - wantError bool - }{ - // no URLs, just check ID & Name - { - body: []byte(`{"id": "c", "name": "dungarees"}`), - wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil}, - }, - - // both client and peer URLs - { - body: []byte(`{"peerURLs": ["http://127.0.0.1:2379"], "clientURLs": ["http://127.0.0.1:2379"]}`), - wantMember: Member{ - PeerURLs: []string{ - "http://127.0.0.1:2379", - }, - ClientURLs: []string{ - "http://127.0.0.1:2379", - }, - }, - }, - - // multiple peer URLs - { - body: []byte(`{"peerURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), - wantMember: Member{ - PeerURLs: []string{ - "http://127.0.0.1:2379", - "https://example.com", - }, - ClientURLs: nil, - }, - }, - - // multiple client URLs - { - body: []byte(`{"clientURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), - wantMember: Member{ - PeerURLs: nil, - ClientURLs: []string{ - "http://127.0.0.1:2379", - "https://example.com", - }, - }, - }, - - // invalid JSON - { - body: []byte(`{"peerU`), - wantError: true, - }, - } - - for i, tt := range tests { - got := Member{} - err := json.Unmarshal(tt.body, &got) - if tt.wantError != (err != nil) { - t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err) - continue - } - - if !reflect.DeepEqual(tt.wantMember, got) { - t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got) - } - } -} - -func TestMemberCollectionUnmarshalFail(t *testing.T) { - mc := &memberCollection{} - if err := mc.UnmarshalJSON([]byte(`{`)); err == nil 
{ - t.Errorf("got nil error") - } -} - -func TestMemberCollectionUnmarshal(t *testing.T) { - tests := []struct { - body []byte - want memberCollection - }{ - { - body: []byte(`{}`), - want: memberCollection([]Member{}), - }, - { - body: []byte(`{"members":[]}`), - want: memberCollection([]Member{}), - }, - { - body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), - want: memberCollection( - []Member{ - { - ID: "2745e2525fce8fe", - Name: "node3", - PeerURLs: []string{ - "http://127.0.0.1:7003", - }, - ClientURLs: []string{ - "http://127.0.0.1:4003", - }, - }, - { - ID: "42134f434382925", - Name: "node1", - PeerURLs: []string{ - "http://127.0.0.1:2380", - "http://127.0.0.1:7001", - }, - ClientURLs: []string{ - "http://127.0.0.1:2379", - "http://127.0.0.1:4001", - }, - }, - { - ID: "94088180e21eb87b", - Name: "node2", - PeerURLs: []string{ - "http://127.0.0.1:7002", - }, - ClientURLs: []string{ - "http://127.0.0.1:4002", - }, - }, - }, - ), - }, - } - - for i, tt := range tests { - var got memberCollection - err := json.Unmarshal(tt.body, &got) - if err != nil { - t.Errorf("#%d: unexpected error: %v", i, err) - continue - } - - if !reflect.DeepEqual(tt.want, got) { - t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got) - } - } -} - -func TestMemberCreateRequestMarshal(t *testing.T) { - req := memberCreateOrUpdateRequest{ - PeerURLs: types.URLs([]url.URL{ - {Scheme: "http", Host: "127.0.0.1:8081"}, - {Scheme: "https", Host: "127.0.0.1:8080"}, - }), - } - want := []byte(`{"peerURLs":["http://127.0.0.1:8081","https://127.0.0.1:8080"]}`) - - got, err := json.Marshal(&req) - if err != nil 
{ - t.Fatalf("Marshal returned unexpected err=%v", err) - } - - if !reflect.DeepEqual(want, got) { - t.Fatalf("Failed to marshal memberCreateRequest: want=%s, got=%s", want, got) - } -} - -func TestHTTPMembersAPIAddSuccess(t *testing.T) { - wantAction := &membersAPIActionAdd{ - peerURLs: types.URLs([]url.URL{ - {Scheme: "http", Host: "127.0.0.1:7002"}, - }), - } - - mAPI := &httpMembersAPI{ - client: &actionAssertingHTTPClient{ - t: t, - act: wantAction, - resp: http.Response{ - StatusCode: http.StatusCreated, - }, - body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"]}`), - }, - } - - wantResponseMember := &Member{ - ID: "94088180e21eb87b", - PeerURLs: []string{"http://127.0.0.1:7002"}, - } - - m, err := mAPI.Add(context.Background(), "http://127.0.0.1:7002") - if err != nil { - t.Errorf("got non-nil err: %#v", err) - } - if !reflect.DeepEqual(wantResponseMember, m) { - t.Errorf("incorrect Member: want=%#v got=%#v", wantResponseMember, m) - } -} - -func TestHTTPMembersAPIAddError(t *testing.T) { - okPeer := "http://example.com:2379" - tests := []struct { - peerURL string - client httpClient - - // if wantErr == nil, assert that the returned error is non-nil - // if wantErr != nil, assert that the returned error matches - wantErr error - }{ - // malformed peer URL - { - peerURL: ":", - }, - - // generic httpClient failure - { - peerURL: okPeer, - client: &staticHTTPClient{err: errors.New("fail!")}, - }, - - // unrecognized HTTP status code - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{StatusCode: http.StatusTeapot}, - }, - }, - - // unmarshal body into membersError on StatusConflict - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusConflict, - }, - body: []byte(`{"message":"fail!"}`), - }, - wantErr: membersError{Message: "fail!"}, - }, - - // fail to unmarshal body on StatusConflict - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{ - 
StatusCode: http.StatusConflict, - }, - body: []byte(`{"`), - }, - }, - - // fail to unmarshal body on StatusCreated - { - peerURL: okPeer, - client: &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusCreated, - }, - body: []byte(`{"id":"XX`), - }, - }, - } - - for i, tt := range tests { - mAPI := &httpMembersAPI{client: tt.client} - m, err := mAPI.Add(context.Background(), tt.peerURL) - if err == nil { - t.Errorf("#%d: got nil err", i) - } - if tt.wantErr != nil && !reflect.DeepEqual(tt.wantErr, err) { - t.Errorf("#%d: incorrect error: want=%#v got=%#v", i, tt.wantErr, err) - } - if m != nil { - t.Errorf("#%d: got non-nil Member", i) - } - } -} - -func TestHTTPMembersAPIRemoveSuccess(t *testing.T) { - wantAction := &membersAPIActionRemove{ - memberID: "94088180e21eb87b", - } - - mAPI := &httpMembersAPI{ - client: &actionAssertingHTTPClient{ - t: t, - act: wantAction, - resp: http.Response{ - StatusCode: http.StatusNoContent, - }, - }, - } - - if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err != nil { - t.Errorf("got non-nil err: %#v", err) - } -} - -func TestHTTPMembersAPIRemoveFail(t *testing.T) { - tests := []httpClient{ - // generic error - &staticHTTPClient{ - err: errors.New("fail!"), - }, - - // unexpected HTTP status code - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusInternalServerError, - }, - }, - } - - for i, tt := range tests { - mAPI := &httpMembersAPI{client: tt} - if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err == nil { - t.Errorf("#%d: got nil err", i) - } - } -} - -func TestHTTPMembersAPIListSuccess(t *testing.T) { - wantAction := &membersAPIActionList{} - mAPI := &httpMembersAPI{ - client: &actionAssertingHTTPClient{ - t: t, - act: wantAction, - resp: http.Response{ - StatusCode: http.StatusOK, - }, - body: []byte(`{"members":[{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}]}`), - }, - } - - 
wantResponseMembers := []Member{ - { - ID: "94088180e21eb87b", - Name: "node2", - PeerURLs: []string{"http://127.0.0.1:7002"}, - ClientURLs: []string{"http://127.0.0.1:4002"}, - }, - } - - m, err := mAPI.List(context.Background()) - if err != nil { - t.Errorf("got non-nil err: %#v", err) - } - if !reflect.DeepEqual(wantResponseMembers, m) { - t.Errorf("incorrect Members: want=%#v got=%#v", wantResponseMembers, m) - } -} - -func TestHTTPMembersAPIListError(t *testing.T) { - tests := []httpClient{ - // generic httpClient failure - &staticHTTPClient{err: errors.New("fail!")}, - - // unrecognized HTTP status code - &staticHTTPClient{ - resp: http.Response{StatusCode: http.StatusTeapot}, - }, - - // fail to unmarshal body on StatusOK - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - }, - body: []byte(`[{"id":"XX`), - }, - } - - for i, tt := range tests { - mAPI := &httpMembersAPI{client: tt} - ms, err := mAPI.List(context.Background()) - if err == nil { - t.Errorf("#%d: got nil err", i) - } - if ms != nil { - t.Errorf("#%d: got non-nil Member slice", i) - } - } -} - -func TestHTTPMembersAPILeaderSuccess(t *testing.T) { - wantAction := &membersAPIActionLeader{} - mAPI := &httpMembersAPI{ - client: &actionAssertingHTTPClient{ - t: t, - act: wantAction, - resp: http.Response{ - StatusCode: http.StatusOK, - }, - body: []byte(`{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}`), - }, - } - - wantResponseMember := &Member{ - ID: "94088180e21eb87b", - Name: "node2", - PeerURLs: []string{"http://127.0.0.1:7002"}, - ClientURLs: []string{"http://127.0.0.1:4002"}, - } - - m, err := mAPI.Leader(context.Background()) - if err != nil { - t.Errorf("err = %v, want %v", err, nil) - } - if !reflect.DeepEqual(wantResponseMember, m) { - t.Errorf("incorrect member: member = %v, want %v", wantResponseMember, m) - } -} - -func TestHTTPMembersAPILeaderError(t *testing.T) { - tests := []httpClient{ - // 
generic httpClient failure - &staticHTTPClient{err: errors.New("fail!")}, - - // unrecognized HTTP status code - &staticHTTPClient{ - resp: http.Response{StatusCode: http.StatusTeapot}, - }, - - // fail to unmarshal body on StatusOK - &staticHTTPClient{ - resp: http.Response{ - StatusCode: http.StatusOK, - }, - body: []byte(`[{"id":"XX`), - }, - } - - for i, tt := range tests { - mAPI := &httpMembersAPI{client: tt} - m, err := mAPI.Leader(context.Background()) - if err == nil { - t.Errorf("#%d: err = nil, want not nil", i) - } - if m != nil { - t.Errorf("member slice = %v, want nil", m) - } - } -} diff --git a/client/v2/util.go b/client/v2/util.go deleted file mode 100644 index 15a8babff4d..00000000000 --- a/client/v2/util.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "regexp" -) - -var ( - roleNotFoundRegExp *regexp.Regexp - userNotFoundRegExp *regexp.Regexp -) - -func init() { - roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") - userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") -} - -// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. -func IsKeyNotFound(err error) bool { - if cErr, ok := err.(Error); ok { - return cErr.Code == ErrorCodeKeyNotFound - } - return false -} - -// IsRoleNotFound returns true if the error means role not found of v2 API. 
-func IsRoleNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return roleNotFoundRegExp.MatchString(ae.Message) - } - return false -} - -// IsUserNotFound returns true if the error means user not found of v2 API. -func IsUserNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return userNotFoundRegExp.MatchString(ae.Message) - } - return false -} diff --git a/client/v3/LICENSE b/client/v3/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/client/v3/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/client/v3/README.md b/client/v3/README.md deleted file mode 100644 index af0087ebcc0..00000000000 --- a/client/v3/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# etcd/client/v3 - -[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/client/v3) - -`etcd/clientv3` is the official Go etcd client for v3. - -## Install - -```bash -go get go.etcd.io/etcd/client/v3 -``` - -## Get started - -Create client using `clientv3.New`: - -```go -import clientv3 "go.etcd.io/etcd/client/v3" - -func main() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, - }) - if err != nil { - // handle error! - } - defer cli.Close() -} -``` - -etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses -[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. -If the client is not closed, the connection will have leaky goroutines. 
To specify client request timeout, -pass `context.WithTimeout` to APIs: - -```go -ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := cli.Put(ctx, "sample_key", "sample_value") -cancel() -if err != nil { - // handle error! -} -// use the response -``` - -For full compatibility, it is recommended to install released versions of clients using go modules. - -## Error Handling - -etcd client returns 2 types of errors: - -1. context error: canceled or deadline exceeded. -2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/api/v3rpc/rpctypes). - -Here is the example code to handle client errors: - -```go -resp, err := cli.Put(ctx, "", "") -if err != nil { - switch err { - case context.Canceled: - log.Fatalf("ctx is canceled by another routine: %v", err) - case context.DeadlineExceeded: - log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) - case rpctypes.ErrEmptyKey: - log.Fatalf("client-side error: %v", err) - default: - log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) - } -} -``` - -## Metrics - -The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/main/tests/integration/clientv3/examples/example_metrics_test.go). - -## Namespacing - -The [namespace](https://godoc.org/go.etcd.io/etcd/client/v3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. - -## Request size limit - -Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`. 
- -## Examples - -More code [examples](https://github.com/etcd-io/etcd/tree/main/tests/integration/clientv3/examples) can be found at [GoDoc](https://pkg.go.dev/go.etcd.io/etcd/client/v3). diff --git a/client/v3/auth.go b/client/v3/auth.go deleted file mode 100644 index ae85ec9a942..00000000000 --- a/client/v3/auth.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "fmt" - "strings" - - "google.golang.org/grpc" - - "go.etcd.io/etcd/api/v3/authpb" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -type ( - AuthEnableResponse pb.AuthEnableResponse - AuthDisableResponse pb.AuthDisableResponse - AuthStatusResponse pb.AuthStatusResponse - AuthenticateResponse pb.AuthenticateResponse - AuthUserAddResponse pb.AuthUserAddResponse - AuthUserDeleteResponse pb.AuthUserDeleteResponse - AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse - AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse - AuthUserGetResponse pb.AuthUserGetResponse - AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse - AuthRoleAddResponse pb.AuthRoleAddResponse - AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse - AuthRoleGetResponse pb.AuthRoleGetResponse - AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse - AuthRoleDeleteResponse pb.AuthRoleDeleteResponse - AuthUserListResponse pb.AuthUserListResponse - AuthRoleListResponse 
pb.AuthRoleListResponse - - PermissionType authpb.Permission_Type - Permission authpb.Permission -) - -const ( - PermRead = authpb.READ - PermWrite = authpb.WRITE - PermReadWrite = authpb.READWRITE -) - -type UserAddOptions authpb.UserAddOptions - -type Auth interface { - // Authenticate login and get token - Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) - - // AuthEnable enables auth of an etcd cluster. - AuthEnable(ctx context.Context) (*AuthEnableResponse, error) - - // AuthDisable disables auth of an etcd cluster. - AuthDisable(ctx context.Context) (*AuthDisableResponse, error) - - // AuthStatus returns the status of auth of an etcd cluster. - AuthStatus(ctx context.Context) (*AuthStatusResponse, error) - - // UserAdd adds a new user to an etcd cluster. - UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) - - // UserAddWithOptions adds a new user to an etcd cluster with some options. - UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error) - - // UserDelete deletes a user from an etcd cluster. - UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user. - UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to a user. - UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) - - // UserGet gets a detailed information of a user. - UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) - - // UserList gets a list of all users. - UserList(ctx context.Context) (*AuthUserListResponse, error) - - // UserRevokeRole revokes a role of a user. - UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role to an etcd cluster. 
- RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role. - RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) - - // RoleGet gets a detailed information of a role. - RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) - - // RoleList gets a list of all roles. - RoleList(ctx context.Context) (*AuthRoleListResponse, error) - - // RoleRevokePermission revokes a permission from a role. - RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) - - // RoleDelete deletes a role. - RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) -} - -type authClient struct { - remote pb.AuthClient - callOpts []grpc.CallOption -} - -func NewAuth(c *Client) Auth { - api := &authClient{remote: RetryAuthClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth { - api := &authClient{remote: remote} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthenticateResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) - return (*AuthEnableResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) 
- return (*AuthDisableResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) { - resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...) - return (*AuthStatusResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) 
- return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) - return (*AuthUserListResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) - return (*AuthRoleAddResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { - perm := &authpb.Permission{ - Key: []byte(key), - RangeEnd: []byte(rangeEnd), - PermType: authpb.Permission_Type(permType), - } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) - return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) 
- return (*AuthRoleGetResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) - return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) -} - -func StrToPermissionType(s string) (PermissionType, error) { - val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] - if ok { - return PermissionType(val), nil - } - return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) -} diff --git a/client/v3/client.go b/client/v3/client.go deleted file mode 100644 index 0c91889fa33..00000000000 --- a/client/v3/client.go +++ /dev/null @@ -1,619 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - "errors" - "fmt" - "strconv" - "strings" - "sync" - "time" - - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - grpccredentials "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/status" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/v3/credentials" - "go.etcd.io/etcd/client/v3/internal/endpoint" - "go.etcd.io/etcd/client/v3/internal/resolver" -) - -var ( - ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") - ErrOldCluster = errors.New("etcdclient: old cluster version") -) - -// Client provides and manages an etcd v3 client session. -type Client struct { - Cluster - KV - Lease - Watcher - Auth - Maintenance - - conn *grpc.ClientConn - - cfg Config - creds grpccredentials.TransportCredentials - resolver *resolver.EtcdManualResolver - - epMu *sync.RWMutex - endpoints []string - - ctx context.Context - cancel context.CancelFunc - - // Username is a user name for authentication. - Username string - // Password is a password for authentication. - Password string - authTokenBundle credentials.Bundle - - callOpts []grpc.CallOption - - lgMu *sync.RWMutex - lg *zap.Logger -} - -// New creates a new etcdv3 client from a given configuration. -func New(cfg Config) (*Client, error) { - if len(cfg.Endpoints) == 0 { - return nil, ErrNoAvailableEndpoints - } - - return newClient(&cfg) -} - -// NewCtxClient creates a client with a context but no underlying grpc -// connection. This is useful for embedded cases that override the -// service interface implementations and do not need connection management. 
-func NewCtxClient(ctx context.Context, opts ...Option) *Client { - cctx, cancel := context.WithCancel(ctx) - c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)} - for _, opt := range opts { - opt(c) - } - if c.lg == nil { - c.lg = zap.NewNop() - } - return c -} - -// Option is a function type that can be passed as argument to NewCtxClient to configure client -type Option func(*Client) - -// NewFromURL creates a new etcdv3 client from a URL. -func NewFromURL(url string) (*Client, error) { - return New(Config{Endpoints: []string{url}}) -} - -// NewFromURLs creates a new etcdv3 client from URLs. -func NewFromURLs(urls []string) (*Client, error) { - return New(Config{Endpoints: urls}) -} - -// WithZapLogger is a NewCtxClient option that overrides the logger -func WithZapLogger(lg *zap.Logger) Option { - return func(c *Client) { - c.lg = lg - } -} - -// WithLogger overrides the logger. -// -// Deprecated: Please use WithZapLogger or Logger field in clientv3.Config -// -// Does not changes grpcLogger, that can be explicitly configured -// using grpc_zap.ReplaceGrpcLoggerV2(..) method. -func (c *Client) WithLogger(lg *zap.Logger) *Client { - c.lgMu.Lock() - c.lg = lg - c.lgMu.Unlock() - return c -} - -// GetLogger gets the logger. -// NOTE: This method is for internal use of etcd-client library and should not be used as general-purpose logger. -func (c *Client) GetLogger() *zap.Logger { - c.lgMu.RLock() - l := c.lg - c.lgMu.RUnlock() - return l -} - -// Close shuts down the client's etcd connections. -func (c *Client) Close() error { - c.cancel() - if c.Watcher != nil { - c.Watcher.Close() - } - if c.Lease != nil { - c.Lease.Close() - } - if c.conn != nil { - return toErr(c.ctx, c.conn.Close()) - } - return c.ctx.Err() -} - -// Ctx is a context for "out of band" messages (e.g., for sending -// "clean up" message when another context is canceled). It is -// canceled on client Close(). 
-func (c *Client) Ctx() context.Context { return c.ctx } - -// Endpoints lists the registered endpoints for the client. -func (c *Client) Endpoints() []string { - // copy the slice; protect original endpoints from being changed - c.epMu.RLock() - defer c.epMu.RUnlock() - eps := make([]string, len(c.endpoints)) - copy(eps, c.endpoints) - return eps -} - -// SetEndpoints updates client's endpoints. -func (c *Client) SetEndpoints(eps ...string) { - c.epMu.Lock() - defer c.epMu.Unlock() - c.endpoints = eps - - c.resolver.SetEndpoints(eps) -} - -// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. -func (c *Client) Sync(ctx context.Context) error { - mresp, err := c.MemberList(ctx) - if err != nil { - return err - } - var eps []string - for _, m := range mresp.Members { - if len(m.Name) != 0 && !m.IsLearner { - eps = append(eps, m.ClientURLs...) - } - } - c.SetEndpoints(eps...) - c.lg.Debug("set etcd endpoints by autoSync", zap.Strings("endpoints", eps)) - return nil -} - -func (c *Client) autoSync() { - if c.cfg.AutoSyncInterval == time.Duration(0) { - return - } - - for { - select { - case <-c.ctx.Done(): - return - case <-time.After(c.cfg.AutoSyncInterval): - ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) - err := c.Sync(ctx) - cancel() - if err != nil && err != c.ctx.Err() { - c.lg.Info("Auto sync endpoints failed.", zap.Error(err)) - } - } - } -} - -// dialSetupOpts gives the dial opts prior to any authentication. -func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { - if c.cfg.DialKeepAliveTime > 0 { - params := keepalive.ClientParameters{ - Time: c.cfg.DialKeepAliveTime, - Timeout: c.cfg.DialKeepAliveTimeout, - PermitWithoutStream: c.cfg.PermitWithoutStream, - } - opts = append(opts, grpc.WithKeepaliveParams(params)) - } - opts = append(opts, dopts...) 
- - if creds != nil { - opts = append(opts, grpc.WithTransportCredentials(creds)) - } else { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - // Interceptor retry and backoff. - // TODO: Replace all of clientv3/retry.go with RetryPolicy: - // https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130 - rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) - opts = append(opts, - // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. - // Streams that are safe to retry are enabled individually. - grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)), - grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)), - ) - - return opts, nil -} - -// Dial connects to a single endpoint using the client's config. -func (c *Client) Dial(ep string) (*grpc.ClientConn, error) { - creds := c.credentialsForEndpoint(ep) - - // Using ad-hoc created resolver, to guarantee only explicitly given - // endpoint is used. - return c.dial(creds, grpc.WithResolvers(resolver.New(ep))) -} - -func (c *Client) getToken(ctx context.Context) error { - var err error // return last error in a case of fail - - if c.Username == "" || c.Password == "" { - return nil - } - - resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password) - if err != nil { - if err == rpctypes.ErrAuthNotEnabled { - c.authTokenBundle.UpdateAuthToken("") - return nil - } - return err - } - c.authTokenBundle.UpdateAuthToken(resp.Token) - return nil -} - -// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host -// of the provided endpoint determines the scheme used for all endpoints of the client connection. 
-func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - creds := c.credentialsForEndpoint(c.Endpoints()[0]) - opts := append(dopts, grpc.WithResolvers(c.resolver)) - return c.dial(creds, opts...) -} - -// dial configures and dials any grpc balancer target. -func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts, err := c.dialSetupOpts(creds, dopts...) - if err != nil { - return nil, fmt.Errorf("failed to configure dialer: %v", err) - } - if c.authTokenBundle != nil { - opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) - } - - opts = append(opts, c.cfg.DialOptions...) - - dctx := c.ctx - if c.cfg.DialTimeout > 0 { - var cancel context.CancelFunc - dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) - defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options? - } - target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0])) - conn, err := grpc.DialContext(dctx, target, opts...) 
- if err != nil { - return nil, err - } - return conn, nil -} - -func authority(endpoint string) string { - spl := strings.SplitN(endpoint, "://", 2) - if len(spl) < 2 { - if strings.HasPrefix(endpoint, "unix:") { - return endpoint[len("unix:"):] - } - if strings.HasPrefix(endpoint, "unixs:") { - return endpoint[len("unixs:"):] - } - return endpoint - } - return spl[1] -} - -func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials { - r := endpoint.RequiresCredentials(ep) - switch r { - case endpoint.CREDS_DROP: - return nil - case endpoint.CREDS_OPTIONAL: - return c.creds - case endpoint.CREDS_REQUIRE: - if c.creds != nil { - return c.creds - } - return credentials.NewBundle(credentials.Config{}).TransportCredentials() - default: - panic(fmt.Errorf("unsupported CredsRequirement: %v", r)) - } -} - -func newClient(cfg *Config) (*Client, error) { - if cfg == nil { - cfg = &Config{} - } - var creds grpccredentials.TransportCredentials - if cfg.TLS != nil { - creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials() - } - - // use a temporary skeleton client to bootstrap first connection - baseCtx := context.TODO() - if cfg.Context != nil { - baseCtx = cfg.Context - } - - ctx, cancel := context.WithCancel(baseCtx) - client := &Client{ - conn: nil, - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, - epMu: new(sync.RWMutex), - callOpts: defaultCallOpts, - lgMu: new(sync.RWMutex), - } - - var err error - if cfg.Logger != nil { - client.lg = cfg.Logger - } else if cfg.LogConfig != nil { - client.lg, err = cfg.LogConfig.Build() - } else { - client.lg, err = logutil.CreateDefaultZapLogger(etcdClientDebugLevel()) - if client.lg != nil { - client.lg = client.lg.Named("etcd-client") - } - } - if err != nil { - return nil, err - } - - if cfg.Username != "" && cfg.Password != "" { - client.Username = cfg.Username - client.Password = cfg.Password - client.authTokenBundle = 
credentials.NewBundle(credentials.Config{}) - } - if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { - if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { - return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) - } - callOpts := []grpc.CallOption{ - defaultWaitForReady, - defaultMaxCallSendMsgSize, - defaultMaxCallRecvMsgSize, - } - if cfg.MaxCallSendMsgSize > 0 { - callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) - } - if cfg.MaxCallRecvMsgSize > 0 { - callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) - } - client.callOpts = callOpts - } - - client.resolver = resolver.New(cfg.Endpoints...) - - if len(cfg.Endpoints) < 1 { - client.cancel() - return nil, errors.New("at least one Endpoint is required in client config") - } - client.SetEndpoints(cfg.Endpoints...) - - // Use a provided endpoint target so that for https:// without any tls config given, then - // grpc will assume the certificate server name is the endpoint host. - conn, err := client.dialWithBalancer() - if err != nil { - client.cancel() - client.resolver.Close() - // TODO: Error like `fmt.Errorf(dialing [%s] failed: %v, strings.Join(cfg.Endpoints, ";"), err)` would help with debugging a lot. 
- return nil, err - } - client.conn = conn - - client.Cluster = NewCluster(client) - client.KV = NewKV(client) - client.Lease = NewLease(client) - client.Watcher = NewWatcher(client) - client.Auth = NewAuth(client) - client.Maintenance = NewMaintenance(client) - - //get token with established connection - ctx, cancel = client.ctx, func() {} - if client.cfg.DialTimeout > 0 { - ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout) - } - err = client.getToken(ctx) - if err != nil { - client.Close() - cancel() - //TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err) - return nil, err - } - cancel() - - if cfg.RejectOldCluster { - if err := client.checkVersion(); err != nil { - client.Close() - return nil, err - } - } - - go client.autoSync() - return client, nil -} - -// roundRobinQuorumBackoff retries against quorum between each backoff. -// This is intended for use with a round robin load balancer. -func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc { - return func(attempt uint) time.Duration { - // after each round robin across quorum, backoff for our wait between duration - n := uint(len(c.Endpoints())) - quorum := (n/2 + 1) - if attempt%quorum == 0 { - c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction)) - return jitterUp(waitBetween, jitterFraction) - } - c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum)) - return 0 - } -} - -func (c *Client) checkVersion() (err error) { - var wg sync.WaitGroup - - eps := c.Endpoints() - errc := make(chan error, len(eps)) - ctx, cancel := context.WithCancel(c.ctx) - if c.cfg.DialTimeout > 0 { - cancel() - ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) - } - - wg.Add(len(eps)) - for _, ep := range eps { - // if cluster is current, any endpoint gives a recent 
version - go func(e string) { - defer wg.Done() - resp, rerr := c.Status(ctx, e) - if rerr != nil { - errc <- rerr - return - } - vs := strings.Split(resp.Version, ".") - maj, min := 0, 0 - if len(vs) >= 2 { - var serr error - if maj, serr = strconv.Atoi(vs[0]); serr != nil { - errc <- serr - return - } - if min, serr = strconv.Atoi(vs[1]); serr != nil { - errc <- serr - return - } - } - if maj < 3 || (maj == 3 && min < 4) { - rerr = ErrOldCluster - } - errc <- rerr - }(ep) - } - // wait for success - for range eps { - if err = <-errc; err != nil { - break - } - } - cancel() - wg.Wait() - return err -} - -// ActiveConnection returns the current in-use connection -func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } - -// isHaltErr returns true if the given error and context indicate no forward -// progress can be made, even after reconnecting. -func isHaltErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return true - } - if err == nil { - return false - } - ev, _ := status.FromError(err) - // Unavailable codes mean the system will be right back. - // (e.g., can't connect, lost leader) - // Treat Internal codes as if something failed, leaving the - // system in an inconsistent state, but retrying could make progress. - // (e.g., failed in middle of send, corrupted frame) - // TODO: are permanent Internal errors possible from grpc? - return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal -} - -// isUnavailableErr returns true if the given error is an unavailable error -func isUnavailableErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return false - } - if err == nil { - return false - } - ev, ok := status.FromError(err) - if ok { - // Unavailable codes mean the system will be right back. 
- // (e.g., can't connect, lost leader) - return ev.Code() == codes.Unavailable - } - return false -} - -func toErr(ctx context.Context, err error) error { - if err == nil { - return nil - } - err = rpctypes.Error(err) - if _, ok := err.(rpctypes.EtcdError); ok { - return err - } - if ev, ok := status.FromError(err); ok { - code := ev.Code() - switch code { - case codes.DeadlineExceeded: - fallthrough - case codes.Canceled: - if ctx.Err() != nil { - err = ctx.Err() - } - } - } - return err -} - -func canceledByCaller(stopCtx context.Context, err error) bool { - if stopCtx.Err() == nil || err == nil { - return false - } - - return err == context.Canceled || err == context.DeadlineExceeded -} - -// IsConnCanceled returns true, if error is from a closed gRPC connection. -// ref. https://github.com/grpc/grpc-go/pull/1854 -func IsConnCanceled(err error) bool { - if err == nil { - return false - } - - // >= gRPC v1.23.x - s, ok := status.FromError(err) - if ok { - // connection is canceled or server has already closed the connection - return s.Code() == codes.Canceled || s.Message() == "transport is closing" - } - - // >= gRPC v1.10.x - if err == context.Canceled { - return true - } - - // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' - return strings.Contains(err.Error(), "grpc: the client connection is closing") -} diff --git a/client/v3/client_test.go b/client/v3/client_test.go deleted file mode 100644 index 0f52ad5d375..00000000000 --- a/client/v3/client_test.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/testutil" - - "google.golang.org/grpc" -) - -func NewClient(t *testing.T, cfg Config) (*Client, error) { - if cfg.Logger == nil { - cfg.Logger = zaptest.NewLogger(t).Named("client") - } - return New(cfg) -} - -func TestDialCancel(t *testing.T) { - testutil.RegisterLeakDetection(t) - - // accept first connection so client is created with dial timeout - ln, err := net.Listen("unix", "dialcancel:12345") - if err != nil { - t.Fatal(err) - } - defer ln.Close() - - ep := "unix://dialcancel:12345" - cfg := Config{ - Endpoints: []string{ep}, - DialTimeout: 30 * time.Second} - c, err := NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - - // connect to ipv4 black hole so dial blocks - c.SetEndpoints("http://254.0.0.1:12345") - - // issue Get to force redial attempts - getc := make(chan struct{}) - go func() { - defer close(getc) - // Get may hang forever on grpc's Stream.Header() if its - // context is never canceled. 
- c.Get(c.Ctx(), "abc") - }() - - // wait a little bit so client close is after dial starts - time.Sleep(100 * time.Millisecond) - - donec := make(chan struct{}) - go func() { - defer close(donec) - c.Close() - }() - - select { - case <-time.After(5 * time.Second): - t.Fatalf("failed to close") - case <-donec: - } - select { - case <-time.After(5 * time.Second): - t.Fatalf("get failed to exit") - case <-getc: - } -} - -func TestDialTimeout(t *testing.T) { - testutil.RegisterLeakDetection(t) - - wantError := context.DeadlineExceeded - - // grpc.WithBlock to block until connection up or timeout - testCfgs := []Config{ - { - Endpoints: []string{"http://254.0.0.1:12345"}, - DialTimeout: 2 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - }, - { - Endpoints: []string{"http://254.0.0.1:12345"}, - DialTimeout: time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - Username: "abc", - Password: "def", - }, - } - - for i, cfg := range testCfgs { - donec := make(chan error, 1) - go func(cfg Config, i int) { - // without timeout, dial continues forever on ipv4 black hole - c, err := NewClient(t, cfg) - if c != nil || err == nil { - t.Errorf("#%d: new client should fail", i) - } - donec <- err - }(cfg, i) - - time.Sleep(10 * time.Millisecond) - - select { - case err := <-donec: - t.Errorf("#%d: dial didn't wait (%v)", i, err) - default: - } - - select { - case <-time.After(5 * time.Second): - t.Errorf("#%d: failed to timeout dial on time", i) - case err := <-donec: - if err.Error() != wantError.Error() { - t.Errorf("#%d: unexpected error '%v', want '%v'", i, err, wantError) - } - } - } -} - -func TestDialNoTimeout(t *testing.T) { - cfg := Config{Endpoints: []string{"127.0.0.1:12345"}} - c, err := NewClient(t, cfg) - if c == nil || err != nil { - t.Fatalf("new client with DialNoWait should succeed, got %v", err) - } - c.Close() -} - -func TestIsHaltErr(t *testing.T) { - assert.Equal(t, - isHaltErr(context.TODO(), errors.New("etcdserver: some 
etcdserver error")), - true, - "error created by errors.New should be unavailable error", - ) - assert.Equal(t, - isHaltErr(context.TODO(), rpctypes.ErrGRPCStopped), - false, - fmt.Sprintf(`error "%v" should not be halt error`, rpctypes.ErrGRPCStopped), - ) - assert.Equal(t, - isHaltErr(context.TODO(), rpctypes.ErrGRPCNoLeader), - false, - fmt.Sprintf(`error "%v" should not be halt error`, rpctypes.ErrGRPCNoLeader), - ) - ctx, cancel := context.WithCancel(context.TODO()) - assert.Equal(t, - isHaltErr(ctx, nil), - false, - "no error and active context should be halt error", - ) - cancel() - assert.Equal(t, - isHaltErr(ctx, nil), - true, - "cancel on context should be halte error", - ) -} - -func TestIsUnavailableErr(t *testing.T) { - assert.Equal(t, - isUnavailableErr(context.TODO(), errors.New("etcdserver: some etcdserver error")), - false, - "error created by errors.New should not be unavailable error", - ) - assert.Equal(t, - isUnavailableErr(context.TODO(), rpctypes.ErrGRPCStopped), - true, - fmt.Sprintf(`error "%v" should be unavailable error`, rpctypes.ErrGRPCStopped), - ) - assert.Equal(t, - isUnavailableErr(context.TODO(), rpctypes.ErrGRPCNotCapable), - false, - fmt.Sprintf("error %v should not be unavailable error", rpctypes.ErrGRPCNotCapable), - ) - ctx, cancel := context.WithCancel(context.TODO()) - assert.Equal(t, - isUnavailableErr(ctx, nil), - false, - "no error and active context should not be unavailable error", - ) - cancel() - assert.Equal(t, - isUnavailableErr(ctx, nil), - false, - "cancel on context should not be unavailable error", - ) -} - -func TestCloseCtxClient(t *testing.T) { - ctx := context.Background() - c := NewCtxClient(ctx) - err := c.Close() - // Close returns ctx.toErr, a nil error means an open Done channel - if err == nil { - t.Errorf("failed to Close the client. 
%v", err) - } -} - -func TestWithLogger(t *testing.T) { - ctx := context.Background() - c := NewCtxClient(ctx) - if c.lg == nil { - t.Errorf("unexpected nil in *zap.Logger") - } - - c.WithLogger(nil) - if c.lg != nil { - t.Errorf("WithLogger should modify *zap.Logger") - } -} - -func TestZapWithLogger(t *testing.T) { - ctx := context.Background() - lg := zap.NewNop() - c := NewCtxClient(ctx, WithZapLogger(lg)) - - if c.lg != lg { - t.Errorf("WithZapLogger should modify *zap.Logger") - } -} - -func TestAuthTokenBundleNoOverwrite(t *testing.T) { - // This call in particular changes working directory to the tmp dir of - // the test. The `etcd-auth-test:0` can be created in local directory, - // not exceeding the longest allowed path on OsX. - testutil.BeforeTest(t) - - // Create a mock AuthServer to handle Authenticate RPCs. - lis, err := net.Listen("unix", "etcd-auth-test:0") - if err != nil { - t.Fatal(err) - } - defer lis.Close() - addr := "unix://" + lis.Addr().String() - srv := grpc.NewServer() - etcdserverpb.RegisterAuthServer(srv, mockAuthServer{}) - go srv.Serve(lis) - defer srv.Stop() - - // Create a client, which should call Authenticate on the mock server to - // exchange username/password for an auth token. - c, err := NewClient(t, Config{ - DialTimeout: 5 * time.Second, - Endpoints: []string{addr}, - Username: "foo", - Password: "bar", - }) - if err != nil { - t.Fatal(err) - } - defer c.Close() - oldTokenBundle := c.authTokenBundle - - // Call the public Dial again, which should preserve the original - // authTokenBundle. 
- gc, err := c.Dial(addr) - if err != nil { - t.Fatal(err) - } - defer gc.Close() - newTokenBundle := c.authTokenBundle - - if oldTokenBundle != newTokenBundle { - t.Error("Client.authTokenBundle has been overwritten during Client.Dial") - } -} - -func TestSyncFiltersMembers(t *testing.T) { - c, _ := NewClient(t, Config{Endpoints: []string{"http://254.0.0.1:12345"}}) - defer c.Close() - c.Cluster = &mockCluster{ - []*etcdserverpb.Member{ - {ID: 0, Name: "", ClientURLs: []string{"http://254.0.0.1:12345"}, IsLearner: false}, - {ID: 1, Name: "isStarted", ClientURLs: []string{"http://254.0.0.2:12345"}, IsLearner: true}, - {ID: 2, Name: "isStartedAndNotLearner", ClientURLs: []string{"http://254.0.0.3:12345"}, IsLearner: false}, - }, - } - c.Sync(context.Background()) - - endpoints := c.Endpoints() - if len(endpoints) != 1 || endpoints[0] != "http://254.0.0.3:12345" { - t.Error("Client.Sync uses learner and/or non-started member client URLs") - } -} - -func TestClientRejectOldCluster(t *testing.T) { - testutil.BeforeTest(t) - var tests = []struct { - name string - endpoints []string - versions []string - expectedError error - }{ - { - name: "all new versions with the same value", - endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"}, - versions: []string{"3.5.4", "3.5.4", "3.5.4"}, - expectedError: nil, - }, - { - name: "all new versions with different values", - endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"}, - versions: []string{"3.5.4", "3.5.4", "3.4.0"}, - expectedError: nil, - }, - { - name: "all old versions with different values", - endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"}, - versions: []string{"3.3.0", "3.3.0", "3.4.0"}, - expectedError: ErrOldCluster, - }, - { - name: "all old versions with the same value", - endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"}, - versions: []string{"3.3.0", "3.3.0", "3.3.0"}, - 
expectedError: ErrOldCluster, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if len(tt.endpoints) != len(tt.versions) || len(tt.endpoints) == 0 { - t.Errorf("Unexpected endpoints and versions length, len(endpoints):%d, len(versions):%d", len(tt.endpoints), len(tt.versions)) - return - } - endpointToVersion := make(map[string]string) - for j := range tt.endpoints { - endpointToVersion[tt.endpoints[j]] = tt.versions[j] - } - c := &Client{ - ctx: context.Background(), - endpoints: tt.endpoints, - epMu: new(sync.RWMutex), - Maintenance: &mockMaintenance{ - Version: endpointToVersion, - }, - } - - if err := c.checkVersion(); err != tt.expectedError { - t.Errorf("heckVersion err:%v", err) - } - }) - - } - -} - -type mockMaintenance struct { - Version map[string]string -} - -func (mm mockMaintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - return &StatusResponse{Version: mm.Version[endpoint]}, nil -} - -func (mm mockMaintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { - return nil, nil -} - -func (mm mockMaintenance) AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) { - return nil, nil -} - -func (mm mockMaintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - return nil, nil -} - -func (mm mockMaintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { - return nil, nil -} - -func (mm mockMaintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) { - return nil, nil -} - -func (mm mockMaintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - return nil, nil -} - -func (mm mockMaintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { - return nil, nil -} - -func (mm mockMaintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) { - return nil, nil -} - -type 
mockAuthServer struct { - *etcdserverpb.UnimplementedAuthServer -} - -func (mockAuthServer) Authenticate(context.Context, *etcdserverpb.AuthenticateRequest) (*etcdserverpb.AuthenticateResponse, error) { - return &etcdserverpb.AuthenticateResponse{Token: "mock-token"}, nil -} - -type mockCluster struct { - members []*etcdserverpb.Member -} - -func (mc *mockCluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - return &MemberListResponse{Members: mc.members}, nil -} - -func (mc *mockCluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - return nil, nil -} - -func (mc *mockCluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - return nil, nil -} - -func (mc *mockCluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { - return nil, nil -} - -func (mc *mockCluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { - return nil, nil -} - -func (mc *mockCluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { - return nil, nil -} diff --git a/client/v3/clientv3util/example_key_test.go b/client/v3/clientv3util/example_key_test.go deleted file mode 100644 index fbbbe417260..00000000000 --- a/client/v3/clientv3util/example_key_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3util_test - -import ( - "context" - "log" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/clientv3util" -) - -func ExampleKeyMissing() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"127.0.0.1:2379"}, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - kvc := clientv3.NewKV(cli) - - // perform a put only if key is missing - // It is useful to do the check atomically to avoid overwriting - // the existing key which would generate potentially unwanted events, - // unless of course you wanted to do an overwrite no matter what. - _, err = kvc.Txn(context.Background()). - If(clientv3util.KeyMissing("purpleidea")). - Then(clientv3.OpPut("purpleidea", "hello world")). - Commit() - if err != nil { - log.Fatal(err) - } -} - -func ExampleKeyExists() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"127.0.0.1:2379"}, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - kvc := clientv3.NewKV(cli) - - // perform a delete only if key already exists - _, err = kvc.Txn(context.Background()). - If(clientv3util.KeyExists("purpleidea")). - Then(clientv3.OpDelete("purpleidea")). - Commit() - if err != nil { - log.Fatal(err) - } -} diff --git a/client/v3/clientv3util/util.go b/client/v3/clientv3util/util.go deleted file mode 100644 index 144777bd2c7..00000000000 --- a/client/v3/clientv3util/util.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clientv3util contains utility functions derived from clientv3. -package clientv3util - -import ( - clientv3 "go.etcd.io/etcd/client/v3" -) - -// KeyExists returns a comparison operation that evaluates to true iff the given -// key exists. It does this by checking if the key `Version` is greater than 0. -// It is a useful guard in transaction delete operations. -func KeyExists(key string) clientv3.Cmp { - return clientv3.Compare(clientv3.Version(key), ">", 0) -} - -// KeyMissing returns a comparison operation that evaluates to true iff the -// given key does not exist. -func KeyMissing(key string) clientv3.Cmp { - return clientv3.Compare(clientv3.Version(key), "=", 0) -} diff --git a/client/v3/cluster.go b/client/v3/cluster.go deleted file mode 100644 index 92d7cdb56b0..00000000000 --- a/client/v3/cluster.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - - "google.golang.org/grpc" -) - -type ( - Member pb.Member - MemberListResponse pb.MemberListResponse - MemberAddResponse pb.MemberAddResponse - MemberRemoveResponse pb.MemberRemoveResponse - MemberUpdateResponse pb.MemberUpdateResponse - MemberPromoteResponse pb.MemberPromoteResponse -) - -type Cluster interface { - // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*MemberListResponse, error) - - // MemberAdd adds a new member into the cluster. - MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) - - // MemberAddAsLearner adds a new learner member into the cluster. - MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) - - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) - - // MemberUpdate updates the peer addresses of the member. - MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) - - // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. 
- MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) -} - -type cluster struct { - remote pb.ClusterClient - callOpts []grpc.CallOption -} - -func NewCluster(c *Client) Cluster { - api := &cluster{remote: RetryClusterClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { - api := &cluster{remote: remote} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - return c.memberAdd(ctx, peerAddrs, false) -} - -func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - return c.memberAdd(ctx, peerAddrs, true) -} - -func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { - // fail-fast before panic in rafthttp - if _, err := types.NewURLs(peerAddrs); err != nil { - return nil, err - } - - r := &pb.MemberAddRequest{ - PeerURLs: peerAddrs, - IsLearner: isLearner, - } - resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberAddResponse)(resp), nil -} - -func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { - r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberRemoveResponse)(resp), nil -} - -func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { - // fail-fast before panic in rafthttp - if _, err := types.NewURLs(peerAddrs); err != nil { - return nil, err - } - - // it is safe to retry on update. - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) 
- if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - // it is safe to retry on list. - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...) - if err == nil { - return (*MemberListResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { - r := &pb.MemberPromoteRequest{ID: id} - resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*MemberPromoteResponse)(resp), nil -} diff --git a/client/v3/compact_op_test.go b/client/v3/compact_op_test.go deleted file mode 100644 index f483322adf5..00000000000 --- a/client/v3/compact_op_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "reflect" - "testing" - - "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -func TestCompactOp(t *testing.T) { - req1 := OpCompact(100, WithCompactPhysical()).toRequest() - req2 := &etcdserverpb.CompactionRequest{Revision: 100, Physical: true} - if !reflect.DeepEqual(req1, req2) { - t.Fatalf("expected %+v, got %+v", req2, req1) - } -} diff --git a/client/v3/compare.go b/client/v3/compare.go deleted file mode 100644 index e2967cf38ed..00000000000 --- a/client/v3/compare.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - pb "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -type CompareTarget int -type CompareResult int - -const ( - CompareVersion CompareTarget = iota - CompareCreated - CompareModified - CompareValue -) - -type Cmp pb.Compare - -func Compare(cmp Cmp, result string, v interface{}) Cmp { - var r pb.Compare_CompareResult - - switch result { - case "=": - r = pb.Compare_EQUAL - case "!=": - r = pb.Compare_NOT_EQUAL - case ">": - r = pb.Compare_GREATER - case "<": - r = pb.Compare_LESS - default: - panic("Unknown result op") - } - - cmp.Result = r - switch cmp.Target { - case pb.Compare_VALUE: - val, ok := v.(string) - if !ok { - panic("bad compare value") - } - cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} - case pb.Compare_VERSION: - cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} - case pb.Compare_CREATE: - cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} - case pb.Compare_MOD: - cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} - case pb.Compare_LEASE: - cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} - default: - panic("Unknown compare type") - } - return cmp -} - -func Value(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} -} - -func Version(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} -} - -func CreateRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} -} - -func ModRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_MOD} -} - -// LeaseValue compares a key's LeaseID to a value of your choosing. The empty -// LeaseID is 0, otherwise known as `NoLease`. -func LeaseValue(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_LEASE} -} - -// KeyBytes returns the byte slice holding with the comparison key. -func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } - -// WithKeyBytes sets the byte slice for the comparison key. 
-func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } - -// ValueBytes returns the byte slice holding the comparison value, if any. -func (cmp *Cmp) ValueBytes() []byte { - if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { - return tu.Value - } - return nil -} - -// WithValueBytes sets the byte slice for the comparison's value. -func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } - -// WithRange sets the comparison to scan the range [key, end). -func (cmp Cmp) WithRange(end string) Cmp { - cmp.RangeEnd = []byte(end) - return cmp -} - -// WithPrefix sets the comparison to scan all keys prefixed by the key. -func (cmp Cmp) WithPrefix() Cmp { - cmp.RangeEnd = getPrefix(cmp.Key) - return cmp -} - -// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. -func mustInt64(val interface{}) int64 { - if v, ok := val.(int64); ok { - return v - } - if v, ok := val.(int); ok { - return int64(v) - } - panic("bad value") -} - -// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an -// int64 otherwise. -func mustInt64orLeaseID(val interface{}) int64 { - if v, ok := val.(LeaseID); ok { - return int64(v) - } - return mustInt64(val) -} diff --git a/client/v3/concurrency/election.go b/client/v3/concurrency/election.go deleted file mode 100644 index 31e93d24280..00000000000 --- a/client/v3/concurrency/election.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency - -import ( - "context" - "errors" - "fmt" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" -) - -var ( - ErrElectionNotLeader = errors.New("election: not leader") - ErrElectionNoLeader = errors.New("election: no leader") -) - -type Election struct { - session *Session - - keyPrefix string - - leaderKey string - leaderRev int64 - leaderSession *Session - hdr *pb.ResponseHeader -} - -// NewElection returns a new election on a given key prefix. -func NewElection(s *Session, pfx string) *Election { - return &Election{session: s, keyPrefix: pfx + "/"} -} - -// ResumeElection initializes an election with a known leader. -func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { - return &Election{ - keyPrefix: pfx, - session: s, - leaderKey: leaderKey, - leaderRev: leaderRev, - leaderSession: s, - } -} - -// Campaign puts a value as eligible for the election on the prefix -// key. -// Multiple sessions can participate in the election for the -// same prefix, but only one can be the leader at a time. -// -// If the context is 'context.TODO()/context.Background()', the Campaign -// will continue to be blocked for other keys to be deleted, unless server -// returns a non-recoverable error (e.g. ErrCompacted). -// Otherwise, until the context is not cancelled or timed-out, Campaign will -// continue to be blocked until it becomes the leader. 
-func (e *Election) Campaign(ctx context.Context, val string) error { - s := e.session - client := e.session.Client() - - k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) - txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) - txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) - txn = txn.Else(v3.OpGet(k)) - resp, err := txn.Commit() - if err != nil { - return err - } - e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s - if !resp.Succeeded { - kv := resp.Responses[0].GetResponseRange().Kvs[0] - e.leaderRev = kv.CreateRevision - if string(kv.Value) != val { - if err = e.Proclaim(ctx, val); err != nil { - e.Resign(ctx) - return err - } - } - } - - _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) - if err != nil { - // clean up in case of context cancel - select { - case <-ctx.Done(): - e.Resign(client.Ctx()) - default: - e.leaderSession = nil - } - return err - } - e.hdr = resp.Header - - return nil -} - -// Proclaim lets the leader announce a new value without another election. -func (e *Election) Proclaim(ctx context.Context, val string) error { - if e.leaderSession == nil { - return ErrElectionNotLeader - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - txn := client.Txn(ctx).If(cmp) - txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) - tresp, terr := txn.Commit() - if terr != nil { - return terr - } - if !tresp.Succeeded { - e.leaderKey = "" - return ErrElectionNotLeader - } - - e.hdr = tresp.Header - return nil -} - -// Resign lets a leader start a new election. 
-func (e *Election) Resign(ctx context.Context) (err error) { - if e.leaderSession == nil { - return nil - } - client := e.session.Client() - cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) - resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() - if err == nil { - e.hdr = resp.Header - } - e.leaderKey = "" - e.leaderSession = nil - return err -} - -// Leader returns the leader value for the current election. -func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { - client := e.session.Client() - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return nil, err - } else if len(resp.Kvs) == 0 { - // no leader currently elected - return nil, ErrElectionNoLeader - } - return resp, nil -} - -// Observe returns a channel that reliably observes ordered leader proposals -// as GetResponse values on every current elected leader key. It will not -// necessarily fetch all historical leader updates, but will always post the -// most recent leader value. -// -// The channel closes when the context is canceled or the underlying watcher -// is otherwise disrupted. -func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { - retc := make(chan v3.GetResponse) - go e.observe(ctx, retc) - return retc -} - -func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { - client := e.session.Client() - - defer close(ch) - for { - resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) - if err != nil { - return - } - - var kv *mvccpb.KeyValue - var hdr *pb.ResponseHeader - - if len(resp.Kvs) == 0 { - cctx, cancel := context.WithCancel(ctx) - // wait for first key put on prefix - opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} - wch := client.Watch(cctx, e.keyPrefix, opts...) 
- for kv == nil { - wr, ok := <-wch - if !ok || wr.Err() != nil { - cancel() - return - } - // only accept puts; a delete will make observe() spin - for _, ev := range wr.Events { - if ev.Type == mvccpb.PUT { - hdr, kv = &wr.Header, ev.Kv - // may have multiple revs; hdr.rev = the last rev - // set to kv's rev in case batch has multiple Puts - hdr.Revision = kv.ModRevision - break - } - } - } - cancel() - } else { - hdr, kv = resp.Header, resp.Kvs[0] - } - - select { - case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: - case <-ctx.Done(): - return - } - - cctx, cancel := context.WithCancel(ctx) - wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) - keyDeleted := false - for !keyDeleted { - wr, ok := <-wch - if !ok { - cancel() - return - } - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - keyDeleted = true - break - } - resp.Header = &wr.Header - resp.Kvs = []*mvccpb.KeyValue{ev.Kv} - select { - case ch <- *resp: - case <-cctx.Done(): - cancel() - return - } - } - } - cancel() - } -} - -// Key returns the leader key if elected, empty string otherwise. -func (e *Election) Key() string { return e.leaderKey } - -// Rev returns the leader key's creation revision, if elected. -func (e *Election) Rev() int64 { return e.leaderRev } - -// Header is the response header from the last successful election proposal. 
-func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/client/v3/concurrency/example_election_test.go b/client/v3/concurrency/example_election_test.go deleted file mode 120000 index a76f0a7f4ef..00000000000 --- a/client/v3/concurrency/example_election_test.go +++ /dev/null @@ -1 +0,0 @@ -../../../tests/integration/clientv3/concurrency/example_election_test.go \ No newline at end of file diff --git a/client/v3/concurrency/example_mutex_test.go b/client/v3/concurrency/example_mutex_test.go deleted file mode 120000 index 053eb74ad6a..00000000000 --- a/client/v3/concurrency/example_mutex_test.go +++ /dev/null @@ -1 +0,0 @@ -../../../tests/integration/clientv3/concurrency/example_mutex_test.go \ No newline at end of file diff --git a/client/v3/concurrency/example_stm_test.go b/client/v3/concurrency/example_stm_test.go deleted file mode 120000 index d63639ecc68..00000000000 --- a/client/v3/concurrency/example_stm_test.go +++ /dev/null @@ -1 +0,0 @@ -../../../tests/integration/clientv3/concurrency/example_stm_test.go \ No newline at end of file diff --git a/client/v3/concurrency/key.go b/client/v3/concurrency/key.go deleted file mode 100644 index 8a5d6e1f175..00000000000 --- a/client/v3/concurrency/key.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package concurrency - -import ( - "context" - "errors" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" -) - -func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - var wr v3.WatchResponse - wch := client.Watch(cctx, key, v3.WithRev(rev)) - for wr = range wch { - for _, ev := range wr.Events { - if ev.Type == mvccpb.DELETE { - return nil - } - } - } - if err := wr.Err(); err != nil { - return err - } - if err := ctx.Err(); err != nil { - return err - } - return errors.New("lost watcher waiting for delete") -} - -// waitDeletes efficiently waits until all keys matching the prefix and no greater -// than the create revision are deleted. -func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { - getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) - for { - resp, err := client.Get(ctx, pfx, getOpts...) - if err != nil { - return nil, err - } - if len(resp.Kvs) == 0 { - return resp.Header, nil - } - lastKey := string(resp.Kvs[0].Key) - if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { - return nil, err - } - } -} diff --git a/client/v3/concurrency/main_test.go b/client/v3/concurrency/main_test.go deleted file mode 100644 index d8819be04d2..00000000000 --- a/client/v3/concurrency/main_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency_test - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func exampleEndpoints() []string { return nil } - -func forUnitTestsRunInMockedContext(mocking func(), example func()) { - mocking() - // TODO: Call 'example' when mocking() provides realistic mocking of transport. - - // The real testing logic of examples gets executed - // as part of ./tests/integration/clientv3/integration/... -} - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/client/v3/concurrency/mutex.go b/client/v3/concurrency/mutex.go deleted file mode 100644 index 7080f0b08dd..00000000000 --- a/client/v3/concurrency/mutex.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package concurrency - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - v3 "go.etcd.io/etcd/client/v3" -) - -// ErrLocked is returned by TryLock when Mutex is already locked by another session. -var ErrLocked = errors.New("mutex: Locked by another session") -var ErrSessionExpired = errors.New("mutex: session is expired") -var ErrLockReleased = errors.New("mutex: lock has already been released") - -// Mutex implements the sync Locker interface with etcd -type Mutex struct { - s *Session - - pfx string - myKey string - myRev int64 - hdr *pb.ResponseHeader -} - -func NewMutex(s *Session, pfx string) *Mutex { - return &Mutex{s, pfx + "/", "", -1, nil} -} - -// TryLock locks the mutex if not already locked by another session. -// If lock is held by another session, return immediately after attempting necessary cleanup -// The ctx argument is used for the sending/receiving Txn RPC. -func (m *Mutex) TryLock(ctx context.Context) error { - resp, err := m.tryAcquire(ctx) - if err != nil { - return err - } - // if no key on prefix / the minimum rev is key, already hold the lock - ownerKey := resp.Responses[1].GetResponseRange().Kvs - if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { - m.hdr = resp.Header - return nil - } - client := m.s.Client() - // Cannot lock, so delete the key - if _, err := client.Delete(ctx, m.myKey); err != nil { - return err - } - m.myKey = "\x00" - m.myRev = -1 - return ErrLocked -} - -// Lock locks the mutex with a cancelable context. If the context is canceled -// while trying to acquire the lock, the mutex tries to clean its stale lock entry. 
-func (m *Mutex) Lock(ctx context.Context) error { - resp, err := m.tryAcquire(ctx) - if err != nil { - return err - } - // if no key on prefix / the minimum rev is key, already hold the lock - ownerKey := resp.Responses[1].GetResponseRange().Kvs - if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { - m.hdr = resp.Header - return nil - } - client := m.s.Client() - // wait for deletion revisions prior to myKey - // TODO: early termination if the session key is deleted before other session keys with smaller revisions. - _, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) - // release lock key if wait failed - if werr != nil { - m.Unlock(client.Ctx()) - return werr - } - - // make sure the session is not expired, and the owner key still exists. - gresp, werr := client.Get(ctx, m.myKey) - if werr != nil { - m.Unlock(client.Ctx()) - return werr - } - - if len(gresp.Kvs) == 0 { // is the session key lost? - return ErrSessionExpired - } - m.hdr = gresp.Header - - return nil -} - -func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) { - s := m.s - client := m.s.Client() - - m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) - cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) - // put self in lock waiters via myKey; oldest waiter holds lock - put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) - // reuse key in case this session already holds the lock - get := v3.OpGet(m.myKey) - // fetch current holder to complete uncontended path with only one RPC - getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) 
- resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() - if err != nil { - return nil, err - } - m.myRev = resp.Header.Revision - if !resp.Succeeded { - m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision - } - return resp, nil -} - -func (m *Mutex) Unlock(ctx context.Context) error { - if m.myKey == "" || m.myRev <= 0 || m.myKey == "\x00" { - return ErrLockReleased - } - - if !strings.HasPrefix(m.myKey, m.pfx) { - return fmt.Errorf("invalid key %q, it should have prefix %q", m.myKey, m.pfx) - } - - client := m.s.Client() - if _, err := client.Delete(ctx, m.myKey); err != nil { - return err - } - m.myKey = "\x00" - m.myRev = -1 - return nil -} - -func (m *Mutex) IsOwner() v3.Cmp { - return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) -} - -func (m *Mutex) Key() string { return m.myKey } - -// Header is the response header received from etcd on acquiring the lock. -func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } - -type lockerMutex struct{ *Mutex } - -func (lm *lockerMutex) Lock() { - client := lm.s.Client() - if err := lm.Mutex.Lock(client.Ctx()); err != nil { - panic(err) - } -} -func (lm *lockerMutex) Unlock() { - client := lm.s.Client() - if err := lm.Mutex.Unlock(client.Ctx()); err != nil { - panic(err) - } -} - -// NewLocker creates a sync.Locker backed by an etcd mutex. -func NewLocker(s *Session, pfx string) sync.Locker { - return &lockerMutex{NewMutex(s, pfx)} -} diff --git a/client/v3/config.go b/client/v3/config.go deleted file mode 100644 index 4a26714a864..00000000000 --- a/client/v3/config.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "crypto/tls" - "time" - - "go.uber.org/zap" - "google.golang.org/grpc" - - "go.etcd.io/etcd/client/pkg/v3/transport" -) - -type Config struct { - // Endpoints is a list of URLs. - Endpoints []string `json:"endpoints"` - - // AutoSyncInterval is the interval to update endpoints with its latest members. - // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - - // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration `json:"dial-timeout"` - - // DialKeepAliveTime is the time after which client pings the server to see if - // transport is alive. - DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` - - // DialKeepAliveTimeout is the time that the client waits for a response for the - // keep-alive probe. If the response is not received in this time, the connection is closed. - DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` - - // MaxCallSendMsgSize is the client-side request send limit in bytes. - // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). - // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. - // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). - MaxCallSendMsgSize int - - // MaxCallRecvMsgSize is the client-side response receive limit. - // If 0, it defaults to "math.MaxInt32", because range response can - // easily exceed request send limits. 
- // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. - // ("--max-recv-bytes" flag to etcd). - MaxCallRecvMsgSize int - - // TLS holds the client secure credentials, if any. - TLS *tls.Config - - // Username is a user name for authentication. - Username string `json:"username"` - - // Password is a password for authentication. - Password string `json:"password"` - - // RejectOldCluster when set will refuse to create a client against an outdated cluster. - RejectOldCluster bool `json:"reject-old-cluster"` - - // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). - // For example, pass "grpc.WithBlock()" to block until the underlying connection is up. - // Without this, Dial returns immediately and connecting the server happens in background. - DialOptions []grpc.DialOption - - // Context is the default client context; it can be used to cancel grpc dial out and - // other operations that do not have an explicit context. - Context context.Context - - // Logger sets client-side logger. - // If nil, fallback to building LogConfig. - Logger *zap.Logger - - // LogConfig configures client-side logger. - // If nil, use the default logger. - // TODO: configure gRPC logger - LogConfig *zap.Config - - // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). - PermitWithoutStream bool `json:"permit-without-stream"` - - // TODO: support custom balancer picker -} - -// ConfigSpec is the configuration from users, which comes from command-line flags, -// environment variables or config file. It is a fully declarative configuration, -// and can be serialized & deserialized to/from JSON. 
-type ConfigSpec struct { - Endpoints []string `json:"endpoints"` - RequestTimeout time.Duration `json:"request-timeout"` - DialTimeout time.Duration `json:"dial-timeout"` - KeepAliveTime time.Duration `json:"keepalive-time"` - KeepAliveTimeout time.Duration `json:"keepalive-timeout"` - Secure *SecureConfig `json:"secure"` - Auth *AuthConfig `json:"auth"` -} - -type SecureConfig struct { - Cert string `json:"cert"` - Key string `json:"key"` - Cacert string `json:"cacert"` - ServerName string `json:"server-name"` - - InsecureTransport bool `json:"insecure-transport"` - InsecureSkipVerify bool `json:"insecure-skip-tls-verify"` -} - -type AuthConfig struct { - Username string `json:"username"` - Password string `json:"password"` -} - -func (cfg AuthConfig) Empty() bool { - return cfg.Username == "" && cfg.Password == "" -} - -// NewClientConfig creates a Config based on the provided ConfigSpec. -func NewClientConfig(confSpec *ConfigSpec, lg *zap.Logger) (*Config, error) { - tlsCfg, err := newTLSConfig(confSpec.Secure, lg) - if err != nil { - return nil, err - } - - cfg := &Config{ - Endpoints: confSpec.Endpoints, - DialTimeout: confSpec.DialTimeout, - DialKeepAliveTime: confSpec.KeepAliveTime, - DialKeepAliveTimeout: confSpec.KeepAliveTimeout, - TLS: tlsCfg, - } - - if confSpec.Auth != nil { - cfg.Username = confSpec.Auth.Username - cfg.Password = confSpec.Auth.Password - } - - return cfg, nil -} - -func newTLSConfig(scfg *SecureConfig, lg *zap.Logger) (*tls.Config, error) { - var ( - tlsCfg *tls.Config - err error - ) - - if scfg == nil { - return nil, nil - } - - if scfg.Cert != "" || scfg.Key != "" || scfg.Cacert != "" || scfg.ServerName != "" { - cfgtls := &transport.TLSInfo{ - CertFile: scfg.Cert, - KeyFile: scfg.Key, - TrustedCAFile: scfg.Cacert, - ServerName: scfg.ServerName, - Logger: lg, - } - if tlsCfg, err = cfgtls.ClientConfig(); err != nil { - return nil, err - } - } - - // If key/cert is not given but user wants secure connection, we - // should still 
setup an empty tls configuration for gRPC to setup - // secure connection. - if tlsCfg == nil && !scfg.InsecureTransport { - tlsCfg = &tls.Config{} - } - - // If the user wants to skip TLS verification then we should set - // the InsecureSkipVerify flag in tls configuration. - if scfg.InsecureSkipVerify { - if tlsCfg == nil { - tlsCfg = &tls.Config{} - } - tlsCfg.InsecureSkipVerify = scfg.InsecureSkipVerify - } - - return tlsCfg, nil -} diff --git a/client/v3/config_test.go b/client/v3/config_test.go deleted file mode 100644 index a99c3fd5864..00000000000 --- a/client/v3/config_test.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "crypto/tls" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/transport" -) - -func TestNewClientConfig(t *testing.T) { - cases := []struct { - name string - spec ConfigSpec - expectedConf Config - }{ - { - name: "only has basic info", - spec: ConfigSpec{ - Endpoints: []string{"http://192.168.0.10:2379"}, - DialTimeout: 2 * time.Second, - KeepAliveTime: 3 * time.Second, - KeepAliveTimeout: 5 * time.Second, - }, - expectedConf: Config{ - Endpoints: []string{"http://192.168.0.10:2379"}, - DialTimeout: 2 * time.Second, - DialKeepAliveTime: 3 * time.Second, - DialKeepAliveTimeout: 5 * time.Second, - }, - }, - { - name: "auth enabled", - spec: ConfigSpec{ - Endpoints: []string{"http://192.168.0.12:2379"}, - DialTimeout: 1 * time.Second, - KeepAliveTime: 4 * time.Second, - KeepAliveTimeout: 6 * time.Second, - Auth: &AuthConfig{ - Username: "test", - Password: "changeme", - }, - }, - expectedConf: Config{ - Endpoints: []string{"http://192.168.0.12:2379"}, - DialTimeout: 1 * time.Second, - DialKeepAliveTime: 4 * time.Second, - DialKeepAliveTimeout: 6 * time.Second, - Username: "test", - Password: "changeme", - }, - }, - { - name: "default secure transport", - spec: ConfigSpec{ - Endpoints: []string{"http://192.168.0.10:2379"}, - DialTimeout: 2 * time.Second, - KeepAliveTime: 3 * time.Second, - KeepAliveTimeout: 5 * time.Second, - Secure: &SecureConfig{ - InsecureTransport: false, - }, - }, - expectedConf: Config{ - Endpoints: []string{"http://192.168.0.10:2379"}, - DialTimeout: 2 * time.Second, - DialKeepAliveTime: 3 * time.Second, - DialKeepAliveTimeout: 5 * time.Second, - TLS: &tls.Config{}, - }, - }, - { - name: "default secure transport and skip TLS verification", - spec: ConfigSpec{ - Endpoints: []string{"http://192.168.0.13:2379"}, - DialTimeout: 1 * time.Second, - KeepAliveTime: 3 * time.Second, - KeepAliveTimeout: 5 * 
time.Second, - Secure: &SecureConfig{ - InsecureTransport: false, - InsecureSkipVerify: true, - }, - }, - expectedConf: Config{ - Endpoints: []string{"http://192.168.0.13:2379"}, - DialTimeout: 1 * time.Second, - DialKeepAliveTime: 3 * time.Second, - DialKeepAliveTimeout: 5 * time.Second, - TLS: &tls.Config{ - InsecureSkipVerify: true, - }, - }, - }, - { - name: "insecure transport and skip TLS verification", - spec: ConfigSpec{ - Endpoints: []string{"http://192.168.0.13:2379"}, - DialTimeout: 1 * time.Second, - KeepAliveTime: 3 * time.Second, - KeepAliveTimeout: 5 * time.Second, - Secure: &SecureConfig{ - InsecureTransport: true, - InsecureSkipVerify: true, - }, - }, - expectedConf: Config{ - Endpoints: []string{"http://192.168.0.13:2379"}, - DialTimeout: 1 * time.Second, - DialKeepAliveTime: 3 * time.Second, - DialKeepAliveTimeout: 5 * time.Second, - TLS: &tls.Config{ - InsecureSkipVerify: true, - }, - }, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel) - - cfg, err := NewClientConfig(&tc.spec, lg) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - assert.Equal(t, tc.expectedConf, *cfg) - }) - } -} - -func TestNewClientConfigWithSecureCfg(t *testing.T) { - tls, err := transport.SelfCert(zap.NewNop(), t.TempDir(), []string{"localhost"}, 1) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - scfg := &SecureConfig{ - Cert: tls.CertFile, - Key: tls.KeyFile, - Cacert: tls.TrustedCAFile, - } - - cfg, err := NewClientConfig(&ConfigSpec{ - Endpoints: []string{"http://192.168.0.13:2379"}, - DialTimeout: 2 * time.Second, - KeepAliveTime: 3 * time.Second, - KeepAliveTimeout: 5 * time.Second, - Secure: scfg, - }, nil) - if err != nil || cfg == nil || cfg.TLS == nil { - t.Fatalf("Unexpected result client config: %v", err) - } -} diff --git a/client/v3/credentials/credentials_test.go b/client/v3/credentials/credentials_test.go deleted file mode 100644 index 
5111a2ad5ec..00000000000 --- a/client/v3/credentials/credentials_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package credentials - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" -) - -func TestUpdateAuthToken(t *testing.T) { - bundle := NewBundle(Config{}) - ctx := context.TODO() - - metadataBeforeUpdate, _ := bundle.PerRPCCredentials().GetRequestMetadata(ctx) - assert.Empty(t, metadataBeforeUpdate) - - bundle.UpdateAuthToken("abcdefg") - - metadataAfterUpdate, _ := bundle.PerRPCCredentials().GetRequestMetadata(ctx) - assert.Equal(t, metadataAfterUpdate[rpctypes.TokenFieldNameGRPC], "abcdefg") -} diff --git a/client/v3/ctx_test.go b/client/v3/ctx_test.go deleted file mode 100644 index 097b6a3e6b1..00000000000 --- a/client/v3/ctx_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "reflect" - "testing" - - "google.golang.org/grpc/metadata" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/api/v3/version" -) - -func TestMetadataWithRequireLeader(t *testing.T) { - ctx := context.TODO() - _, ok := metadata.FromOutgoingContext(ctx) - if ok { - t.Fatal("expected no outgoing metadata ctx key") - } - - // add a conflicting key with some other value - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, "invalid") - // add a key, and expect not be overwritten - md.Set("hello", "1", "2") - ctx = metadata.NewOutgoingContext(ctx, md) - - // expect overwrites but still keep other keys - ctx = WithRequireLeader(ctx) - md, ok = metadata.FromOutgoingContext(ctx) - if !ok { - t.Fatal("expected outgoing metadata ctx key") - } - if ss := md.Get(rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) { - t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss) - } - if ss := md.Get("hello"); !reflect.DeepEqual(ss, []string{"1", "2"}) { - t.Fatalf("unexpected metadata for 'hello' %v", ss) - } -} - -func TestMetadataWithClientAPIVersion(t *testing.T) { - ctx := withVersion(WithRequireLeader(context.TODO())) - - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - t.Fatal("expected outgoing metadata ctx key") - } - if ss := md.Get(rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) { - t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss) - } - if ss := md.Get(rpctypes.MetadataClientAPIVersionKey); !reflect.DeepEqual(ss, []string{version.APIVersion}) { - t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataClientAPIVersionKey, ss) - } -} diff --git a/client/v3/doc.go b/client/v3/doc.go deleted file mode 100644 index bd820d3d79e..00000000000 --- 
a/client/v3/doc.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clientv3 implements the official Go etcd client for v3. -// -// Create client using `clientv3.New`: -// -// // expect dial time-out on ipv4 blackhole -// _, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"http://254.0.0.1:12345"}, -// DialTimeout: 2 * time.Second, -// }) -// -// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 -// if err == context.DeadlineExceeded { -// // handle errors -// } -// -// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1 -// if err == grpc.ErrClientConnTimeout { -// // handle errors -// } -// -// cli, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, -// DialTimeout: 5 * time.Second, -// }) -// if err != nil { -// // handle error! -// } -// defer cli.Close() -// -// Make sure to close the client after using it. If the client is not closed, the -// connection will have leaky goroutines. -// -// To specify a client request timeout, wrap the context with context.WithTimeout: -// -// ctx, cancel := context.WithTimeout(context.Background(), timeout) -// defer cancel() -// resp, err := kvc.Put(ctx, "sample_key", "sample_value") -// if err != nil { -// // handle error! 
-// } -// // use the response -// -// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. -// Clients are safe for concurrent use by multiple goroutines. -// -// etcd client returns 2 types of errors: -// -// 1. context error: canceled or deadline exceeded. -// 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded. -// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go -// -// Here is the example code to handle client errors: -// -// resp, err := kvc.Put(ctx, "", "") -// if err != nil { -// if err == context.Canceled { -// // ctx is canceled by another routine -// } else if err == context.DeadlineExceeded { -// // ctx is attached with a deadline and it exceeded -// } else if err == rpctypes.ErrEmptyKey { -// // client-side error: key is not provided -// } else if ev, ok := status.FromError(err); ok { -// code := ev.Code() -// if code == codes.DeadlineExceeded { -// // server-side context might have timed-out first (due to clock skew) -// // while original client-side context is not timed-out yet -// } -// } else { -// // bad cluster endpoints, which are not etcd servers -// } -// } -// -// go func() { cli.Close() }() -// _, err := kvc.Get(ctx, "a") -// if err != nil { -// // with etcd clientv3 <= v3.3 -// if err == context.Canceled { -// // grpc balancer calls 'Get' with an inflight client.Close -// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x -// // grpc balancer calls 'Get' after client.Close. -// } -// // with etcd clientv3 >= v3.4 -// if clientv3.IsConnCanceled(err) { -// // gRPC client connection is closed -// } -// } -// -// The grpc load balancer is registered statically and is shared across etcd clients. -// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment -// variable. E.g. "ETCD_CLIENT_DEBUG=1". 
-package clientv3 diff --git a/client/v3/example_auth_test.go b/client/v3/example_auth_test.go deleted file mode 120000 index 7a25cc2a033..00000000000 --- a/client/v3/example_auth_test.go +++ /dev/null @@ -1 +0,0 @@ -../../tests/integration/clientv3/examples/example_auth_test.go \ No newline at end of file diff --git a/client/v3/example_cluster_test.go b/client/v3/example_cluster_test.go deleted file mode 120000 index 302451f8113..00000000000 --- a/client/v3/example_cluster_test.go +++ /dev/null @@ -1 +0,0 @@ -../../tests/integration/clientv3/examples/example_cluster_test.go \ No newline at end of file diff --git a/client/v3/example_kv_test.go b/client/v3/example_kv_test.go deleted file mode 120000 index 0b3bd875e92..00000000000 --- a/client/v3/example_kv_test.go +++ /dev/null @@ -1 +0,0 @@ -../../tests/integration/clientv3/examples/example_kv_test.go \ No newline at end of file diff --git a/client/v3/example_lease_test.go b/client/v3/example_lease_test.go deleted file mode 120000 index d1cf744bb6d..00000000000 --- a/client/v3/example_lease_test.go +++ /dev/null @@ -1 +0,0 @@ -../../tests/integration/clientv3/examples/example_lease_test.go \ No newline at end of file diff --git a/client/v3/example_maintenance_test.go b/client/v3/example_maintenance_test.go deleted file mode 120000 index d8bcb642a24..00000000000 --- a/client/v3/example_maintenance_test.go +++ /dev/null @@ -1 +0,0 @@ -../../tests/integration/clientv3/examples/example_maintenance_test.go \ No newline at end of file diff --git a/client/v3/example_metrics_test.go b/client/v3/example_metrics_test.go deleted file mode 120000 index a363c3c4fe0..00000000000 --- a/client/v3/example_metrics_test.go +++ /dev/null @@ -1 +0,0 @@ -../../tests/integration/clientv3/examples/example_metrics_test.go \ No newline at end of file diff --git a/client/v3/example_test.go b/client/v3/example_test.go deleted file mode 120000 index ddacab0d6c4..00000000000 --- a/client/v3/example_test.go +++ /dev/null @@ -1 +0,0 @@ 
-../../tests/integration/clientv3/examples/example_test.go \ No newline at end of file diff --git a/client/v3/example_watch_test.go b/client/v3/example_watch_test.go deleted file mode 120000 index fb748bed55d..00000000000 --- a/client/v3/example_watch_test.go +++ /dev/null @@ -1 +0,0 @@ -../../tests/integration/clientv3/examples/example_watch_test.go \ No newline at end of file diff --git a/client/v3/experimental/recipes/client.go b/client/v3/experimental/recipes/client.go deleted file mode 100644 index 6dd5b13a6a5..00000000000 --- a/client/v3/experimental/recipes/client.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package recipe - -import ( - "context" - "errors" - - spb "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" -) - -var ( - ErrKeyExists = errors.New("key already exists") - ErrWaitMismatch = errors.New("unexpected wait result") - ErrTooManyClients = errors.New("too many clients") - ErrNoWatcher = errors.New("no watcher channel") -) - -// deleteRevKey deletes a key by revision, returning false if key is missing -func deleteRevKey(kv v3.KV, key string, rev int64) (bool, error) { - cmp := v3.Compare(v3.ModRevision(key), "=", rev) - req := v3.OpDelete(key) - txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit() - if err != nil { - return false, err - } else if !txnresp.Succeeded { - return false, nil - } - return true, nil -} - -func claimFirstKey(kv v3.KV, kvs []*spb.KeyValue) (*spb.KeyValue, error) { - for _, k := range kvs { - ok, err := deleteRevKey(kv, string(k.Key), k.ModRevision) - if err != nil { - return nil, err - } else if ok { - return k, nil - } - } - return nil, nil -} diff --git a/client/v3/experimental/recipes/double_barrier.go b/client/v3/experimental/recipes/double_barrier.go deleted file mode 100644 index cc2416db23b..00000000000 --- a/client/v3/experimental/recipes/double_barrier.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package recipe - -import ( - "context" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" -) - -// DoubleBarrier blocks processes on Enter until an expected count enters, then -// blocks again on Leave until all processes have left. -type DoubleBarrier struct { - s *concurrency.Session - ctx context.Context - - key string // key for the collective barrier - count int - myKey *EphemeralKV // current key for this process on the barrier -} - -func NewDoubleBarrier(s *concurrency.Session, key string, count int) *DoubleBarrier { - return &DoubleBarrier{ - s: s, - ctx: context.TODO(), - key: key, - count: count, - } -} - -// Enter waits for "count" processes to enter the barrier then returns -func (b *DoubleBarrier) Enter() error { - client := b.s.Client() - - // Check the entered clients before creating the UniqueEphemeralKey, - // fail the request if there are already too many clients. - if resp1, err := b.enteredClients(client); err != nil { - return err - } else if len(resp1.Kvs) >= b.count { - return ErrTooManyClients - } - - ek, err := newUniqueEphemeralKey(b.s, b.key+"/waiters") - if err != nil { - return err - } - b.myKey = ek - - // Check the entered clients after creating the UniqueEphemeralKey - resp2, err := b.enteredClients(client) - if err != nil { - return err - } - if len(resp2.Kvs) >= b.count { - lastWaiter := resp2.Kvs[b.count-1] - if ek.rev > lastWaiter.CreateRevision { - // delete itself now, otherwise other processes may need to wait - // until these keys are automatically deleted when the related - // lease expires. - if err = b.myKey.Delete(); err != nil { - // Nothing to do here. We have to wait for the key to be - // deleted when the lease expires. - } - return ErrTooManyClients - } - - if ek.rev == lastWaiter.CreateRevision { - // TODO(ahrtr): we might need to compare ek.key and - // string(lastWaiter.Key), they should be equal. 
- // unblock all other waiters - _, err = client.Put(b.ctx, b.key+"/ready", "") - return err - } - } - - _, err = WaitEvents( - client, - b.key+"/ready", - ek.Revision(), - []mvccpb.Event_EventType{mvccpb.PUT}) - return err -} - -// enteredClients gets all the entered clients, which are ordered by the -// createRevision in ascending order. -func (b *DoubleBarrier) enteredClients(cli *clientv3.Client) (*clientv3.GetResponse, error) { - resp, err := cli.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix(), - clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend)) - if err != nil { - return nil, err - } - - return resp, nil -} - -// Leave waits for "count" processes to leave the barrier then returns -func (b *DoubleBarrier) Leave() error { - client := b.s.Client() - resp, err := client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix()) - if err != nil { - return err - } - if len(resp.Kvs) == 0 { - return nil - } - - lowest, highest := resp.Kvs[0], resp.Kvs[0] - for _, k := range resp.Kvs { - if k.ModRevision < lowest.ModRevision { - lowest = k - } - if k.ModRevision > highest.ModRevision { - highest = k - } - } - isLowest := string(lowest.Key) == b.myKey.Key() - - if len(resp.Kvs) == 1 && isLowest { - // this is the only node in the barrier; finish up - if _, err = client.Delete(b.ctx, b.key+"/ready"); err != nil { - return err - } - return b.myKey.Delete() - } - - // this ensures that if a process fails, the ephemeral lease will be - // revoked, its barrier key is removed, and the barrier can resume - - // lowest process in node => wait on highest process - if isLowest { - _, err = WaitEvents( - client, - string(highest.Key), - highest.ModRevision, - []mvccpb.Event_EventType{mvccpb.DELETE}) - if err != nil { - return err - } - return b.Leave() - } - - // delete self and wait on lowest process - if err = b.myKey.Delete(); err != nil { - return err - } - - key := string(lowest.Key) - _, err = WaitEvents( - client, - key, - lowest.ModRevision, - 
[]mvccpb.Event_EventType{mvccpb.DELETE}) - if err != nil { - return err - } - return b.Leave() -} diff --git a/client/v3/experimental/recipes/key.go b/client/v3/experimental/recipes/key.go deleted file mode 100644 index 10362c18fbe..00000000000 --- a/client/v3/experimental/recipes/key.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package recipe - -import ( - "context" - "fmt" - "strings" - "time" - - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" -) - -// RemoteKV is a key/revision pair created by the client and stored on etcd -type RemoteKV struct { - kv v3.KV - key string - rev int64 - val string -} - -func newKey(kv v3.KV, key string, leaseID v3.LeaseID) (*RemoteKV, error) { - return newKV(kv, key, "", leaseID) -} - -func newKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (*RemoteKV, error) { - rev, err := putNewKV(kv, key, val, leaseID) - if err != nil { - return nil, err - } - return &RemoteKV{kv, key, rev, val}, nil -} - -func newUniqueKV(kv v3.KV, prefix string, val string) (*RemoteKV, error) { - for { - newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano()) - rev, err := putNewKV(kv, newKey, val, v3.NoLease) - if err == nil { - return &RemoteKV{kv, newKey, rev, val}, nil - } - if err != ErrKeyExists { - return nil, err - } - } -} - -// putNewKV attempts to create the given key, only succeeding if the key did -// not yet exist. 
-func putNewKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (int64, error) { - cmp := v3.Compare(v3.Version(key), "=", 0) - req := v3.OpPut(key, val, v3.WithLease(leaseID)) - txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit() - if err != nil { - return 0, err - } - if !txnresp.Succeeded { - return 0, ErrKeyExists - } - return txnresp.Header.Revision, nil -} - -// newSequentialKV allocates a new sequential key /nnnnn with a given -// prefix and value. Note: a bookkeeping node __ is also allocated. -func newSequentialKV(kv v3.KV, prefix, val string) (*RemoteKV, error) { - resp, err := kv.Get(context.TODO(), prefix, v3.WithLastKey()...) - if err != nil { - return nil, err - } - - // add 1 to last key, if any - newSeqNum := 0 - if len(resp.Kvs) != 0 { - fields := strings.Split(string(resp.Kvs[0].Key), "/") - _, serr := fmt.Sscanf(fields[len(fields)-1], "%d", &newSeqNum) - if serr != nil { - return nil, serr - } - newSeqNum++ - } - newKey := fmt.Sprintf("%s/%016d", prefix, newSeqNum) - - // base prefix key must be current (i.e., <=) with the server update; - // the base key is important to avoid the following: - // N1: LastKey() == 1, start txn. 
- // N2: new Key 2, new Key 3, Delete Key 2 - // N1: txn succeeds allocating key 2 when it shouldn't - baseKey := "__" + prefix - - // current revision might contain modification so +1 - cmp := v3.Compare(v3.ModRevision(baseKey), "<", resp.Header.Revision+1) - reqPrefix := v3.OpPut(baseKey, "") - reqnewKey := v3.OpPut(newKey, val) - - txn := kv.Txn(context.TODO()) - txnresp, err := txn.If(cmp).Then(reqPrefix, reqnewKey).Commit() - if err != nil { - return nil, err - } - if !txnresp.Succeeded { - return newSequentialKV(kv, prefix, val) - } - return &RemoteKV{kv, newKey, txnresp.Header.Revision, val}, nil -} - -func (rk *RemoteKV) Key() string { return rk.key } -func (rk *RemoteKV) Revision() int64 { return rk.rev } -func (rk *RemoteKV) Value() string { return rk.val } - -func (rk *RemoteKV) Delete() error { - if rk.kv == nil { - return nil - } - _, err := rk.kv.Delete(context.TODO(), rk.key) - rk.kv = nil - return err -} - -func (rk *RemoteKV) Put(val string) error { - _, err := rk.kv.Put(context.TODO(), rk.key, val) - return err -} - -// EphemeralKV is a new key associated with a session lease -type EphemeralKV struct{ RemoteKV } - -// newEphemeralKV creates a new key/value pair associated with a session lease -func newEphemeralKV(s *concurrency.Session, key, val string) (*EphemeralKV, error) { - k, err := newKV(s.Client(), key, val, s.Lease()) - if err != nil { - return nil, err - } - return &EphemeralKV{*k}, nil -} - -// newUniqueEphemeralKey creates a new unique valueless key associated with a session lease -func newUniqueEphemeralKey(s *concurrency.Session, prefix string) (*EphemeralKV, error) { - return newUniqueEphemeralKV(s, prefix, "") -} - -// newUniqueEphemeralKV creates a new unique key/value pair associated with a session lease -func newUniqueEphemeralKV(s *concurrency.Session, prefix, val string) (ek *EphemeralKV, err error) { - for { - newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano()) - ek, err = newEphemeralKV(s, newKey, val) - if err == 
nil || err != ErrKeyExists { - break - } - } - return ek, err -} diff --git a/client/v3/experimental/recipes/queue.go b/client/v3/experimental/recipes/queue.go deleted file mode 100644 index 9c6b0378e11..00000000000 --- a/client/v3/experimental/recipes/queue.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package recipe - -import ( - "context" - - "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" -) - -// Queue implements a multi-reader, multi-writer distributed queue. -type Queue struct { - client *v3.Client - ctx context.Context - - keyPrefix string -} - -func NewQueue(client *v3.Client, keyPrefix string) *Queue { - return &Queue{client, context.TODO(), keyPrefix} -} - -func (q *Queue) Enqueue(val string) error { - _, err := newUniqueKV(q.client, q.keyPrefix, val) - return err -} - -// Dequeue returns Enqueue()'d elements in FIFO order. If the -// queue is empty, Dequeue blocks until elements are available. -func (q *Queue) Dequeue() (string, error) { - // TODO: fewer round trips by fetching more than one key - resp, err := q.client.Get(q.ctx, q.keyPrefix, v3.WithFirstRev()...) 
- if err != nil { - return "", err - } - - kv, err := claimFirstKey(q.client, resp.Kvs) - if err != nil { - return "", err - } else if kv != nil { - return string(kv.Value), nil - } else if resp.More { - // missed some items, retry to read in more - return q.Dequeue() - } - - // nothing yet; wait on elements - ev, err := WaitPrefixEvents( - q.client, - q.keyPrefix, - resp.Header.Revision, - []mvccpb.Event_EventType{mvccpb.PUT}) - if err != nil { - return "", err - } - - ok, err := deleteRevKey(q.client, string(ev.Kv.Key), ev.Kv.ModRevision) - if err != nil { - return "", err - } else if !ok { - return q.Dequeue() - } - return string(ev.Kv.Value), err -} diff --git a/client/v3/experimental/recipes/watch.go b/client/v3/experimental/recipes/watch.go deleted file mode 100644 index 92e7bc648f4..00000000000 --- a/client/v3/experimental/recipes/watch.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package recipe - -import ( - "context" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" -) - -// WaitEvents waits on a key until it observes the given events and returns the final one. 
-func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - wc := c.Watch(ctx, key, clientv3.WithRev(rev)) - if wc == nil { - return nil, ErrNoWatcher - } - return waitEvents(wc, evs), nil -} - -func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev)) - if wc == nil { - return nil, ErrNoWatcher - } - return waitEvents(wc, evs), nil -} - -func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event { - i := 0 - for wresp := range wc { - for _, ev := range wresp.Events { - if ev.Type == evs[i] { - i++ - if i == len(evs) { - return ev - } - } - } - } - return nil -} diff --git a/client/v3/go.mod b/client/v3/go.mod deleted file mode 100644 index 14ecdc8f239..00000000000 --- a/client/v3/go.mod +++ /dev/null @@ -1,54 +0,0 @@ -module go.etcd.io/etcd/client/v3 - -go 1.19 - -require ( - github.com/dustin/go-humanize v1.0.1 - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/prometheus/client_golang v1.14.0 - github.com/stretchr/testify v1.8.1 - go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - go.uber.org/zap v1.24.0 - google.golang.org/grpc v1.51.0 - sigs.k8s.io/yaml v1.3.0 -) - -require ( - github.com/benbjohnson/clock v1.1.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - 
github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace ( - go.etcd.io/etcd/api/v3 => ../../api - go.etcd.io/etcd/client/pkg/v3 => ../pkg -) - -// Bad imports are sometimes causing attempts to pull that code. -// This makes the error more explicit. -replace ( - go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/pkg/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY -) diff --git a/client/v3/go.sum b/client/v3/go.sum deleted file mode 100644 index 2ac2610010b..00000000000 --- a/client/v3/go.sum +++ /dev/null @@ -1,554 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod 
h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod 
h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools 
v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto 
v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc 
v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= 
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/client/v3/internal/endpoint/endpoint.go b/client/v3/internal/endpoint/endpoint.go deleted file mode 100644 index f6674235cd9..00000000000 --- a/client/v3/internal/endpoint/endpoint.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package endpoint - -import ( - "fmt" - "net" - "net/url" - "path" - "strings" -) - -type CredsRequirement int - -const ( - // CREDS_REQUIRE - Credentials/certificate required for thi type of connection. - CREDS_REQUIRE CredsRequirement = iota - // CREDS_DROP - Credentials/certificate not needed and should get ignored. - CREDS_DROP - // CREDS_OPTIONAL - Credentials/certificate might be used if supplied - CREDS_OPTIONAL -) - -func extractHostFromHostPort(ep string) string { - host, _, err := net.SplitHostPort(ep) - if err != nil { - return ep - } - return host -} - -func extractHostFromPath(pathStr string) string { - return extractHostFromHostPort(path.Base(pathStr)) -} - -// mustSplit2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func mustSplit2(s, sep string) (string, string) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - panic(fmt.Errorf("token '%v' expected to have separator sep: `%v`", s, sep)) - } - return spl[0], spl[1] -} - -func schemeToCredsRequirement(schema string) CredsRequirement { - switch schema { - case "https", "unixs": - return CREDS_REQUIRE - case "http": - return CREDS_DROP - case "unix": - // Preserving previous behavior from: - // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212 - // that likely was a bug due to missing 'fallthrough'. - // At the same time it seems legit to let the users decide whether they - // want credential control or not (and 'unixs' schema is not a standard thing). - return CREDS_OPTIONAL - case "": - return CREDS_OPTIONAL - default: - return CREDS_OPTIONAL - } -} - -// This function translates endpoints names supported by etcd server into -// endpoints as supported by grpc with additional information -// (server_name for cert validation, requireCreds - whether certs are needed). 
-// The main differences: -// - etcd supports unixs & https names as opposed to unix & http to -// distinguish need to configure certificates. -// - etcd support http(s) names as opposed to tcp supported by grpc/dial method. -// - etcd supports unix(s)://local-file naming schema -// (as opposed to unix:local-file canonical name used by grpc for current dir files). -// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) -// is considered serverName - to allow local testing of cert-protected communication. -// -// See more: -// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47 -// - https://golang.org/pkg/net/#Dial -// - https://github.com/grpc/grpc/blob/master/doc/naming.md -func translateEndpoint(ep string) (addr string, serverName string, requireCreds CredsRequirement) { - if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") { - if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") { - // absolute path case - schema, absolutePath := mustSplit2(ep, "://") - return "unix://" + absolutePath, extractHostFromPath(absolutePath), schemeToCredsRequirement(schema) - } - if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") { - // legacy etcd local path - schema, localPath := mustSplit2(ep, "://") - return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) - } - schema, localPath := mustSplit2(ep, ":") - return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) - } - - if strings.Contains(ep, "://") { - url, err := url.Parse(ep) - if err != nil { - return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL - } - if url.Scheme == "http" || url.Scheme == "https" { - return url.Host, url.Hostname(), schemeToCredsRequirement(url.Scheme) - } - return ep, url.Hostname(), schemeToCredsRequirement(url.Scheme) - } - // Handles plain addresses like 
10.0.0.44:437. - return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL -} - -// RequiresCredentials returns whether given endpoint requires -// credentials/certificates for connection. -func RequiresCredentials(ep string) CredsRequirement { - _, _, requireCreds := translateEndpoint(ep) - return requireCreds -} - -// Interpret endpoint parses an endpoint of the form -// (http|https)://*|(unix|unixs)://) -// and returns low-level address (supported by 'net') to connect to, -// and a server name used for x509 certificate matching. -func Interpret(ep string) (address string, serverName string) { - addr, serverName, _ := translateEndpoint(ep) - return addr, serverName -} diff --git a/client/v3/internal/endpoint/endpoint_test.go b/client/v3/internal/endpoint/endpoint_test.go deleted file mode 100644 index bc6cd71399c..00000000000 --- a/client/v3/internal/endpoint/endpoint_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package endpoint - -import ( - "testing" -) - -func Test_interpret(t *testing.T) { - tests := []struct { - endpoint string - wantAddress string - wantServerName string - wantRequiresCreds CredsRequirement - }{ - {"127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_OPTIONAL}, - {"localhost", "localhost", "localhost", CREDS_OPTIONAL}, - {"localhost:8080", "localhost:8080", "localhost", CREDS_OPTIONAL}, - - {"unix:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_OPTIONAL}, - {"unix:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_OPTIONAL}, - - {"unix://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_OPTIONAL}, - {"unix://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_OPTIONAL}, - - {"unixs:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_REQUIRE}, - {"unixs:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE}, - {"unixs://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_REQUIRE}, - {"unixs://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE}, - - {"http://127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_DROP}, - {"http://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1", CREDS_DROP}, - {"https://127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_REQUIRE}, - {"https://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE}, - {"https://localhost:20000", "localhost:20000", "localhost", CREDS_REQUIRE}, - - {"unix:///tmp/abc", "unix:///tmp/abc", "abc", CREDS_OPTIONAL}, - {"unixs:///tmp/abc", "unix:///tmp/abc", "abc", CREDS_REQUIRE}, - {"unix:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc", CREDS_OPTIONAL}, - {"unixs:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc", CREDS_REQUIRE}, - {"etcd.io", "etcd.io", "etcd.io", CREDS_OPTIONAL}, - {"http://etcd.io/abc", "etcd.io", "etcd.io", CREDS_DROP}, - {"dns://something-other", "dns://something-other", "something-other", CREDS_OPTIONAL}, - - {"http://[2001:db8:1f70::999:de8:7648:6e8]:100/", "[2001:db8:1f70::999:de8:7648:6e8]:100", "2001:db8:1f70::999:de8:7648:6e8", CREDS_DROP}, - 
{"[2001:db8:1f70::999:de8:7648:6e8]:100", "[2001:db8:1f70::999:de8:7648:6e8]:100", "2001:db8:1f70::999:de8:7648:6e8", CREDS_OPTIONAL}, - {"unix:unexpected-file_name#123$456", "unix:unexpected-file_name#123$456", "unexpected-file_name#123$456", CREDS_OPTIONAL}, - } - for _, tt := range tests { - t.Run("Interpret_"+tt.endpoint, func(t *testing.T) { - gotAddress, gotServerName := Interpret(tt.endpoint) - if gotAddress != tt.wantAddress { - t.Errorf("Interpret() gotAddress = %v, want %v", gotAddress, tt.wantAddress) - } - if gotServerName != tt.wantServerName { - t.Errorf("Interpret() gotServerName = %v, want %v", gotServerName, tt.wantServerName) - } - }) - t.Run("RequiresCredentials_"+tt.endpoint, func(t *testing.T) { - requiresCreds := RequiresCredentials(tt.endpoint) - if requiresCreds != tt.wantRequiresCreds { - t.Errorf("RequiresCredentials() got = %v, want %v", requiresCreds, tt.wantRequiresCreds) - } - }) - } -} - -func Test_extractHostFromHostPort(t *testing.T) { - tests := []struct { - ep string - want string - }{ - {ep: "localhost", want: "localhost"}, - {ep: "localhost:8080", want: "localhost"}, - {ep: "192.158.7.14:8080", want: "192.158.7.14"}, - {ep: "192.158.7.14:8080", want: "192.158.7.14"}, - {ep: "[2001:db8:1f70::999:de8:7648:6e8]", want: "[2001:db8:1f70::999:de8:7648:6e8]"}, - {ep: "[2001:db8:1f70::999:de8:7648:6e8]:100", want: "2001:db8:1f70::999:de8:7648:6e8"}, - } - for _, tt := range tests { - t.Run(tt.ep, func(t *testing.T) { - if got := extractHostFromHostPort(tt.ep); got != tt.want { - t.Errorf("extractHostFromHostPort() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/client/v3/internal/resolver/resolver.go b/client/v3/internal/resolver/resolver.go deleted file mode 100644 index b5c9de00786..00000000000 --- a/client/v3/internal/resolver/resolver.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resolver - -import ( - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/serviceconfig" - - "go.etcd.io/etcd/client/v3/internal/endpoint" -) - -const ( - Schema = "etcd-endpoints" -) - -// EtcdManualResolver is a Resolver (and resolver.Builder) that can be updated -// using SetEndpoints. -type EtcdManualResolver struct { - *manual.Resolver - endpoints []string - serviceConfig *serviceconfig.ParseResult -} - -func New(endpoints ...string) *EtcdManualResolver { - r := manual.NewBuilderWithScheme(Schema) - return &EtcdManualResolver{Resolver: r, endpoints: endpoints, serviceConfig: nil} -} - -// Build returns itself for Resolver, because it's both a builder and a resolver. -func (r *EtcdManualResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.serviceConfig = cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`) - if r.serviceConfig.Err != nil { - return nil, r.serviceConfig.Err - } - res, err := r.Resolver.Build(target, cc, opts) - if err != nil { - return nil, err - } - // Populates endpoints stored in r into ClientConn (cc). 
- r.updateState() - return res, nil -} - -func (r *EtcdManualResolver) SetEndpoints(endpoints []string) { - r.endpoints = endpoints - r.updateState() -} - -func (r EtcdManualResolver) updateState() { - if r.CC != nil { - addresses := make([]resolver.Address, len(r.endpoints)) - for i, ep := range r.endpoints { - addr, serverName := endpoint.Interpret(ep) - addresses[i] = resolver.Address{Addr: addr, ServerName: serverName} - } - state := resolver.State{ - Addresses: addresses, - ServiceConfig: r.serviceConfig, - } - r.UpdateState(state) - } -} diff --git a/client/v3/kv.go b/client/v3/kv.go deleted file mode 100644 index f50f9595ce1..00000000000 --- a/client/v3/kv.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - - "google.golang.org/grpc" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" -) - -type ( - CompactResponse pb.CompactionResponse - PutResponse pb.PutResponse - GetResponse pb.RangeResponse - DeleteResponse pb.DeleteRangeResponse - TxnResponse pb.TxnResponse -) - -type KV interface { - // Put puts a key-value pair into etcd. - // Note that key,value can be plain bytes array and string is - // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte{0x10, 0x20}). 
- Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) - - // Get retrieves keys. - // By default, Get will return the value for "key", if any. - // When passed WithRange(end), Get will return the keys in the range [key, end). - // When passed WithFromKey(), Get returns keys greater than or equal to key. - // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; - // if the required revision is compacted, the request will fail with ErrCompacted . - // When passed WithLimit(limit), the number of returned keys is bounded by limit. - // When passed WithSort(), the keys will be sorted. - Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) - - // Delete deletes a key, or optionally using WithRange(end), [key, end). - Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) - - // Compact compacts etcd KV history before the given rev. - Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - - // Do applies a single Op on KV without a transaction. - // Do is useful when creating arbitrary operations to be issued at a - // later time; the user can range over the operations, calling Do to - // execute them. Get/Put/Delete, on the other hand, are best suited - // for when the operation should be issued at the time of declaration. - Do(ctx context.Context, op Op) (OpResponse, error) - - // Txn creates a transaction. 
- Txn(ctx context.Context) Txn -} - -type OpResponse struct { - put *PutResponse - get *GetResponse - del *DeleteResponse - txn *TxnResponse -} - -func (op OpResponse) Put() *PutResponse { return op.put } -func (op OpResponse) Get() *GetResponse { return op.get } -func (op OpResponse) Del() *DeleteResponse { return op.del } -func (op OpResponse) Txn() *TxnResponse { return op.txn } - -func (resp *PutResponse) OpResponse() OpResponse { - return OpResponse{put: resp} -} -func (resp *GetResponse) OpResponse() OpResponse { - return OpResponse{get: resp} -} -func (resp *DeleteResponse) OpResponse() OpResponse { - return OpResponse{del: resp} -} -func (resp *TxnResponse) OpResponse() OpResponse { - return OpResponse{txn: resp} -} - -type kv struct { - remote pb.KVClient - callOpts []grpc.CallOption -} - -func NewKV(c *Client) KV { - api := &kv{remote: RetryKVClient(c)} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { - api := &kv{remote: remote} - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { - r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) -} - -func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { - r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) -} - -func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { - r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) -} - -func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) 
- if err != nil { - return nil, toErr(ctx, err) - } - return (*CompactResponse)(resp), err -} - -func (kv *kv) Txn(ctx context.Context) Txn { - return &txn{ - kv: kv, - ctx: ctx, - callOpts: kv.callOpts, - } -} - -func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - var err error - switch op.t { - case tRange: - if op.IsSortOptionValid() { - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil - } - } else { - err = rpctypes.ErrInvalidSortOption - } - case tPut: - var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - resp, err = kv.remote.Put(ctx, r, kv.callOpts...) - if err == nil { - return OpResponse{put: (*PutResponse)(resp)}, nil - } - case tDeleteRange: - var resp *pb.DeleteRangeResponse - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) - if err == nil { - return OpResponse{del: (*DeleteResponse)(resp)}, nil - } - case tTxn: - var resp *pb.TxnResponse - resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) - if err == nil { - return OpResponse{txn: (*TxnResponse)(resp)}, nil - } - default: - panic("Unknown op") - } - return OpResponse{}, toErr(ctx, err) -} diff --git a/client/v3/lease.go b/client/v3/lease.go deleted file mode 100644 index 60d7dd18e83..00000000000 --- a/client/v3/lease.go +++ /dev/null @@ -1,610 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "sync" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -type ( - LeaseRevokeResponse pb.LeaseRevokeResponse - LeaseID int64 -) - -// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. -type LeaseGrantResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 - Error string -} - -// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. -type LeaseKeepAliveResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 -} - -// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. -type LeaseTimeToLiveResponse struct { - *pb.ResponseHeader - ID LeaseID `json:"id"` - - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. - TTL int64 `json:"ttl"` - - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `json:"granted-ttl"` - - // Keys is the list of keys attached to this lease. - Keys [][]byte `json:"keys"` -} - -// LeaseStatus represents a lease status. -type LeaseStatus struct { - ID LeaseID `json:"id"` - // TODO: TTL int64 -} - -// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. 
-type LeaseLeasesResponse struct { - *pb.ResponseHeader - Leases []LeaseStatus `json:"leases"` -} - -const ( - // defaultTTL is the assumed lease TTL used for the first keepalive - // deadline before the actual TTL is known to the client. - defaultTTL = 5 * time.Second - // NoLease is a lease ID for the absence of a lease. - NoLease LeaseID = 0 - - // retryConnWait is how long to wait before retrying request due to an error - retryConnWait = 500 * time.Millisecond -) - -// LeaseResponseChSize is the size of buffer to store unsent lease responses. -// WARNING: DO NOT UPDATE. -// Only for testing purposes. -var LeaseResponseChSize = 16 - -// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. -// -// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. -type ErrKeepAliveHalted struct { - Reason error -} - -func (e ErrKeepAliveHalted) Error() string { - s := "etcdclient: leases keep alive halted" - if e.Reason != nil { - s += ": " + e.Reason.Error() - } - return s -} - -type Lease interface { - // Grant creates a new lease. - Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) - - // Revoke revokes the given lease. - Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) - - // TimeToLive retrieves the lease information of the given lease ID. - TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) - - // Leases retrieves all leases. - Leases(ctx context.Context) (*LeaseLeasesResponse, error) - - // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted - // to the channel are not consumed promptly the channel may become full. When full, the lease - // client will continue sending keep alive requests to the etcd server, but will drop responses - // until there is capacity on the channel to send more responses. 
- // - // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or - // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error - // containing the error reason. - // - // The returned "LeaseKeepAliveResponse" channel closes if underlying keep - // alive stream is interrupted in some way the client cannot handle itself; - // given context "ctx" is canceled or timed out. - // - // TODO(v4.0): post errors to last keep alive message before closing - // (see https://github.com/etcd-io/etcd/pull/7866) - KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - - // KeepAliveOnce renews the lease once. The response corresponds to the - // first message from calling KeepAlive. If the response has a recoverable - // error, KeepAliveOnce will retry the RPC with a new keep alive message. - // - // In most of the cases, Keepalive should be used instead of KeepAliveOnce. - KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) - - // Close releases all resources Lease keeps for efficient communication - // with the etcd server. - Close() error -} - -type lessor struct { - mu sync.Mutex // guards all fields - - // donec is closed and loopErr is set when recvKeepAliveLoop stops - donec chan struct{} - loopErr error - - remote pb.LeaseClient - - stream pb.Lease_LeaseKeepAliveClient - streamCancel context.CancelFunc - - stopCtx context.Context - stopCancel context.CancelFunc - - keepAlives map[LeaseID]*keepAlive - - // firstKeepAliveTimeout is the timeout for the first keepalive request - // before the actual TTL is known to the lease client - firstKeepAliveTimeout time.Duration - - // firstKeepAliveOnce ensures stream starts after first KeepAlive call. 
- firstKeepAliveOnce sync.Once - - callOpts []grpc.CallOption - - lg *zap.Logger -} - -// keepAlive multiplexes a keepalive for a lease over multiple channels -type keepAlive struct { - chs []chan<- *LeaseKeepAliveResponse - ctxs []context.Context - // deadline is the time the keep alive channels close if no response - deadline time.Time - // nextKeepAlive is when to send the next keep alive message - nextKeepAlive time.Time - // donec is closed on lease revoke, expiration, or cancel. - donec chan struct{} -} - -func NewLease(c *Client) Lease { - return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) -} - -func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { - l := &lessor{ - donec: make(chan struct{}), - keepAlives: make(map[LeaseID]*keepAlive), - remote: remote, - firstKeepAliveTimeout: keepAliveTimeout, - lg: c.lg, - } - if l.firstKeepAliveTimeout == time.Second { - l.firstKeepAliveTimeout = defaultTTL - } - if c != nil { - l.callOpts = c.callOpts - } - reqLeaderCtx := WithRequireLeader(context.Background()) - l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) - return l -} - -func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) 
- if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil -} - -func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { - resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) - if err == nil { - leases := make([]LeaseStatus, len(resp.Leases)) - for i := range resp.Leases { - leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} - } - return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil - } - return nil, toErr(ctx, err) -} - -func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { - ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) - - l.mu.Lock() - // ensure that recvKeepAliveLoop is still running - select { - case <-l.donec: - err := l.loopErr - l.mu.Unlock() - close(ch) - return ch, ErrKeepAliveHalted{Reason: err} - default: - } - ka, ok := l.keepAlives[id] - if !ok { - // create fresh keep alive - ka = &keepAlive{ - chs: []chan<- *LeaseKeepAliveResponse{ch}, - ctxs: []context.Context{ctx}, - deadline: time.Now().Add(l.firstKeepAliveTimeout), - nextKeepAlive: time.Now(), - donec: make(chan struct{}), - } - l.keepAlives[id] = ka - } else { - // add channel and context to existing keep alive - ka.ctxs = append(ka.ctxs, ctx) - ka.chs = append(ka.chs, ch) - } - l.mu.Unlock() - - go l.keepAliveCtxCloser(ctx, id, ka.donec) - l.firstKeepAliveOnce.Do(func() { - go l.recvKeepAliveLoop() - go l.deadlineLoop() - }) - - return ch, nil -} - 
-func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - for { - resp, err := l.keepAliveOnce(ctx, id) - if err == nil { - if resp.TTL <= 0 { - err = rpctypes.ErrLeaseNotFound - } - return resp, err - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} - -func (l *lessor) Close() error { - l.stopCancel() - // close for synchronous teardown if stream goroutines never launched - l.firstKeepAliveOnce.Do(func() { close(l.donec) }) - <-l.donec - return nil -} - -func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { - select { - case <-donec: - return - case <-l.donec: - return - case <-ctx.Done(): - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[id] - if !ok { - return - } - - // close channel and remove context if still associated with keep alive - for i, c := range ka.ctxs { - if c == ctx { - close(ka.chs[i]) - ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) - ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) - break - } - } - // remove if no one more listeners - if len(ka.chs) == 0 { - delete(l.keepAlives, id) - } -} - -// closeRequireLeader scans keepAlives for ctxs that have require leader -// and closes the associated channels. 
-func (l *lessor) closeRequireLeader() { - l.mu.Lock() - defer l.mu.Unlock() - for _, ka := range l.keepAlives { - reqIdxs := 0 - // find all required leader channels, close, mark as nil - for i, ctx := range ka.ctxs { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - continue - } - ks := md[rpctypes.MetadataRequireLeaderKey] - if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { - continue - } - close(ka.chs[i]) - ka.chs[i] = nil - reqIdxs++ - } - if reqIdxs == 0 { - continue - } - // remove all channels that required a leader from keepalive - newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) - newCtxs := make([]context.Context, len(newChs)) - newIdx := 0 - for i := range ka.chs { - if ka.chs[i] == nil { - continue - } - newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] - newIdx++ - } - ka.chs, ka.ctxs = newChs, newCtxs - } -} - -func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) 
- if err != nil { - return nil, toErr(ctx, err) - } - - defer func() { - if err := stream.CloseSend(); err != nil { - if ferr == nil { - ferr = toErr(ctx, err) - } - return - } - }() - - err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) - if err != nil { - return nil, toErr(ctx, err) - } - - resp, rerr := stream.Recv() - if rerr != nil { - return nil, toErr(ctx, rerr) - } - - karesp = &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - return karesp, nil -} - -func (l *lessor) recvKeepAliveLoop() (gerr error) { - defer func() { - l.mu.Lock() - close(l.donec) - l.loopErr = gerr - for _, ka := range l.keepAlives { - ka.close() - } - l.keepAlives = make(map[LeaseID]*keepAlive) - l.mu.Unlock() - }() - - for { - stream, err := l.resetRecv() - if err != nil { - l.lg.Warn("error occurred during lease keep alive loop", - zap.Error(err), - ) - if canceledByCaller(l.stopCtx, err) { - return err - } - } else { - for { - resp, err := stream.Recv() - if err != nil { - if canceledByCaller(l.stopCtx, err) { - return err - } - - if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { - l.closeRequireLeader() - } - break - } - - l.recvKeepAlive(resp) - } - } - - select { - case <-time.After(retryConnWait): - case <-l.stopCtx.Done(): - return l.stopCtx.Err() - } - } -} - -// resetRecv opens a new lease stream and starts sending keep alive requests. -func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { - sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
- if err != nil { - cancel() - return nil, err - } - - l.mu.Lock() - defer l.mu.Unlock() - if l.stream != nil && l.streamCancel != nil { - l.streamCancel() - } - - l.streamCancel = cancel - l.stream = stream - - go l.sendKeepAliveLoop(stream) - return stream, nil -} - -// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse -func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[karesp.ID] - if !ok { - return - } - - if karesp.TTL <= 0 { - // lease expired; close all keep alive channels - delete(l.keepAlives, karesp.ID) - ka.close() - return - } - - // send update to all channels - nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) - ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) - for _, ch := range ka.chs { - select { - case ch <- karesp: - default: - if l.lg != nil { - l.lg.Warn("lease keepalive response queue is full; dropping response send", - zap.Int("queue-size", len(ch)), - zap.Int("queue-capacity", cap(ch)), - ) - } - } - // still advance in order to rate-limit keep-alive sends - ka.nextKeepAlive = nextKeepAlive - } -} - -// deadlineLoop reaps any keep alive channels that have not received a response -// within the lease TTL -func (l *lessor) deadlineLoop() { - for { - select { - case <-time.After(time.Second): - case <-l.donec: - return - } - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.deadline.Before(now) { - // waited too long for response; lease may be expired - ka.close() - delete(l.keepAlives, id) - } - } - l.mu.Unlock() - } -} - -// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. 
-func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { - for { - var tosend []LeaseID - - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.nextKeepAlive.Before(now) { - tosend = append(tosend, id) - } - } - l.mu.Unlock() - - for _, id := range tosend { - r := &pb.LeaseKeepAliveRequest{ID: int64(id)} - if err := stream.Send(r); err != nil { - l.lg.Warn("error occurred during lease keep alive request sending", - zap.Error(err), - ) - return - } - } - - select { - case <-time.After(retryConnWait): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - } -} - -func (ka *keepAlive) close() { - close(ka.donec) - for _, ch := range ka.chs { - close(ch) - } -} diff --git a/client/v3/leasing/doc.go b/client/v3/leasing/doc.go deleted file mode 100644 index c38af3562b7..00000000000 --- a/client/v3/leasing/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package leasing serves linearizable reads from a local cache by acquiring -// exclusive write access to keys through a client-side leasing protocol. This -// leasing layer can either directly wrap the etcd client or it can be exposed -// through the etcd grpc proxy server, granting multiple clients write access. 
-// -// First, create a leasing KV from a clientv3.Client 'cli': -// -// lkv, err := leasing.NewKV(cli, "leasing-prefix") -// if err != nil { -// // handle error -// } -// -// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's -// key locally. On the server, the leasing key is stored to "leasing-prefix/abc": -// -// resp, err := lkv.Get(context.TODO(), "abc") -// -// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime: -// -// resp, err = lkv.Get(context.TODO(), "abc") -// -// If another leasing client writes to a leased key, then the owner relinquishes its exclusive -// access, permitting the writer to modify the key: -// -// lkv2, err := leasing.NewKV(cli, "leasing-prefix") -// if err != nil { -// // handle error -// } -// lkv2.Put(context.TODO(), "abc", "456") -// resp, err = lkv.Get("abc") -package leasing diff --git a/client/v3/leasing/kv.go b/client/v3/leasing/kv.go deleted file mode 100644 index f0cded20fea..00000000000 --- a/client/v3/leasing/kv.go +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package leasing - -import ( - "context" - "strings" - "sync" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type leasingKV struct { - cl *v3.Client - kv v3.KV - pfx string - leases leaseCache - - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup - - sessionOpts []concurrency.SessionOption - session *concurrency.Session - sessionc chan struct{} -} - -var closedCh chan struct{} - -func init() { - closedCh = make(chan struct{}) - close(closedCh) -} - -// NewKV wraps a KV instance so that all requests are wired through a leasing protocol. -func NewKV(cl *v3.Client, pfx string, opts ...concurrency.SessionOption) (v3.KV, func(), error) { - cctx, cancel := context.WithCancel(cl.Ctx()) - lkv := &leasingKV{ - cl: cl, - kv: cl.KV, - pfx: pfx, - leases: leaseCache{revokes: make(map[string]time.Time)}, - ctx: cctx, - cancel: cancel, - sessionOpts: opts, - sessionc: make(chan struct{}), - } - lkv.wg.Add(2) - go func() { - defer lkv.wg.Done() - lkv.monitorSession() - }() - go func() { - defer lkv.wg.Done() - lkv.leases.clearOldRevokes(cctx) - }() - return lkv, lkv.Close, lkv.waitSession(cctx) -} - -func (lkv *leasingKV) Close() { - lkv.cancel() - lkv.wg.Wait() -} - -func (lkv *leasingKV) Get(ctx context.Context, key string, opts ...v3.OpOption) (*v3.GetResponse, error) { - return lkv.get(ctx, v3.OpGet(key, opts...)) -} - -func (lkv *leasingKV) Put(ctx context.Context, key, val string, opts ...v3.OpOption) (*v3.PutResponse, error) { - return lkv.put(ctx, v3.OpPut(key, val, opts...)) -} - -func (lkv *leasingKV) Delete(ctx context.Context, key string, opts ...v3.OpOption) (*v3.DeleteResponse, error) { - return lkv.delete(ctx, v3.OpDelete(key, opts...)) -} - -func (lkv *leasingKV) Do(ctx context.Context, op v3.Op) (v3.OpResponse, 
error) { - switch { - case op.IsGet(): - resp, err := lkv.get(ctx, op) - return resp.OpResponse(), err - case op.IsPut(): - resp, err := lkv.put(ctx, op) - return resp.OpResponse(), err - case op.IsDelete(): - resp, err := lkv.delete(ctx, op) - return resp.OpResponse(), err - case op.IsTxn(): - cmps, thenOps, elseOps := op.Txn() - resp, err := lkv.Txn(ctx).If(cmps...).Then(thenOps...).Else(elseOps...).Commit() - return resp.OpResponse(), err - } - return v3.OpResponse{}, nil -} - -func (lkv *leasingKV) Compact(ctx context.Context, rev int64, opts ...v3.CompactOption) (*v3.CompactResponse, error) { - return lkv.kv.Compact(ctx, rev, opts...) -} - -func (lkv *leasingKV) Txn(ctx context.Context) v3.Txn { - return &txnLeasing{Txn: lkv.kv.Txn(ctx), lkv: lkv, ctx: ctx} -} - -func (lkv *leasingKV) monitorSession() { - for lkv.ctx.Err() == nil { - if lkv.session != nil { - select { - case <-lkv.session.Done(): - case <-lkv.ctx.Done(): - return - } - } - lkv.leases.mu.Lock() - select { - case <-lkv.sessionc: - lkv.sessionc = make(chan struct{}) - default: - } - lkv.leases.entries = make(map[string]*leaseKey) - lkv.leases.mu.Unlock() - - s, err := concurrency.NewSession(lkv.cl, lkv.sessionOpts...) 
- if err != nil { - continue - } - - lkv.leases.mu.Lock() - lkv.session = s - close(lkv.sessionc) - lkv.leases.mu.Unlock() - } -} - -func (lkv *leasingKV) monitorLease(ctx context.Context, key string, rev int64) { - cctx, cancel := context.WithCancel(lkv.ctx) - defer cancel() - for cctx.Err() == nil { - if rev == 0 { - resp, err := lkv.kv.Get(ctx, lkv.pfx+key) - if err != nil { - continue - } - rev = resp.Header.Revision - if len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) == "REVOKE" { - lkv.rescind(cctx, key, rev) - return - } - } - wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1)) - for resp := range wch { - for _, ev := range resp.Events { - if string(ev.Kv.Value) != "REVOKE" { - continue - } - if v3.LeaseID(ev.Kv.Lease) == lkv.leaseID() { - lkv.rescind(cctx, key, ev.Kv.ModRevision) - } - return - } - } - rev = 0 - } -} - -// rescind releases a lease from this client. -func (lkv *leasingKV) rescind(ctx context.Context, key string, rev int64) { - if lkv.leases.Evict(key) > rev { - return - } - cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev) - op := v3.OpDelete(lkv.pfx + key) - for ctx.Err() == nil { - if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil { - return - } - } -} - -func (lkv *leasingKV) waitRescind(ctx context.Context, key string, rev int64) error { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1)) - for resp := range wch { - for _, ev := range resp.Events { - if ev.Type == v3.EventTypeDelete { - return ctx.Err() - } - } - } - return ctx.Err() -} - -func (lkv *leasingKV) tryModifyOp(ctx context.Context, op v3.Op) (*v3.TxnResponse, chan<- struct{}, error) { - key := string(op.KeyBytes()) - wc, rev := lkv.leases.Lock(key) - cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1) - resp, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit() - switch { - case err != nil: - lkv.leases.Evict(key) - fallthrough - case !resp.Succeeded: - if wc != nil { - 
close(wc) - } - return nil, nil, err - } - return resp, wc, nil -} - -func (lkv *leasingKV) put(ctx context.Context, op v3.Op) (pr *v3.PutResponse, err error) { - if err := lkv.waitSession(ctx); err != nil { - return nil, err - } - for ctx.Err() == nil { - resp, wc, err := lkv.tryModifyOp(ctx, op) - if err != nil || wc == nil { - resp, err = lkv.revoke(ctx, string(op.KeyBytes()), op) - } - if err != nil { - return nil, err - } - if resp.Succeeded { - lkv.leases.mu.Lock() - lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), resp.Header) - lkv.leases.mu.Unlock() - pr = (*v3.PutResponse)(resp.Responses[0].GetResponsePut()) - pr.Header = resp.Header - } - if wc != nil { - close(wc) - } - if resp.Succeeded { - return pr, nil - } - } - return nil, ctx.Err() -} - -func (lkv *leasingKV) acquire(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) { - for ctx.Err() == nil { - if err := lkv.waitSession(ctx); err != nil { - return nil, err - } - lcmp := v3.Cmp{Key: []byte(key), Target: pb.Compare_LEASE} - resp, err := lkv.kv.Txn(ctx).If( - v3.Compare(v3.CreateRevision(lkv.pfx+key), "=", 0), - v3.Compare(lcmp, "=", 0)). - Then( - op, - v3.OpPut(lkv.pfx+key, "", v3.WithLease(lkv.leaseID()))). 
- Else( - op, - v3.OpGet(lkv.pfx+key), - ).Commit() - if err == nil { - if !resp.Succeeded { - kvs := resp.Responses[1].GetResponseRange().Kvs - // if txn failed since already owner, lease is acquired - resp.Succeeded = len(kvs) > 0 && v3.LeaseID(kvs[0].Lease) == lkv.leaseID() - } - return resp, nil - } - // retry if transient error - if _, ok := err.(rpctypes.EtcdError); ok { - return nil, err - } - if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable { - return nil, err - } - } - return nil, ctx.Err() -} - -func (lkv *leasingKV) get(ctx context.Context, op v3.Op) (*v3.GetResponse, error) { - do := func() (*v3.GetResponse, error) { - r, err := lkv.kv.Do(ctx, op) - return r.Get(), err - } - if !lkv.readySession() { - return do() - } - - if resp, ok := lkv.leases.Get(ctx, op); resp != nil { - return resp, nil - } else if !ok || op.IsSerializable() { - // must be handled by server or can skip linearization - return do() - } - - key := string(op.KeyBytes()) - if !lkv.leases.MayAcquire(key) { - resp, err := lkv.kv.Do(ctx, op) - return resp.Get(), err - } - - resp, err := lkv.acquire(ctx, key, v3.OpGet(key)) - if err != nil { - return nil, err - } - getResp := (*v3.GetResponse)(resp.Responses[0].GetResponseRange()) - getResp.Header = resp.Header - if resp.Succeeded { - getResp = lkv.leases.Add(key, getResp, op) - lkv.wg.Add(1) - go func() { - defer lkv.wg.Done() - lkv.monitorLease(ctx, key, resp.Header.Revision) - }() - } - return getResp, nil -} - -func (lkv *leasingKV) deleteRangeRPC(ctx context.Context, maxLeaseRev int64, key, end string) (*v3.DeleteResponse, error) { - lkey, lend := lkv.pfx+key, lkv.pfx+end - resp, err := lkv.kv.Txn(ctx).If( - v3.Compare(v3.CreateRevision(lkey).WithRange(lend), "<", maxLeaseRev+1), - ).Then( - v3.OpGet(key, v3.WithRange(end), v3.WithKeysOnly()), - v3.OpDelete(key, v3.WithRange(end)), - ).Commit() - if err != nil { - lkv.leases.EvictRange(key, end) - return nil, err - } - if !resp.Succeeded { - return nil, nil - } 
- for _, kv := range resp.Responses[0].GetResponseRange().Kvs { - lkv.leases.Delete(string(kv.Key), resp.Header) - } - delResp := (*v3.DeleteResponse)(resp.Responses[1].GetResponseDeleteRange()) - delResp.Header = resp.Header - return delResp, nil -} - -func (lkv *leasingKV) deleteRange(ctx context.Context, op v3.Op) (*v3.DeleteResponse, error) { - key, end := string(op.KeyBytes()), string(op.RangeBytes()) - for ctx.Err() == nil { - maxLeaseRev, err := lkv.revokeRange(ctx, key, end) - if err != nil { - return nil, err - } - wcs := lkv.leases.LockRange(key, end) - delResp, err := lkv.deleteRangeRPC(ctx, maxLeaseRev, key, end) - closeAll(wcs) - if err != nil || delResp != nil { - return delResp, err - } - } - return nil, ctx.Err() -} - -func (lkv *leasingKV) delete(ctx context.Context, op v3.Op) (dr *v3.DeleteResponse, err error) { - if err := lkv.waitSession(ctx); err != nil { - return nil, err - } - if len(op.RangeBytes()) > 0 { - return lkv.deleteRange(ctx, op) - } - key := string(op.KeyBytes()) - for ctx.Err() == nil { - resp, wc, err := lkv.tryModifyOp(ctx, op) - if err != nil || wc == nil { - resp, err = lkv.revoke(ctx, key, op) - } - if err != nil { - // don't know if delete was processed - lkv.leases.Evict(key) - return nil, err - } - if resp.Succeeded { - dr = (*v3.DeleteResponse)(resp.Responses[0].GetResponseDeleteRange()) - dr.Header = resp.Header - lkv.leases.Delete(key, dr.Header) - } - if wc != nil { - close(wc) - } - if resp.Succeeded { - return dr, nil - } - } - return nil, ctx.Err() -} - -func (lkv *leasingKV) revoke(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) { - rev := lkv.leases.Rev(key) - txn := lkv.kv.Txn(ctx).If(v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)).Then(op) - resp, err := txn.Else(v3.OpPut(lkv.pfx+key, "REVOKE", v3.WithIgnoreLease())).Commit() - if err != nil || resp.Succeeded { - return resp, err - } - return resp, lkv.waitRescind(ctx, key, resp.Header.Revision) -} - -func (lkv *leasingKV) 
revokeRange(ctx context.Context, begin, end string) (int64, error) { - lkey, lend := lkv.pfx+begin, "" - if len(end) > 0 { - lend = lkv.pfx + end - } - leaseKeys, err := lkv.kv.Get(ctx, lkey, v3.WithRange(lend)) - if err != nil { - return 0, err - } - return lkv.revokeLeaseKvs(ctx, leaseKeys.Kvs) -} - -func (lkv *leasingKV) revokeLeaseKvs(ctx context.Context, kvs []*mvccpb.KeyValue) (int64, error) { - maxLeaseRev := int64(0) - for _, kv := range kvs { - if rev := kv.CreateRevision; rev > maxLeaseRev { - maxLeaseRev = rev - } - if v3.LeaseID(kv.Lease) == lkv.leaseID() { - // don't revoke own keys - continue - } - key := strings.TrimPrefix(string(kv.Key), lkv.pfx) - if _, err := lkv.revoke(ctx, key, v3.OpGet(key)); err != nil { - return 0, err - } - } - return maxLeaseRev, nil -} - -func (lkv *leasingKV) waitSession(ctx context.Context) error { - lkv.leases.mu.RLock() - sessionc := lkv.sessionc - lkv.leases.mu.RUnlock() - select { - case <-sessionc: - return nil - case <-lkv.ctx.Done(): - return lkv.ctx.Err() - case <-ctx.Done(): - return ctx.Err() - } -} - -func (lkv *leasingKV) readySession() bool { - lkv.leases.mu.RLock() - defer lkv.leases.mu.RUnlock() - if lkv.session == nil { - return false - } - select { - case <-lkv.session.Done(): - default: - return true - } - return false -} - -func (lkv *leasingKV) leaseID() v3.LeaseID { - lkv.leases.mu.RLock() - defer lkv.leases.mu.RUnlock() - return lkv.session.Lease() -} diff --git a/client/v3/leasing/txn.go b/client/v3/leasing/txn.go deleted file mode 100644 index 30c6aa2e4d7..00000000000 --- a/client/v3/leasing/txn.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package leasing - -import ( - "context" - "strings" - - v3pb "go.etcd.io/etcd/api/v3/etcdserverpb" - v3 "go.etcd.io/etcd/client/v3" -) - -type txnLeasing struct { - v3.Txn - lkv *leasingKV - ctx context.Context - cs []v3.Cmp - opst []v3.Op - opse []v3.Op -} - -func (txn *txnLeasing) If(cs ...v3.Cmp) v3.Txn { - txn.cs = append(txn.cs, cs...) - txn.Txn = txn.Txn.If(cs...) - return txn -} - -func (txn *txnLeasing) Then(ops ...v3.Op) v3.Txn { - txn.opst = append(txn.opst, ops...) - txn.Txn = txn.Txn.Then(ops...) - return txn -} - -func (txn *txnLeasing) Else(ops ...v3.Op) v3.Txn { - txn.opse = append(txn.opse, ops...) - txn.Txn = txn.Txn.Else(ops...) - return txn -} - -func (txn *txnLeasing) Commit() (*v3.TxnResponse, error) { - if resp, err := txn.eval(); resp != nil || err != nil { - return resp, err - } - return txn.serverTxn() -} - -func (txn *txnLeasing) eval() (*v3.TxnResponse, error) { - // TODO: wait on keys in comparisons - thenOps, elseOps := gatherOps(txn.opst), gatherOps(txn.opse) - ops := make([]v3.Op, 0, len(thenOps)+len(elseOps)) - ops = append(ops, thenOps...) - ops = append(ops, elseOps...) 
- - for _, ch := range txn.lkv.leases.NotifyOps(ops) { - select { - case <-ch: - case <-txn.ctx.Done(): - return nil, txn.ctx.Err() - } - } - - txn.lkv.leases.mu.RLock() - defer txn.lkv.leases.mu.RUnlock() - succeeded, ok := txn.lkv.leases.evalCmp(txn.cs) - if !ok || txn.lkv.leases.header == nil { - return nil, nil - } - if ops = txn.opst; !succeeded { - ops = txn.opse - } - - resps, ok := txn.lkv.leases.evalOps(ops) - if !ok { - return nil, nil - } - return &v3.TxnResponse{Header: copyHeader(txn.lkv.leases.header), Succeeded: succeeded, Responses: resps}, nil -} - -// fallback computes the ops to fetch all possible conflicting -// leasing keys for a list of ops. -func (txn *txnLeasing) fallback(ops []v3.Op) (fbOps []v3.Op) { - for _, op := range ops { - if op.IsGet() { - continue - } - lkey, lend := txn.lkv.pfx+string(op.KeyBytes()), "" - if len(op.RangeBytes()) > 0 { - lend = txn.lkv.pfx + string(op.RangeBytes()) - } - fbOps = append(fbOps, v3.OpGet(lkey, v3.WithRange(lend))) - } - return fbOps -} - -func (txn *txnLeasing) guardKeys(ops []v3.Op) (cmps []v3.Cmp) { - seen := make(map[string]bool) - for _, op := range ops { - key := string(op.KeyBytes()) - if op.IsGet() || len(op.RangeBytes()) != 0 || seen[key] { - continue - } - rev := txn.lkv.leases.Rev(key) - cmps = append(cmps, v3.Compare(v3.CreateRevision(txn.lkv.pfx+key), "<", rev+1)) - seen[key] = true - } - return cmps -} - -func (txn *txnLeasing) guardRanges(ops []v3.Op) (cmps []v3.Cmp, err error) { - for _, op := range ops { - if op.IsGet() || len(op.RangeBytes()) == 0 { - continue - } - - key, end := string(op.KeyBytes()), string(op.RangeBytes()) - maxRevLK, err := txn.lkv.revokeRange(txn.ctx, key, end) - if err != nil { - return nil, err - } - - opts := append(v3.WithLastRev(), v3.WithRange(end)) - getResp, err := txn.lkv.kv.Get(txn.ctx, key, opts...) 
- if err != nil { - return nil, err - } - maxModRev := int64(0) - if len(getResp.Kvs) > 0 { - maxModRev = getResp.Kvs[0].ModRevision - } - - noKeyUpdate := v3.Compare(v3.ModRevision(key).WithRange(end), "<", maxModRev+1) - noLeaseUpdate := v3.Compare( - v3.CreateRevision(txn.lkv.pfx+key).WithRange(txn.lkv.pfx+end), - "<", - maxRevLK+1) - cmps = append(cmps, noKeyUpdate, noLeaseUpdate) - } - return cmps, nil -} - -func (txn *txnLeasing) guard(ops []v3.Op) ([]v3.Cmp, error) { - cmps := txn.guardKeys(ops) - rangeCmps, err := txn.guardRanges(ops) - return append(cmps, rangeCmps...), err -} - -func (txn *txnLeasing) commitToCache(txnResp *v3pb.TxnResponse, userTxn v3.Op) { - ops := gatherResponseOps(txnResp.Responses, []v3.Op{userTxn}) - txn.lkv.leases.mu.Lock() - for _, op := range ops { - key := string(op.KeyBytes()) - if op.IsDelete() && len(op.RangeBytes()) > 0 { - end := string(op.RangeBytes()) - for k := range txn.lkv.leases.entries { - if inRange(k, key, end) { - txn.lkv.leases.delete(k, txnResp.Header) - } - } - } else if op.IsDelete() { - txn.lkv.leases.delete(key, txnResp.Header) - } - if op.IsPut() { - txn.lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), txnResp.Header) - } - } - txn.lkv.leases.mu.Unlock() -} - -func (txn *txnLeasing) revokeFallback(fbResps []*v3pb.ResponseOp) error { - for _, resp := range fbResps { - _, err := txn.lkv.revokeLeaseKvs(txn.ctx, resp.GetResponseRange().Kvs) - if err != nil { - return err - } - } - return nil -} - -func (txn *txnLeasing) serverTxn() (*v3.TxnResponse, error) { - if err := txn.lkv.waitSession(txn.ctx); err != nil { - return nil, err - } - - userOps := gatherOps(append(txn.opst, txn.opse...)) - userTxn := v3.OpTxn(txn.cs, txn.opst, txn.opse) - fbOps := txn.fallback(userOps) - - defer closeAll(txn.lkv.leases.LockWriteOps(userOps)) - for { - cmps, err := txn.guard(userOps) - if err != nil { - return nil, err - } - resp, err := txn.lkv.kv.Txn(txn.ctx).If(cmps...).Then(userTxn).Else(fbOps...).Commit() - if err != nil 
{ - for _, cmp := range cmps { - txn.lkv.leases.Evict(strings.TrimPrefix(string(cmp.Key), txn.lkv.pfx)) - } - return nil, err - } - if resp.Succeeded { - txn.commitToCache((*v3pb.TxnResponse)(resp), userTxn) - userResp := resp.Responses[0].GetResponseTxn() - userResp.Header = resp.Header - return (*v3.TxnResponse)(userResp), nil - } - if err := txn.revokeFallback(resp.Responses); err != nil { - return nil, err - } - } -} diff --git a/client/v3/leasing/util.go b/client/v3/leasing/util.go deleted file mode 100644 index b6a520f03f0..00000000000 --- a/client/v3/leasing/util.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package leasing - -import ( - "bytes" - - v3pb "go.etcd.io/etcd/api/v3/etcdserverpb" - v3 "go.etcd.io/etcd/client/v3" -) - -func compareInt64(a, b int64) int { - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } -} - -func evalCmp(resp *v3.GetResponse, tcmp v3.Cmp) bool { - var result int - if len(resp.Kvs) != 0 { - kv := resp.Kvs[0] - switch tcmp.Target { - case v3pb.Compare_VALUE: - if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Value); tv != nil { - result = bytes.Compare(kv.Value, tv.Value) - } - case v3pb.Compare_CREATE: - if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_CreateRevision); tv != nil { - result = compareInt64(kv.CreateRevision, tv.CreateRevision) - } - case v3pb.Compare_MOD: - if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_ModRevision); tv != nil { - result = compareInt64(kv.ModRevision, tv.ModRevision) - } - case v3pb.Compare_VERSION: - if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Version); tv != nil { - result = compareInt64(kv.Version, tv.Version) - } - } - } - switch tcmp.Result { - case v3pb.Compare_EQUAL: - return result == 0 - case v3pb.Compare_NOT_EQUAL: - return result != 0 - case v3pb.Compare_GREATER: - return result > 0 - case v3pb.Compare_LESS: - return result < 0 - } - return true -} - -func gatherOps(ops []v3.Op) (ret []v3.Op) { - for _, op := range ops { - if !op.IsTxn() { - ret = append(ret, op) - continue - } - _, thenOps, elseOps := op.Txn() - ret = append(ret, gatherOps(append(thenOps, elseOps...))...) - } - return ret -} - -func gatherResponseOps(resp []*v3pb.ResponseOp, ops []v3.Op) (ret []v3.Op) { - for i, op := range ops { - if !op.IsTxn() { - ret = append(ret, op) - continue - } - _, thenOps, elseOps := op.Txn() - if txnResp := resp[i].GetResponseTxn(); txnResp.Succeeded { - ret = append(ret, gatherResponseOps(txnResp.Responses, thenOps)...) - } else { - ret = append(ret, gatherResponseOps(txnResp.Responses, elseOps)...) 
- } - } - return ret -} - -func copyHeader(hdr *v3pb.ResponseHeader) *v3pb.ResponseHeader { - h := *hdr - return &h -} - -func closeAll(chs []chan<- struct{}) { - for _, ch := range chs { - close(ch) - } -} diff --git a/client/v3/logger.go b/client/v3/logger.go deleted file mode 100644 index 300363cd25b..00000000000 --- a/client/v3/logger.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "log" - "os" - - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zapgrpc" - "google.golang.org/grpc/grpclog" - - "go.etcd.io/etcd/client/pkg/v3/logutil" -) - -func init() { - // We override grpc logger only when the environment variable is set - // in order to not interfere by default with user's code or other libraries. - if os.Getenv("ETCD_CLIENT_DEBUG") != "" { - lg, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel()) - if err != nil { - panic(err) - } - lg = lg.Named("etcd-client") - grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) - } -} - -// SetLogger sets grpc logger. -// -// Deprecated: use grpclog.SetLoggerV2 directly or grpc_zap.ReplaceGrpcLoggerV2. -func SetLogger(l grpclog.LoggerV2) { - grpclog.SetLoggerV2(l) -} - -// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level. 
-func etcdClientDebugLevel() zapcore.Level { - envLevel := os.Getenv("ETCD_CLIENT_DEBUG") - if envLevel == "" || envLevel == "true" { - return zapcore.InfoLevel - } - var l zapcore.Level - if err := l.Set(envLevel); err != nil { - log.Print("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'") - return zapcore.InfoLevel - } - return l -} diff --git a/client/v3/main_test.go b/client/v3/main_test.go deleted file mode 100644 index 4007d77bc5a..00000000000 --- a/client/v3/main_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -const ( - dialTimeout = 5 * time.Second - requestTimeout = 10 * time.Second -) - -func exampleEndpoints() []string { return nil } - -func forUnitTestsRunInMockedContext(mocking func(), example func()) { - mocking() - // TODO: Call 'example' when mocking() provides realistic mocking of transport. - - // The real testing logic of examples gets executed - // as part of ./tests/integration/clientv3/integration/... 
-} - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/client/v3/maintenance.go b/client/v3/maintenance.go deleted file mode 100644 index 082b77f1a5a..00000000000 --- a/client/v3/maintenance.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "errors" - "fmt" - "io" - - "go.uber.org/zap" - "google.golang.org/grpc" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -type ( - DefragmentResponse pb.DefragmentResponse - AlarmResponse pb.AlarmResponse - AlarmMember pb.AlarmMember - StatusResponse pb.StatusResponse - HashKVResponse pb.HashKVResponse - MoveLeaderResponse pb.MoveLeaderResponse - DowngradeResponse pb.DowngradeResponse - - DowngradeAction pb.DowngradeRequest_DowngradeAction -) - -const ( - DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE) - DowngradeEnable = DowngradeAction(pb.DowngradeRequest_ENABLE) - DowngradeCancel = DowngradeAction(pb.DowngradeRequest_CANCEL) -) - -type Maintenance interface { - // AlarmList gets all active alarms. - AlarmList(ctx context.Context) (*AlarmResponse, error) - - // AlarmDisarm disarms a given alarm. - AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - - // Defragment releases wasted space from internal fragmentation on a given etcd member. 
- // Defragment is only needed when deleting a large number of keys and want to reclaim - // the resources. - // Defragment is an expensive operation. User should avoid defragmenting multiple members - // at the same time. - // To defragment multiple members in the cluster, user need to call defragment multiple - // times with different endpoints. - Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) - - // Status gets the status of the endpoint. - Status(ctx context.Context, endpoint string) (*StatusResponse, error) - - // HashKV returns a hash of the KV state at the time of the RPC. - // If revision is zero, the hash is computed on all keys. If the revision - // is non-zero, the hash is computed on all keys at or below the given revision. - HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) - - // SnapshotWithVersion returns a reader for a point-in-time snapshot and version of etcd that created it. - // If the context "ctx" is canceled or timed out, reading from returned - // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). - SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) - - // Snapshot provides a reader for a point-in-time snapshot of etcd. - // If the context "ctx" is canceled or timed out, reading from returned - // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). - // Deprecated: use SnapshotWithVersion instead. - Snapshot(ctx context.Context) (io.ReadCloser, error) - - // MoveLeader requests current leader to transfer its leadership to the transferee. - // Request must be made to the leader. - MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) - - // Downgrade requests downgrades, verifies feasibility or cancels downgrade - // on the cluster version. - // Supported since etcd 3.5. 
- Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) -} - -// SnapshotResponse is aggregated response from the snapshot stream. -// Consumer is responsible for closing steam by calling .Snapshot.Close() -type SnapshotResponse struct { - // Header is the first header in the snapshot stream, has the current key-value store information - // and indicates the point in time of the snapshot. - Header *pb.ResponseHeader - // Snapshot exposes ReaderCloser interface for data stored in the Blob field in the snapshot stream. - Snapshot io.ReadCloser - // Version is the local version of server that created the snapshot. - // In cluster with binaries with different version, each cluster can return different result. - // Informs which etcd server version should be used when restoring the snapshot. - // Supported on etcd >= v3.6. - Version string -} - -type maintenance struct { - lg *zap.Logger - dial func(endpoint string) (pb.MaintenanceClient, func(), error) - remote pb.MaintenanceClient - callOpts []grpc.CallOption -} - -func NewMaintenance(c *Client) Maintenance { - api := &maintenance{ - lg: c.lg, - dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { - conn, err := c.Dial(endpoint) - if err != nil { - return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) - } - - cancel := func() { conn.Close() } - return RetryMaintenanceClient(c, conn), cancel, nil - }, - remote: RetryMaintenanceClient(c, c.conn), - } - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { - api := &maintenance{ - lg: c.lg, - dial: func(string) (pb.MaintenanceClient, func(), error) { - return remote, func() {}, nil - }, - remote: remote, - } - if c != nil { - api.callOpts = c.callOpts - } - return api -} - -func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { - req := 
&pb.AlarmRequest{ - Action: pb.AlarmRequest_GET, - MemberID: 0, // all - Alarm: pb.AlarmType_NONE, // all - } - resp, err := m.remote.Alarm(ctx, req, m.callOpts...) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_DEACTIVATE, - MemberID: am.MemberID, - Alarm: am.Alarm, - } - - if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { - ar, err := m.AlarmList(ctx) - if err != nil { - return nil, toErr(ctx, err) - } - ret := AlarmResponse{} - for _, am := range ar.Alarms { - dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) - if derr != nil { - return nil, toErr(ctx, derr) - } - ret.Alarms = append(ret.Alarms, dresp.Alarms...) - } - return &ret, nil - } - - resp, err := m.remote.Alarm(ctx, req, m.callOpts...) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*DefragmentResponse)(resp), nil -} - -func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) 
- if err != nil { - return nil, toErr(ctx, err) - } - return (*StatusResponse)(resp), nil -} - -func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { - remote, cancel, err := m.dial(endpoint) - if err != nil { - - return nil, toErr(ctx, err) - } - defer cancel() - resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) - if err != nil { - return nil, toErr(ctx, err) - } - return (*HashKVResponse)(resp), nil -} - -func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) - if err != nil { - return nil, toErr(ctx, err) - } - - m.lg.Info("opened snapshot stream; downloading") - pr, pw := io.Pipe() - - resp, err := ss.Recv() - if err != nil { - m.logAndCloseWithError(err, pw) - return nil, err - } - go func() { - // Saving response is blocking - err = m.save(resp, pw) - if err != nil { - m.logAndCloseWithError(err, pw) - return - } - for { - resp, err := ss.Recv() - if err != nil { - m.logAndCloseWithError(err, pw) - return - } - err = m.save(resp, pw) - if err != nil { - m.logAndCloseWithError(err, pw) - return - } - } - }() - - return &SnapshotResponse{ - Header: resp.GetHeader(), - Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, - Version: resp.GetVersion(), - }, err -} - -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) 
- if err != nil { - return nil, toErr(ctx, err) - } - - m.lg.Info("opened snapshot stream; downloading") - pr, pw := io.Pipe() - - go func() { - for { - resp, err := ss.Recv() - if err != nil { - m.logAndCloseWithError(err, pw) - return - } - err = m.save(resp, pw) - if err != nil { - m.logAndCloseWithError(err, pw) - return - } - } - }() - return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, err -} - -func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) { - switch err { - case io.EOF: - m.lg.Info("completed snapshot read; closing") - default: - m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) - } - pw.CloseWithError(err) -} - -func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error { - // can "resp == nil && err == nil" - // before we receive snapshot SHA digest? - // No, server sends EOF with an empty response - // after it sends SHA digest at the end - - if _, werr := pw.Write(resp.Blob); werr != nil { - return werr - } - return nil -} - -type snapshotReadCloser struct { - ctx context.Context - io.ReadCloser -} - -func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { - n, err = rc.ReadCloser.Read(p) - return n, toErr(rc.ctx, err) -} - -func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { - resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) 
- return (*MoveLeaderResponse)(resp), toErr(ctx, err) -} - -func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) { - var actionType pb.DowngradeRequest_DowngradeAction - switch action { - case DowngradeValidate: - actionType = pb.DowngradeRequest_VALIDATE - case DowngradeEnable: - actionType = pb.DowngradeRequest_ENABLE - case DowngradeCancel: - actionType = pb.DowngradeRequest_CANCEL - default: - return nil, errors.New("etcdclient: unknown downgrade action") - } - resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...) - return (*DowngradeResponse)(resp), toErr(ctx, err) -} diff --git a/client/v3/mirror/syncer.go b/client/v3/mirror/syncer.go deleted file mode 100644 index 3e83c989a87..00000000000 --- a/client/v3/mirror/syncer.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mirror implements etcd mirroring operations. -package mirror - -import ( - "context" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -const ( - batchLimit = 1000 -) - -// Syncer syncs with the key-value state of an etcd cluster. -type Syncer interface { - // SyncBase syncs the base state of the key-value state. - // The key-value state are sent through the returned chan. 
- SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) - // SyncUpdates syncs the updates of the key-value state. - // The update events are sent through the returned chan. - SyncUpdates(ctx context.Context) clientv3.WatchChan -} - -// NewSyncer creates a Syncer. -func NewSyncer(c *clientv3.Client, prefix string, rev int64) Syncer { - return &syncer{c: c, prefix: prefix, rev: rev} -} - -type syncer struct { - c *clientv3.Client - rev int64 - prefix string -} - -func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) { - respchan := make(chan clientv3.GetResponse, 1024) - errchan := make(chan error, 1) - - // if rev is not specified, we will choose the most recent revision. - if s.rev == 0 { - // If len(s.prefix) == 0, we will check a random key to fetch the most recent - // revision (foo), otherwise we use the provided prefix. - checkPath := "foo" - if len(s.prefix) != 0 { - checkPath = s.prefix - } - resp, err := s.c.Get(ctx, checkPath) - if err != nil { - errchan <- err - close(respchan) - close(errchan) - return respchan, errchan - } - s.rev = resp.Header.Revision - } - - go func() { - defer close(respchan) - defer close(errchan) - - var key string - - opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev), - clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)} - - if len(s.prefix) == 0 { - // If len(s.prefix) == 0, we will sync the entire key-value space. - // We then range from the smallest key (0x00) to the end. - opts = append(opts, clientv3.WithFromKey()) - key = "\x00" - } else { - // If len(s.prefix) != 0, we will sync key-value space with given prefix. - // We then range from the prefix to the next prefix if exists. Or we will - // range from the prefix to the end if the next prefix does not exists. - opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(s.prefix))) - key = s.prefix - } - - for { - resp, err := s.c.Get(ctx, key, opts...) 
- if err != nil { - errchan <- err - return - } - - respchan <- *resp - - if !resp.More { - return - } - // move to next key - key = string(append(resp.Kvs[len(resp.Kvs)-1].Key, 0)) - } - }() - - return respchan, errchan -} - -func (s *syncer) SyncUpdates(ctx context.Context) clientv3.WatchChan { - if s.rev == 0 { - panic("unexpected revision = 0. Calling SyncUpdates before SyncBase finishes?") - } - return s.c.Watch(ctx, s.prefix, clientv3.WithPrefix(), clientv3.WithRev(s.rev+1)) -} diff --git a/client/v3/mock/mockserver/doc.go b/client/v3/mock/mockserver/doc.go deleted file mode 100644 index 030b3b2ffb7..00000000000 --- a/client/v3/mock/mockserver/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mockserver provides mock implementations for etcdserver's server interface. -package mockserver diff --git a/client/v3/namespace/doc.go b/client/v3/namespace/doc.go deleted file mode 100644 index 689e0e0bb38..00000000000 --- a/client/v3/namespace/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package namespace is a clientv3 wrapper that translates all keys to begin -// with a given prefix. -// -// First, create a client: -// -// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) -// if err != nil { -// // handle error! -// } -// -// Next, override the client interfaces: -// -// unprefixedKV := cli.KV -// cli.KV = namespace.NewKV(cli.KV, "my-prefix/") -// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/") -// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/") -// -// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/": -// -// cli.Put(context.TODO(), "abc", "123") -// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc") -// fmt.Printf("%s\n", resp.Kvs[0].Value) -// // Output: 123 -// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456") -// resp, _ = cli.Get(context.TODO(), "abc") -// fmt.Printf("%s\n", resp.Kvs[0].Value) -// // Output: 456 -package namespace diff --git a/client/v3/namespace/kv.go b/client/v3/namespace/kv.go deleted file mode 100644 index aa338d5356d..00000000000 --- a/client/v3/namespace/kv.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package namespace - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" -) - -type kvPrefix struct { - clientv3.KV - pfx string -} - -// NewKV wraps a KV instance so that all requests -// are prefixed with a given string. -func NewKV(kv clientv3.KV, prefix string) clientv3.KV { - return &kvPrefix{kv, prefix} -} - -func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) { - if len(key) == 0 { - return nil, rpctypes.ErrEmptyKey - } - op := kv.prefixOp(clientv3.OpPut(key, val, opts...)) - r, err := kv.KV.Do(ctx, op) - if err != nil { - return nil, err - } - put := r.Put() - kv.unprefixPutResponse(put) - return put, nil -} - -func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { - if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) { - return nil, rpctypes.ErrEmptyKey - } - getOp := clientv3.OpGet(key, opts...) 
- if !getOp.IsSortOptionValid() { - return nil, rpctypes.ErrInvalidSortOption - } - r, err := kv.KV.Do(ctx, kv.prefixOp(getOp)) - if err != nil { - return nil, err - } - get := r.Get() - kv.unprefixGetResponse(get) - return get, nil -} - -func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) { - if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) { - return nil, rpctypes.ErrEmptyKey - } - r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...))) - if err != nil { - return nil, err - } - del := r.Del() - kv.unprefixDeleteResponse(del) - return del, nil -} - -func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) { - if len(op.KeyBytes()) == 0 && !op.IsTxn() { - return clientv3.OpResponse{}, rpctypes.ErrEmptyKey - } - r, err := kv.KV.Do(ctx, kv.prefixOp(op)) - if err != nil { - return r, err - } - switch { - case r.Get() != nil: - kv.unprefixGetResponse(r.Get()) - case r.Put() != nil: - kv.unprefixPutResponse(r.Put()) - case r.Del() != nil: - kv.unprefixDeleteResponse(r.Del()) - case r.Txn() != nil: - kv.unprefixTxnResponse(r.Txn()) - } - return r, nil -} - -type txnPrefix struct { - clientv3.Txn - kv *kvPrefix -} - -func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn { - return &txnPrefix{kv.KV.Txn(ctx), kv} -} - -func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn { - txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...) - return txn -} - -func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn { - txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...) - return txn -} - -func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn { - txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...) 
- return txn -} - -func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) { - resp, err := txn.Txn.Commit() - if err != nil { - return nil, err - } - txn.kv.unprefixTxnResponse(resp) - return resp, nil -} - -func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op { - if !op.IsTxn() { - begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes()) - op.WithKeyBytes(begin) - op.WithRangeBytes(end) - return op - } - cmps, thenOps, elseOps := op.Txn() - return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps)) -} - -func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) { - for i := range resp.Kvs { - resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):] - } -} - -func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) { - if resp.PrevKv != nil { - resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):] - } -} - -func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) { - for i := range resp.PrevKvs { - resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):] - } -} - -func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) { - for _, r := range resp.Responses { - switch tv := r.Response.(type) { - case *pb.ResponseOp_ResponseRange: - if tv.ResponseRange != nil { - kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange)) - } - case *pb.ResponseOp_ResponsePut: - if tv.ResponsePut != nil { - kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut)) - } - case *pb.ResponseOp_ResponseDeleteRange: - if tv.ResponseDeleteRange != nil { - kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange)) - } - case *pb.ResponseOp_ResponseTxn: - if tv.ResponseTxn != nil { - kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn)) - } - default: - } - } -} - -func (kv *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) { - return prefixInterval(kv.pfx, key, end) -} - -func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp { - newCmps 
:= make([]clientv3.Cmp, len(cs)) - for i := range cs { - newCmps[i] = cs[i] - pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), cs[i].RangeEnd) - newCmps[i].WithKeyBytes(pfxKey) - if len(cs[i].RangeEnd) != 0 { - newCmps[i].RangeEnd = endKey - } - } - return newCmps -} - -func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op { - newOps := make([]clientv3.Op, len(ops)) - for i := range ops { - newOps[i] = kv.prefixOp(ops[i]) - } - return newOps -} diff --git a/client/v3/namespace/lease.go b/client/v3/namespace/lease.go deleted file mode 100644 index b80b530467c..00000000000 --- a/client/v3/namespace/lease.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package namespace - -import ( - "bytes" - "context" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -type leasePrefix struct { - clientv3.Lease - pfx []byte -} - -// NewLease wraps a Lease interface to filter for only keys with a prefix -// and remove that prefix when fetching attached keys through TimeToLive. -func NewLease(l clientv3.Lease, prefix string) clientv3.Lease { - return &leasePrefix{l, []byte(prefix)} -} - -func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) { - resp, err := l.Lease.TimeToLive(ctx, id, opts...) 
- if err != nil { - return nil, err - } - if len(resp.Keys) > 0 { - var outKeys [][]byte - for i := range resp.Keys { - if len(resp.Keys[i]) < len(l.pfx) { - // too short - continue - } - if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) { - // doesn't match prefix - continue - } - // strip prefix - outKeys = append(outKeys, resp.Keys[i][len(l.pfx):]) - } - resp.Keys = outKeys - } - return resp, nil -} diff --git a/client/v3/namespace/util_test.go b/client/v3/namespace/util_test.go deleted file mode 100644 index 9ba472b0a28..00000000000 --- a/client/v3/namespace/util_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package namespace - -import ( - "bytes" - "testing" -) - -func TestPrefixInterval(t *testing.T) { - tests := []struct { - pfx string - key []byte - end []byte - - wKey []byte - wEnd []byte - }{ - // single key - { - pfx: "pfx/", - key: []byte("a"), - - wKey: []byte("pfx/a"), - }, - // range - { - pfx: "pfx/", - key: []byte("abc"), - end: []byte("def"), - - wKey: []byte("pfx/abc"), - wEnd: []byte("pfx/def"), - }, - // one-sided range - { - pfx: "pfx/", - key: []byte("abc"), - end: []byte{0}, - - wKey: []byte("pfx/abc"), - wEnd: []byte("pfx0"), - }, - // one-sided range, end of keyspace - { - pfx: "\xff\xff", - key: []byte("abc"), - end: []byte{0}, - - wKey: []byte("\xff\xffabc"), - wEnd: []byte{0}, - }, - } - for i, tt := range tests { - pfxKey, pfxEnd := prefixInterval(tt.pfx, tt.key, tt.end) - if !bytes.Equal(pfxKey, tt.wKey) { - t.Errorf("#%d: expected key=%q, got key=%q", i, tt.wKey, pfxKey) - } - if !bytes.Equal(pfxEnd, tt.wEnd) { - t.Errorf("#%d: expected end=%q, got end=%q", i, tt.wEnd, pfxEnd) - } - } -} diff --git a/client/v3/namespace/watch.go b/client/v3/namespace/watch.go deleted file mode 100644 index edf1af87b58..00000000000 --- a/client/v3/namespace/watch.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package namespace - -import ( - "context" - "sync" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -type watcherPrefix struct { - clientv3.Watcher - pfx string - - wg sync.WaitGroup - stopc chan struct{} - stopOnce sync.Once -} - -// NewWatcher wraps a Watcher instance so that all Watch requests -// are prefixed with a given string and all Watch responses have -// the prefix removed. -func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher { - return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})} -} - -func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { - // since OpOption is opaque, determine range for prefixing through an OpGet - op := clientv3.OpGet(key, opts...) - end := op.RangeBytes() - pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end) - if pfxEnd != nil { - opts = append(opts, clientv3.WithRange(string(pfxEnd))) - } - - wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...) - - // translate watch events from prefixed to unprefixed - pfxWch := make(chan clientv3.WatchResponse) - w.wg.Add(1) - go func() { - defer func() { - close(pfxWch) - w.wg.Done() - }() - for wr := range wch { - for i := range wr.Events { - wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):] - if wr.Events[i].PrevKv != nil { - wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key - } - } - select { - case pfxWch <- wr: - case <-ctx.Done(): - return - case <-w.stopc: - return - } - } - }() - return pfxWch -} - -func (w *watcherPrefix) Close() error { - err := w.Watcher.Close() - w.stopOnce.Do(func() { close(w.stopc) }) - w.wg.Wait() - return err -} diff --git a/client/v3/naming/doc.go b/client/v3/naming/doc.go deleted file mode 100644 index f2050a6aa6c..00000000000 --- a/client/v3/naming/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the 
License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package naming provides: -// - subpackage endpoints: an abstraction layer to store and read endpoints -// information from etcd. -// - subpackage resolver: an etcd-backed gRPC resolver for discovering gRPC -// services based on the endpoints configuration -// -// To use, first import the packages: -// -// import ( -// "go.etcd.io/etcd/client/v3" -// "go.etcd.io/etcd/client/v3/naming/endpoints" -// "go.etcd.io/etcd/client/v3/naming/resolver" -// "google.golang.org/grpc" -// ) -// -// First, register new endpoint addresses for a service: -// -// func etcdAdd(c *clientv3.Client, service, addr string) error { -// em := endpoints.NewManager(c, service) -// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr}); -// } -// -// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer: -// -// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) { -// etcdResolver, err := resolver.NewBuilder(c); -// if err { return nil, err } -// return grpc.Dial("etcd:///" + service, grpc.WithResolvers(etcdResolver)) -// } -// -// Optionally, force delete an endpoint: -// -// func etcdDelete(c *clientv3, service, addr string) error { -// em := endpoints.NewManager(c, service) -// return em.DeleteEndpoint(c.Ctx(), service+"/"+addr) -// } -// -// Or register an expiring endpoint with a lease: -// -// func etcdAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error { -// em := endpoints.NewManager(c, service) -// return em.AddEndpoint(c.Ctx(), service+"/"+addr, 
endpoints.Endpoint{Addr:addr}, clientv3.WithLease(lid)); -// } -package naming diff --git a/client/v3/naming/endpoints/internal/update.go b/client/v3/naming/endpoints/internal/update.go deleted file mode 100644 index d42f49062a4..00000000000 --- a/client/v3/naming/endpoints/internal/update.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -// Operation describes action performed on endpoint (addition vs deletion). -// Must stay JSON-format compatible with: -// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Operation -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a persistent (JSON marshalled) format representing -// endpoint within the etcd storage. -// -// As the format can be persisted by one version of etcd client library and -// read by other the format must be kept backward compatible and -// in particular must be superset of the grpc(<=1.29.1) naming.Update structure: -// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Update -// -// Please document since which version of etcd-client given property is supported. -// Please keep the naming consistent with e.g. https://pkg.go.dev/google.golang.org/grpc/resolver#Address. 
-// -// Notice that it is not valid having both empty string Addr and nil Metadata in an Update. -type Update struct { - // Op indicates the operation of the update. - // Since etcd 3.1. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - // Since etcd 3.1. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - // Since etcd 3.1. - Metadata interface{} -} diff --git a/client/v3/naming/resolver/resolver.go b/client/v3/naming/resolver/resolver.go deleted file mode 100644 index 7b9f61d2e08..00000000000 --- a/client/v3/naming/resolver/resolver.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resolver - -import ( - "context" - "strings" - "sync" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/naming/endpoints" - - "google.golang.org/grpc/codes" - gresolver "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" -) - -type builder struct { - c *clientv3.Client -} - -func (b builder) Build(target gresolver.Target, cc gresolver.ClientConn, opts gresolver.BuildOptions) (gresolver.Resolver, error) { - // Refer to https://github.com/grpc/grpc-go/blob/16d3df80f029f57cff5458f1d6da6aedbc23545d/clientconn.go#L1587-L1611 - endpoint := target.URL.Path - if endpoint == "" { - endpoint = target.URL.Opaque - } - endpoint = strings.TrimPrefix(endpoint, "/") - r := &resolver{ - c: b.c, - target: endpoint, - cc: cc, - } - r.ctx, r.cancel = context.WithCancel(context.Background()) - - em, err := endpoints.NewManager(r.c, r.target) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "resolver: failed to new endpoint manager: %s", err) - } - r.wch, err = em.NewWatchChannel(r.ctx) - if err != nil { - return nil, status.Errorf(codes.Internal, "resolver: failed to new watch channer: %s", err) - } - - r.wg.Add(1) - go r.watch() - return r, nil -} - -func (b builder) Scheme() string { - return "etcd" -} - -// NewBuilder creates a resolver builder. 
-func NewBuilder(client *clientv3.Client) (gresolver.Builder, error) { - return builder{c: client}, nil -} - -type resolver struct { - c *clientv3.Client - target string - cc gresolver.ClientConn - wch endpoints.WatchChannel - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup -} - -func (r *resolver) watch() { - defer r.wg.Done() - - allUps := make(map[string]*endpoints.Update) - for { - select { - case <-r.ctx.Done(): - return - case ups, ok := <-r.wch: - if !ok { - return - } - - for _, up := range ups { - switch up.Op { - case endpoints.Add: - allUps[up.Key] = up - case endpoints.Delete: - delete(allUps, up.Key) - } - } - - addrs := convertToGRPCAddress(allUps) - r.cc.UpdateState(gresolver.State{Addresses: addrs}) - } - } -} - -func convertToGRPCAddress(ups map[string]*endpoints.Update) []gresolver.Address { - var addrs []gresolver.Address - for _, up := range ups { - addr := gresolver.Address{ - Addr: up.Endpoint.Addr, - Metadata: up.Endpoint.Metadata, - } - addrs = append(addrs, addr) - } - return addrs -} - -// ResolveNow is a no-op here. -// It's just a hint, resolver can ignore this if it's not necessary. -func (r *resolver) ResolveNow(gresolver.ResolveNowOptions) {} - -func (r *resolver) Close() { - r.cancel() - r.wg.Wait() -} diff --git a/client/v3/op_test.go b/client/v3/op_test.go deleted file mode 100644 index f1890eafafc..00000000000 --- a/client/v3/op_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "reflect" - "testing" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -// TestOpWithSort tests if WithSort(ASCEND, KEY) and WithLimit are specified, -// RangeRequest ignores the SortOption to avoid unnecessarily fetching -// the entire key-space. -func TestOpWithSort(t *testing.T) { - opReq := OpGet("foo", WithSort(SortByKey, SortAscend), WithLimit(10)).toRequestOp().Request - q, ok := opReq.(*pb.RequestOp_RequestRange) - if !ok { - t.Fatalf("expected range request, got %v", reflect.TypeOf(opReq)) - } - req := q.RequestRange - wreq := &pb.RangeRequest{Key: []byte("foo"), SortOrder: pb.RangeRequest_NONE, Limit: 10} - if !reflect.DeepEqual(req, wreq) { - t.Fatalf("expected %+v, got %+v", wreq, req) - } -} - -func TestIsSortOptionValid(t *testing.T) { - rangeReqs := []struct { - sortOrder pb.RangeRequest_SortOrder - sortTarget pb.RangeRequest_SortTarget - expectedValid bool - }{ - { - sortOrder: pb.RangeRequest_ASCEND, - sortTarget: pb.RangeRequest_CREATE, - expectedValid: true, - }, - { - sortOrder: pb.RangeRequest_ASCEND, - sortTarget: 100, - expectedValid: false, - }, - { - sortOrder: 200, - sortTarget: pb.RangeRequest_MOD, - expectedValid: false, - }, - } - - for _, req := range rangeReqs { - getOp := Op{ - sort: &SortOption{ - Order: SortOrder(req.sortOrder), - Target: SortTarget(req.sortTarget), - }, - } - - actualRet := getOp.IsSortOptionValid() - if actualRet != req.expectedValid { - t.Errorf("expected sortOrder (%d) and sortTarget (%d) to be %t, but got %t", - req.sortOrder, req.sortTarget, req.expectedValid, actualRet) - } - } -} - -func TestIsOptsWithPrefix(t *testing.T) { - optswithprefix := []OpOption{WithPrefix()} - if !IsOptsWithPrefix(optswithprefix) { - t.Errorf("IsOptsWithPrefix = false, expected true") - } - - optswithfromkey := []OpOption{WithFromKey()} - if IsOptsWithPrefix(optswithfromkey) { - 
t.Errorf("IsOptsWithPrefix = true, expected false") - } -} - -func TestIsOptsWithFromKey(t *testing.T) { - optswithfromkey := []OpOption{WithFromKey()} - if !IsOptsWithFromKey(optswithfromkey) { - t.Errorf("IsOptsWithFromKey = false, expected true") - } - - optswithprefix := []OpOption{WithPrefix()} - if IsOptsWithFromKey(optswithprefix) { - t.Errorf("IsOptsWithFromKey = true, expected false") - } -} diff --git a/client/v3/options.go b/client/v3/options.go deleted file mode 100644 index cc10a03d76d..00000000000 --- a/client/v3/options.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "math" - "time" - - "google.golang.org/grpc" -) - -var ( - // client-side handling retrying of request failures where data was not written to the wire or - // where server indicates it did not process the data. gRPC default is "WaitForReady(false)" - // but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to - // transient failures. 
- defaultWaitForReady = grpc.WaitForReady(true) - - // client-side request send limit, gRPC default is math.MaxInt32 - // Make sure that "client-side send limit < server-side default send/recv limit" - // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes - defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) - - // client-side response receive limit, gRPC default is 4MB - // Make sure that "client-side receive limit >= server-side default send/recv limit" - // because range response can easily exceed request send limits - // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway - defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) - - // client-side non-streaming retry limit, only applied to requests where server responds with - // a error code clearly indicating it was unable to process the request such as codes.Unavailable. - // If set to 0, retry is disabled. - defaultUnaryMaxRetries uint = 100 - - // client-side streaming retry limit, only applied to requests where server responds with - // a error code clearly indicating it was unable to process the request such as codes.Unavailable. - // If set to 0, retry is disabled. - defaultStreamMaxRetries = ^uint(0) // max uint - - // client-side retry backoff wait between requests. - defaultBackoffWaitBetween = 25 * time.Millisecond - - // client-side retry backoff default jitter fraction. - defaultBackoffJitterFraction = 0.10 -) - -// defaultCallOpts defines a list of default "gRPC.CallOption". -// Some options are exposed to "clientv3.Config". -// Defaults will be overridden by the settings in "clientv3.Config". 
-var defaultCallOpts = []grpc.CallOption{ - defaultWaitForReady, - defaultMaxCallSendMsgSize, - defaultMaxCallRecvMsgSize, -} - -// MaxLeaseTTL is the maximum lease TTL value -const MaxLeaseTTL = 9000000000 diff --git a/client/v3/ordering/doc.go b/client/v3/ordering/doc.go deleted file mode 100644 index 03588248bd6..00000000000 --- a/client/v3/ordering/doc.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ordering is a clientv3 wrapper that caches response header revisions -// to detect ordering violations from stale responses. Users may define a -// policy on how to handle the ordering violation, but typically the client -// should connect to another endpoint and reissue the request. -// -// The most common situation where an ordering violation happens is a client -// reconnects to a partitioned member and issues a serializable read. Since the -// partitioned member is likely behind the last member, it may return a Get -// response based on a store revision older than the store revision used to -// service a prior Get on the former endpoint. -// -// First, create a client: -// -// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) -// if err != nil { -// // handle error! 
-// } -// -// Next, override the client interface with the ordering wrapper: -// -// vf := func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error { -// return fmt.Errorf("ordering: issued %+v, got %+v, expected rev=%v", op, resp, prevRev) -// } -// cli.KV = ordering.NewKV(cli.KV, vf) -// -// Now calls using 'cli' will reject order violations with an error. -package ordering diff --git a/client/v3/ordering/kv.go b/client/v3/ordering/kv.go deleted file mode 100644 index 9075cbf9890..00000000000 --- a/client/v3/ordering/kv.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ordering - -import ( - "context" - "sync" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -// kvOrdering ensures that serialized requests do not return -// get with revisions less than the previous -// returned revision. 
-type kvOrdering struct { - clientv3.KV - orderViolationFunc OrderViolationFunc - prevRev int64 - revMu sync.RWMutex -} - -func NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering { - return &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}} -} - -func (kv *kvOrdering) getPrevRev() int64 { - kv.revMu.RLock() - defer kv.revMu.RUnlock() - return kv.prevRev -} - -func (kv *kvOrdering) setPrevRev(currRev int64) { - kv.revMu.Lock() - defer kv.revMu.Unlock() - if currRev > kv.prevRev { - kv.prevRev = currRev - } -} - -func (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { - // prevRev is stored in a local variable in order to record the prevRev - // at the beginning of the Get operation, because concurrent - // access to kvOrdering could change the prevRev field in the - // middle of the Get operation. - prevRev := kv.getPrevRev() - op := clientv3.OpGet(key, opts...) - for { - r, err := kv.KV.Do(ctx, op) - if err != nil { - return nil, err - } - resp := r.Get() - if resp.Header.Revision == prevRev { - return resp, nil - } else if resp.Header.Revision > prevRev { - kv.setPrevRev(resp.Header.Revision) - return resp, nil - } - err = kv.orderViolationFunc(op, r, prevRev) - if err != nil { - return nil, err - } - } -} - -func (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn { - return &txnOrdering{ - kv.KV.Txn(ctx), - kv, - ctx, - sync.Mutex{}, - []clientv3.Cmp{}, - []clientv3.Op{}, - []clientv3.Op{}, - } -} - -// txnOrdering ensures that serialized requests do not return -// txn responses with revisions less than the previous -// returned revision. -type txnOrdering struct { - clientv3.Txn - *kvOrdering - ctx context.Context - mu sync.Mutex - cmps []clientv3.Cmp - thenOps []clientv3.Op - elseOps []clientv3.Op -} - -func (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - txn.cmps = cs - txn.Txn.If(cs...) 
- return txn -} - -func (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - txn.thenOps = ops - txn.Txn.Then(ops...) - return txn -} - -func (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - txn.elseOps = ops - txn.Txn.Else(ops...) - return txn -} - -func (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) { - // prevRev is stored in a local variable in order to record the prevRev - // at the beginning of the Commit operation, because concurrent - // access to txnOrdering could change the prevRev field in the - // middle of the Commit operation. - prevRev := txn.getPrevRev() - opTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps) - for { - opResp, err := txn.KV.Do(txn.ctx, opTxn) - if err != nil { - return nil, err - } - txnResp := opResp.Txn() - if txnResp.Header.Revision >= prevRev { - txn.setPrevRev(txnResp.Header.Revision) - return txnResp, nil - } - err = txn.orderViolationFunc(opTxn, opResp, prevRev) - if err != nil { - return nil, err - } - } -} diff --git a/client/v3/ordering/kv_test.go b/client/v3/ordering/kv_test.go deleted file mode 100644 index 2168c315752..00000000000 --- a/client/v3/ordering/kv_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ordering - -import ( - "context" - gContext "context" - "sync" - "testing" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" -) - -type mockKV struct { - clientv3.KV - response clientv3.OpResponse -} - -func (kv *mockKV) Do(ctx gContext.Context, op clientv3.Op) (clientv3.OpResponse, error) { - return kv.response, nil -} - -var rangeTests = []struct { - prevRev int64 - response *clientv3.GetResponse -}{ - { - 5, - &clientv3.GetResponse{ - Header: &pb.ResponseHeader{ - Revision: 5, - }, - }, - }, - { - 5, - &clientv3.GetResponse{ - Header: &pb.ResponseHeader{ - Revision: 4, - }, - }, - }, - { - 5, - &clientv3.GetResponse{ - Header: &pb.ResponseHeader{ - Revision: 6, - }, - }, - }, -} - -func TestKvOrdering(t *testing.T) { - for i, tt := range rangeTests { - mKV := &mockKV{clientv3.NewKVFromKVClient(nil, nil), tt.response.OpResponse()} - kv := &kvOrdering{ - mKV, - func(r *clientv3.GetResponse) OrderViolationFunc { - return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error { - r.Header.Revision++ - return nil - } - }(tt.response), - tt.prevRev, - sync.RWMutex{}, - } - res, err := kv.Get(context.TODO(), "mockKey") - if err != nil { - t.Errorf("#%d: expected response %+v, got error %+v", i, tt.response, err) - } - if rev := res.Header.Revision; rev < tt.prevRev { - t.Errorf("#%d: expected revision %d, got %d", i, tt.prevRev, rev) - } - } -} - -var txnTests = []struct { - prevRev int64 - response *clientv3.TxnResponse -}{ - { - 5, - &clientv3.TxnResponse{ - Header: &pb.ResponseHeader{ - Revision: 5, - }, - }, - }, - { - 5, - &clientv3.TxnResponse{ - Header: &pb.ResponseHeader{ - Revision: 8, - }, - }, - }, - { - 5, - &clientv3.TxnResponse{ - Header: &pb.ResponseHeader{ - Revision: 4, - }, - }, - }, -} - -func TestTxnOrdering(t *testing.T) { - for i, tt := range txnTests { - mKV := &mockKV{clientv3.NewKVFromKVClient(nil, nil), tt.response.OpResponse()} - kv := &kvOrdering{ - mKV, - func(r 
*clientv3.TxnResponse) OrderViolationFunc { - return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error { - r.Header.Revision++ - return nil - } - }(tt.response), - tt.prevRev, - sync.RWMutex{}, - } - txn := &txnOrdering{ - kv.Txn(context.Background()), - kv, - context.Background(), - sync.Mutex{}, - []clientv3.Cmp{}, - []clientv3.Op{}, - []clientv3.Op{}, - } - res, err := txn.Commit() - if err != nil { - t.Errorf("#%d: expected response %+v, got error %+v", i, tt.response, err) - } - if rev := res.Header.Revision; rev < tt.prevRev { - t.Errorf("#%d: expected revision %d, got %d", i, tt.prevRev, rev) - } - } -} diff --git a/client/v3/ordering/util.go b/client/v3/ordering/util.go deleted file mode 100644 index 701cc709616..00000000000 --- a/client/v3/ordering/util.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ordering - -import ( - "errors" - "sync/atomic" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error - -var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision") - -func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFunc { - violationCount := int32(0) - return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error { - // Each request is assigned by round-robin load-balancer's picker to a different - // endpoints. If we cycled them 5 times (even with some level of concurrency), - // with high probability no endpoint points on a member with fresh data. - // TODO: Ideally we should track members (resp.opp.Header) that returned - // stale result and explicitly temporarily disable them in 'picker'. - if atomic.LoadInt32(&violationCount) > int32(5*len(c.Endpoints())) { - return ErrNoGreaterRev - } - atomic.AddInt32(&violationCount, 1) - return nil - } -} diff --git a/client/v3/retry_interceptor_test.go b/client/v3/retry_interceptor_test.go deleted file mode 100644 index 6746f10adca..00000000000 --- a/client/v3/retry_interceptor_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "testing" - - grpccredentials "google.golang.org/grpc/credentials" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/v3/credentials" -) - -type dummyAuthTokenBundle struct{} - -func (d dummyAuthTokenBundle) TransportCredentials() grpccredentials.TransportCredentials { - return nil -} - -func (d dummyAuthTokenBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials { - return nil -} - -func (d dummyAuthTokenBundle) NewWithMode(mode string) (grpccredentials.Bundle, error) { - return nil, nil -} - -func (d dummyAuthTokenBundle) UpdateAuthToken(token string) { -} - -func TestClientShouldRefreshToken(t *testing.T) { - type fields struct { - authTokenBundle credentials.Bundle - } - type args struct { - err error - callOpts *options - } - - optsWithTrue := &options{ - retryAuth: true, - } - optsWithFalse := &options{ - retryAuth: false, - } - - tests := []struct { - name string - fields fields - args args - want bool - }{ - { - name: "ErrUserEmpty and non nil authTokenBundle", - fields: fields{ - authTokenBundle: &dummyAuthTokenBundle{}, - }, - args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue}, - want: true, - }, - { - name: "ErrUserEmpty and nil authTokenBundle", - fields: fields{ - authTokenBundle: nil, - }, - args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue}, - want: false, - }, - { - name: "ErrGRPCInvalidAuthToken and retryAuth", - fields: fields{ - authTokenBundle: nil, - }, - args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithTrue}, - want: true, - }, - { - name: "ErrGRPCInvalidAuthToken and !retryAuth", - fields: fields{ - authTokenBundle: nil, - }, - args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithFalse}, - want: false, - }, - { - name: "ErrGRPCAuthOldRevision and retryAuth", - fields: fields{ - authTokenBundle: nil, - }, - args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithTrue}, - want: true, - }, - { - name: "ErrGRPCAuthOldRevision and !retryAuth", - fields: fields{ - authTokenBundle: nil, - 
}, - args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithFalse}, - want: false, - }, - { - name: "Other error and retryAuth", - fields: fields{ - authTokenBundle: nil, - }, - args: args{rpctypes.ErrGRPCAuthFailed, optsWithTrue}, - want: false, - }, - { - name: "Other error and !retryAuth", - fields: fields{ - authTokenBundle: nil, - }, - args: args{rpctypes.ErrGRPCAuthFailed, optsWithFalse}, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - authTokenBundle: tt.fields.authTokenBundle, - } - if got := c.shouldRefreshToken(tt.args.err, tt.args.callOpts); got != tt.want { - t.Errorf("shouldRefreshToken() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/client/v3/snapshot/v3_snapshot.go b/client/v3/snapshot/v3_snapshot.go deleted file mode 100644 index 3e36198422e..00000000000 --- a/client/v3/snapshot/v3_snapshot.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snapshot - -import ( - "context" - "crypto/sha256" - "fmt" - "io" - "os" - "time" - - "github.com/dustin/go-humanize" - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - clientv3 "go.etcd.io/etcd/client/v3" -) - -// hasChecksum returns "true" if the file size "n" -// has appended sha256 hash digest. 
-func hasChecksum(n int64) bool { - // 512 is chosen because it's a minimum disk sector size - // smaller than (and multiplies to) OS page size in most systems - return (n % 512) == sha256.Size -} - -// SaveWithVersion fetches snapshot from remote etcd server, saves data -// to target path and returns server version. If the context "ctx" is canceled or timed out, -// snapshot save stream will error out (e.g. context.Canceled, -// context.DeadlineExceeded). Make sure to specify only one endpoint -// in client configuration. Snapshot API must be requested to a -// selected node, and saved snapshot is the point-in-time state of -// the selected node. -// Etcd ", v1), -// Compare(Version(k1), "=", 2) -// ).Then( -// OpPut(k2,v2), OpPut(k3,v3) -// ).Else( -// OpPut(k4,v4), OpPut(k5,v5) -// ).Commit() -type Txn interface { - // If takes a list of comparison. If all comparisons passed in succeed, - // the operations passed into Then() will be executed. Or the operations - // passed into Else() will be executed. - If(cs ...Cmp) Txn - - // Then takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() succeed. - Then(ops ...Op) Txn - - // Else takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() fail. - Else(ops ...Op) Txn - - // Commit tries to commit the transaction. 
- Commit() (*TxnResponse, error) -} - -type txn struct { - kv *kv - ctx context.Context - - mu sync.Mutex - cif bool - cthen bool - celse bool - - isWrite bool - - cmps []*pb.Compare - - sus []*pb.RequestOp - fas []*pb.RequestOp - - callOpts []grpc.CallOption -} - -func (txn *txn) If(cs ...Cmp) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cif { - panic("cannot call If twice!") - } - - if txn.cthen { - panic("cannot call If after Then!") - } - - if txn.celse { - panic("cannot call If after Else!") - } - - txn.cif = true - - for i := range cs { - txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) - } - - return txn -} - -func (txn *txn) Then(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cthen { - panic("cannot call Then twice!") - } - if txn.celse { - panic("cannot call Then after Else!") - } - - txn.cthen = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.sus = append(txn.sus, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Else(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.celse { - panic("cannot call Else twice!") - } - - txn.celse = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.fas = append(txn.fas, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Commit() (*TxnResponse, error) { - txn.mu.Lock() - defer txn.mu.Unlock() - - r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - - var resp *pb.TxnResponse - var err error - resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) 
- if err != nil { - return nil, toErr(txn.ctx, err) - } - return (*TxnResponse)(resp), nil -} diff --git a/client/v3/txn_test.go b/client/v3/txn_test.go deleted file mode 100644 index 0ee6e71d6be..00000000000 --- a/client/v3/txn_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestTxnPanics(t *testing.T) { - testutil.RegisterLeakDetection(t) - - kv := &kv{} - - errc := make(chan string, 6) - df := func() { - if s := recover(); s != nil { - errc <- s.(string) - } - } - - cmp := Compare(CreateRevision("foo"), "=", 0) - op := OpPut("foo", "bar") - - tests := []struct { - f func() - - err string - }{ - { - f: func() { - defer df() - kv.Txn(context.TODO()).If(cmp).If(cmp) - }, - - err: "cannot call If twice!", - }, - { - f: func() { - defer df() - kv.Txn(context.TODO()).Then(op).If(cmp) - }, - - err: "cannot call If after Then!", - }, - { - f: func() { - defer df() - kv.Txn(context.TODO()).Else(op).If(cmp) - }, - - err: "cannot call If after Else!", - }, - { - f: func() { - defer df() - kv.Txn(context.TODO()).Then(op).Then(op) - }, - - err: "cannot call Then twice!", - }, - { - f: func() { - defer df() - kv.Txn(context.TODO()).Else(op).Then(op) - }, - - err: "cannot call Then after Else!", - }, - { - f: func() { - defer df() - kv.Txn(context.TODO()).Else(op).Else(op) - 
}, - - err: "cannot call Else twice!", - }, - } - - for i, tt := range tests { - go tt.f() - select { - case err := <-errc: - if err != tt.err { - t.Errorf("#%d: got %s, wanted %s", i, err, tt.err) - } - case <-time.After(time.Second): - t.Errorf("#%d: did not panic, wanted panic %s", i, tt.err) - } - } -} diff --git a/client/v3/utils.go b/client/v3/utils.go deleted file mode 100644 index 850275877d3..00000000000 --- a/client/v3/utils.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "math/rand" - "time" -) - -// jitterUp adds random jitter to the duration. -// -// This adds or subtracts time from the duration within a given jitter fraction. -// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) -// -// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils -func jitterUp(duration time.Duration, jitter float64) time.Duration { - multiplier := jitter * (rand.Float64()*2 - 1) - return time.Duration(float64(duration) * (1 + multiplier)) -} diff --git a/client/v3/watch.go b/client/v3/watch.go deleted file mode 100644 index 276955cd6e5..00000000000 --- a/client/v3/watch.go +++ /dev/null @@ -1,1074 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" -) - -const ( - EventTypeDelete = mvccpb.DELETE - EventTypePut = mvccpb.PUT - - closeSendErrTimeout = 250 * time.Millisecond - - // AutoWatchID is the watcher ID passed in WatchStream.Watch when no - // user-provided ID is available. If pass, an ID will automatically be assigned. - AutoWatchID = 0 - - // InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch. - InvalidWatchID = -1 -) - -var ( - errMsgGRPCInvalidAuthToken = v3rpc.ErrGRPCInvalidAuthToken.Error() - errMsgGRPCAuthOldRevision = v3rpc.ErrGRPCAuthOldRevision.Error() -) - -type Event mvccpb.Event - -type WatchChan <-chan WatchResponse - -type Watcher interface { - // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. If revisions waiting to be sent over the - // watch are compacted, then the watch will be canceled by the server, the - // client will post a compacted error watch response, and the channel will close. - // If the requested revision is 0 or unspecified, the returned channel will - // return watch events that happen after the server receives the watch request. 
- // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed, - // and "WatchResponse" from this closed channel has zero events and nil "Err()". - // The context "ctx" MUST be canceled, as soon as watcher is no longer being used, - // to release the associated resources. - // - // If the context is "context.Background/TODO", returned "WatchChan" will - // not be closed and block until event is triggered, except when server - // returns a non-recoverable error (e.g. ErrCompacted). - // For example, when context passed with "WithRequireLeader" and the - // connected server has no leader (e.g. due to network partition), - // error "etcdserver: no leader" (ErrNoLeader) will be returned, - // and then "WatchChan" is closed with non-nil "Err()". - // In order to prevent a watch stream being stuck in a partitioned node, - // make sure to wrap context with "WithRequireLeader". - // - // Otherwise, as long as the context has not been canceled or timed out, - // watch will retry on other recoverable errors forever until reconnected. - // - // TODO: explicitly set context error in the last "WatchResponse" message and close channel? - // Currently, client contexts are overwritten with "valCtx" that never closes. - // TODO(v3.4): configure watch retry policy, limit maximum retry number - // (see https://github.com/etcd-io/etcd/issues/8980) - Watch(ctx context.Context, key string, opts ...OpOption) WatchChan - - // RequestProgress requests a progress notify response be sent in all watch channels. - RequestProgress(ctx context.Context) error - - // Close closes the watcher and cancels all watch requests. - Close() error -} - -type WatchResponse struct { - Header pb.ResponseHeader - Events []*Event - - // CompactRevision is the minimum revision the watcher may receive. - CompactRevision int64 - - // Canceled is used to indicate watch failure. 
- // If the watch failed and the stream was about to close, before the channel is closed, - // the channel sends a final response that has Canceled set to true with a non-nil Err(). - Canceled bool - - // Created is used to indicate the creation of the watcher. - Created bool - - closeErr error - - // cancelReason is a reason of canceling watch - cancelReason string -} - -// IsCreate returns true if the event tells that the key is newly created. -func (e *Event) IsCreate() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision -} - -// IsModify returns true if the event tells that a new value is put on existing key. -func (e *Event) IsModify() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision -} - -// Err is the error value if this WatchResponse holds an error. -func (wr *WatchResponse) Err() error { - switch { - case wr.closeErr != nil: - return v3rpc.Error(wr.closeErr) - case wr.CompactRevision != 0: - return v3rpc.ErrCompacted - case wr.Canceled: - if len(wr.cancelReason) != 0 { - return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) - } - return v3rpc.ErrFutureRev - } - return nil -} - -// IsProgressNotify returns true if the WatchResponse is progress notification. -func (wr *WatchResponse) IsProgressNotify() bool { - return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 -} - -// watcher implements the Watcher interface -type watcher struct { - remote pb.WatchClient - callOpts []grpc.CallOption - - // mu protects the grpc streams map - mu sync.Mutex - - // streams holds all the active grpc streams keyed by ctx value. - streams map[string]*watchGrpcStream - lg *zap.Logger -} - -// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
-type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient - callOpts []grpc.CallOption - - // ctx controls internal remote.Watch requests - ctx context.Context - // ctxKey is the key used when looking up this stream's context - ctxKey string - cancel context.CancelFunc - - // substreams holds all active watchers on this grpc stream - substreams map[int64]*watcherStream - // resuming holds all resuming watchers on this grpc stream - resuming []*watcherStream - - // reqc sends a watch request from Watch() to the main goroutine - reqc chan watchStreamRequest - // respc receives data from the watch client - respc chan *pb.WatchResponse - // donec closes to broadcast shutdown - donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconnect logic - errc chan error - // closingc gets the watcherStream of closing watchers - closingc chan *watcherStream - // wg is Done when all substream goroutines have exited - wg sync.WaitGroup - - // resumec closes to signal that all substreams should begin resuming - resumec chan struct{} - // closeErr is the error that closed the watch stream - closeErr error - - lg *zap.Logger -} - -// watchStreamRequest is a union of the supported watch request operation types -type watchStreamRequest interface { - toPB() *pb.WatchRequest -} - -// watchRequest is issued by the subscriber to start a new watcher -type watchRequest struct { - ctx context.Context - key string - end string - rev int64 - - // send created notification event if this field is true - createdNotify bool - // progressNotify is for progress updates - progressNotify bool - // fragmentation should be disabled by default - // if true, split watch events when total exceeds - // "--max-request-bytes" flag value + 512-byte - fragment bool - - // filters is the list of events to filter out - filters []pb.WatchCreateRequest_FilterType - // get the previous key-value pair before the event happens - prevKV bool - // retc receives a chan 
WatchResponse once the watcher is established - retc chan chan WatchResponse -} - -// progressRequest is issued by the subscriber to request watch progress -type progressRequest struct { -} - -// watcherStream represents a registered watcher -type watcherStream struct { - // initReq is the request that initiated this request - initReq watchRequest - - // outc publishes watch responses to subscriber - outc chan WatchResponse - // recvc buffers watch responses before publishing - recvc chan *WatchResponse - // donec closes when the watcherStream goroutine stops. - donec chan struct{} - // closing is set to true when stream should be scheduled to shutdown. - closing bool - // id is the registered watch id on the grpc stream - id int64 - - // buf holds all events received from etcd but not yet consumed by the client - buf []*WatchResponse -} - -func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) -} - -func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { - w := &watcher{ - remote: wc, - streams: make(map[string]*watchGrpcStream), - } - if c != nil { - w.callOpts = c.callOpts - w.lg = c.lg - } - return w -} - -// never closes -var valCtxCh = make(chan struct{}) -var zeroTime = time.Unix(0, 0) - -// ctx with only the values; never Done -type valCtx struct{ context.Context } - -func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } -func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } -func (vc *valCtx) Err() error { return nil } - -func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { - ctx, cancel := context.WithCancel(&valCtx{inctx}) - wgs := &watchGrpcStream{ - owner: w, - remote: w.remote, - callOpts: w.callOpts, - ctx: ctx, - ctxKey: streamKeyFromCtx(inctx), - cancel: cancel, - substreams: make(map[int64]*watcherStream), - respc: make(chan *pb.WatchResponse), - reqc: make(chan watchStreamRequest), - donec: make(chan struct{}), - errc: make(chan error, 
1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), - lg: w.lg, - } - go wgs.run() - return wgs -} - -// Watch posts a watch request to run() and waits for a new watcher channel -func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { - ow := opWatch(key, opts...) - - var filters []pb.WatchCreateRequest_FilterType - if ow.filterPut { - filters = append(filters, pb.WatchCreateRequest_NOPUT) - } - if ow.filterDelete { - filters = append(filters, pb.WatchCreateRequest_NODELETE) - } - - wr := &watchRequest{ - ctx: ctx, - createdNotify: ow.createdNotify, - key: string(ow.key), - end: string(ow.end), - rev: ow.rev, - progressNotify: ow.progressNotify, - fragment: ow.fragment, - filters: filters, - prevKV: ow.prevKV, - retc: make(chan chan WatchResponse, 1), - } - - ok := false - ctxKey := streamKeyFromCtx(ctx) - - var closeCh chan WatchResponse - for { - // find or allocate appropriate grpc watch stream - w.mu.Lock() - if w.streams == nil { - // closed - w.mu.Unlock() - ch := make(chan WatchResponse) - close(ch) - return ch - } - wgs := w.streams[ctxKey] - if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) - w.streams[ctxKey] = wgs - } - donec := wgs.donec - reqc := wgs.reqc - w.mu.Unlock() - - // couldn't create channel; return closed channel - if closeCh == nil { - closeCh = make(chan WatchResponse, 1) - } - - // submit request - select { - case reqc <- wr: - ok = true - case <-wr.ctx.Done(): - ok = false - case <-donec: - ok = false - if wgs.closeErr != nil { - closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - continue - } - - // receive channel - if ok { - select { - case ret := <-wr.retc: - return ret - case <-ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - continue - } - } - break - } - - 
close(closeCh) - return closeCh -} - -func (w *watcher) Close() (err error) { - w.mu.Lock() - streams := w.streams - w.streams = nil - w.mu.Unlock() - for _, wgs := range streams { - if werr := wgs.close(); werr != nil { - err = werr - } - } - // Consider context.Canceled as a successful close - if err == context.Canceled { - err = nil - } - return err -} - -// RequestProgress requests a progress notify response be sent in all watch channels. -func (w *watcher) RequestProgress(ctx context.Context) (err error) { - ctxKey := streamKeyFromCtx(ctx) - - w.mu.Lock() - if w.streams == nil { - w.mu.Unlock() - return errors.New("no stream found for context") - } - wgs := w.streams[ctxKey] - if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) - w.streams[ctxKey] = wgs - } - donec := wgs.donec - reqc := wgs.reqc - w.mu.Unlock() - - pr := &progressRequest{} - - select { - case reqc <- pr: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-donec: - if wgs.closeErr != nil { - return wgs.closeErr - } - // retry; may have dropped stream from no ctxs - return w.RequestProgress(ctx) - } -} - -func (w *watchGrpcStream) close() (err error) { - w.cancel() - <-w.donec - select { - case err = <-w.errc: - default: - } - return toErr(w.ctx, err) -} - -func (w *watcher) closeStream(wgs *watchGrpcStream) { - w.mu.Lock() - close(wgs.donec) - wgs.cancel() - if w.streams != nil { - delete(w.streams, wgs.ctxKey) - } - w.mu.Unlock() -} - -func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { - // check watch ID for backward compatibility (<= v3.3) - if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") { - w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) - // failed; no channel - close(ws.recvc) - return - } - ws.id = resp.WatchId - w.substreams[ws.id] = ws -} - -func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { - select { - case ws.outc <- *resp: - case <-ws.initReq.ctx.Done(): - case 
<-time.After(closeSendErrTimeout): - } - close(ws.outc) -} - -func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { - // send channel response in case stream was never established - select { - case ws.initReq.retc <- ws.outc: - default: - } - // close subscriber's channel - if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { - go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr}) - } else if ws.outc != nil { - close(ws.outc) - } - if ws.id != InvalidWatchID { - delete(w.substreams, ws.id) - return - } - for i := range w.resuming { - if w.resuming[i] == ws { - w.resuming[i] = nil - return - } - } -} - -// run is the root of the goroutines for managing a watcher client -func (w *watchGrpcStream) run() { - var wc pb.Watch_WatchClient - var closeErr error - - // substreams marked to close but goroutine still running; needed for - // avoiding double-closing recvc on grpc stream teardown - closing := make(map[*watcherStream]struct{}) - - defer func() { - w.closeErr = closeErr - // shutdown substreams and resuming substreams - for _, ws := range w.substreams { - if _, ok := closing[ws]; !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - for _, ws := range w.resuming { - if _, ok := closing[ws]; ws != nil && !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - w.joinSubstreams() - for range closing { - w.closeSubstream(<-w.closingc) - } - w.wg.Wait() - w.owner.closeStream(w) - }() - - // start a stream with the etcd grpc server - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - - cancelSet := make(map[int64]struct{}) - - var cur *pb.WatchResponse - backoff := time.Millisecond - for { - select { - // Watch() requested - case req := <-w.reqc: - switch wreq := req.(type) { - case *watchRequest: - outc := make(chan WatchResponse, 1) - // TODO: pass custom watch ID? 
- ws := &watcherStream{ - initReq: *wreq, - id: InvalidWatchID, - outc: outc, - // unbuffered so resumes won't cause repeat events - recvc: make(chan *WatchResponse), - } - - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - - // queue up for watcher creation/resume - w.resuming = append(w.resuming, ws) - if len(w.resuming) == 1 { - // head of resume queue, can register a new watcher - if err := wc.Send(ws.initReq.toPB()); err != nil { - w.lg.Debug("error when sending request", zap.Error(err)) - } - } - case *progressRequest: - if err := wc.Send(wreq.toPB()); err != nil { - w.lg.Debug("error when sending request", zap.Error(err)) - } - } - - // new events from the watch client - case pbresp := <-w.respc: - if cur == nil || pbresp.Created || pbresp.Canceled { - cur = pbresp - } else if cur != nil && cur.WatchId == pbresp.WatchId { - // merge new events - cur.Events = append(cur.Events, pbresp.Events...) - // update "Fragment" field; last response with "Fragment" == false - cur.Fragment = pbresp.Fragment - } - - switch { - case pbresp.Created: - if pbresp.Canceled && shouldRetryWatch(pbresp.CancelReason) { - var newErr error - if wc, newErr = w.newWatchClient(); newErr != nil { - w.lg.Error("failed to create a new watch client", zap.Error(newErr)) - return - } - - if len(w.resuming) != 0 { - if ws := w.resuming[0]; ws != nil { - if err := wc.Send(ws.initReq.toPB()); err != nil { - w.lg.Debug("error when sending request", zap.Error(err)) - } - } - } - - cur = nil - continue - } - - // response to head of queue creation - if len(w.resuming) != 0 { - if ws := w.resuming[0]; ws != nil { - w.addSubstream(pbresp, ws) - w.dispatchEvent(pbresp) - w.resuming[0] = nil - } - } - - if ws := w.nextResume(); ws != nil { - if err := wc.Send(ws.initReq.toPB()); err != nil { - w.lg.Debug("error when sending request", zap.Error(err)) - } - } - - // reset for next iteration - cur = nil - - case pbresp.Canceled && pbresp.CompactRevision == 0: - 
delete(cancelSet, pbresp.WatchId) - if ws, ok := w.substreams[pbresp.WatchId]; ok { - // signal to stream goroutine to update closingc - close(ws.recvc) - closing[ws] = struct{}{} - } - - // reset for next iteration - cur = nil - - case cur.Fragment: - // watch response events are still fragmented - // continue to fetch next fragmented event arrival - continue - - default: - // dispatch to appropriate watch stream - ok := w.dispatchEvent(cur) - - // reset for next iteration - cur = nil - - if ok { - break - } - - // watch response on unexpected watch id; cancel id - if _, ok := cancelSet[pbresp.WatchId]; ok { - break - } - - cancelSet[pbresp.WatchId] = struct{}{} - cr := &pb.WatchRequest_CancelRequest{ - CancelRequest: &pb.WatchCancelRequest{ - WatchId: pbresp.WatchId, - }, - } - req := &pb.WatchRequest{RequestUnion: cr} - w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId)) - if err := wc.Send(req); err != nil { - w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err)) - } - } - - // watch client failed on Recv; spawn another if possible - case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { - closeErr = err - return - } - backoff = w.backoffIfUnavailable(backoff, err) - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - if ws := w.nextResume(); ws != nil { - if err := wc.Send(ws.initReq.toPB()); err != nil { - w.lg.Debug("error when sending request", zap.Error(err)) - } - } - cancelSet = make(map[int64]struct{}) - - case <-w.ctx.Done(): - return - - case ws := <-w.closingc: - w.closeSubstream(ws) - delete(closing, ws) - // no more watchers on this stream, shutdown, skip cancellation - if len(w.substreams)+len(w.resuming) == 0 { - return - } - if ws.id != InvalidWatchID { - // client is closing an established watch; close it on the server proactively instead of waiting - // to close when the next message 
arrives - cancelSet[ws.id] = struct{}{} - cr := &pb.WatchRequest_CancelRequest{ - CancelRequest: &pb.WatchCancelRequest{ - WatchId: ws.id, - }, - } - req := &pb.WatchRequest{RequestUnion: cr} - w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id)) - if err := wc.Send(req); err != nil { - w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err)) - } - } - } - } -} - -func shouldRetryWatch(cancelReason string) bool { - if cancelReason == "" { - return false - } - return (cancelReason == errMsgGRPCInvalidAuthToken) || - (cancelReason == errMsgGRPCAuthOldRevision) -} - -// nextResume chooses the next resuming to register with the grpc stream. Abandoned -// streams are marked as nil in the queue since the head must wait for its inflight registration. -func (w *watchGrpcStream) nextResume() *watcherStream { - for len(w.resuming) != 0 { - if w.resuming[0] != nil { - return w.resuming[0] - } - w.resuming = w.resuming[1:len(w.resuming)] - } - return nil -} - -// dispatchEvent sends a WatchResponse to the appropriate watcher stream -func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - events := make([]*Event, len(pbresp.Events)) - for i, ev := range pbresp.Events { - events[i] = (*Event)(ev) - } - // TODO: return watch ID? - wr := &WatchResponse{ - Header: *pbresp.Header, - Events: events, - CompactRevision: pbresp.CompactRevision, - Created: pbresp.Created, - Canceled: pbresp.Canceled, - cancelReason: pbresp.CancelReason, - } - - // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to - // indicate they should be broadcast. - if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID { - return w.broadcastResponse(wr) - } - - return w.unicastResponse(wr, pbresp.WatchId) - -} - -// broadcastResponse send a watch response to all watch substreams. 
-func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { - for _, ws := range w.substreams { - select { - case ws.recvc <- wr: - case <-ws.donec: - } - } - return true -} - -// unicastResponse sends a watch response to a specific watch substream. -func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { - ws, ok := w.substreams[watchId] - if !ok { - return false - } - select { - case ws.recvc <- wr: - case <-ws.donec: - return false - } - return true -} - -// serveWatchClient forwards messages from the grpc stream to run() -func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { - for { - resp, err := wc.Recv() - if err != nil { - select { - case w.errc <- err: - case <-w.donec: - } - return - } - select { - case w.respc <- resp: - case <-w.donec: - return - } - } -} - -// serveSubstream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { - if ws.closing { - panic("created substream goroutine but substream is closing") - } - - // nextRev is the minimum expected next revision - nextRev := ws.initReq.rev - resuming := false - defer func() { - if !resuming { - ws.closing = true - } - close(ws.donec) - if !resuming { - w.closingc <- ws - } - w.wg.Done() - }() - - emptyWr := &WatchResponse{} - for { - curWr := emptyWr - outc := ws.outc - - if len(ws.buf) > 0 { - curWr = ws.buf[0] - } else { - outc = nil - } - select { - case outc <- *curWr: - if ws.buf[0].Err() != nil { - return - } - ws.buf[0] = nil - ws.buf = ws.buf[1:] - case wr, ok := <-ws.recvc: - if !ok { - // shutdown from closeSubstream - return - } - - if wr.Created { - if ws.initReq.retc != nil { - ws.initReq.retc <- ws.outc - // to prevent next write from taking the slot in buffered channel - // and posting duplicate create events - ws.initReq.retc = nil - - // send first creation event only if requested - if ws.initReq.createdNotify { - ws.outc <- *wr - } - // once the 
watch channel is returned, a current revision - // watch must resume at the store revision. This is necessary - // for the following case to work as expected: - // wch := m1.Watch("a") - // m2.Put("a", "b") - // <-wch - // If the revision is only bound on the first observed event, - // if wch is disconnected before the Put is issued, then reconnects - // after it is committed, it'll miss the Put. - if ws.initReq.rev == 0 { - nextRev = wr.Header.Revision - } - } - } else { - // current progress of watch; <= store revision - nextRev = wr.Header.Revision - } - - if len(wr.Events) > 0 { - nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 - } - ws.initReq.rev = nextRev - - // created event is already sent above, - // watcher should not post duplicate events - if wr.Created { - continue - } - - // TODO pause channel if buffer gets too large - ws.buf = append(ws.buf, wr) - case <-w.ctx.Done(): - return - case <-ws.initReq.ctx.Done(): - return - case <-resumec: - resuming = true - return - } - } - // lazily send cancel message if events on missing id -} - -func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { - // mark all substreams as resuming - close(w.resumec) - w.resumec = make(chan struct{}) - w.joinSubstreams() - for _, ws := range w.substreams { - ws.id = InvalidWatchID - w.resuming = append(w.resuming, ws) - } - // strip out nils, if any - var resuming []*watcherStream - for _, ws := range w.resuming { - if ws != nil { - resuming = append(resuming, ws) - } - } - w.resuming = resuming - w.substreams = make(map[int64]*watcherStream) - - // connect to grpc stream while accepting watcher cancelation - stopc := make(chan struct{}) - donec := w.waitCancelSubstreams(stopc) - wc, err := w.openWatchClient() - close(stopc) - <-donec - - // serve all non-closing streams, even if there's a client error - // so that the teardown path can shutdown the streams as expected. 
- for _, ws := range w.resuming { - if ws.closing { - continue - } - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - } - - if err != nil { - return nil, v3rpc.Error(err) - } - - // receive data from new grpc stream - go w.serveWatchClient(wc) - return wc, nil -} - -func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { - var wg sync.WaitGroup - wg.Add(len(w.resuming)) - donec := make(chan struct{}) - for i := range w.resuming { - go func(ws *watcherStream) { - defer wg.Done() - if ws.closing { - if ws.initReq.ctx.Err() != nil && ws.outc != nil { - close(ws.outc) - ws.outc = nil - } - return - } - select { - case <-ws.initReq.ctx.Done(): - // closed ws will be removed from resuming - ws.closing = true - close(ws.outc) - ws.outc = nil - w.wg.Add(1) - go func() { - defer w.wg.Done() - w.closingc <- ws - }() - case <-stopc: - } - }(w.resuming[i]) - } - go func() { - defer close(donec) - wg.Wait() - }() - return donec -} - -// joinSubstreams waits for all substream goroutines to complete. -func (w *watchGrpcStream) joinSubstreams() { - for _, ws := range w.substreams { - <-ws.donec - } - for _, ws := range w.resuming { - if ws != nil { - <-ws.donec - } - } -} - -var maxBackoff = 100 * time.Millisecond - -func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration { - if isUnavailableErr(w.ctx, err) { - // retry, but backoff - if backoff < maxBackoff { - // 25% backoff factor - backoff = backoff + backoff/4 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - time.Sleep(backoff) - } - return backoff -} - -// openWatchClient retries opening a watch client until success or halt. 
-// manually retry in case "ws==nil && err==nil" -// TODO: remove FailFast=false -func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { - backoff := time.Millisecond - for { - select { - case <-w.ctx.Done(): - if err == nil { - return nil, w.ctx.Err() - } - return nil, err - default: - } - if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { - break - } - if isHaltErr(w.ctx, err) { - return nil, v3rpc.Error(err) - } - backoff = w.backoffIfUnavailable(backoff, err) - } - return ws, nil -} - -// toPB converts an internal watch request structure to its protobuf WatchRequest structure. -func (wr *watchRequest) toPB() *pb.WatchRequest { - req := &pb.WatchCreateRequest{ - StartRevision: wr.rev, - Key: []byte(wr.key), - RangeEnd: []byte(wr.end), - ProgressNotify: wr.progressNotify, - Filters: wr.filters, - PrevKv: wr.prevKV, - Fragment: wr.fragment, - } - cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} - return &pb.WatchRequest{RequestUnion: cr} -} - -// toPB converts an internal progress request structure to its protobuf WatchRequest structure. -func (pr *progressRequest) toPB() *pb.WatchRequest { - req := &pb.WatchProgressRequest{} - cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req} - return &pb.WatchRequest{RequestUnion: cr} -} - -func streamKeyFromCtx(ctx context.Context) string { - if md, ok := metadata.FromOutgoingContext(ctx); ok { - return fmt.Sprintf("%+v", md) - } - return "" -} diff --git a/client/v3/watch_test.go b/client/v3/watch_test.go deleted file mode 100644 index 0a94f08cd56..00000000000 --- a/client/v3/watch_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" -) - -func TestEvent(t *testing.T) { - tests := []struct { - ev *Event - isCreate bool - isModify bool - }{{ - ev: &Event{ - Type: EventTypePut, - Kv: &mvccpb.KeyValue{ - CreateRevision: 3, - ModRevision: 3, - }, - }, - isCreate: true, - }, { - ev: &Event{ - Type: EventTypePut, - Kv: &mvccpb.KeyValue{ - CreateRevision: 3, - ModRevision: 4, - }, - }, - isModify: true, - }} - for i, tt := range tests { - if tt.isCreate && !tt.ev.IsCreate() { - t.Errorf("#%d: event should be Create event", i) - } - if tt.isModify && !tt.ev.IsModify() { - t.Errorf("#%d: event should be Modify event", i) - } - } -} - -func TestShouldRetryWatch(t *testing.T) { - testCases := []struct { - name string - msg string - expectedRetry bool - }{ - { - name: "equal to ErrGRPCInvalidAuthToken", - msg: rpctypes.ErrGRPCInvalidAuthToken.Error(), - expectedRetry: true, - }, - { - name: "equal to ErrGRPCAuthOldRevision", - msg: rpctypes.ErrGRPCAuthOldRevision.Error(), - expectedRetry: true, - }, - { - name: "valid grpc error but not equal to ErrGRPCInvalidAuthToken or ErrGRPCAuthOldRevision", - msg: rpctypes.ErrGRPCUserEmpty.Error(), - expectedRetry: false, - }, - { - name: "invalid grpc error and not equal to ErrGRPCInvalidAuthToken or ErrGRPCAuthOldRevision", - msg: "whatever error message", - expectedRetry: false, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, 
tc.expectedRetry, shouldRetryWatch(tc.msg)) - }) - } -} diff --git a/client/v3/yaml/config.go b/client/v3/yaml/config.go deleted file mode 100644 index 99d07236433..00000000000 --- a/client/v3/yaml/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package yaml handles yaml-formatted clientv3 configuration data. -package yaml - -import ( - "crypto/tls" - "crypto/x509" - "os" - - "sigs.k8s.io/yaml" - - "go.etcd.io/etcd/client/pkg/v3/tlsutil" - clientv3 "go.etcd.io/etcd/client/v3" -) - -type yamlConfig struct { - clientv3.Config - - InsecureTransport bool `json:"insecure-transport"` - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` - Certfile string `json:"cert-file"` - Keyfile string `json:"key-file"` - TrustedCAfile string `json:"trusted-ca-file"` - - // CAfile is being deprecated. Use 'TrustedCAfile' instead. - // TODO: deprecate this in v4 - CAfile string `json:"ca-file"` -} - -// NewConfig creates a new clientv3.Config from a yaml file. 
-func NewConfig(fpath string) (*clientv3.Config, error) { - b, err := os.ReadFile(fpath) - if err != nil { - return nil, err - } - - yc := &yamlConfig{} - - err = yaml.Unmarshal(b, yc) - if err != nil { - return nil, err - } - - if yc.InsecureTransport { - return &yc.Config, nil - } - - var ( - cert *tls.Certificate - cp *x509.CertPool - ) - - if yc.Certfile != "" && yc.Keyfile != "" { - cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil) - if err != nil { - return nil, err - } - } - - if yc.TrustedCAfile != "" { - cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile}) - if err != nil { - return nil, err - } - } - - tlscfg := &tls.Config{ - MinVersion: tls.VersionTLS12, - InsecureSkipVerify: yc.InsecureSkipTLSVerify, - RootCAs: cp, - } - if cert != nil { - tlscfg.Certificates = []tls.Certificate{*cert} - } - yc.Config.TLS = tlscfg - - return &yc.Config, nil -} diff --git a/client/v3/yaml/config_test.go b/client/v3/yaml/config_test.go deleted file mode 100644 index ec8441b1b63..00000000000 --- a/client/v3/yaml/config_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package yaml - -import ( - "log" - "os" - "reflect" - "testing" - - "sigs.k8s.io/yaml" -) - -var ( - certPath = "../../../tests/fixtures/server.crt" - privateKeyPath = "../../../tests/fixtures/server.key.insecure" - caPath = "../../../tests/fixtures/ca.crt" -) - -func TestConfigFromFile(t *testing.T) { - tests := []struct { - ym *yamlConfig - - werr bool - }{ - { - &yamlConfig{}, - false, - }, - { - &yamlConfig{ - InsecureTransport: true, - }, - false, - }, - { - &yamlConfig{ - Keyfile: privateKeyPath, - Certfile: certPath, - TrustedCAfile: caPath, - InsecureSkipTLSVerify: true, - }, - false, - }, - { - &yamlConfig{ - Keyfile: "bad", - Certfile: "bad", - }, - true, - }, - { - &yamlConfig{ - Keyfile: privateKeyPath, - Certfile: certPath, - TrustedCAfile: "bad", - }, - true, - }, - } - - for i, tt := range tests { - tmpfile, err := os.CreateTemp("", "clientcfg") - if err != nil { - log.Fatal(err) - } - - b, err := yaml.Marshal(tt.ym) - if err != nil { - t.Fatal(err) - } - - _, err = tmpfile.Write(b) - if err != nil { - t.Fatal(err) - } - err = tmpfile.Close() - if err != nil { - t.Fatal(err) - } - - cfg, cerr := NewConfig(tmpfile.Name()) - if cerr != nil && !tt.werr { - t.Errorf("#%d: err = %v, want %v", i, cerr, tt.werr) - continue - } - if cerr != nil { - os.Remove(tmpfile.Name()) - continue - } - - if !reflect.DeepEqual(cfg.Endpoints, tt.ym.Endpoints) { - t.Errorf("#%d: endpoint = %v, want %v", i, cfg.Endpoints, tt.ym.Endpoints) - } - - if tt.ym.InsecureTransport != (cfg.TLS == nil) { - t.Errorf("#%d: insecureTransport = %v, want %v", i, cfg.TLS == nil, tt.ym.InsecureTransport) - } - - if !tt.ym.InsecureTransport { - if tt.ym.Certfile != "" && len(cfg.TLS.Certificates) == 0 { - t.Errorf("#%d: failed to load in cert", i) - } - if tt.ym.TrustedCAfile != "" && cfg.TLS.RootCAs == nil { - t.Errorf("#%d: failed to load in ca cert", i) - } - if cfg.TLS.InsecureSkipVerify != tt.ym.InsecureSkipTLSVerify { - t.Errorf("#%d: skipTLSVeify = %v, want %v", i, 
cfg.TLS.InsecureSkipVerify, tt.ym.InsecureSkipTLSVerify) - } - } - - os.Remove(tmpfile.Name()) - } -} diff --git a/client/pkg/fileutil/dir_unix.go b/client_sdk/pkg/fileutil/dir_unix.go similarity index 95% rename from client/pkg/fileutil/dir_unix.go rename to client_sdk/pkg/fileutil/dir_unix.go index add54c6315d..8c81de30bb0 100644 --- a/client/pkg/fileutil/dir_unix.go +++ b/client_sdk/pkg/fileutil/dir_unix.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !windows +// +build !windows package fileutil @@ -20,7 +21,7 @@ import "os" const ( // PrivateDirMode grants owner to make/remove files inside the directory. - PrivateDirMode = 0700 + PrivateDirMode = 0o700 ) // OpenDir opens a directory for syncing. diff --git a/client/pkg/fileutil/dir_windows.go b/client_sdk/pkg/fileutil/dir_windows.go similarity index 97% rename from client/pkg/fileutil/dir_windows.go rename to client_sdk/pkg/fileutil/dir_windows.go index fd3415d5944..3414e96fcac 100644 --- a/client/pkg/fileutil/dir_windows.go +++ b/client_sdk/pkg/fileutil/dir_windows.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build windows +// +build windows package fileutil @@ -23,7 +24,7 @@ import ( const ( // PrivateDirMode grants owner to make/remove files inside the directory. - PrivateDirMode = 0777 + PrivateDirMode = 0o777 ) // OpenDir opens a directory in windows with write access for syncing. diff --git a/client/pkg/fileutil/doc.go b/client_sdk/pkg/fileutil/doc.go similarity index 100% rename from client/pkg/fileutil/doc.go rename to client_sdk/pkg/fileutil/doc.go diff --git a/client_sdk/pkg/fileutil/fileutil.go b/client_sdk/pkg/fileutil/fileutil.go new file mode 100644 index 00000000000..4bac9fb596f --- /dev/null +++ b/client_sdk/pkg/fileutil/fileutil.go @@ -0,0 +1,168 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "go.uber.org/zap" +) + +const ( + // PrivateFileMode 授予所有者读/写文件的权限. + PrivateFileMode = 0o600 +) + +// IsDirWriteable checks if dir is writable by writing and removing a file +// to dir. It returns nil if dir is writable. +func IsDirWriteable(dir string) error { + f, err := filepath.Abs(filepath.Join(dir, ".touch")) + if err != nil { + return err + } + if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { + return err + } + return os.Remove(f) +} + +// TouchDirAll 与os.MkdirAll类似.如果任何目录不存在,它就用0700权限创建目录.TouchDirAll也确保给定的目录是可写的. +func TouchDirAll(dir string) error { + // 如果路径已经是一个目录,MkdirAll不做任何事情,并返回nil,所以,首先检查dir是否存在,并有预期的权限模式. + if Exist(dir) { + err := CheckDirPermission(dir, PrivateDirMode) + if err != nil { + lg, _ := zap.NewProduction() + if lg == nil { + lg = zap.NewExample() + } + lg.Warn("check file permission", zap.Error(err)) + } + } else { + err := os.MkdirAll(dir, PrivateDirMode) + if err != nil { + // if mkdirAll("a/text") and "text" is not + // a directory, this will return syscall.ENOTDIR + return err + } + } + + return IsDirWriteable(dir) +} + +// CreateDirAll is similar to TouchDirAll but returns error +// if the deepest directory was not empty. 
+func CreateDirAll(dir string) error { + err := TouchDirAll(dir) + if err == nil { + var ns []string + ns, err = ReadDir(dir) + if err != nil { + return err + } + if len(ns) != 0 { + err = fmt.Errorf("expected %q to be empty, got %q", dir, ns) + } + } + return err +} + +// Exist 返回文件或目录是否存在 +func Exist(name string) bool { + _, err := os.Stat(name) + return err == nil +} + +// DirEmpty 返回文件是否创建,以及是一个空目录 +func DirEmpty(name string) bool { + ns, err := ReadDir(name) + return len(ns) == 0 && err == nil +} + +// ZeroToEnd 清空当前之后的数据,并固定分配文件空间 +func ZeroToEnd(f *os.File) error { + // offset是从0开始的, 可以比当前的文件内容长度大,多出的部分会用空(0)来代替 + off, err := f.Seek(0, io.SeekCurrent) // 返回当前的偏移量(相对开头) + if err != nil { + return err + } + lenf, lerr := f.Seek(0, io.SeekEnd) // 返回 文件大小 + if lerr != nil { + return lerr + } + // 删除后面的内容,不管当前的偏移量在哪儿,都是从头开始截取不会影响当前的偏移量;改变文件的大小 + if err = f.Truncate(off); err != nil { + return err + } + if err = Preallocate(f, lenf, true); err != nil { + return err + } // 预分配空间 + _, err = f.Seek(off, io.SeekStart) // 跳转到 要接着写的地方 + return err +} + +// CheckDirPermission checks permission on an existing dir. +// Returns error if dir is empty or exist with a different permission than specified. +func CheckDirPermission(dir string, perm os.FileMode) error { + if !Exist(dir) { + return fmt.Errorf("directory %q empty, cannot check permission", dir) + } + // check the existing permission on the directory + dirInfo, err := os.Stat(dir) + if err != nil { + return err + } + dirMode := dirInfo.Mode().Perm() + if dirMode != perm { + err = fmt.Errorf("directory %q exist, but the permission is %q. 
The recommended permission is %q to prevent possible unprivileged access to the data", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode)) + return err + } + return nil +} + +// RemoveMatchFile 移除格式匹配的文件 +func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) bool) error { + if lg == nil { + lg = zap.NewNop() + } + if !Exist(dir) { + return fmt.Errorf("目录不存在 %s", dir) + } + fileNames, err := ReadDir(dir) + if err != nil { + return err + } + var removeFailedFiles []string + for _, fileName := range fileNames { + if matchFunc(fileName) { + file := filepath.Join(dir, fileName) + if err = os.Remove(file); err != nil { + removeFailedFiles = append(removeFailedFiles, fileName) + lg.Error("删除文件失败", + zap.String("file", file), + zap.Error(err)) + continue + } + } + } + if len(removeFailedFiles) != 0 { + return fmt.Errorf("删除文件(s) %v error", removeFailedFiles) + } + return nil +} diff --git a/client_sdk/pkg/fileutil/lock.go b/client_sdk/pkg/fileutil/lock.go new file mode 100644 index 00000000000..c6a026028e5 --- /dev/null +++ b/client_sdk/pkg/fileutil/lock.go @@ -0,0 +1,26 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fileutil + +import ( + "errors" + "os" +) + +var ErrLocked = errors.New("fileutil: 文件已被锁定") + +type LockedFile struct { + *os.File // 文件句柄 +} diff --git a/client/pkg/fileutil/lock_flock.go b/client_sdk/pkg/fileutil/lock_flock.go similarity index 88% rename from client/pkg/fileutil/lock_flock.go rename to client_sdk/pkg/fileutil/lock_flock.go index a4e5707a659..0133460e49e 100644 --- a/client/pkg/fileutil/lock_flock.go +++ b/client_sdk/pkg/fileutil/lock_flock.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !windows && !plan9 && !solaris +// +build !windows,!plan9,!solaris package fileutil @@ -21,6 +22,8 @@ import ( "syscall" ) +// 同时尝试在某个文件上放置一个独占锁 设置LOCK_NB 如果已被锁定会返回EWOULDBLOCK + func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { @@ -36,6 +39,7 @@ func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, err return &LockedFile{f}, nil } +// 获取文件锁,阻塞等待 func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { diff --git a/client/pkg/fileutil/lock_linux.go b/client_sdk/pkg/fileutil/lock_linux.go similarity index 96% rename from client/pkg/fileutil/lock_linux.go rename to client_sdk/pkg/fileutil/lock_linux.go index c33a2f4afc7..1183d8a1996 100644 --- a/client/pkg/fileutil/lock_linux.go +++ b/client_sdk/pkg/fileutil/lock_linux.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build linux +// +build linux package fileutil @@ -39,7 +40,7 @@ var ( } linuxTryLockFile = flockTryLockFile - linuxLockFile = flockLockFile + linuxLockFile = flockLockFile // 文件锁 ) func init() { @@ -72,6 +73,7 @@ func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error return &LockedFile{f}, nil } +// LockFile OK func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { return linuxLockFile(path, flag, perm) } diff --git a/client/pkg/fileutil/lock_plan9.go b/client_sdk/pkg/fileutil/lock_plan9.go similarity index 100% rename from client/pkg/fileutil/lock_plan9.go rename to client_sdk/pkg/fileutil/lock_plan9.go diff --git a/client/pkg/fileutil/lock_solaris.go b/client_sdk/pkg/fileutil/lock_solaris.go similarity index 98% rename from client/pkg/fileutil/lock_solaris.go rename to client_sdk/pkg/fileutil/lock_solaris.go index 2e892fecc65..683cc1db9c4 100644 --- a/client/pkg/fileutil/lock_solaris.go +++ b/client_sdk/pkg/fileutil/lock_solaris.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build solaris +// +build solaris package fileutil diff --git a/client/pkg/fileutil/lock_unix.go b/client_sdk/pkg/fileutil/lock_unix.go similarity index 95% rename from client/pkg/fileutil/lock_unix.go rename to client_sdk/pkg/fileutil/lock_unix.go index 05db5367410..d89027e1fad 100644 --- a/client/pkg/fileutil/lock_unix.go +++ b/client_sdk/pkg/fileutil/lock_unix.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !windows && !plan9 && !solaris && !linux +// +build !windows,!plan9,!solaris,!linux package fileutil diff --git a/client_sdk/pkg/fileutil/lock_windows.go b/client_sdk/pkg/fileutil/lock_windows.go new file mode 100644 index 00000000000..5cbf2bc3d5e --- /dev/null +++ b/client_sdk/pkg/fileutil/lock_windows.go @@ -0,0 +1,126 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows +// +build windows + +package fileutil + +import ( + "errors" + "fmt" + "os" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + + errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") +) + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + LOCKFILE_EXCLUSIVE_LOCK = 2 + LOCKFILE_FAIL_IMMEDIATELY = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { + f, err := open(path, flag, perm) + if err != nil { + return nil, err + } + if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + f.Close() + return nil, err + } + return &LockedFile{f}, nil +} + +func open(path string, flag int, perm os.FileMode) (*os.File, error) { + if path == "" { + return nil, fmt.Errorf("cannot open empty filename") + } + var access uint32 + switch flag { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: 
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + case syscall.O_WRONLY | syscall.O_CREAT: + access = syscall.GENERIC_ALL + default: + panic(fmt.Errorf("flag %v is not supported", flag)) + } + fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), + access, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, + syscall.OPEN_ALWAYS, + syscall.FILE_ATTRIBUTE_NORMAL, + 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func lockFile(fd syscall.Handle, flags uint32) error { + var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK + flag |= flags + if fd == syscall.InvalidHandle { + return nil + } + err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err.Error() == errLocked.Error() { + return ErrLocked + } else if err != errLockViolation { + return err + } + return nil +} + +func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + var reserved uint32 = 0 + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/client_sdk/pkg/fileutil/over_preallocate.go b/client_sdk/pkg/fileutil/over_preallocate.go new file mode 100644 index 00000000000..a9fac258923 --- /dev/null +++ b/client_sdk/pkg/fileutil/over_preallocate.go @@ -0,0 +1,50 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "io" + "os" +) + +// Preallocate 预先分配文件空间 +func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + return nil + } + if extendFile { + return preallocExtend(f, sizeInBytes) + } + return preallocFixed(f, sizeInBytes) +} + +// 清除多余的空间 +func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { + curOff, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + size, err := f.Seek(sizeInBytes, io.SeekEnd) + if err != nil { + return err + } + if _, err = f.Seek(curOff, io.SeekStart); err != nil { + return err + } + if sizeInBytes > size { + return nil + } + return f.Truncate(sizeInBytes) +} diff --git a/client_sdk/pkg/fileutil/over_preallocate_unix.go b/client_sdk/pkg/fileutil/over_preallocate_unix.go new file mode 100644 index 00000000000..4389252d186 --- /dev/null +++ b/client_sdk/pkg/fileutil/over_preallocate_unix.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build linux +// +build linux + +package fileutil + +import ( + "os" + "syscall" +) + +func preallocExtend(f *os.File, sizeInBytes int64) error { + err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // not supported; fallback + // fallocate EINTRs frequently in some environments; fallback + if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + return preallocExtendTrunc(f, sizeInBytes) + } + } + return err +} + +func preallocFixed(f *os.File, sizeInBytes int64) error { + // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE + err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) + if err != nil { + errno, ok := err.(syscall.Errno) + // treat not supported as nil error + if ok && errno == syscall.ENOTSUP { + return nil + } + } + return err +} diff --git a/client/pkg/fileutil/preallocate_darwin.go b/client_sdk/pkg/fileutil/preallocate_darwin.go similarity index 99% rename from client/pkg/fileutil/preallocate_darwin.go rename to client_sdk/pkg/fileutil/preallocate_darwin.go index e74968d0351..caab143dd30 100644 --- a/client/pkg/fileutil/preallocate_darwin.go +++ b/client_sdk/pkg/fileutil/preallocate_darwin.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build darwin +// +build darwin package fileutil diff --git a/client/pkg/fileutil/preallocate_unsupported.go b/client_sdk/pkg/fileutil/preallocate_unsupported.go similarity index 97% rename from client/pkg/fileutil/preallocate_unsupported.go rename to client_sdk/pkg/fileutil/preallocate_unsupported.go index e7fd937a436..2c46dd49075 100644 --- a/client/pkg/fileutil/preallocate_unsupported.go +++ b/client_sdk/pkg/fileutil/preallocate_unsupported.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build !linux && !darwin +// +build !linux,!darwin package fileutil diff --git a/client/pkg/fileutil/purge.go b/client_sdk/pkg/fileutil/purge.go similarity index 86% rename from client/pkg/fileutil/purge.go rename to client_sdk/pkg/fileutil/purge.go index f4492009d6c..e8ac0ca6f58 100644 --- a/client/pkg/fileutil/purge.go +++ b/client_sdk/pkg/fileutil/purge.go @@ -41,12 +41,6 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval lg = zap.NewNop() } errC := make(chan error, 1) - lg.Info("started to purge file", - zap.String("dir", dirname), - zap.String("suffix", suffix), - zap.Uint("max", max), - zap.Duration("interval", interval)) - go func() { if donec != nil { defer close(donec) @@ -69,16 +63,14 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval f := filepath.Join(dirname, newfnames[0]) l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) if err != nil { - lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) break } if err = os.Remove(f); err != nil { - lg.Error("failed to remove file", zap.String("path", f), zap.Error(err)) errC <- err return } if err = l.Close(); err != nil { - lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) errC <- err return } diff --git a/client/pkg/fileutil/read_dir.go b/client_sdk/pkg/fileutil/read_dir.go similarity index 95% rename from client/pkg/fileutil/read_dir.go rename to client_sdk/pkg/fileutil/read_dir.go index 2eeaa89bc04..e1b93b8ecc8 100644 --- a/client/pkg/fileutil/read_dir.go +++ b/client_sdk/pkg/fileutil/read_dir.go @@ -40,7 +40,7 @@ func (op *ReadDirOp) applyOpts(opts []ReadDirOption) { } } -// ReadDir returns the filenames in the given directory in sorted order. 
+// ReadDir 返回指定目录下所有经过排序的文件 func ReadDir(d string, opts ...ReadDirOption) ([]string, error) { op := &ReadDirOp{} op.applyOpts(opts) diff --git a/client/pkg/fileutil/sync.go b/client_sdk/pkg/fileutil/sync.go similarity index 97% rename from client/pkg/fileutil/sync.go rename to client_sdk/pkg/fileutil/sync.go index 670d01fadcc..0a0855309e9 100644 --- a/client/pkg/fileutil/sync.go +++ b/client_sdk/pkg/fileutil/sync.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !linux && !darwin +// +build !linux,!darwin package fileutil diff --git a/client/pkg/fileutil/sync_darwin.go b/client_sdk/pkg/fileutil/sync_darwin.go similarity index 98% rename from client/pkg/fileutil/sync_darwin.go rename to client_sdk/pkg/fileutil/sync_darwin.go index 7affa78ea64..1923b276ea0 100644 --- a/client/pkg/fileutil/sync_darwin.go +++ b/client_sdk/pkg/fileutil/sync_darwin.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build darwin +// +build darwin package fileutil diff --git a/client/pkg/fileutil/sync_linux.go b/client_sdk/pkg/fileutil/sync_linux.go similarity index 98% rename from client/pkg/fileutil/sync_linux.go rename to client_sdk/pkg/fileutil/sync_linux.go index a3172382e5a..b9398c23f94 100644 --- a/client/pkg/fileutil/sync_linux.go +++ b/client_sdk/pkg/fileutil/sync_linux.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build linux +// +build linux package fileutil diff --git a/client/pkg/logutil/doc.go b/client_sdk/pkg/logutil/doc.go similarity index 100% rename from client/pkg/logutil/doc.go rename to client_sdk/pkg/logutil/doc.go diff --git a/client/pkg/logutil/log_level.go b/client_sdk/pkg/logutil/log_level.go similarity index 92% rename from client/pkg/logutil/log_level.go rename to client_sdk/pkg/logutil/log_level.go index 6c95bcfe9f7..57cd09cad84 100644 --- a/client/pkg/logutil/log_level.go +++ b/client_sdk/pkg/logutil/log_level.go @@ -20,7 +20,7 @@ import ( var DefaultLogLevel = "info" -// ConvertToZapLevel converts log level string to zapcore.Level. +// ConvertToZapLevel 将日志级别字符串转换为zapcore.Level. func ConvertToZapLevel(lvl string) zapcore.Level { var level zapcore.Level if err := level.Set(lvl); err != nil { diff --git a/client/pkg/logutil/zap.go b/client_sdk/pkg/logutil/zap.go similarity index 79% rename from client/pkg/logutil/zap.go rename to client_sdk/pkg/logutil/zap.go index 0a4374c77b8..15aa56a76d0 100644 --- a/client/pkg/logutil/zap.go +++ b/client_sdk/pkg/logutil/zap.go @@ -21,18 +21,7 @@ import ( "go.uber.org/zap/zapcore" ) -// CreateDefaultZapLogger creates a logger with default zap configuration -func CreateDefaultZapLogger(level zapcore.Level) (*zap.Logger, error) { - lcfg := DefaultZapLoggerConfig - lcfg.Level = zap.NewAtomicLevelAt(level) - c, err := lcfg.Build() - if err != nil { - return nil, err - } - return c, nil -} - -// DefaultZapLoggerConfig defines default zap logger configuration. +// DefaultZapLoggerConfig 定义了默认的zap logger 配置. var DefaultZapLoggerConfig = zap.Config{ Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)), @@ -42,9 +31,9 @@ var DefaultZapLoggerConfig = zap.Config{ Thereafter: 100, }, - Encoding: DefaultLogFormat, + Encoding: "json", - // copied from "zap.NewProductionEncoderConfig" with some updates + // 复制自 "zap.NewProductionEncoderConfig",并进行了一些更新. 
EncoderConfig: zapcore.EncoderConfig{ TimeKey: "ts", LevelKey: "level", @@ -59,20 +48,21 @@ var DefaultZapLoggerConfig = zap.Config{ EncodeCaller: zapcore.ShortCallerEncoder, }, - // Use "/dev/null" to discard all + // Use "/dev/null" 弃用所有 OutputPaths: []string{"stderr"}, ErrorOutputPaths: []string{"stderr"}, } -// MergeOutputPaths merges logging output paths, resolving conflicts. +// MergeOutputPaths 合并日志输出路径,解决冲突.,如果有/dev/null,就丢弃其他所有 func MergeOutputPaths(cfg zap.Config) zap.Config { + _ = zap.NewProductionEncoderConfig outputs := make(map[string]struct{}) for _, v := range cfg.OutputPaths { outputs[v] = struct{}{} } outputSlice := make([]string, 0) if _, ok := outputs["/dev/null"]; ok { - // "/dev/null" to discard all + // "/dev/null" 丢弃所有 outputSlice = []string{"/dev/null"} } else { for k := range outputs { diff --git a/client/pkg/logutil/zap_journal.go b/client_sdk/pkg/logutil/zap_journal.go similarity index 97% rename from client/pkg/logutil/zap_journal.go rename to client_sdk/pkg/logutil/zap_journal.go index c6adc010381..5bc195a424c 100644 --- a/client/pkg/logutil/zap_journal.go +++ b/client_sdk/pkg/logutil/zap_journal.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !windows +// +build !windows package logutil @@ -24,7 +25,7 @@ import ( "os" "path/filepath" - "go.etcd.io/etcd/client/pkg/v3/systemd" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/systemd" "github.com/coreos/go-systemd/v22/journal" "go.uber.org/zap/zapcore" diff --git a/client_sdk/pkg/pathutil/path.go b/client_sdk/pkg/pathutil/path.go new file mode 100644 index 00000000000..bbbde2a1eb4 --- /dev/null +++ b/client_sdk/pkg/pathutil/path.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pathutil implements utility functions for handling slash-separated +// paths. 
+package pathutil + +import "path" + +// CanonicalURLPath returns the canonical url path for p, which follows the rules: +// 1. the path always starts with "/" +// 2. replace multiple slashes with a single slash +// 3. replace each '.' '..' path name element with equivalent one +// 4. keep the trailing slash +// The function is borrowed from stdlib http.cleanPath in etcd.go. +func CanonicalURLPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root, + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} diff --git a/client/pkg/srv/srv.go b/client_sdk/pkg/srv/srv.go similarity index 96% rename from client/pkg/srv/srv.go rename to client_sdk/pkg/srv/srv.go index 15fda134d6a..97335361095 100644 --- a/client/pkg/srv/srv.go +++ b/client_sdk/pkg/srv/srv.go @@ -21,7 +21,7 @@ import ( "net/url" "strings" - "go.etcd.io/etcd/client/pkg/v3/types" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" ) var ( @@ -33,6 +33,7 @@ var ( // GetCluster gets the cluster information via DNS discovery. // Also sees each entry as a separate instance. func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) tcp2ap := make(map[string]url.URL) // First, resolve the apurls @@ -44,10 +45,7 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([] tcp2ap[tcpAddr.String()] = url } - var ( - tempName int - stringParts []string - ) + stringParts := []string{} updateNodeMap := func(service, scheme string) error { _, addrs, err := lookupSRV(service, "tcp", dns) if err != nil { @@ -99,10 +97,8 @@ type SRVClients struct { // GetClient looks up the client endpoints for a service and domain. 
func GetClient(service, domain string, serviceName string) (*SRVClients, error) { - var ( - urls []*url.URL - srvs []*net.SRV - ) + var urls []*url.URL + var srvs []*net.SRV updateURLs := func(service, scheme string) error { _, addrs, err := lookupSRV(service, "tcp", domain) diff --git a/client/pkg/systemd/doc.go b/client_sdk/pkg/systemd/doc.go similarity index 100% rename from client/pkg/systemd/doc.go rename to client_sdk/pkg/systemd/doc.go diff --git a/client/pkg/systemd/journal.go b/client_sdk/pkg/systemd/journal.go similarity index 100% rename from client/pkg/systemd/journal.go rename to client_sdk/pkg/systemd/journal.go diff --git a/client_sdk/pkg/testutil/assert.go b/client_sdk/pkg/testutil/assert.go new file mode 100644 index 00000000000..e8e042021e9 --- /dev/null +++ b/client_sdk/pkg/testutil/assert.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testutil + +import ( + "fmt" + "reflect" + "testing" +) + +func AssertEqual(t *testing.T, e, a interface{}, msg ...string) { + t.Helper() + if (e == nil || a == nil) && (isNil(e) && isNil(a)) { + return + } + if reflect.DeepEqual(e, a) { + return + } + s := "" + if len(msg) > 1 { + s = msg[0] + ": " + } + s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a) + FatalStack(t, s) +} + +func AssertNil(t *testing.T, v interface{}) { + t.Helper() + AssertEqual(t, nil, v) +} + +func AssertNotNil(t *testing.T, v interface{}) { + t.Helper() + if v == nil { + t.Fatalf("expected non-nil, got %+v", v) + } +} + +func AssertTrue(t *testing.T, v bool, msg ...string) { + t.Helper() + AssertEqual(t, true, v, msg...) +} + +func AssertFalse(t *testing.T, v bool, msg ...string) { + t.Helper() + AssertEqual(t, false, v, msg...) +} + +func isNil(v interface{}) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() != reflect.Struct && rv.IsNil() +} diff --git a/client/pkg/testutil/leak.go b/client_sdk/pkg/testutil/leak.go similarity index 93% rename from client/pkg/testutil/leak.go rename to client_sdk/pkg/testutil/leak.go index 8c08fbd5123..b2b9bdda757 100644 --- a/client/pkg/testutil/leak.go +++ b/client_sdk/pkg/testutil/leak.go @@ -23,7 +23,7 @@ CheckLeakedGoroutine verifies tests do not leave any leaky goroutines. It returns true when there are goroutines still running(leaking) after all tests. 
- import "go.etcd.io/etcd/client/pkg/v3/testutil" + import "github.com/ls-2018/etcd_cn/client_sdk/pkg/testutil" func TestMain(m *testing.M) { testutil.MustTestMainWithLeakDetection(m) @@ -48,7 +48,7 @@ func CheckLeakedGoroutine() bool { stackCount[normalized]++ } - fmt.Fprint(os.Stderr, "Unexpected goroutines running after all test(s).\n") + fmt.Fprintf(os.Stderr, "Unexpected goroutines running after all test(s).\n") for stack, count := range stackCount { fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack) } @@ -140,8 +140,8 @@ func interestingGoroutines() (gs []string) { strings.Contains(stack, "created by testing.(*T).Run") || strings.Contains(stack, "testing.Main(") || strings.Contains(stack, "runtime.goexit") || - strings.Contains(stack, "go.etcd.io/etcd/client/pkg/v3/testutil.interestingGoroutines") || - strings.Contains(stack, "go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop") || + strings.Contains(stack, "github.com/ls-2018/etcd_cn/client_sdk/pkg/testutil.interestingGoroutines") || + strings.Contains(stack, "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil.(*MergeLogger).outputLoop") || strings.Contains(stack, "github.com/golang/glog.(*loggingT).flushDaemon") || strings.Contains(stack, "created by runtime.gc") || strings.Contains(stack, "created by text/template/parse.lex") || diff --git a/client/pkg/testutil/pauseable_handler.go b/client_sdk/pkg/testutil/pauseable_handler.go similarity index 100% rename from client/pkg/testutil/pauseable_handler.go rename to client_sdk/pkg/testutil/pauseable_handler.go diff --git a/client/pkg/testutil/recorder.go b/client_sdk/pkg/testutil/recorder.go similarity index 97% rename from client/pkg/testutil/recorder.go rename to client_sdk/pkg/testutil/recorder.go index 064e7313875..41349fec52d 100644 --- a/client/pkg/testutil/recorder.go +++ b/client_sdk/pkg/testutil/recorder.go @@ -87,7 +87,7 @@ type recorderStream struct { } func NewRecorderStream() Recorder { - return 
NewRecorderStreamWithWaitTimout(5 * time.Second) + return NewRecorderStreamWithWaitTimout(time.Duration(5 * time.Second)) } func NewRecorderStreamWithWaitTimout(waitTimeout time.Duration) Recorder { diff --git a/client/pkg/testutil/testingtb.go b/client_sdk/pkg/testutil/testingtb.go similarity index 98% rename from client/pkg/testutil/testingtb.go rename to client_sdk/pkg/testutil/testingtb.go index bafaccf9846..970542c0405 100644 --- a/client/pkg/testutil/testingtb.go +++ b/client_sdk/pkg/testutil/testingtb.go @@ -15,6 +15,7 @@ package testutil import ( + "io/ioutil" "log" "os" ) @@ -111,7 +112,7 @@ func (t *testingTBProthesis) Name() string { } func (t *testingTBProthesis) TempDir() string { - dir, err := os.MkdirTemp("", t.name) + dir, err := ioutil.TempDir("", t.name) if err != nil { t.Fatal(err) } diff --git a/client/pkg/testutil/testutil.go b/client_sdk/pkg/testutil/testutil.go similarity index 100% rename from client/pkg/testutil/testutil.go rename to client_sdk/pkg/testutil/testutil.go diff --git a/client/pkg/testutil/var.go b/client_sdk/pkg/testutil/var.go similarity index 100% rename from client/pkg/testutil/var.go rename to client_sdk/pkg/testutil/var.go diff --git a/client_sdk/pkg/tlsutil/cipher_suites.go b/client_sdk/pkg/tlsutil/cipher_suites.go new file mode 100644 index 00000000000..8d4ab67f367 --- /dev/null +++ b/client_sdk/pkg/tlsutil/cipher_suites.go @@ -0,0 +1,38 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import "crypto/tls" + +// GetCipherSuite 返回相应的密码套件. 和布尔值(如果它被支持). +func GetCipherSuite(s string) (uint16, bool) { + for _, c := range tls.CipherSuites() { + if s == c.Name { + return c.ID, true + } + } + for _, c := range tls.InsecureCipherSuites() { + if s == c.Name { + return c.ID, true + } + } + switch s { + case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": + return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true + case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": + return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true + } + return 0, false +} diff --git a/client/pkg/tlsutil/doc.go b/client_sdk/pkg/tlsutil/doc.go similarity index 100% rename from client/pkg/tlsutil/doc.go rename to client_sdk/pkg/tlsutil/doc.go diff --git a/client/pkg/tlsutil/tlsutil.go b/client_sdk/pkg/tlsutil/tlsutil.go similarity index 84% rename from client/pkg/tlsutil/tlsutil.go rename to client_sdk/pkg/tlsutil/tlsutil.go index 0f79865e805..0c581e59277 100644 --- a/client/pkg/tlsutil/tlsutil.go +++ b/client_sdk/pkg/tlsutil/tlsutil.go @@ -18,15 +18,15 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "os" + "io/ioutil" ) -// NewCertPool creates x509 certPool with provided CA files. +// NewCertPool 使用提供的CA文件创建X509证书池 func NewCertPool(CAFiles []string) (*x509.CertPool, error) { certPool := x509.NewCertPool() for _, CAFile := range CAFiles { - pemByte, err := os.ReadFile(CAFile) + pemByte, err := ioutil.ReadFile(CAFile) if err != nil { return nil, err } @@ -49,14 +49,14 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { return certPool, nil } -// NewCert generates TLS cert by using the given cert,key and parse function. +// NewCert 通过使用给定的cert、key和解析函数生成TLS证书. 
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { - cert, err := os.ReadFile(certfile) + cert, err := ioutil.ReadFile(certfile) if err != nil { return nil, err } - key, err := os.ReadFile(keyfile) + key, err := ioutil.ReadFile(keyfile) if err != nil { return nil, err } diff --git a/client/pkg/transport/doc.go b/client_sdk/pkg/transport/doc.go similarity index 100% rename from client/pkg/transport/doc.go rename to client_sdk/pkg/transport/doc.go diff --git a/client_sdk/pkg/transport/keepalive_listener.go b/client_sdk/pkg/transport/keepalive_listener.go new file mode 100644 index 00000000000..ffb9537ce63 --- /dev/null +++ b/client_sdk/pkg/transport/keepalive_listener.go @@ -0,0 +1,98 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "time" + + "github.com/ls-2018/etcd_cn/code_debug/conn" +) + +type keepAliveConn interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(d time.Duration) error +} + +// NewKeepAliveListener returns a listener that listens on the given address. +// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. +// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. 
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html +func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { + if scheme == "https" { + if tlscfg == nil { + return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") + } + return newTLSKeepaliveListener(l, tlscfg), nil + } + + return &keepaliveListener{ + Listener: l, + }, nil +} + +type keepaliveListener struct{ net.Listener } + +func (kln *keepaliveListener) Accept() (net.Conn, error) { + c, err := kln.Listener.Accept() + if err != nil { + return nil, err + } + conn.PrintConn("keepaliveListener", c) + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + return c, nil +} + +// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. +type tlsKeepaliveListener struct { + net.Listener + config *tls.Config +} + +// Accept waits for and returns the next incoming TLS connection. +// The returned connection c is a *tls.Conn. +func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return + } + conn.PrintConn("tlsKeepaliveListener", c) + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + c = tls.Server(c, l.config) + return c, nil +} + +// NewListener creates a Listener which accepts connections from an inner +// Listener and wraps each connection with Server. +// The configuration config必须是non-nil and must have +// at least one certificate. 
+func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { + l := &tlsKeepaliveListener{} + l.Listener = inner + l.config = config + return l +} diff --git a/client/pkg/transport/limit_listen.go b/client_sdk/pkg/transport/limit_listen.go similarity index 87% rename from client/pkg/transport/limit_listen.go rename to client_sdk/pkg/transport/limit_listen.go index 404722ba76e..fddc4a6406a 100644 --- a/client/pkg/transport/limit_listen.go +++ b/client_sdk/pkg/transport/limit_listen.go @@ -21,12 +21,12 @@ import ( "net" "sync" "time" -) -var ( - ErrNotTCP = errors.New("only tcp connections have keepalive") + "github.com/ls-2018/etcd_cn/code_debug/conn" ) +var ErrNotTCP = errors.New("only tcp connections have keepalive") + // LimitListener returns a Listener that accepts at most n simultaneous // connections from the provided Listener. func LimitListener(l net.Listener, n int) net.Listener { @@ -48,6 +48,7 @@ func (l *limitListener) Accept() (net.Conn, error) { l.release() return nil, err } + conn.PrintConn("limitListener", c) return &limitListenerConn{Conn: c, release: l.release}, nil } @@ -63,9 +64,6 @@ func (l *limitListenerConn) Close() error { return err } -// SetKeepAlive sets keepalive -// -// Deprecated: use (*keepAliveConn) SetKeepAlive instead. func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { tcpc, ok := l.Conn.(*net.TCPConn) if !ok { @@ -74,9 +72,6 @@ func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { return tcpc.SetKeepAlive(doKeepAlive) } -// SetKeepAlivePeriod sets keepalive period -// -// Deprecated: use (*keepAliveConn) SetKeepAlivePeriod instead. 
func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { tcpc, ok := l.Conn.(*net.TCPConn) if !ok { diff --git a/client_sdk/pkg/transport/listener.go b/client_sdk/pkg/transport/listener.go new file mode 100644 index 00000000000..de88dbc602a --- /dev/null +++ b/client_sdk/pkg/transport/listener.go @@ -0,0 +1,519 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/tlsutil" + + "go.uber.org/zap" +) + +// NewListener creates a new listner. +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { + return newListener(addr, scheme, WithTLSInfo(tlsinfo)) +} + +// NewListenerWithOpts OK +func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) { + return newListener(addr, scheme, opts...) +} + +func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, error) { + if scheme == "unix" || scheme == "unixs" { + // unix sockets via unix://laddr + return NewUnixListener(addr) + } + + lnOpts := newListenOpts(opts...) 
+ + switch { + case lnOpts.IsSocketOpts(): + config, err := newListenConfig(lnOpts.socketOpts) + if err != nil { + return nil, err + } + lnOpts.ListenConfig = config + fallthrough + case lnOpts.IsTimeout(), lnOpts.IsSocketOpts(): + ln, err := lnOpts.ListenConfig.Listen(context.TODO(), "tcp", addr) + if err != nil { + return nil, err + } + lnOpts.Listener = &rwTimeoutListener{ + Listener: ln, + readTimeout: lnOpts.readTimeout, + writeTimeout: lnOpts.writeTimeout, + } + case lnOpts.IsTimeout(): + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + lnOpts.Listener = &rwTimeoutListener{ + Listener: ln, + readTimeout: lnOpts.readTimeout, + writeTimeout: lnOpts.writeTimeout, + } + default: + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + lnOpts.Listener = ln + } + + if lnOpts.skipTLSInfoCheck && !lnOpts.IsTLS() { + return lnOpts.Listener, nil + } + return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener) +} + +func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil + } + if tlsinfo != nil && tlsinfo.SkipClientSANVerify { + return NewTLSListener(l, tlsinfo) + } + return newTLSListener(l, tlsinfo, checkSAN) +} + +func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) { + lc := net.ListenConfig{} + if sopts != nil { + ctls := getControls(sopts) + if len(ctls) > 0 { + lc.Control = ctls.Control + } + } + return lc, nil +} + +type TLSInfo struct { + // CertFile 服务端证书,如果ClientCertFile为空,它也将被用作_客户证书. + CertFile string + // KeyFile 是CertFile的密钥. 
+ KeyFile string + + // ClientCertFile client 证书,且启用认证;则使用CertFile + ClientCertFile string + // ClientKeyFile 是ClientCertFile的密钥 + ClientKeyFile string + + TrustedCAFile string // ca证书 + ClientCertAuth bool // 客户端证书验证;默认false + CRLFile string // 证书吊销列表文件的路径 + InsecureSkipVerify bool + SkipClientSANVerify bool + + // ServerName 在发现/虚拟主机的情况下,确保证书与给定的主机相匹配 + ServerName string + + // HandshakeFailure 当一个连接无法握手时,会被选择性地调用.之后,连接将被立即关闭. + HandshakeFailure func(*tls.Conn, error) + + // CipherSuites 是一个支持的密码套件的列表.如果是空的,Go 默认会自动填充它.请注意,密码套件是按照给定的顺序进行优先排序的. + CipherSuites []uint16 + + selfCert bool // 自签 + + // parseFunc 的存在是为了简化测试.通常情况下,parseFunc应该留为零.在这种情况下,将使用tls.X509KeyPair. + parseFunc func([]byte, []byte) (tls.Certificate, error) + + // AllowedCN 客户端必须提供的common Name;在证书里 + AllowedCN string + + // AllowedHostname 是一个IP地址或主机名,必须与客户提供的TLS证书相匹配. + AllowedHostname string + + Logger *zap.Logger + + // EmptyCN indicates that the cert must have empty CN. + // If true, ClientConfig() will return an error for a cert with non empty CN. 
+ EmptyCN bool +} + +func (info TLSInfo) String() string { + return fmt.Sprintf("cert = %s, key = %s, client-cert=%s, client-key=%s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.ClientCertFile, info.ClientKeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) +} + +func (info TLSInfo) Empty() bool { + return info.CertFile == "" && info.KeyFile == "" +} + +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { + info.Logger = lg + if selfSignedCertValidity == 0 { + err = fmt.Errorf("selfSignedCertValidity 是无效的,它应该大于0 ") + info.Logger.Warn("不能生成证书", zap.Error(err)) + return + } + err = fileutil.TouchDirAll(dirpath) + if err != nil { + if info.Logger != nil { + info.Logger.Warn("无法创建证书目录", zap.Error(err)) + } + return + } + + certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem")) + if err != nil { + return + } + keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem")) + if err != nil { + return + } + _, errcert := os.Stat(certPath) + _, errkey := os.Stat(keyPath) + if errcert == nil && errkey == nil { + info.CertFile = certPath + info.KeyFile = keyPath + info.ClientCertFile = certPath + info.ClientKeyFile = keyPath + info.selfCert = true + return + } + + // 编号 + serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + if err != nil { + if info.Logger != nil { + info.Logger.Warn("无法生成随机数", zap.Error(err)) + } + return + } + + tmpl := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"etcd"}}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)), + // 加密、解密 + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + // 服务端验证 + ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...), + BasicConstraintsValid: true, + 
IPAddresses: []net.IP{}, + DNSNames: []string{}, + } + + if info.Logger != nil { + info.Logger.Warn("自动生成证书", zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter)) + } + + for _, host := range hosts { + h, _, _ := net.SplitHostPort(host) + if ip := net.ParseIP(h); ip != nil { + tmpl.IPAddresses = append(tmpl.IPAddresses, ip) + } else { + tmpl.DNSNames = append(tmpl.DNSNames, h) + } + } + + priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + if info.Logger != nil { + info.Logger.Warn("不能生成ECDSA密钥", zap.Error(err)) + } + return + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) + if err != nil { + if info.Logger != nil { + info.Logger.Warn("无法生成x509证书", zap.Error(err)) + } + return + } + + certOut, err := os.Create(certPath) + if err != nil { + info.Logger.Warn("无法创建证书文件", zap.String("path", certPath), zap.Error(err)) + return + } + // 证书文件 + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + if info.Logger != nil { + info.Logger.Info("创建的Cert文件", zap.String("path", certPath)) + } + + b, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return + } + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) + if err != nil { + if info.Logger != nil { + info.Logger.Warn("无法创建私钥文件", zap.String("path", keyPath), zap.Error(err)) + } + return + } + // 秘钥 + pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) + keyOut.Close() + if info.Logger != nil { + info.Logger.Info("创建的私钥文件", zap.String("path", keyPath)) + } + return SelfCert(lg, dirpath, hosts, selfSignedCertValidity) +} + +// OK +func (info TLSInfo) baseConfig() (*tls.Config, error) { + if info.KeyFile == "" || info.CertFile == "" { + return nil, fmt.Errorf("KeyFile和CertFile必须同时存在[key: %v, cert: %v]", info.KeyFile, info.CertFile) + } + if info.Logger == nil { + info.Logger = zap.NewNop() + } + + _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, 
info.parseFunc) // parseFunc 在主程序里是nil + if err != nil { + return nil, err + } + + // 如果提供了客户证书和密钥,则对其进行预验证.这可以确保我们在接受任何连接之前崩溃. + if (info.ClientKeyFile == "") != (info.ClientCertFile == "") { + return nil, fmt.Errorf("ClientKeyFile和ClientCertFile必须同时存在或同时不存在.: key: %v, cert: %v]", info.ClientKeyFile, info.ClientCertFile) + } + if info.ClientCertFile != "" { + _, err := tlsutil.NewCert(info.ClientCertFile, info.ClientKeyFile, info.parseFunc) + if err != nil { + return nil, err + } + } + + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: info.ServerName, + } + + if len(info.CipherSuites) > 0 { + cfg.CipherSuites = info.CipherSuites + } + + // 客户端证书可以通过CN上的精确匹配来验证,也可以通过对CN和san进行更一般的检查来验证. + var verifyCertificate func(*x509.Certificate) bool + if info.AllowedCN != "" { + if info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname 只能指定一个 (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + verifyCertificate = func(cert *x509.Certificate) bool { + return info.AllowedCN == cert.Subject.CommonName + } + } + if info.AllowedHostname != "" { + verifyCertificate = func(cert *x509.Certificate) bool { + return cert.VerifyHostname(info.AllowedHostname) == nil + } + } + if verifyCertificate != nil { + cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + for _, chains := range verifiedChains { + if len(chains) != 0 { + if verifyCertificate(chains[0]) { + return nil + } + } + } + return errors.New("客户端证书认证失败") + } + } + // 是有同一个CA签发的 + // 服务端获取证书 + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "未能找到peer的证书文件", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + 
info.Logger.Warn( + "未能创建peer证书", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err + } + // 客户端获取证书 + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) { + certfile, keyfile := info.CertFile, info.KeyFile + if info.ClientCertFile != "" { + certfile, keyfile = info.ClientCertFile, info.ClientKeyFile + } + cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "未能找到peer的证书文件", + zap.String("cert-file", certfile), + zap.String("key-file", keyfile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "未能创建peer证书", + zap.String("cert-file", certfile), + zap.String("key-file", keyfile), + zap.Error(err), + ) + } + } + return cert, err + } + return cfg, nil +} + +// OK +func (info TLSInfo) cafiles() []string { + cs := make([]string, 0) + if info.TrustedCAFile != "" { + cs = append(cs, info.TrustedCAFile) + } + return cs +} + +// ServerConfig generates a tls.Config object for use by an HTTP etcd. 
+func (info TLSInfo) ServerConfig() (*tls.Config, error) { + cfg, err := info.baseConfig() + if err != nil { + return nil, err + } + + if info.Logger == nil { + info.Logger = zap.NewNop() + } + + cfg.ClientAuth = tls.NoClientCert + if info.TrustedCAFile != "" || info.ClientCertAuth { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + cs := info.cafiles() + if len(cs) > 0 { + info.Logger.Info("Loading cert pool", zap.Strings("cs", cs), + zap.Any("tlsinfo", info)) + cp, err := tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + cfg.ClientCAs = cp + } + + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP etcd + cfg.NextProtos = []string{"h2"} + + // go1.13 enables TLS 1.3 by default + // and in TLS 1.3, cipher suites are not configurable + // setting Max TLS version to TLS 1.2 for go 1.13 + cfg.MaxVersion = tls.VersionTLS12 + + return cfg, nil +} + +// ClientConfig 生成一个tls.Config对象,供HTTP客户端使用. +func (info TLSInfo) ClientConfig() (*tls.Config, error) { + var cfg *tls.Config + var err error + + if !info.Empty() { + cfg, err = info.baseConfig() // // 初始化TLS配置 + if err != nil { + return nil, err + } + } else { + cfg = &tls.Config{ServerName: info.ServerName} + } + cfg.InsecureSkipVerify = info.InsecureSkipVerify // 客户端是否验证服务端证书链和主机名 + + cs := info.cafiles() + if len(cs) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + } + + if info.selfCert { + cfg.InsecureSkipVerify = true + } + + if info.EmptyCN { + hasNonEmptyCN := false + cn := "" + _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { + var block *pem.Block + block, _ = pem.Decode(certPEMBlock) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return tls.Certificate{}, err + } + if len(cert.Subject.CommonName) != 0 { + hasNonEmptyCN = true + cn = cert.Subject.CommonName + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) + }) + if err != nil 
{
+			return nil, err
+		}
+		if hasNonEmptyCN {
+			return nil, fmt.Errorf("证书有非空的CN(%s): %s", cn, info.CertFile) // EmptyCN要求证书的CN必须为空
+		}
+	}
+
+	cfg.MaxVersion = tls.VersionTLS12
+
+	return cfg, nil
+}
+
+// IsClosedConnError returns true if the error is from closing listener, cmux.
+// copied from golang.org/x/net/http2/http2.go
+func IsClosedConnError(err error) bool {
+	// 'use of closed network connection' (Go <=1.8)
+	// 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
+	// 'mux: listener closed' (cmux.ErrListenerClosed)
+	return err != nil && strings.Contains(err.Error(), "closed")
+}
diff --git a/client_sdk/pkg/transport/listener_opts.go b/client_sdk/pkg/transport/listener_opts.go
new file mode 100644
index 00000000000..82d829c0bc9
--- /dev/null
+++ b/client_sdk/pkg/transport/listener_opts.go
@@ -0,0 +1,76 @@
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+type ListenerOptions struct {
+	Listener     net.Listener
+	ListenConfig net.ListenConfig
+
+	socketOpts       *SocketOpts // 套接字选项
+	tlsInfo          *TLSInfo    // 证书信息
+	skipTLSInfoCheck bool
+	writeTimeout     time.Duration // 设置读写超时
+	readTimeout      time.Duration
+}
+
+func newListenOpts(opts ...ListenerOption) *ListenerOptions {
+	lnOpts := &ListenerOptions{}
+	lnOpts.applyOpts(opts)
+	return lnOpts
+}
+
+func (lo *ListenerOptions) applyOpts(opts []ListenerOption) {
+	for _, opt := range opts {
+		opt(lo)
+	}
+}
+
+// IsTimeout returns true if the listener has a read/write timeout defined.
+func (lo *ListenerOptions) IsTimeout() bool { return lo.readTimeout != 0 || lo.writeTimeout != 0 }
+
+// IsSocketOpts returns true if the listener options includes socket options.
+func (lo *ListenerOptions) IsSocketOpts() bool {
+	if lo.socketOpts == nil {
+		return false
+	}
+	return lo.socketOpts.ReusePort || lo.socketOpts.ReuseAddress
+}
+
+// IsTLS returns true if listener options includes TLSInfo.
+func (lo *ListenerOptions) IsTLS() bool { + if lo.tlsInfo == nil { + return false + } + return !lo.tlsInfo.Empty() +} + +// ListenerOption 是可以应用于listener的选项. +type ListenerOption func(*ListenerOptions) + +// WithTimeout 允许对listener应用一个读或写的超时. +func WithTimeout(read, write time.Duration) ListenerOption { + return func(lo *ListenerOptions) { + lo.writeTimeout = write + lo.readTimeout = read + } +} + +// WithSocketOpts 定义了将应用于listener的套接字选项. +func WithSocketOpts(s *SocketOpts) ListenerOption { + return func(lo *ListenerOptions) { lo.socketOpts = s } +} + +// WithTLSInfo 向listener添加TLS证书. +func WithTLSInfo(t *TLSInfo) ListenerOption { + return func(lo *ListenerOptions) { lo.tlsInfo = t } +} + +// WithSkipTLSInfoCheck when true a transport can be created with an https scheme +// without passing TLSInfo, circumventing not presented error. Skipping this check +// also requires that TLSInfo is not passed. +func WithSkipTLSInfoCheck(skip bool) ListenerOption { + return func(lo *ListenerOptions) { lo.skipTLSInfoCheck = skip } +} diff --git a/client/pkg/transport/listener_tls.go b/client_sdk/pkg/transport/listener_tls.go similarity index 94% rename from client/pkg/transport/listener_tls.go rename to client_sdk/pkg/transport/listener_tls.go index 1a283739318..10d296e401c 100644 --- a/client/pkg/transport/listener_tls.go +++ b/client_sdk/pkg/transport/listener_tls.go @@ -19,10 +19,12 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "io/ioutil" "net" - "os" "strings" "sync" + + cm "github.com/ls-2018/etcd_cn/code_debug/conn" ) // tlsListener overrides a TLS listener so it will reject client @@ -39,6 +41,8 @@ type tlsListener struct { type tlsCheckFunc func(context.Context, *tls.Conn) error +var crlBytesMap sync.Map + // NewTLSListener handshakes TLS connections and performs optional CRL checking. 
func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { check := func(context.Context, *tls.Conn) error { return nil } @@ -128,7 +132,7 @@ func (l *tlsListener) acceptLoop() { l.err = err return } - + cm.PrintConn("tlsListener", conn) pendingMu.Lock() pending[conn] = struct{}{} pendingMu.Unlock() @@ -167,11 +171,18 @@ func (l *tlsListener) acceptLoop() { } func checkCRL(crlPath string, cert []*x509.Certificate) error { - // TODO: cache - crlBytes, err := os.ReadFile(crlPath) - if err != nil { - return err + var crlBytes []byte + + if v, ok := crlBytesMap.Load(crlPath); ok { + crlBytes = v.([]byte) + } else { + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + crlBytesMap.Store(crlPath, crlBytes) } + certList, err := x509.ParseCRL(crlBytes) if err != nil { return err @@ -222,8 +233,7 @@ func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { // reverse lookup - var names []string - var wildcards []string + wildcards, names := []string{}, []string{} for _, dns := range dnsNames { if strings.HasPrefix(dns, "*.") { wildcards = append(wildcards, dns[1:]) diff --git a/client_sdk/pkg/transport/sockopt.go b/client_sdk/pkg/transport/sockopt.go new file mode 100644 index 00000000000..278deb7c7a9 --- /dev/null +++ b/client_sdk/pkg/transport/sockopt.go @@ -0,0 +1,41 @@ +package transport + +import ( + "syscall" +) + +type Controls []func(network, addr string, conn syscall.RawConn) error + +func (ctls Controls) Control(network, addr string, conn syscall.RawConn) error { + for _, s := range ctls { + if err := s(network, addr, conn); err != nil { + return err + } + } + return nil +} + +type SocketOpts struct { + // [1] https://man7.org/linux/man-pages/man7/socket.7.html + // 启用在listener上设置套接字选项SO_REUSEPORT.允许重新绑定一个已经在使用的端口. + // 用户应该记住.在这种情况下.数据文件上的锁可能会导致意外情况的发生.用户应该注意防止锁竞争. 
+ ReusePort bool + // ReuseAddress启用了一个套接字选项SO_REUSEADDR.允许绑定到`TIME_WAIT`状态下的地址.在etcd因过多的`TIME_WAIT'而缓慢重启的情况下.这对提高MTTR很有用. + // [1] https://man7.org/linux/man-pages/man7/socket.7.html + ReuseAddress bool +} + +func getControls(sopts *SocketOpts) Controls { + ctls := Controls{} + if sopts.ReuseAddress { + ctls = append(ctls, setReuseAddress) + } + if sopts.ReusePort { + ctls = append(ctls, setReusePort) + } + return ctls +} + +func (sopts *SocketOpts) Empty() bool { + return !sopts.ReuseAddress && !sopts.ReusePort +} diff --git a/client_sdk/pkg/transport/sockopt_unix.go b/client_sdk/pkg/transport/sockopt_unix.go new file mode 100644 index 00000000000..432b52e0fce --- /dev/null +++ b/client_sdk/pkg/transport/sockopt_unix.go @@ -0,0 +1,22 @@ +//go:build !windows +// +build !windows + +package transport + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func setReusePort(network, address string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1) + }) +} + +func setReuseAddress(network, address string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1) + }) +} diff --git a/client_sdk/pkg/transport/sockopt_windows.go b/client_sdk/pkg/transport/sockopt_windows.go new file mode 100644 index 00000000000..4e5af70b11e --- /dev/null +++ b/client_sdk/pkg/transport/sockopt_windows.go @@ -0,0 +1,19 @@ +//go:build windows +// +build windows + +package transport + +import ( + "fmt" + "syscall" +) + +func setReusePort(network, address string, c syscall.RawConn) error { + return fmt.Errorf("port reuse is not supported on Windows") +} + +// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as +// there is no protection against port hijacking. 
+func setReuseAddress(network, addr string, conn syscall.RawConn) error { + return fmt.Errorf("address reuse is not supported on Windows") +} diff --git a/client/pkg/transport/timeout_conn.go b/client_sdk/pkg/transport/timeout_conn.go similarity index 100% rename from client/pkg/transport/timeout_conn.go rename to client_sdk/pkg/transport/timeout_conn.go diff --git a/client/pkg/transport/timeout_dialer.go b/client_sdk/pkg/transport/timeout_dialer.go similarity index 100% rename from client/pkg/transport/timeout_dialer.go rename to client_sdk/pkg/transport/timeout_dialer.go diff --git a/client/pkg/transport/timeout_listener.go b/client_sdk/pkg/transport/timeout_listener.go similarity index 94% rename from client/pkg/transport/timeout_listener.go rename to client_sdk/pkg/transport/timeout_listener.go index 5d74bd70c23..1a142adf603 100644 --- a/client/pkg/transport/timeout_listener.go +++ b/client_sdk/pkg/transport/timeout_listener.go @@ -17,6 +17,8 @@ package transport import ( "net" "time" + + "github.com/ls-2018/etcd_cn/code_debug/conn" ) // NewTimeoutListener returns a listener that listens on the given address. @@ -37,6 +39,7 @@ func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { if err != nil { return nil, err } + conn.PrintConn("rwTimeoutListener", c) return timeoutConn{ Conn: c, writeTimeout: rwln.writeTimeout, diff --git a/client_sdk/pkg/transport/timeout_transport.go b/client_sdk/pkg/transport/timeout_transport.go new file mode 100644 index 00000000000..85e28b6ec58 --- /dev/null +++ b/client_sdk/pkg/transport/timeout_transport.go @@ -0,0 +1,49 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "net/http" + "time" +) + +// NewTimeoutTransport 返回一个使用给定的TLS信息创建的传输. +// 如果创建的连接上的读/写块超过了它的时间限制. 它将返回超时错误. +// 如果读/写超时被设置,传输将不能重新使用连接. +func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { + tr, err := NewTransport(info, dialtimeoutd) + if err != nil { + return nil, err + } + + if rdtimeoutd != 0 || wtimeoutd != 0 { + // 超时的连接在闲置后很快就会超时,它不应该被放回http传输系统作为闲置连接供将来使用. + tr.MaxIdleConnsPerHost = -1 + } else { + // 允许peer之间有更多的空闲连接,以避免不必要的端口分配. + tr.MaxIdleConnsPerHost = 1024 + } + + tr.Dial = (&rwTimeoutDialer{ + Dialer: net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + }).Dial + return tr, nil +} diff --git a/client/pkg/transport/tls.go b/client_sdk/pkg/transport/tls.go similarity index 88% rename from client/pkg/transport/tls.go rename to client_sdk/pkg/transport/tls.go index d5375863fd5..62fe0d38519 100644 --- a/client/pkg/transport/tls.go +++ b/client_sdk/pkg/transport/tls.go @@ -15,8 +15,6 @@ package transport import ( - "context" - "errors" "fmt" "strings" "time" @@ -29,8 +27,6 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { if err != nil { return nil, err } - defer t.CloseIdleConnections() - var errs []string var endpoints []string for _, ep := range eps { @@ -38,7 +34,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { errs = append(errs, fmt.Sprintf("%q is insecure", ep)) continue } - conn, cerr := 
t.DialContext(context.Background(), "tcp", ep[len("https://"):]) + conn, cerr := t.Dial("tcp", ep[len("https://"):]) if cerr != nil { errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) continue @@ -47,7 +43,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { endpoints = append(endpoints, ep) } if len(errs) != 0 { - err = errors.New(strings.Join(errs, ",")) + err = fmt.Errorf("%s", strings.Join(errs, ",")) } return endpoints, err } diff --git a/client_sdk/pkg/transport/transport.go b/client_sdk/pkg/transport/transport.go new file mode 100644 index 00000000000..30c72bd8abe --- /dev/null +++ b/client_sdk/pkg/transport/transport.go @@ -0,0 +1,76 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "context" + "net" + "net/http" + "strings" + "time" +) + +type unixTransport struct{ *http.Transport } + +// NewTransport 创建transport +func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { + cfg, err := info.ClientConfig() + if err != nil { + return nil, err + } + + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + + dialer := &net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + } + + dialContext := func(ctx context.Context, net, addr string) (net.Conn, error) { + return dialer.DialContext(ctx, "unix", addr) + } + tu := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: dialContext, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + // Cost of reopening connection on sockets is low, and they are mostly used in testing. + // Long living unix-transport connections were leading to 'leak' test flakes. + // Alternativly the returned Transport (t) should override CloseIdleConnections to + // forward it to 'tu' as well. 
+ IdleConnTimeout: time.Microsecond, + } + ut := &unixTransport{tu} + + t.RegisterProtocol("unix", ut) + t.RegisterProtocol("unixs", ut) + + return t, nil +} + +func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { + url := *req.URL + req.URL = &url + req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) + return urt.Transport.RoundTrip(req) +} diff --git a/client/pkg/transport/unix_listener.go b/client_sdk/pkg/transport/unix_listener.go similarity index 100% rename from client/pkg/transport/unix_listener.go rename to client_sdk/pkg/transport/unix_listener.go diff --git a/client/pkg/types/doc.go b/client_sdk/pkg/types/doc.go similarity index 100% rename from client/pkg/types/doc.go rename to client_sdk/pkg/types/doc.go diff --git a/client_sdk/pkg/types/over_id.go b/client_sdk/pkg/types/over_id.go new file mode 100644 index 00000000000..8799a1dbb16 --- /dev/null +++ b/client_sdk/pkg/types/over_id.go @@ -0,0 +1,36 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "strconv" + +// ID 代表一个通用的标识符,通常存储为uint64,但在输入/输出时通常表示为16进制字符串. 
+type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString 8e9e05c52164694d +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/client/pkg/types/set.go b/client_sdk/pkg/types/set.go similarity index 97% rename from client/pkg/types/set.go rename to client_sdk/pkg/types/set.go index 3e69c8d8b94..5b2f3796fb6 100644 --- a/client/pkg/types/set.go +++ b/client_sdk/pkg/types/set.go @@ -48,7 +48,7 @@ type unsafeSet struct { d map[string]struct{} } -// Add adds a new value to the set (no-op if the value is already present) +// Add 将一个新的值添加到集合中(如果该值已经存在,则不做任何操作). func (us *unsafeSet) Add(value string) { us.d[value] = struct{}{} } @@ -90,7 +90,7 @@ func (us *unsafeSet) Length() int { // Values returns the values of the Set in an unspecified order. 
func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0, len(us.d)) + values = make([]string, 0) for val := range us.d { values = append(values, val) } diff --git a/client/pkg/types/slice.go b/client_sdk/pkg/types/slice.go similarity index 100% rename from client/pkg/types/slice.go rename to client_sdk/pkg/types/slice.go diff --git a/client/pkg/types/urls.go b/client_sdk/pkg/types/urls.go similarity index 80% rename from client/pkg/types/urls.go rename to client_sdk/pkg/types/urls.go index 49a38967e64..021195857aa 100644 --- a/client/pkg/types/urls.go +++ b/client_sdk/pkg/types/urls.go @@ -25,6 +25,7 @@ import ( type URLs []url.URL +// NewURLs OK func NewURLs(strs []string) (URLs, error) { all := make([]url.URL, len(strs)) if len(all) == 0 { @@ -36,25 +37,20 @@ func NewURLs(strs []string) (URLs, error) { if err != nil { return nil, err } - - switch u.Scheme { - case "http", "https": - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - case "unix", "unixs": - break - default: - return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme必须是http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) } all[i] = *u } us := URLs(all) us.Sort() + return us, nil } diff --git a/client/pkg/types/urlsmap.go b/client_sdk/pkg/types/urlsmap.go similarity index 82% rename from client/pkg/types/urlsmap.go rename to client_sdk/pkg/types/urlsmap.go index 47690cc381a..074d4e77dc5 100644 --- a/client/pkg/types/urlsmap.go +++ 
b/client_sdk/pkg/types/urlsmap.go @@ -20,12 +20,12 @@ import ( "strings" ) -// URLsMap is a map from a name to its URLs. +// URLsMap 节点名字与通信地址的对应 type URLsMap map[string]URLs -// NewURLsMap returns a URLsMap instantiated from the given string, -// which consists of discovery-formatted names-to-URLs, like: +// NewURLsMap 返回URLsMap 【节点名字与通信地址的对应】 // mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +// 类型转换 func NewURLsMap(s string) (URLsMap, error) { m := parse(s) @@ -54,7 +54,7 @@ func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { return um, nil } -// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +// String 返回mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 func (c URLsMap) String() string { var pairs []string for name, urls := range c { @@ -66,8 +66,7 @@ func (c URLsMap) String() string { return strings.Join(pairs, ",") } -// URLs returns a list of all URLs. -// The returned list is sorted in ascending lexicographical order. +// URLs 返回所有的URLS func (c URLsMap) URLs() []string { var urls []string for _, us := range c { @@ -79,12 +78,11 @@ func (c URLsMap) URLs() []string { return urls } -// Len returns the size of URLsMap. func (c URLsMap) Len() int { return len(c) } -// parse parses the given string and returns a map listing the values specified for each key. +// parse 解析给定的字符串,并返回一个列出每个键的指定值的map. func parse(s string) map[string][]string { m := make(map[string][]string) for s != "" { diff --git a/client_sdk/v2/README.md b/client_sdk/v2/README.md new file mode 100644 index 00000000000..284b7124902 --- /dev/null +++ b/client_sdk/v2/README.md @@ -0,0 +1,127 @@ +# etcd/client + +etcd/client is the Go client library for etcd. 
+ +[![GoDoc](https://godoc.org/github.com/ls-2018/etcd_cn/client?status.png)](https://godoc.org/github.com/ls-2018/etcd_cn/client) + +For full compatibility, it is recommended to install released versions of clients using go modules. + +## Install + +```bash +go get github.com/ls-2018/etcd_cn/client +``` + +## Usage + +```go +package main + +import ( + "log" + "time" + "context" + + "github.com/ls-2018/etcd_cn/client" +) + +func main() { + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: client.DefaultTransport, + // set timeout per request to fail fast when the target endpoint is unavailable + HeaderTimeoutPerRequest: time.Second, + } + c, err := client.New(cfg) + if err != nil { + log.Fatal(err) + } + kapi := client.NewKeysAPI(c) + // set "/foo" key with "bar" value + log.Print("Setting '/foo' key with 'bar' value") + resp, err := kapi.Set(context.Background(), "/foo", "bar", nil) + if err != nil { + log.Fatal(err) + } else { + // print common key info + log.Printf("Set is done. Metadata is %q\n", resp) + } + // get "/foo" key's value + log.Print("Getting '/foo' key value") + resp, err = kapi.Get(context.Background(), "/foo", nil) + if err != nil { + log.Fatal(err) + } else { + // print common key info + log.Printf("Get is done. Metadata is %q\n", resp) + // print value + log.Printf("%q key has %q value\n", resp.NodeExtern.Key, resp.NodeExtern.Value) + } +} +``` + +## Error Handling + +etcd client might return three types of errors. + +- context error + +Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. If the +context is canceled or reaches its deadline, the responding context error will be returned no matter what internal +errors the API call has already encountered. + +- cluster error + +Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response.
If a +request to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a +list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned. + +- response error + +If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an +invalid JSON error. + +Here is the example code to handle client errors: + +```go +cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}} +c, err := client.New(cfg) +if err != nil { + log.Fatal(err) +} + +kapi := client.NewKeysAPI(c) +resp, err := kapi.Set(ctx, "test", "bar", nil) +if err != nil { + if err == context.Canceled { + // ctx is canceled by another routine + } else if err == context.DeadlineExceeded { + // ctx is attached with a deadline and it exceeded + } else if cerr, ok := err.(*client.ClusterError); ok { + // process (cerr.Errors) + } else { + // bad cluster endpoints, which are not etcd servers + } +} +``` + +## Caveat + +1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket + resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from + the data consumed by the client because data replicated to each etcd member has already passed through the consensus + process. + +2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning + properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first + attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all + available endpoints, it will return all the errors that happened. + +3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now.
TCP keepalive mechanism doesn't + help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve + this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, + but the connection is kept alive, hasn't been brought to our attention. + +4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is + isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read + requests or monitor the /health endpoint for member health information. diff --git a/client_sdk/v2/auth_role.go b/client_sdk/v2/auth_role.go new file mode 100644 index 00000000000..ef3aba6ce48 --- /dev/null +++ b/client_sdk/v2/auth_role.go @@ -0,0 +1,66 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" +) + +type Role struct { + Role string `json:"role"` + Permissions Permissions `json:"permissions"` + Grant *Permissions `json:"grant,omitempty"` + Revoke *Permissions `json:"revoke,omitempty"` +} + +type Permissions struct { + KV rwPermission `json:"kv"` +} + +type rwPermission struct { + Read []string `json:"read"` + Write []string `json:"write"` +} + +type PermissionType int + +const ( + ReadPermission PermissionType = iota + WritePermission + ReadWritePermission +) + +// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to +// interact with etcd's role creation and modification features. + +type AuthRoleAPI interface { + // AddRole adds a role. + AddRole(ctx context.Context, role string) error + + // RemoveRole removes a role. + RemoveRole(ctx context.Context, role string) error + + // GetRole retrieves role details. + GetRole(ctx context.Context, role string) (*Role, error) + + // GrantRoleKV grants a role some permission prefixes for the KV store. + GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // RevokeRoleKV revokes some permission prefixes for a role on the KV store. + RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // ListRoles lists roles. + ListRoles(ctx context.Context) ([]string, error) +} diff --git a/client_sdk/v2/auth_user.go b/client_sdk/v2/auth_user.go new file mode 100644 index 00000000000..f5d773c4eae --- /dev/null +++ b/client_sdk/v2/auth_user.go @@ -0,0 +1,88 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "net/url" + "path" +) + +var defaultV2AuthPrefix = "/v2/auth" + +type User struct { + User string `json:"user"` + Password string `json:"password,omitempty"` + Roles []string `json:"roles"` + Grant []string `json:"grant,omitempty"` + Revoke []string `json:"revoke,omitempty"` +} + +type UserRoles struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +func v2AuthURL(ep url.URL, action string, name string) *url.URL { + if name != "" { + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) + return &ep + } + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) + return &ep +} + +type AuthAPI interface { + // Enable auth. + Enable(ctx context.Context) error + + // Disable auth. + Disable(ctx context.Context) error +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. + +type AuthUserAPI interface { + // AddUser adds a user. + AddUser(ctx context.Context, username string, password string) error + + // RemoveUser removes a user. + RemoveUser(ctx context.Context, username string) error + + // GetUser retrieves user details. + GetUser(ctx context.Context, username string) (*User, error) + + // GrantUser grants a user some permission roles. 
+ GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // RevokeUser revokes some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // ChangePassword changes the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // ListUsers lists the users. + ListUsers(ctx context.Context) ([]string, error) +} diff --git a/client/v2/cancelreq.go b/client_sdk/v2/cancelreq.go similarity index 100% rename from client/v2/cancelreq.go rename to client_sdk/v2/cancelreq.go diff --git a/client_sdk/v2/client.go b/client_sdk/v2/client.go new file mode 100644 index 00000000000..4347bfb6b48 --- /dev/null +++ b/client_sdk/v2/client.go @@ -0,0 +1,717 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/offical/api/v3/version" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. + // As the name implies, the client object will pick a node from the members + // of the cluster in a random fashion. If the cluster has three members, A, B, + // and C, the client picks any node from its three members as its request + // destination. + EndpointSelectionRandom EndpointSelectionMode = iota + + // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', + // requests are sent directly to the cluster leader. This reduces + // forwarding roundtrips compared to making requests to etcd followers + // who then forward them to the cluster leader. In the event of a leader + // failure, however, clients configured this way cannot prioritize among + // the remaining etcd followers. 
Therefore, when a client sets 'SelectionMode' + // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to + // maintain its knowledge of current cluster state. + // + // This mode should be used with Client.AutoSync(). + EndpointSelectionPrioritizeLeader +) + +type Config struct { + // Endpoints defines a set of URLs (schemes, hosts and ports only) + // that can be used to communicate with a logical etcd cluster. For + // example, a three-node cluster could be provided like so: + // + // Endpoints: []string{ + // "http://node1.example.com:2379", + // "http://node2.example.com:2379", + // "http://node3.example.com:2379", + // } + // + // If multiple endpoints are provided, the Client will attempt to + // use them all in the event that one or more of them are unusable. + // + // If Client.Sync is ever called, the Client may cache an alternate + // set of endpoints to continue operation. + Endpoints []string + + // Transport is used by the Client to drive HTTP requests. If not + // provided, DefaultTransport will be used. + Transport CancelableTransport + + // CheckRedirect specifies the policy for handling HTTP redirects. + // If CheckRedirect is not nil, the Client calls it before + // following an HTTP redirect. The sole argument is the number of + // requests that have already been made. If CheckRedirect returns + // an error, Client.Do will not make any further requests and return + // the error back it to the caller. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect CheckRedirectFunc + + // Username specifies the user credential to add as an authorization header + Username string + + // Password is the password for the specified user to add as an authorization header + // to the request. + Password string + + // HeaderTimeoutPerRequest specifies the time limit to wait for response + // header in a single request made by the Client. 
The timeout includes + // connection time, any redirects, and header wait time. + // + // For non-watch GET request, etcd returns the response body immediately. + // For PUT/POST/DELETE request, etcd will attempt to commit request + // before responding, which is expected to take `100ms + 2 * RTT`. + // For watch request, etcd returns the header immediately to notify Client + // watch start. But if etcd is behind some kind of proxy, the response + // header may be cached at proxy, and Client cannot rely on this behavior. + // + // Especially, wait request will ignore this timeout. + // + // One API call may send multiple requests to different etcd servers until it + // succeeds. Use context of the API to specify the overall timeout. + // + // A HeaderTimeoutPerRequest of zero means no timeout. + HeaderTimeoutPerRequest time.Duration + + // SelectionMode is an EndpointSelectionMode enum that specifies the + // policy for choosing the etcd cluster node to which requests are sent. + SelectionMode EndpointSelectionMode +} + +func (cfg *Config) transport() CancelableTransport { + if cfg.Transport == nil { + return DefaultTransport + } + return cfg.Transport +} + +func (cfg *Config) checkRedirect() CheckRedirectFunc { + if cfg.CheckRedirect == nil { + return DefaultCheckRedirect + } + return cfg.CheckRedirect +} + +// CancelableTransport mimics net/http.Transport, but requires that +// the object also support request cancellation. +type CancelableTransport interface { + http.RoundTripper + CancelRequest(req *http.Request) +} + +type CheckRedirectFunc func(via int) error + +// DefaultCheckRedirect follows up to 10 redirects, but no more. +var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { + if via > 10 { + return ErrTooManyRedirects + } + return nil +} + +type Client interface { + // Sync updates the internal cache of the etcd cluster's membership. + Sync(context.Context) error + + // AutoSync periodically calls Sync() every given interval. 
+ // The recommended sync interval is 10 seconds to 1 minute, which does + // not bring too much overhead to etcd and makes client catch up the + // cluster change in time. + // + // The example to use it: + // + // for { + // err := client.AutoSync(ctx, 10*time.Second) + // if err == context.DeadlineExceeded || err == context.Canceled { + // break + // } + // log.Print(err) + // } + AutoSync(context.Context, time.Duration) error + + // Endpoints returns a copy of the current set of API endpoints used + // by Client to resolve HTTP requests. If Sync has ever been called, + // this may differ from the initial Endpoints provided in the Config. + Endpoints() []string + + // SetEndpoints sets the set of API endpoints used by Client to resolve + // HTTP requests. If the given endpoints are not valid, an error will be + // returned + SetEndpoints(eps []string) error + + // GetVersion retrieves the current etcd etcd and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + + httpClient +} + +func New(cfg Config) (Client, error) { + c := &httpClusterClient{ + clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), + rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + selectionMode: cfg.SelectionMode, + } + if cfg.Username != "" { + c.credentials = &credentials{ + username: cfg.Username, + password: cfg.Password, + } + } + if err := c.SetEndpoints(cfg.Endpoints); err != nil { + return nil, err + } + return c, nil +} + +type httpClient interface { + Do(context.Context, httpAction) (*http.Response, []byte, error) +} + +func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { + return func(ep url.URL) httpClient { + return &redirectFollowingHTTPClient{ + checkRedirect: cr, + client: &simpleHTTPClient{ + transport: tr, + endpoint: ep, + headerTimeout: headerTimeout, + }, + } + } +} + +type credentials struct { + username string + 
password string +} + +type httpClientFactory func(url.URL) httpClient + +type httpAction interface { + HTTPRequest(url.URL) *http.Request +} + +type httpClusterClient struct { + clientFactory httpClientFactory + endpoints []url.URL + pinned int + credentials *credentials + sync.RWMutex + rand *rand.Rand + selectionMode EndpointSelectionMode +} + +func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { + ceps := make([]url.URL, len(eps)) + copy(ceps, eps) + + // To perform a lookup on the new endpoint list without using the current + // client, we'll copy it + clientCopy := &httpClusterClient{ + clientFactory: c.clientFactory, + credentials: c.credentials, + rand: c.rand, + + pinned: 0, + endpoints: ceps, + } + + mAPI := NewMembersAPI(clientCopy) + leader, err := mAPI.Leader(ctx) + if err != nil { + return "", err + } + if len(leader.ClientURLs) == 0 { + return "", ErrNoLeaderEndpoint + } + + return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? +} + +func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { + if len(eps) == 0 { + return []url.URL{}, ErrNoEndpoints + } + + neps := make([]url.URL, len(eps)) + for i, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + return []url.URL{}, err + } + neps[i] = *u + } + return neps, nil +} + +func (c *httpClusterClient) SetEndpoints(eps []string) error { + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + c.endpoints = shuffleEndpoints(c.rand, neps) + // We're not doing anything for PrioritizeLeader here. This is + // due to not having a context meaning we can't call getLeaderEndpoint + // However, if you're using PrioritizeLeader, you've already been told + // to regularly call sync, where we do have a ctx, and can figure the + // leader. 
PrioritizeLeader is also quite a loose guarantee, so deal + // with it + c.pinned = 0 + + return nil +} + +func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + action := act + c.RLock() + leps := len(c.endpoints) + eps := make([]url.URL, leps) + n := copy(eps, c.endpoints) + pinned := c.pinned + + if c.credentials != nil { + action = &authedAction{ + act: act, + credentials: *c.credentials, + } + } + c.RUnlock() + + if leps == 0 { + return nil, nil, ErrNoEndpoints + } + + if leps != n { + return nil, nil, errors.New("unable to pick endpoint: copy failed") + } + + var resp *http.Response + var body []byte + var err error + cerr := &ClusterError{} + isOneShot := ctx.Value(&oneShotCtxValue) != nil + + for i := pinned; i < leps+pinned; i++ { + k := i % leps + hc := c.clientFactory(eps[k]) + resp, body, err = hc.Do(ctx, action) + if err != nil { + cerr.Errors = append(cerr.Errors, err) + if err == ctx.Err() { + return nil, nil, ctx.Err() + } + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + } else if resp.StatusCode/100 == 5 { + switch resp.StatusCode { + case http.StatusInternalServerError, http.StatusServiceUnavailable: + // TODO: make sure this is a no leader response + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) + default: + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns etcd error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) + } + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue + } + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err + } + if k != pinned { + c.Lock() + c.pinned = k + c.Unlock() + } + return resp, body, nil + } + + return nil, nil, cerr +} + +func (c *httpClusterClient) Endpoints() []string { + c.RLock() + defer c.RUnlock() + + eps := make([]string, len(c.endpoints)) + for i, ep := range c.endpoints { + 
eps[i] = ep.String() + } + + return eps +} + +func (c *httpClusterClient) Sync(ctx context.Context) error { + mAPI := NewMembersAPI(c) + ms, err := mAPI.List(ctx) + if err != nil { + return err + } + + var eps []string + for _, m := range ms { + eps = append(eps, m.ClientURLs...) + } + + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + npin := 0 + + switch c.selectionMode { + case EndpointSelectionRandom: + c.RLock() + eq := endpointsEqual(c.endpoints, neps) + c.RUnlock() + + if eq { + return nil + } + // When items in the endpoint list changes, we choose a new pin + neps = shuffleEndpoints(c.rand, neps) + case EndpointSelectionPrioritizeLeader: + nle, err := c.getLeaderEndpoint(ctx, neps) + if err != nil { + return ErrNoLeaderEndpoint + } + + for i, n := range neps { + if n.String() == nle { + npin = i + break + } + } + default: + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) + } + + c.Lock() + defer c.Unlock() + c.endpoints = neps + c.pinned = npin + + return nil +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + +type roundTripResponse struct { + resp *http.Response + err 
error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +// ErrNoRequest indicates that the HTTPRequest object could not be found +// or was nil. No processing could continue. +var ErrNoRequest = errors.New("no HTTPRequest was available") + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + if req == nil { + return nil, nil, ErrNoRequest + } + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return 
nil, nil, err + } + + var body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + // copied from Go 1.9<= rand.Rand.Perm + n := len(eps) + p := make([]int, n) + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + p[i] = p[j] + p[j] = i + } + neps := make([]url.URL, n) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} + +func endpointsEqual(left, right 
[]url.URL) bool { + if len(left) != len(right) { + return false + } + + sLeft := make([]string, len(left)) + sRight := make([]string, len(right)) + for i, l := range left { + sLeft[i] = l.String() + } + for i, r := range right { + sRight[i] = r.String() + } + + sort.Strings(sLeft) + sort.Strings(sRight) + for i := range sLeft { + if sLeft[i] != sRight[i] { + return false + } + } + return true +} diff --git a/client/v2/cluster_error.go b/client_sdk/v2/cluster_error.go similarity index 100% rename from client/v2/cluster_error.go rename to client_sdk/v2/cluster_error.go diff --git a/client_sdk/v2/curl.go b/client_sdk/v2/curl.go new file mode 100644 index 00000000000..82a92ad25a0 --- /dev/null +++ b/client_sdk/v2/curl.go @@ -0,0 +1,60 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var cURLDebug = false + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. 
+func printcURL(req *http.Request) error {
+	// Fast path: do nothing unless cURL debugging was enabled via cURLDebug.
+	if !cURLDebug {
+		return nil
+	}
+	var (
+		command string
+		b       []byte
+		err     error
+	)
+
+	if req.URL != nil {
+		command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
+	}
+
+	if req.Body != nil {
+		// Reading consumes req.Body entirely; it is re-materialized below so
+		// the request can still be sent by the caller afterwards.
+		b, err = ioutil.ReadAll(req.Body)
+		if err != nil {
+			return err
+		}
+		command += fmt.Sprintf(" -d %q", string(b))
+	}
+
+	fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
+
+	// reset body: restore the drained request body from the buffered bytes
+	// (empty buffer if there was no body).
+	body := bytes.NewBuffer(b)
+	req.Body = ioutil.NopCloser(body)
+
+	return nil
+}
diff --git a/client_sdk/v2/doc.go b/client_sdk/v2/doc.go
new file mode 100644
index 00000000000..3c3101e57ad
--- /dev/null
+++ b/client_sdk/v2/doc.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package client provides bindings for the etcd APIs.
+
+Create a Config and exchange it for a Client:
+
+	import (
+		"net/http"
+		"context"
+
+		"github.com/ls-2018/etcd_cn/client_sdk/v2"
+	)
+
+	cfg := client.Config{
+		Endpoints: []string{"http://127.0.0.1:2379"},
+		Transport: DefaultTransport,
+	}
+
+	c, err := client.New(cfg)
+	if err != nil {
+		// handle error
+	}
+
+Clients are safe for concurrent use by multiple goroutines.
+ +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring its previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/client_sdk/v2/json.go b/client_sdk/v2/json.go new file mode 100644 index 00000000000..b3a65580bef --- /dev/null +++ b/client_sdk/v2/json.go @@ -0,0 +1,72 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package client
+
+import (
+	"strconv"
+	"unsafe"
+
+	"github.com/json-iterator/go"
+	"github.com/modern-go/reflect2"
+)
+
+// customNumberExtension is a jsoniter extension whose only job is to install
+// customNumberDecoder wherever a JSON value is decoded into a bare interface{}.
+type customNumberExtension struct {
+	jsoniter.DummyExtension
+}
+
+// CreateDecoder returns the custom number decoder for interface{} targets and
+// nil (meaning "use the default decoder") for every other type.
+func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
+	if typ.String() == "interface {}" {
+		return customNumberDecoder{}
+	}
+	return nil
+}
+
+type customNumberDecoder struct{}
+
+// Decode stores a JSON number into *interface{} as int64 when it parses as a
+// base-10 integer, falling back to float64 otherwise; this preserves integer
+// precision that encoding/json's default float64 decoding would lose.
+// Non-number JSON values are decoded with jsoniter's default behavior.
+func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+	switch iter.WhatIsNext() {
+	case jsoniter.NumberValue:
+		var number jsoniter.Number
+		iter.ReadVal(&number)
+		i64, err := strconv.ParseInt(string(number), 10, 64)
+		if err == nil {
+			*(*interface{})(ptr) = i64
+			return
+		}
+		f64, err := strconv.ParseFloat(string(number), 64)
+		if err == nil {
+			*(*interface{})(ptr) = f64
+			return
+		}
+		// Neither an int64 nor a float64: surface the float parse failure.
+		iter.ReportError("DecodeNumber", err.Error())
+	default:
+		*(*interface{})(ptr) = iter.Read()
+	}
+}
+
+// caseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// case-sensitive when unmarshalling, and otherwise compatible with
+// the encoding/json standard library.
+func caseSensitiveJsonIterator() jsoniter.API {
+	config := jsoniter.Config{
+		EscapeHTML:             true,
+		SortMapKeys:            true,
+		ValidateJsonRawMessage: true,
+		CaseSensitive:          true,
+	}.Froze()
+	// Force jsoniter to decode number to interface{} via int64/float64, if possible.
+ config.RegisterExtension(&customNumberExtension{}) + return config +} diff --git a/client/v2/keys.go b/client_sdk/v2/keys.go similarity index 97% rename from client/v2/keys.go rename to client_sdk/v2/keys.go index fa8fdc6b261..a4d14072fb5 100644 --- a/client/v2/keys.go +++ b/client_sdk/v2/keys.go @@ -25,9 +25,7 @@ import ( "strings" "time" - kjson "sigs.k8s.io/json" - - "go.etcd.io/etcd/client/pkg/v3/pathutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/pathutil" ) const ( @@ -79,9 +77,7 @@ const ( PrevNoExist = PrevExistType("false") ) -var ( - defaultV2KeysPrefix = "/v2/keys" -) +var defaultV2KeysPrefix = "/v2/keys" // NewKeysAPI builds a KeysAPI that interacts with etcd's key-value // API over HTTP. @@ -166,7 +162,7 @@ type SetOptions struct { PrevValue string // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Set operation to succeed. + // Node必须是in order for the Set operation to succeed. // // If PrevIndex is set to 0 (default), no comparison is made. PrevIndex uint64 @@ -201,7 +197,7 @@ type GetOptions struct { // should be returned. Recursive bool - // Sort instructs the server whether or not to sort the Nodes. + // Sort instructs the etcd whether or not to sort the Nodes. // If true, the Nodes are sorted alphabetically by key in // ascending order (A to z). If false (default), the Nodes will // not be sorted and the ordering used should not be considered @@ -224,7 +220,7 @@ type DeleteOptions struct { PrevValue string // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Delete operation to succeed. + // Node必须是in order for the Delete operation to succeed. // // If PrevIndex is set to 0 (default), no comparison is made. PrevIndex uint64 @@ -272,7 +268,7 @@ type Response struct { // This index is not tied to the Node(s) contained in this Response. Index uint64 `json:"-"` - // ClusterID holds the cluster-level ID reported by the server. 
This + // ClusterID holds the cluster-level ID reported by the etcd. This // should be different for different etcd clusters. ClusterID string `json:"-"` } @@ -299,7 +295,7 @@ type Node struct { // ModifiedIndex is the etcd index at-which this Node was last modified. ModifiedIndex uint64 `json:"modifiedIndex"` - // Expiration is the server side expiration time of the key. + // Expiration is the etcd side expiration time of the key. Expiration *time.Time `json:"expiration,omitempty"` // TTL is the time to live of the key in second. @@ -472,7 +468,7 @@ func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { // v2KeysURL forms a URL representing the location of a key. // The endpoint argument represents the base URL of an etcd -// server. The prefix is the path needed to route from the +// etcd. The prefix is the path needed to route from the // provided endpoint's path to the root of the keys API // (typically "/v2/keys"). func v2KeysURL(ep url.URL, prefix, key string) *url.URL { @@ -655,9 +651,11 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp return res, err } +var jsonIterator = caseSensitiveJsonIterator() + func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { var res Response - err := kjson.UnmarshalCaseSensitivePreserveInts(body, &res) + err := jsonIterator.Unmarshal(body, &res) if err != nil { return nil, ErrInvalidJSON } diff --git a/client/v2/members.go b/client_sdk/v2/members.go similarity index 98% rename from client/v2/members.go rename to client_sdk/v2/members.go index 5e868ec6991..d911c466673 100644 --- a/client/v2/members.go +++ b/client_sdk/v2/members.go @@ -23,7 +23,7 @@ import ( "net/url" "path" - "go.etcd.io/etcd/client/pkg/v3/types" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" ) var ( @@ -130,7 +130,7 @@ func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { return nil, err } - return mCollection, nil + return []Member(mCollection), nil } 
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { diff --git a/client_sdk/v3/auth.go b/client_sdk/v3/auth.go new file mode 100644 index 00000000000..a544de4c08c --- /dev/null +++ b/client_sdk/v3/auth.go @@ -0,0 +1,205 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "fmt" + "strings" + + "github.com/ls-2018/etcd_cn/offical/api/v3/authpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "google.golang.org/grpc" +) + +type ( + AuthEnableResponse pb.AuthEnableResponse + AuthDisableResponse pb.AuthDisableResponse + AuthStatusResponse pb.AuthStatusResponse + AuthenticateResponse pb.AuthenticateResponse + AuthUserAddResponse pb.AuthUserAddResponse + AuthUserDeleteResponse pb.AuthUserDeleteResponse + AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse + AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse + AuthUserGetResponse pb.AuthUserGetResponse + AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse + AuthRoleAddResponse pb.AuthRoleAddResponse + AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse + AuthRoleGetResponse pb.AuthRoleGetResponse + AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse + AuthRoleDeleteResponse pb.AuthRoleDeleteResponse + AuthUserListResponse pb.AuthUserListResponse + AuthRoleListResponse pb.AuthRoleListResponse + + PermissionType authpb.Permission_Type + 
Permission authpb.Permission +) + +const ( + PermRead = authpb.READ + PermWrite = authpb.WRITE + PermReadWrite = authpb.READWRITE +) + +type UserAddOptions authpb.UserAddOptions + +type Auth interface { + Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) + AuthEnable(ctx context.Context) (*AuthEnableResponse, error) + AuthDisable(ctx context.Context) (*AuthDisableResponse, error) + AuthStatus(ctx context.Context) (*AuthStatusResponse, error) + UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) + UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error) + UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) + UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) + UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) + UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) + UserList(ctx context.Context) (*AuthUserListResponse, error) + UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) + RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) + RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) + RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) + RoleList(ctx context.Context) (*AuthRoleListResponse, error) + RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) + RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) +} + +type authClient struct { + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func NewAuth(c *Client) Auth { + api := &authClient{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = 
c.callOpts + } + return api +} + +func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth { + api := &authClient{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthenticateResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) + return (*AuthEnableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) + return (*AuthDisableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) { + resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...) + return (*AuthStatusResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) 
+ return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) + return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) + return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) + return (*AuthUserGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) + return (*AuthUserListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) + return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) +} + +// RoleAdd ok +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) 
+ return (*AuthRoleAddResponse)(resp), toErr(ctx, err) +} + +// RoleGrantPermission ok +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { + perm := &authpb.Permission{ + Key: key, + RangeEnd: rangeEnd, + PermType: authpb.Permission_Type(permType), + } + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) + return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) +} + +// RoleGet ok +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) + return (*AuthRoleGetResponse)(resp), toErr(ctx, err) +} + +// RoleList ok +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) + return (*AuthRoleListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: string(key), RangeEnd: string(rangeEnd)}, auth.callOpts...) + return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) 
+ return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) +} + +func StrToPermissionType(s string) (PermissionType, error) { + val, ok := authpb.PermissionTypeValue[strings.ToUpper(s)] + if ok { + return PermissionType(val), nil + } + return PermissionType(-1), fmt.Errorf("无效的权限类型: %s", s) +} diff --git a/client_sdk/v3/client.go b/client_sdk/v3/client.go new file mode 100644 index 00000000000..463565c41ac --- /dev/null +++ b/client_sdk/v3/client.go @@ -0,0 +1,573 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/credentials" + "github.com/ls-2018/etcd_cn/client_sdk/v3/internal/endpoint" + "github.com/ls-2018/etcd_cn/client_sdk/v3/internal/resolver" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpccredentials "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" +) + +var ( + ErrNoAvailableEndpoints = errors.New("etcdclient: 端点不可用") + ErrOldCluster = errors.New("etcdclient: 旧的集群版本") +) + +// Client 提供并管理一个etcd v3客户端会话. 
+type Client struct {
+	Cluster
+	KV
+	Lease
+	Watcher
+	Auth
+	Maintenance
+	conn            *grpc.ClientConn
+	cfg             Config                               // client configuration
+	creds           grpccredentials.TransportCredentials // transport (TLS) credentials
+	resolver        *resolver.EtcdManualResolver
+	mu              *sync.RWMutex
+	ctx             context.Context    // lifetime context of the client
+	cancel          context.CancelFunc // cancels ctx; invoked by Close
+	Username        string
+	Password        string
+	authTokenBundle credentials.Bundle
+	callOpts        []grpc.CallOption
+	lgMu            *sync.RWMutex
+	lg              *zap.Logger
+}
+
+// New creates a Client used to communicate with the etcd server.
+func New(cfg Config) (*Client, error) {
+	if len(cfg.Endpoints) == 0 {
+		return nil, ErrNoAvailableEndpoints
+	}
+
+	return newClient(&cfg)
+}
+
+// NewCtxClient creates a client with a context but no underlying grpc
+// connection. This is useful for embedded cases that override the
+// service interface implementations and do not need connection management.
+func NewCtxClient(ctx context.Context, opts ...Option) *Client {
+	cctx, cancel := context.WithCancel(ctx)
+	c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)}
+	for _, opt := range opts {
+		opt(c)
+	}
+	// Fall back to a no-op logger when no WithZapLogger option was given.
+	if c.lg == nil {
+		c.lg = zap.NewNop()
+	}
+	return c
+}
+
+// Option is a function type that can be passed as argument to NewCtxClient to configure client
+type Option func(*Client)
+
+// WithZapLogger is a NewCtxClient option that overrides the logger
+func WithZapLogger(lg *zap.Logger) Option {
+	return func(c *Client) {
+		c.lg = lg
+	}
+}
+
+// WithLogger overrides the logger.
+//
+// Deprecated: Please use WithZapLogger or Logger field in clientv3.Config
+//
+// Does not changes grpcLogger, that can be explicitly configured
+// using grpc_zap.ReplaceGrpcLoggerV2(..) method.
+func (c *Client) WithLogger(lg *zap.Logger) *Client {
+	c.lgMu.Lock()
+	c.lg = lg
+	c.lgMu.Unlock()
+	return c
+}
+
+// GetLogger gets the logger.
+// NOTE: This method is for internal use of etcd-client library and should not be used as general-purpose logger.
+func (c *Client) GetLogger() *zap.Logger { + c.lgMu.RLock() + l := c.lg + c.lgMu.RUnlock() + return l +} + +// Close shuts down the client's etcd connections. +func (c *Client) Close() error { + c.cancel() + if c.Watcher != nil { + c.Watcher.Close() + } + if c.Lease != nil { + c.Lease.Close() + } + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() +} + +func (c *Client) Ctx() context.Context { return c.ctx } + +// Dial connects to a single endpoint using the client's config. +func (c *Client) Dial(ep string) (*grpc.ClientConn, error) { + creds := c.credentialsForEndpoint(ep) + + // Using ad-hoc created resolver, to guarantee only explicitly given + // endpoint is used. + return c.dial(creds, grpc.WithResolvers(resolver.New(ep))) +} + +// roundRobinQuorumBackoff retries against quorum between each backoff. +// This is intended for use with a round robin load balancer. +func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc { + return func(attempt uint) time.Duration { + // after each round robin across quorum, backoff for our wait between duration + n := uint(len(c.Endpoints())) + quorum := (n/2 + 1) + if attempt%quorum == 0 { + c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction)) + return jitterUp(waitBetween, jitterFraction) + } + c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum)) + return 0 + } +} + +// --------------------------------------------- OVER ------------------------------------------------------------ + +func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() + defer c.mu.Unlock() + c.cfg.Endpoints = eps + c.resolver.SetEndpoints(eps) +} + +// Sync 将客户端的端点与来自etcd成员的已知端点进行同步. 
+func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + eps = append(eps, m.ClientURLs...) + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { + c.lg.Info("Auto sync endpoints failed.", zap.Error(err)) + } + } + } +} + +// dialSetupOpts 链接参数 +func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + PermitWithoutStream: c.cfg.PermitWithoutStream, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } + opts = append(opts, dopts...) + + if creds != nil { + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + // Interceptor retry and backoff. + // TODO: Replace all of clientv3/retry.go with RetryPolicy: + // https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130 + rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) + opts = append(opts, + // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. + // Streams that are safe to retry are enabled individually. 
+ grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)), + grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)), + ) + + return opts, nil +} + +// 检查服务端版本 +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + + eps := c.Endpoints() + errc := make(chan error, len(eps)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + cancel() + ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + } + + wg.Add(len(eps)) + for _, ep := range eps { + // 如果集群是当前的任何端点都会给出一个最新的版本 + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") // [3 5 2] + maj, min := 0, 0 + if len(vs) >= 2 { + var serr error + if maj, serr = strconv.Atoi(vs[0]); serr != nil { + errc <- serr + return + } + if min, serr = strconv.Atoi(vs[1]); serr != nil { + errc <- serr + return + } + } + // 3.2版本以下 + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + for range eps { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + +func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } + +// isHaltErr 如果给定的错误和上下文表明无法取得进展甚至在重新连接后返回true. +func isHaltErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return true + } + if err == nil { + return false + } + ev, _ := status.FromError(err) + return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal +} + +// isUnavailableErr 返回错误是不是 不可用 类型 +func isUnavailableErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return false + } + if err == nil { + return false + } + ev, ok := status.FromError(err) + if ok { + // Unavailable codes mean the system will be right back. 
+ // (e.g., can't connect, lost leader) + return ev.Code() == codes.Unavailable + } + return false +} + +func toErr(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = rpctypes.Error(err) + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + if ev, ok := status.FromError(err); ok { + code := ev.Code() + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + } + } + return err +} + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} + +// IsConnCanceled returns true, if error is from a closed gRPC connection. +// ref. https://github.com/grpc/grpc-go/pull/1854 +func IsConnCanceled(err error) bool { + if err == nil { + return false + } + + // >= gRPC v1.23.x + s, ok := status.FromError(err) + if ok { + // connection is canceled or etcd has already closed the connection + return s.Code() == codes.Canceled || s.Message() == "transport is closing" + } + + // >= gRPC v1.10.x + if err == context.Canceled { + return true + } + + // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' + return strings.Contains(err.Error(), "grpc: the client connection is closing") +} + +func (c *Client) Endpoints() []string { + c.mu.RLock() + defer c.mu.RUnlock() + eps := make([]string, len(c.cfg.Endpoints)) + copy(eps, c.cfg.Endpoints) + return eps +} + +// OK +func (c *Client) getToken(ctx context.Context) error { + var err error + + if c.Username == "" || c.Password == "" { + return nil + } + + resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password) + if err != nil { + if err == rpctypes.ErrAuthNotEnabled { + return nil + } + return err + } + c.authTokenBundle.UpdateAuthToken(resp.Token) + return nil +} + +// OK +func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, error) { 
+ creds := c.credentialsForEndpoint(c.Endpoints()[0]) // 根据第一个判断是不需要证书 + opts := append(dopts, grpc.WithResolvers(c.resolver)) + return c.dial(creds, opts...) +} + +// OK +func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts, err := c.dialSetupOpts(creds, dopts...) + if err != nil { + return nil, fmt.Errorf("配置dialer失败: %v", err) + } + if c.Username != "" && c.Password != "" { + c.authTokenBundle = credentials.NewBundle(credentials.Config{}) + opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) + } + + opts = append(opts, c.cfg.DialOptions...) + + dctx := c.ctx + if c.cfg.DialTimeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + defer cancel() + } + target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.Endpoints()[0])) + conn, err := grpc.DialContext(dctx, target, opts...) + if err != nil { + return nil, err + } + return conn, nil +} + +// 返回地址 +func authority(endpoint string) string { + spl := strings.SplitN(endpoint, "://", 2) + if len(spl) < 2 { + if strings.HasPrefix(endpoint, "unix:") { + return endpoint[len("unix:"):] + } + if strings.HasPrefix(endpoint, "unixs:") { + return endpoint[len("unixs:"):] + } + return endpoint + } + return spl[1] +} + +// OK +func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials { + r := endpoint.RequiresCredentials(ep) // 127.0.0.1:2379 + switch r { + case endpoint.CREDS_DROP: + return nil + case endpoint.CREDS_OPTIONAL: + return c.creds + case endpoint.CREDS_REQUIRE: + if c.creds != nil { + return c.creds + } + return credentials.NewBundle(credentials.Config{}).TransportCredentials() + default: + panic(fmt.Errorf("unsupported CredsRequirement: %v", r)) + } +} + +// 创建一个client用于与etcd server 通信 +func newClient(cfg *Config) (*Client, error) { + if cfg == nil { + cfg = &Config{} + } + var creds 
grpccredentials.TransportCredentials + if cfg.TLS != nil { + creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials() + } + + // 使用一个临时的客户端来启动第一个连接 + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) + client := &Client{ + conn: nil, + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, + mu: new(sync.RWMutex), + callOpts: defaultCallOpts, + lgMu: new(sync.RWMutex), + } + + var err error + if cfg.Logger != nil { + client.lg = cfg.Logger + } else if cfg.LogConfig != nil { + client.lg, err = cfg.LogConfig.Build() + } else { + client.lg, err = CreateDefaultZapLogger() + } + if err != nil { + return nil, err + } + + if cfg.Username != "" && cfg.Password != "" { + client.Username = cfg.Username + client.Password = cfg.Password + } + if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { + if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { + return nil, fmt.Errorf("gRPC消息接收大小 (%d bytes)必须是大于发送的 (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) + } + callOpts := []grpc.CallOption{ + defaultWaitForReady, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, + } + if cfg.MaxCallSendMsgSize > 0 { + callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) + } + if cfg.MaxCallRecvMsgSize > 0 { + callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) + } + client.callOpts = callOpts + } + client.resolver = resolver.New(cfg.Endpoints...) 
+ + if len(cfg.Endpoints) < 1 { + client.cancel() + return nil, fmt.Errorf("至少需要一个端点") + } + + conn, err := client.dialWithBalancer() + if err != nil { + client.cancel() + client.resolver.Close() + return nil, err + } + client.conn = conn + + client.Cluster = NewCluster(client) + client.KV = NewKV(client) + client.Lease = NewLease(client) + client.Watcher = NewWatcher(client) + client.Auth = NewAuth(client) + client.Maintenance = NewMaintenance(client) + + // 获得已建立连接的令牌 + ctx, cancel = client.ctx, func() {} + if client.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout) + } + err = client.getToken(ctx) + if err != nil { + client.Close() + cancel() + return nil, err + } + cancel() + if cfg.RejectOldCluster { // false + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + + go client.autoSync() + return client, nil +} diff --git a/client_sdk/v3/cluster.go b/client_sdk/v3/cluster.go new file mode 100644 index 00000000000..4956a1b5216 --- /dev/null +++ b/client_sdk/v3/cluster.go @@ -0,0 +1,130 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "context" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse + MemberPromoteResponse pb.MemberPromoteResponse +) + +type Cluster interface { + MemberList(ctx context.Context) (*MemberListResponse, error) + MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) + MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) + MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) +} + +type cluster struct { + remote pb.ClusterClient + callOpts []grpc.CallOption +} + +func NewCluster(c *Client) Cluster { + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, false) +} + +func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, true) +} + +func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + r := &pb.MemberAddRequest{ + PeerURLs: peerAddrs, + IsLearner: isLearner, 
+ } + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberAddResponse)(resp), nil +} + +func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { + r := &pb.MemberRemoveRequest{ID: id} + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberRemoveResponse)(resp), nil +} + +func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + // it is safe to retry on update. + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { + // it is safe to retry on list. + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...) + if err == nil { + return (*MemberListResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { + r := &pb.MemberPromoteRequest{ID: id} + resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) 
+ if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberPromoteResponse)(resp), nil +} diff --git a/client/v3/compact_op.go b/client_sdk/v3/compact_op.go similarity index 93% rename from client/v3/compact_op.go rename to client_sdk/v3/compact_op.go index a6e660aa825..4bc3e323c09 100644 --- a/client/v3/compact_op.go +++ b/client_sdk/v3/compact_op.go @@ -15,7 +15,7 @@ package clientv3 import ( - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" ) // CompactOp represents a compact operation. @@ -45,7 +45,7 @@ func (op CompactOp) toRequest() *pb.CompactionRequest { } // WithCompactPhysical makes Compact wait until all compacted entries are -// removed from the etcd server's storage. +// removed from the etcd etcd's storage. func WithCompactPhysical() CompactOption { return func(op *CompactOp) { op.physical = true } } diff --git a/client_sdk/v3/compare.go b/client_sdk/v3/compare.go new file mode 100644 index 00000000000..dc928e5213e --- /dev/null +++ b/client_sdk/v3/compare.go @@ -0,0 +1,144 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +type ( + CompareTarget int + CompareResult int +) + +const ( + CompareVersion CompareTarget = iota + CompareCreated + CompareModified + CompareValue +) + +type Cmp pb.Compare + +func Compare(cmp Cmp, result string, v interface{}) Cmp { + var r pb.Compare_CompareResult + + switch result { + case "=": + r = pb.Compare_EQUAL + case "!=": + r = pb.Compare_NOT_EQUAL + case ">": + r = pb.Compare_GREATER + case "<": + r = pb.Compare_LESS + default: + panic("Unknown result op") + } + + cmp.Result = r + switch cmp.Target { + case pb.Compare_VALUE: + val, ok := v.(string) + if !ok { + panic("bad compare value") + } + cmp.Compare_Value = &pb.Compare_Value{Value: val} + case pb.Compare_VERSION: + cmp.Compare_Version = &pb.Compare_Version{Version: mustInt64(v)} + case pb.Compare_CREATE: + cmp.Compare_CreateRevision = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} + case pb.Compare_MOD: + cmp.Compare_ModRevision = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} + case pb.Compare_LEASE: + cmp.Compare_Lease = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} + default: + panic("Unknown compare type") + } + return cmp +} + +func Value(key string) Cmp { + return Cmp{Key: key, Target: pb.Compare_VALUE} +} + +func Version(key string) Cmp { + return Cmp{Key: key, Target: pb.Compare_VERSION} +} + +func CreateRevision(key string) Cmp { + return Cmp{Key: key, Target: pb.Compare_CREATE} +} + +func ModRevision(key string) Cmp { + return Cmp{Key: key, Target: pb.Compare_MOD} +} + +// LeaseValue compares a key's LeaseID to a value of your choosing. The empty +// LeaseID is 0, otherwise known as `NoLease`. +func LeaseValue(key string) Cmp { + return Cmp{Key: key, Target: pb.Compare_LEASE} +} + +// KeyBytes returns the byte slice holding with the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return []byte(cmp.Key) } + +// WithKeyBytes sets the byte slice for the comparison key. 
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = string(key) } + +// ValueBytes returns the byte slice holding the comparison value, if any. +func (cmp *Cmp) ValueBytes() []byte { + if cmp.Compare_Value != nil { + return []byte(cmp.Compare_Value.Value) + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { + cmp.Compare_Value.Value = string(v) +} + +// WithRange sets the comparison to scan the range [key, end). +func (cmp Cmp) WithRange(end string) Cmp { + cmp.RangeEnd = end + return cmp +} + +// WithPrefix sets the comparison to scan all keys prefixed by the key. +func (cmp Cmp) WithPrefix() Cmp { + cmp.RangeEnd = getPrefix(cmp.Key) + return cmp +} + +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. +func mustInt64(val interface{}) int64 { + if v, ok := val.(int64); ok { + return v + } + if v, ok := val.(int); ok { + return int64(v) + } + panic("bad value") +} + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. +func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/client_sdk/v3/concurrency/distribted_mutex.go b/client_sdk/v3/concurrency/distribted_mutex.go new file mode 100644 index 00000000000..1e325988e87 --- /dev/null +++ b/client_sdk/v3/concurrency/distribted_mutex.go @@ -0,0 +1,236 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "errors" + "fmt" + "sync" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +// ErrLocked is returned by TryLock when Mutex is already locked by another session. +var ErrLocked = errors.New("mutex: Locked by another session") +var ErrSessionExpired = errors.New("mutex: session is expired") + +// Mutex implements the sync Locker interface with etcd +// 即前缀机制,也称目录机制,例如,一个名为 `/mylock` 的锁,两个争抢它的客户端进行写操作, +// 实际写入的Key分别为:`key1="/mylock/UUID1"`,`key2="/mylock/UUID2"`, +// 其中,UUID表示全局唯一的ID,确保两个Key的唯一性.很显然,写操作都会成功,但返回的Revision不一样, +// 那么,如何判断谁获得了锁呢?通过前缀`“/mylock"`查询,返回包含两个Key-Value对的Key-Value列表, +// 同时也包含它们的Revision,通过Revision大小,客户端可以判断自己是否获得锁,如果抢锁失败,则等待锁释放(对应的 Key 被删除或者租约过期), +// 然后再判断自己是否可以获得锁. +type Mutex struct { + s *Session + + pfx string // 前缀 + myKey string // key + myRev int64 // 当前的修订版本 + hdr *pb.ResponseHeader +} + +// NewMutex 通过session和锁前缀 +func NewMutex(s *Session, pfx string) *Mutex { + return &Mutex{s, pfx + "/", "", -1, nil} +} + +// TryLock locks the mutex if not already locked by another session. +// If lock is held by another session, return immediately after attempting necessary cleanup +// The ctx argument is used for the sending/receiving Txn RPC. +func (m *Mutex) TryLock(ctx context.Context) error { + resp, err := m.tryAcquire(ctx) + if err != nil { + return err + } + // if no key on prefix / the minimum rev is key, already hold the lock + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } + client := m.s.Client() + // Cannot lock, so delete the key + if _, err := client.Delete(ctx, m.myKey); err != nil { + return err + } + m.myKey = "\x00" + m.myRev = -1 + return ErrLocked +} + +// Lock locks the mutex with a cancelable context. 
If the context is canceled +// while trying to acquire the lock, the mutex tries to clean its stale lock entry. +// Lock 使用可取消的context锁定互斥锁.如果context被取消 +// 在尝试获取锁时,互斥锁会尝试清除其过时的锁条目. +func (m *Mutex) Lock(ctx context.Context) error { + resp, err := m.tryAcquire(ctx) + if err != nil { + return err + } + // if no key on prefix / the minimum rev is key, already hold the lock + // 通过对比自身的revision和最先创建的key的revision得出谁获得了锁 + // 例如 自身revision:5,最先创建的key createRevision:3 那么不获得锁,进入waitDeletes + // 自身revision:5,最先创建的key createRevision:5 那么获得锁 + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } + client := m.s.Client() + // 等待其他程序释放锁,并删除其他revisions + // 通过 Watch 机制各自监听 prefix 相同,revision 比自己小的 key,因为只有 revision 比自己小的 key 释放锁, + // 我才能有机会,获得锁,如下代码所示,其中 waitDelete 会使用我们上面的介绍的 Watch 去监听比自己小的 key,详细代码可参考concurrency mutex的实现. + _, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) // 监听前缀,上删除的 修订版本之前的kv + // release lock key if wait failed + if werr != nil { + m.Unlock(client.Ctx()) + return werr + } + + // make sure the session is not expired, and the owner key still exists. + gresp, werr := client.Get(ctx, m.myKey) + if werr != nil { + m.Unlock(client.Ctx()) + return werr + } + + if len(gresp.Kvs) == 0 { // is the session key lost? + return ErrSessionExpired + } + m.hdr = gresp.Header + + return nil +} + +func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) { + s := m.s + client := m.s.Client() + // s.Lease()租约 + // 生成锁的key + m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) // /my-lock/sfhskjdhfksfhalsklfhksdf + // 核心还是使用了我们上面介绍的事务和 Lease 特性,当 CreateRevision 为 0 时, + // 它会创建一个 prefix 为 /my-lock 的 key ,并获取到 /my-lock prefix下面最早创建的一个 key(revision 最小), + // 分布式锁最终是由写入此 key 的 client 获得,其他 client 则进入等待模式. 
+ // + // + // 使用事务机制 + // 比较key的revision为0(0标示没有key) + cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) + // 则put key,并设置租约 + put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) + // 否则 获取这个key,重用租约中的锁(这里主要目的是在于重入) + // 通过第二次获取锁,判断锁是否存在来支持重入 + // 所以只要租约一致,那么是可以重入的. + get := v3.OpGet(m.myKey) + // 通过前缀获取最先创建的key + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) + // 这里是比较的逻辑,如果等于0,写入当前的key,否则则读取这个key + // 大佬的代码写的就是奇妙 + resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() + if err != nil { + return nil, err + } + //{ + // "header":{ + // "cluster_id":14841639068965178418, + // "member_id":10276657743932975437, + // "Revision":6, + // "raft_term":2 + // }, + // "succeeded":true, + // "responses":[ + // { + // "ResponseOp_ResponsePut":{ + // "response_put":{ + // "header":{ + // "Revision":6 + // } + // } + // } + // }, + // { + // "ResponseOp_ResponseRange":{ + // "response_range":{ + // "header":{ + // "Revision":6 + // }, + // "kvs":[ + // { + // "key":"/my-lock//694d805a644b7a0d", + // "create_revision":6, + // "mod_revision":6, + // "version":1, + // "lease":7587862072907233805 + // } + // ], + // "count":1 + // } + // } + // } + // ] + //} + //marshal, _ := json.Marshal(resp) + //fmt.Println(string(marshal)) + // 获取到自身的revision(注意,此处CreateRevision和Revision不一定相等) + m.myRev = resp.Header.Revision + if !resp.Succeeded { + m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision + } + return resp, nil +} + +func (m *Mutex) Unlock(ctx context.Context) error { + client := m.s.Client() + if _, err := client.Delete(ctx, m.myKey); err != nil { + return err + } + m.myKey = "\x00" + m.myRev = -1 + return nil +} + +func (m *Mutex) IsOwner() v3.Cmp { + return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) +} + +func (m *Mutex) Key() string { return m.myKey } + +// Header is the response header received from etcd on acquiring the lock. 
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } + +type lockerMutex struct{ *Mutex } + +func (lm *lockerMutex) Lock() { + client := lm.s.Client() + if err := lm.Mutex.Lock(client.Ctx()); err != nil { + panic(err) + } +} + +func (lm *lockerMutex) Unlock() { + client := lm.s.Client() + if err := lm.Mutex.Unlock(client.Ctx()); err != nil { + panic(err) + } +} + +// NewLocker creates a sync.Locker backed by an etcd mutex. +func NewLocker(s *Session, pfx string) sync.Locker { + return &lockerMutex{NewMutex(s, pfx)} +} diff --git a/client/v3/concurrency/doc.go b/client_sdk/v3/concurrency/doc.go similarity index 100% rename from client/v3/concurrency/doc.go rename to client_sdk/v3/concurrency/doc.go diff --git a/client_sdk/v3/concurrency/election.go b/client_sdk/v3/concurrency/election.go new file mode 100644 index 00000000000..df7f21af7b5 --- /dev/null +++ b/client_sdk/v3/concurrency/election.go @@ -0,0 +1,239 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package concurrency + +import ( + "context" + "errors" + "fmt" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +var ( + ErrElectionNotLeader = errors.New("election: not leader") + ErrElectionNoLeader = errors.New("election: no leader") +) + +type Election struct { + session *Session + keyPrefix string + leaderKey string + leaderRev int64 + leaderSession *Session + hdr *pb.ResponseHeader +} + +// NewElection 返回给定关键字前缀上的新选举结果. +func NewElection(s *Session, pfx string) *Election { + return &Election{session: s, keyPrefix: pfx + "/"} +} + +// ResumeElection initializes an election with a known leader. +func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { + return &Election{ + keyPrefix: pfx, + session: s, + leaderKey: leaderKey, + leaderRev: leaderRev, + leaderSession: s, + } +} + +// Campaign 在前缀键上放置一个符合选举条件的值. +// 对于同一个前缀,多个会议可以参与选举,但一次只能有一个领导人. +// 如果context是'context. todo ()/context. background ()', Campaign将继续被阻塞,以便其他key被删除,除非etcd返回一个不可恢复的错误(例如ErrCompacted). +// 否则,直到上下文没有被取消或超时,Campaign将继续被阻塞,直到它成为leader. 
+func (e *Election) Campaign(ctx context.Context, val string) error { + s := e.session + client := e.session.Client() + + k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) + txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) + txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) + txn = txn.Else(v3.OpGet(k)) + resp, err := txn.Commit() + if err != nil { + return err + } + e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s + if !resp.Succeeded { + kv := resp.Responses[0].GetResponseRange().Kvs[0] + e.leaderRev = kv.CreateRevision + if kv.Value != val { + if err = e.Proclaim(ctx, val); err != nil { + e.Resign(ctx) + return err + } + } + } + + _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) + if err != nil { + // 在上下文取消的情况下清理 + select { + case <-ctx.Done(): + e.Resign(client.Ctx()) + default: + e.leaderSession = nil + } + return err + } + e.hdr = resp.Header + + return nil +} + +// Proclaim 让leader宣布一个新的值,而不需要一次选举. +func (e *Election) Proclaim(ctx context.Context, val string) error { + if e.leaderSession == nil { + return ErrElectionNotLeader + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + txn := client.Txn(ctx).If(cmp) + txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) + tresp, terr := txn.Commit() + if terr != nil { + return terr + } + if !tresp.Succeeded { + e.leaderKey = "" + return ErrElectionNotLeader + } + + e.hdr = tresp.Header + return nil +} + +// Resign lets a leader start a new election. 
+func (e *Election) Resign(ctx context.Context) (err error) { + if e.leaderSession == nil { + return nil + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() + if err == nil { + e.hdr = resp.Header + } + e.leaderKey = "" + e.leaderSession = nil + return err +} + +// Leader returns the leader value for the current election. +func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { + client := e.session.Client() + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return nil, err + } else if len(resp.Kvs) == 0 { + // no leader currently elected + return nil, ErrElectionNoLeader + } + return resp, nil +} + +// Observe 返回一个通道,该通道可靠地观察有序的leader proposal 作为响应 +func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { + retc := make(chan v3.GetResponse) + go e.observe(ctx, retc) + return retc +} + +// 观察 节点变更 +func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { + client := e.session.Client() + + defer close(ch) + for { + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return + } + + var kv *mvccpb.KeyValue + var hdr *pb.ResponseHeader + + if len(resp.Kvs) == 0 { + cctx, cancel := context.WithCancel(ctx) + // 等待在这个前缀更新第一个值wait for first key put on prefix + opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} + wch := client.Watch(cctx, e.keyPrefix, opts...) 
+ for kv == nil { + wr, ok := <-wch + if !ok || wr.Err() != nil { + cancel() + return + } + // only accept puts; a delete will make observe() spin + // 只接受put;删除操作将使observe()重试 + for _, ev := range wr.Events { + if ev.Type == mvccpb.PUT { + hdr, kv = &wr.Header, ev.Kv + // may have multiple revs; hdr.rev = the last rev + // set to kv's rev in case batch has multiple Puts + hdr.Revision = kv.ModRevision + break + } + } + } + cancel() + } else { + hdr, kv = resp.Header, resp.Kvs[0] + } + + select { + case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: + case <-ctx.Done(): + return + } + + cctx, cancel := context.WithCancel(ctx) + wch := client.Watch(cctx, kv.Key, v3.WithRev(hdr.Revision+1)) + keyDeleted := false + for !keyDeleted { + wr, ok := <-wch + if !ok { + cancel() + return + } + for _, ev := range wr.Events { + if ev.Type == mvccpb.DELETE { + keyDeleted = true + break + } + resp.Header = &wr.Header + resp.Kvs = []*mvccpb.KeyValue{ev.Kv} + select { + case ch <- *resp: + case <-cctx.Done(): + cancel() + return + } + } + } + cancel() + } +} + +func (e *Election) Key() string { return e.leaderKey } + +func (e *Election) Rev() int64 { return e.leaderRev } + +func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/client_sdk/v3/concurrency/key.go b/client_sdk/v3/concurrency/key.go new file mode 100644 index 00000000000..7e93827af5d --- /dev/null +++ b/client_sdk/v3/concurrency/key.go @@ -0,0 +1,70 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "fmt" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +// 从revision开始监听删除事件,因为revision存在,所以也避免了ABA问题 +func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + var wr v3.WatchResponse + wch := client.Watch(cctx, key, v3.WithRev(rev)) + for wr = range wch { + for _, ev := range wr.Events { + // 遇到删除事件才返回 + if ev.Type == mvccpb.DELETE { + return nil + } + } + } + if err := wr.Err(); err != nil { + return err + } + if err := ctx.Err(); err != nil { + return err + } + return fmt.Errorf("lost watcher waiting for delete") +} + +// 等待持有锁的key删除 +// 内部实现为等其他所有比当前createRevision小的key,监听删除事件 +func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { + // WithLastCreate 按照CreateRevision排序,降序 例如 5 4 3 2 1 + // WithMaxCreateRev 获取比maxCreateRev小的key + getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) + for { + resp, err := client.Get(ctx, pfx, getOpts...) 
+ if err != nil { + return nil, err + } + if len(resp.Kvs) == 0 { + return resp.Header, nil + } + lastKey := resp.Kvs[0].Key + // 等待该目录前缀下的所有k都被删掉 + if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { + return nil, err + } + } +} diff --git a/client/v3/concurrency/session.go b/client_sdk/v3/concurrency/session.go similarity index 79% rename from client/v3/concurrency/session.go rename to client_sdk/v3/concurrency/session.go index 8838b77e2d7..19d91aa5460 100644 --- a/client/v3/concurrency/session.go +++ b/client_sdk/v3/concurrency/session.go @@ -18,15 +18,15 @@ import ( "context" "time" - "go.uber.org/zap" - - v3 "go.etcd.io/etcd/client/v3" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" ) const defaultSessionTTL = 60 // Session represents a lease kept alive for the lifetime of a client. // Fault-tolerant applications may use sessions to reason about liveness. +// 会话表示在客户端的生存期内保持活动的租约. +// 应用程序可能会使用会话来解释活动性. type Session struct { client *v3.Client opts *sessionOptions @@ -37,11 +37,11 @@ type Session struct { } // NewSession gets the leased session for a client. 
+// 抽象出了一个session对象来持续保持租约不过期 func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { - lg := client.GetLogger() ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} for _, opt := range opts { - opt(ops, lg) + opt(ops) } id := ops.leaseID @@ -54,6 +54,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { } ctx, cancel := context.WithCancel(ops.ctx) + // 保证锁,在线程的活动期间,实现锁的的续租 keepAlive, err := client.KeepAlive(ctx, id) if err != nil || keepAlive == nil { cancel() @@ -63,15 +64,19 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { donec := make(chan struct{}) s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} - // keep the lease alive until client error or cancelled context + // 在客户端错误或取消上下文之前保持租约的活动状态 go func() { defer close(donec) for range keepAlive { - // eat messages until keep alive channel closes + // 在保持活动频道关闭前接收信息 } }() return s, nil + // 1、多个请求来前抢占锁,通过Revision来判断锁的先后顺序; + // 2、如果有比当前key的Revision小的Revision存在,说明有key已经获得了锁; + // 3、等待直到前面的key被删除,然后自己就获得了锁. + // 通过etcd实现的锁,直接包含了锁的续租,如果使用Redis还要自己去实现,相比较使用更简单. } // Client is the etcd client that is attached to the session. @@ -111,16 +116,14 @@ type sessionOptions struct { } // SessionOption configures Session. -type SessionOption func(*sessionOptions, *zap.Logger) +type SessionOption func(*sessionOptions) // WithTTL configures the session's TTL in seconds. // If TTL is <= 0, the default 60 seconds TTL will be used. func WithTTL(ttl int) SessionOption { - return func(so *sessionOptions, lg *zap.Logger) { + return func(so *sessionOptions) { if ttl > 0 { so.ttl = ttl - } else { - lg.Warn("WithTTL(): TTL should be > 0, preserving current TTL", zap.Int64("current-session-ttl", int64(so.ttl))) } } } @@ -129,7 +132,7 @@ func WithTTL(ttl int) SessionOption { // This is useful in process restart scenario, for example, to reclaim // leadership from an election prior to restart. 
func WithLease(leaseID v3.LeaseID) SessionOption { - return func(so *sessionOptions, _ *zap.Logger) { + return func(so *sessionOptions) { so.leaseID = leaseID } } @@ -140,7 +143,7 @@ func WithLease(leaseID v3.LeaseID) SessionOption { // context is canceled before Close() completes, the session's lease will be // abandoned and left to expire instead of being revoked. func WithContext(ctx context.Context) SessionOption { - return func(so *sessionOptions, _ *zap.Logger) { + return func(so *sessionOptions) { so.ctx = ctx } } diff --git a/client/v3/concurrency/stm.go b/client_sdk/v3/concurrency/stm.go similarity index 98% rename from client/v3/concurrency/stm.go rename to client_sdk/v3/concurrency/stm.go index ba7303d0977..319856a4e6c 100644 --- a/client/v3/concurrency/stm.go +++ b/client_sdk/v3/concurrency/stm.go @@ -18,7 +18,7 @@ import ( "context" "math" - v3 "go.etcd.io/etcd/client/v3" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" ) // STM is an interface for software transactional memory. @@ -33,7 +33,7 @@ type STM interface { // Del deletes a key. Del(key string) - // commit attempts to apply the txn's changes to the server. + // commit attempts to apply the txn's changes to the etcd. commit() *v3.TxnResponse reset() } diff --git a/client_sdk/v3/config.go b/client_sdk/v3/config.go new file mode 100644 index 00000000000..e801d48e8b5 --- /dev/null +++ b/client_sdk/v3/config.go @@ -0,0 +1,54 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "crypto/tls" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type Config struct { + Endpoints []string `json:"endpoints"` // etcd client --> etcd 的地址 + AutoSyncInterval time.Duration `json:"auto-sync-interval"` // 是用其最新成员更新端点的时间间隔.0禁止自动同步.默认情况下自动同步被禁用. + DialTimeout time.Duration `json:"dial-timeout"` // 建立链接的超时时间 + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` // client 向服务端发送发包确保链接存活 + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` // 长时间没有接收到响应关闭链接 + MaxCallSendMsgSize int // 默认2MB + MaxCallRecvMsgSize int + TLS *tls.Config // 客户端sdk证书 + Username string `json:"username"` + Password string `json:"password"` + RejectOldCluster bool `json:"reject-old-cluster"` // 是否拒绝老版本服务器 + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + // For example, pass "grpc.WithBlock()" to block until the underlying connection is up. + // Without this, Dial returns immediately and connecting the etcd happens in background. + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context + Logger *zap.Logger + LogConfig *zap.Config + + // PermitWithoutStream when set will allow client to send keepalive pings to etcd without any active streams(RPCs). 
+ PermitWithoutStream bool `json:"permit-without-stream"` + + // TODO: support custom balancer picker +} diff --git a/client/v3/credentials/credentials.go b/client_sdk/v3/credentials/credentials.go similarity index 94% rename from client/v3/credentials/credentials.go rename to client_sdk/v3/credentials/credentials.go index 024c16b6048..a724a08c83e 100644 --- a/client/v3/credentials/credentials.go +++ b/client_sdk/v3/credentials/credentials.go @@ -22,23 +22,21 @@ import ( "net" "sync" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" grpccredentials "google.golang.org/grpc/credentials" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) -// Config defines gRPC credential configuration. type Config struct { TLSConfig *tls.Config } -// Bundle defines gRPC credential interface. +// Bundle grpc认证接口 type Bundle interface { grpccredentials.Bundle UpdateAuthToken(token string) } -// NewBundle constructs a new gRPC credential bundle. +// NewBundle 构造一个新的gRPC凭据包. func NewBundle(cfg Config) Bundle { return &bundle{ tc: newTransportCredential(cfg.TLSConfig), diff --git a/client/v3/ctx.go b/client_sdk/v3/ctx.go similarity index 94% rename from client/v3/ctx.go rename to client_sdk/v3/ctx.go index 38cee6c27e4..6ea8e05fbfb 100644 --- a/client/v3/ctx.go +++ b/client_sdk/v3/ctx.go @@ -17,10 +17,9 @@ package clientv3 import ( "context" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" "google.golang.org/grpc/metadata" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/api/v3/version" ) // WithRequireLeader requires client requests to only succeed diff --git a/client/v3/experimental/recipes/barrier.go b/client_sdk/v3/experimental/recipes/barrier.go similarity index 94% rename from client/v3/experimental/recipes/barrier.go rename to client_sdk/v3/experimental/recipes/barrier.go index 7e950a3e385..4fdfc8bc439 100644 --- a/client/v3/experimental/recipes/barrier.go +++ 
b/client_sdk/v3/experimental/recipes/barrier.go @@ -17,8 +17,8 @@ package recipe import ( "context" - "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" ) // Barrier creates a key in etcd to block processes, then deletes the key to diff --git a/client_sdk/v3/experimental/recipes/client.go b/client_sdk/v3/experimental/recipes/client.go new file mode 100644 index 00000000000..976a493f3f8 --- /dev/null +++ b/client_sdk/v3/experimental/recipes/client.go @@ -0,0 +1,55 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package recipe + +import ( + "context" + "errors" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + spb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" +) + +var ( + ErrKeyExists = errors.New("key already exists") + ErrWaitMismatch = errors.New("unexpected wait result") + ErrTooManyClients = errors.New("too many clients") + ErrNoWatcher = errors.New("no watcher channel") +) + +// deleteRevKey deletes a key by revision, returning false if key is missing +func deleteRevKey(kv v3.KV, key string, rev int64) (bool, error) { + cmp := v3.Compare(v3.ModRevision(key), "=", rev) + req := v3.OpDelete(key) + txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit() + if err != nil { + return false, err + } else if !txnresp.Succeeded { + return false, nil + } + return true, nil +} + +func claimFirstKey(kv v3.KV, kvs []*spb.KeyValue) (*spb.KeyValue, error) { + for _, k := range kvs { + ok, err := deleteRevKey(kv, string(k.Key), k.ModRevision) + if err != nil { + return nil, err + } else if ok { + return k, nil + } + } + return nil, nil +} diff --git a/client/v3/experimental/recipes/doc.go b/client_sdk/v3/experimental/recipes/doc.go similarity index 100% rename from client/v3/experimental/recipes/doc.go rename to client_sdk/v3/experimental/recipes/doc.go diff --git a/client_sdk/v3/experimental/recipes/double_barrier.go b/client_sdk/v3/experimental/recipes/double_barrier.go new file mode 100644 index 00000000000..f087eec3da1 --- /dev/null +++ b/client_sdk/v3/experimental/recipes/double_barrier.go @@ -0,0 +1,139 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package recipe + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" +) + +// DoubleBarrier blocks processes on Enter until an expected count enters, then +// blocks again on Leave until all processes have left. +type DoubleBarrier struct { + s *concurrency.Session + ctx context.Context + + key string // key for the collective barrier + count int + myKey *EphemeralKV // current key for this process on the barrier +} + +func NewDoubleBarrier(s *concurrency.Session, key string, count int) *DoubleBarrier { + return &DoubleBarrier{ + s: s, + ctx: context.TODO(), + key: key, + count: count, + } +} + +// Enter waits for "count" processes to enter the barrier then returns +func (b *DoubleBarrier) Enter() error { + client := b.s.Client() + ek, err := newUniqueEphemeralKey(b.s, b.key+"/waiters") + if err != nil { + return err + } + b.myKey = ek + + resp, err := client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix()) + if err != nil { + return err + } + + if len(resp.Kvs) > b.count { + return ErrTooManyClients + } + + if len(resp.Kvs) == b.count { + // unblock waiters + _, err = client.Put(b.ctx, b.key+"/ready", "") + return err + } + + _, err = WaitEvents( + client, + b.key+"/ready", + ek.Revision(), + []mvccpb.Event_EventType{mvccpb.PUT}) + return err +} + +// Leave waits for "count" processes to leave the barrier then returns +func (b *DoubleBarrier) Leave() error { + client := b.s.Client() + resp, err := 
client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix()) + if err != nil { + return err + } + if len(resp.Kvs) == 0 { + return nil + } + + lowest, highest := resp.Kvs[0], resp.Kvs[0] + for _, k := range resp.Kvs { + if k.ModRevision < lowest.ModRevision { + lowest = k + } + if k.ModRevision > highest.ModRevision { + highest = k + } + } + isLowest := string(lowest.Key) == b.myKey.Key() + + if len(resp.Kvs) == 1 { + // this is the only node in the barrier; finish up + if _, err = client.Delete(b.ctx, b.key+"/ready"); err != nil { + return err + } + return b.myKey.Delete() + } + + // this ensures that if a process fails, the ephemeral lease will be + // revoked, its barrier key is removed, and the barrier can resume + + // lowest process in node => wait on highest process + if isLowest { + _, err = WaitEvents( + client, + string(highest.Key), + highest.ModRevision, + []mvccpb.Event_EventType{mvccpb.DELETE}) + if err != nil { + return err + } + return b.Leave() + } + + // delete self and wait on lowest process + if err = b.myKey.Delete(); err != nil { + return err + } + + key := string(lowest.Key) + _, err = WaitEvents( + client, + key, + lowest.ModRevision, + []mvccpb.Event_EventType{mvccpb.DELETE}) + if err != nil { + return err + } + return b.Leave() +} diff --git a/client/v3/experimental/recipes/grpc_gateway/user_add.sh b/client_sdk/v3/experimental/recipes/grpc_gateway/user_add.sh similarity index 100% rename from client/v3/experimental/recipes/grpc_gateway/user_add.sh rename to client_sdk/v3/experimental/recipes/grpc_gateway/user_add.sh diff --git a/client_sdk/v3/experimental/recipes/key.go b/client_sdk/v3/experimental/recipes/key.go new file mode 100644 index 00000000000..6206b8c22fa --- /dev/null +++ b/client_sdk/v3/experimental/recipes/key.go @@ -0,0 +1,166 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package recipe + +import ( + "context" + "fmt" + "strings" + "time" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" +) + +// RemoteKV is a key/revision pair created by the client and stored on etcd +type RemoteKV struct { + kv v3.KV + key string + rev int64 + val string +} + +func newKey(kv v3.KV, key string, leaseID v3.LeaseID) (*RemoteKV, error) { + return newKV(kv, key, "", leaseID) +} + +func newKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (*RemoteKV, error) { + rev, err := putNewKV(kv, key, val, leaseID) + if err != nil { + return nil, err + } + return &RemoteKV{kv, key, rev, val}, nil +} + +func newUniqueKV(kv v3.KV, prefix string, val string) (*RemoteKV, error) { + for { + // 创建对应的key + + newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano()) + rev, err := putNewKV(kv, newKey, val, v3.NoLease) + if err == nil { + return &RemoteKV{kv, newKey, rev, val}, nil + } + // 如果之前已经创建了,就返回 + if err != ErrKeyExists { + return nil, err + } + } +} + +// putNewKV +// 只有在没有创建的时候才能创建成功 +func putNewKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (int64, error) { + cmp := v3.Compare(v3.Version(key), "=", 0) + req := v3.OpPut(key, val, v3.WithLease(leaseID)) + txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit() + if err != nil { + return 0, err + } + if !txnresp.Succeeded { + return 0, ErrKeyExists + } + return txnresp.Header.Revision, nil +} + +// newSequentialKV allocates a new sequential key /nnnnn with a given +// prefix and value. 
Note: a bookkeeping node __ is also allocated. +func newSequentialKV(kv v3.KV, prefix, val string) (*RemoteKV, error) { + resp, err := kv.Get(context.TODO(), prefix, v3.WithLastKey()...) + if err != nil { + return nil, err + } + + // add 1 to last key, if any + newSeqNum := 0 + if len(resp.Kvs) != 0 { + fields := strings.Split(string(resp.Kvs[0].Key), "/") + _, serr := fmt.Sscanf(fields[len(fields)-1], "%d", &newSeqNum) + if serr != nil { + return nil, serr + } + newSeqNum++ + } + newKey := fmt.Sprintf("%s/%016d", prefix, newSeqNum) + + // base prefix key必须是current (i.e., <=) with the etcd update; + // the base key is important to avoid the following: + // N1: LastKey() == 1, start txn. + // N2: new Key 2, new Key 3, Delete Key 2 + // N1: txn succeeds allocating key 2 when it shouldn't + baseKey := "__" + prefix + + // current revision might contain modification so +1 + cmp := v3.Compare(v3.ModRevision(baseKey), "<", resp.Header.Revision+1) + reqPrefix := v3.OpPut(baseKey, "") + reqnewKey := v3.OpPut(newKey, val) + + txn := kv.Txn(context.TODO()) + txnresp, err := txn.If(cmp).Then(reqPrefix, reqnewKey).Commit() + if err != nil { + return nil, err + } + if !txnresp.Succeeded { + return newSequentialKV(kv, prefix, val) + } + return &RemoteKV{kv, newKey, txnresp.Header.Revision, val}, nil +} + +func (rk *RemoteKV) Key() string { return rk.key } +func (rk *RemoteKV) Revision() int64 { return rk.rev } +func (rk *RemoteKV) Value() string { return rk.val } + +func (rk *RemoteKV) Delete() error { + if rk.kv == nil { + return nil + } + _, err := rk.kv.Delete(context.TODO(), rk.key) + rk.kv = nil + return err +} + +func (rk *RemoteKV) Put(val string) error { + _, err := rk.kv.Put(context.TODO(), rk.key, val) + return err +} + +// EphemeralKV is a new key associated with a session lease +type EphemeralKV struct{ RemoteKV } + +// newEphemeralKV creates a new key/value pair associated with a session lease +func newEphemeralKV(s *concurrency.Session, key, val string) 
(*EphemeralKV, error) { + k, err := newKV(s.Client(), key, val, s.Lease()) + if err != nil { + return nil, err + } + return &EphemeralKV{*k}, nil +} + +// newUniqueEphemeralKey creates a new unique valueless key associated with a session lease +func newUniqueEphemeralKey(s *concurrency.Session, prefix string) (*EphemeralKV, error) { + return newUniqueEphemeralKV(s, prefix, "") +} + +// newUniqueEphemeralKV creates a new unique key/value pair associated with a session lease +func newUniqueEphemeralKV(s *concurrency.Session, prefix, val string) (ek *EphemeralKV, err error) { + for { + newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano()) + ek, err = newEphemeralKV(s, newKey, val) + if err == nil || err != ErrKeyExists { + break + } + } + return ek, err +} diff --git a/client/v3/experimental/recipes/priority_queue.go b/client_sdk/v3/experimental/recipes/priority_queue.go similarity index 95% rename from client/v3/experimental/recipes/priority_queue.go rename to client_sdk/v3/experimental/recipes/priority_queue.go index 1b26067466f..1837310c0be 100644 --- a/client/v3/experimental/recipes/priority_queue.go +++ b/client_sdk/v3/experimental/recipes/priority_queue.go @@ -18,8 +18,8 @@ import ( "context" "fmt" - "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" ) // PriorityQueue implements a multi-reader, multi-writer distributed queue. diff --git a/client_sdk/v3/experimental/recipes/queue.go b/client_sdk/v3/experimental/recipes/queue.go new file mode 100644 index 00000000000..0786471bcc3 --- /dev/null +++ b/client_sdk/v3/experimental/recipes/queue.go @@ -0,0 +1,77 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package recipe + +import ( + "context" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" +) + +// Queue implements a multi-reader, multi-writer distributed queue. +type Queue struct { + client *v3.Client + ctx context.Context + + keyPrefix string +} + +func NewQueue(client *v3.Client, keyPrefix string) *Queue { + return &Queue{client, context.TODO(), keyPrefix} +} + +func (q *Queue) Enqueue(val string) error { + _, err := newUniqueKV(q.client, q.keyPrefix, val) + return err +} + +// Dequeue 处理的是一个先进新出的队列 +// 如果队列为空,Dequeue将会阻塞直到里面有值塞入 +func (q *Queue) Dequeue() (string, error) { + // TODO: fewer round trips by fetching more than one key + resp, err := q.client.Get(q.ctx, q.keyPrefix, v3.WithFirstRev()...) 
+ if err != nil { + return "", err + } + + kv, err := claimFirstKey(q.client, resp.Kvs) + if err != nil { + return "", err + } else if kv != nil { + return string(kv.Value), nil + } else if resp.More { + // missed some items, retry to read in more + return q.Dequeue() + } + + // nothing yet; wait on elements + ev, err := WaitPrefixEvents( + q.client, + q.keyPrefix, + resp.Header.Revision, + []mvccpb.Event_EventType{mvccpb.PUT}) + if err != nil { + return "", err + } + + ok, err := deleteRevKey(q.client, string(ev.Kv.Key), ev.Kv.ModRevision) + if err != nil { + return "", err + } else if !ok { + return q.Dequeue() + } + return string(ev.Kv.Value), err +} diff --git a/client/v3/experimental/recipes/rwmutex.go b/client_sdk/v3/experimental/recipes/rwmutex.go similarity index 93% rename from client/v3/experimental/recipes/rwmutex.go rename to client_sdk/v3/experimental/recipes/rwmutex.go index 9f520baf48b..c848373cca8 100644 --- a/client/v3/experimental/recipes/rwmutex.go +++ b/client_sdk/v3/experimental/recipes/rwmutex.go @@ -17,9 +17,9 @@ package recipe import ( "context" - "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" ) type RWMutex struct { diff --git a/client_sdk/v3/experimental/recipes/watch.go b/client_sdk/v3/experimental/recipes/watch.go new file mode 100644 index 00000000000..222d2a75498 --- /dev/null +++ b/client_sdk/v3/experimental/recipes/watch.go @@ -0,0 +1,58 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package recipe + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" +) + +// WaitEvents waits on a key until it observes the given events and returns the final one. +func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wc := c.Watch(ctx, key, clientv3.WithRev(rev)) + if wc == nil { + return nil, ErrNoWatcher + } + return waitEvents(wc, evs), nil +} + +func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev)) + if wc == nil { + return nil, ErrNoWatcher + } + return waitEvents(wc, evs), nil +} + +func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event { + i := 0 + for wresp := range wc { + for _, ev := range wresp.Events { + if ev.Type == evs[i] { + i++ + if i == len(evs) { + return ev + } + } + } + } + return nil +} diff --git a/client_sdk/v3/internal/endpoint/over_endpoint.go b/client_sdk/v3/internal/endpoint/over_endpoint.go new file mode 100644 index 00000000000..34bd7beae13 --- /dev/null +++ b/client_sdk/v3/internal/endpoint/over_endpoint.go @@ -0,0 +1,136 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package endpoint + +import ( + "fmt" + "net" + "net/url" + "path" + "strings" +) + +type CredsRequirement int + +const ( + // CREDS_REQUIRE - Credentials/certificate required for thi type of connection. + CREDS_REQUIRE CredsRequirement = iota + // CREDS_DROP - Credentials/certificate not needed and should get ignored. + CREDS_DROP + // CREDS_OPTIONAL - Credentials/certificate might be used if supplied + CREDS_OPTIONAL +) + +func extractHostFromHostPort(ep string) string { + host, _, err := net.SplitHostPort(ep) + if err != nil { + return ep + } + return host +} + +func extractHostFromPath(pathStr string) string { + return extractHostFromHostPort(path.Base(pathStr)) +} + +// mustSplit2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func mustSplit2(s, sep string) (string, string) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + panic(fmt.Errorf("token '%v' expected to have separator sep: `%v`", s, sep)) + } + return spl[0], spl[1] +} + +func schemeToCredsRequirement(schema string) CredsRequirement { + switch schema { + case "https", "unixs": + return CREDS_REQUIRE + case "http": + return CREDS_DROP + case "unix": + // Preserving previous behavior from: + // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212 + // that likely was a bug due to missing 'fallthrough'. 
+ // At the same time it seems legit to let the users decide whether they + // want credential control or not (and 'unixs' schema is not a standard thing). + return CREDS_OPTIONAL + case "": + return CREDS_OPTIONAL + default: + return CREDS_OPTIONAL + } +} + +// This function translates endpoints names supported by etcd etcd into +// endpoints as supported by grpc with additional information +// (server_name for cert validation, requireCreds - whether certs are needed). +// The main differences: +// - etcd supports unixs & https names as opposed to unix & http to +// distinguish need to configure certificates. +// - etcd support http(s) names as opposed to tcp supported by grpc/dial method. +// - etcd supports unix(s)://local-file naming schema +// (as opposed to unix:local-file canonical name used by grpc for current dir files). +// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon) +// is considered serverName - to allow local testing of cert-protected communication. 
+// See more: +// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47 +// - https://golang.org/pkg/net/#Dial +// - https://github.com/grpc/grpc/blob/master/doc/naming.md +func translateEndpoint(ep string) (addr string, serverName string, requireCreds CredsRequirement) { + if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") { + if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") { + // absolute path case + schema, absolutePath := mustSplit2(ep, "://") + return "unix://" + absolutePath, extractHostFromPath(absolutePath), schemeToCredsRequirement(schema) + } + if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") { + // legacy etcd local path + schema, localPath := mustSplit2(ep, "://") + return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) + } + schema, localPath := mustSplit2(ep, ":") + return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema) + } + + if strings.Contains(ep, "://") { + url, err := url.Parse(ep) + if err != nil { + return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL + } + if url.Scheme == "http" || url.Scheme == "https" { + return url.Host, url.Hostname(), schemeToCredsRequirement(url.Scheme) + } + return ep, url.Hostname(), schemeToCredsRequirement(url.Scheme) + } + // Handles plain addresses like 10.0.0.44:437. + return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL +} + +// RequiresCredentials 127.0.0.1:2379 +func RequiresCredentials(ep string) CredsRequirement { + _, _, requireCreds := translateEndpoint(ep) + return requireCreds +} + +// Interpret endpoint parses an endpoint of the form +// (http|https)://*|(unix|unixs)://) +// and returns low-level address (supported by 'net') to connect to, +// and a etcd name used for x509 certificate matching. 
+func Interpret(ep string) (address string, serverName string) { + addr, serverName, _ := translateEndpoint(ep) + return addr, serverName +} diff --git a/client_sdk/v3/internal/resolver/resolver.go b/client_sdk/v3/internal/resolver/resolver.go new file mode 100644 index 00000000000..63d68ad5dcf --- /dev/null +++ b/client_sdk/v3/internal/resolver/resolver.go @@ -0,0 +1,71 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resolver + +import ( + "github.com/ls-2018/etcd_cn/client_sdk/v3/internal/endpoint" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" +) + +const ( + Schema = "etcd-endpoints" +) + +type EtcdManualResolver struct { + *manual.Resolver + endpoints []string + serviceConfig *serviceconfig.ParseResult +} + +func New(endpoints ...string) *EtcdManualResolver { + r := manual.NewBuilderWithScheme(Schema) // etcd-endpoints + return &EtcdManualResolver{Resolver: r, endpoints: endpoints, serviceConfig: nil} +} + +func (r *EtcdManualResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.serviceConfig = cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`) + if r.serviceConfig.Err != nil { + return nil, r.serviceConfig.Err + } + res, err := r.Resolver.Build(target, cc, opts) + if err != nil { + return nil, err + } + // 将存储在r中的端点填充到ClientConn (cc)中. 
+ r.updateState() + return res, nil +} + +func (r *EtcdManualResolver) SetEndpoints(endpoints []string) { + r.endpoints = endpoints + r.updateState() +} + +func (r EtcdManualResolver) updateState() { + if r.CC != nil { + addresses := make([]resolver.Address, len(r.endpoints)) + for i, ep := range r.endpoints { + addr, serverName := endpoint.Interpret(ep) + addresses[i] = resolver.Address{Addr: addr, ServerName: serverName} + } + state := resolver.State{ + Addresses: addresses, + ServiceConfig: r.serviceConfig, + } + r.UpdateState(state) + } +} diff --git a/client_sdk/v3/kv.go b/client_sdk/v3/kv.go new file mode 100644 index 00000000000..0563fcb2651 --- /dev/null +++ b/client_sdk/v3/kv.go @@ -0,0 +1,156 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "context" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + CompactResponse pb.CompactionResponse + PutResponse pb.PutResponse + GetResponse pb.RangeResponse + DeleteResponse pb.DeleteRangeResponse + TxnResponse pb.TxnResponse +) + +type KV interface { + Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) + Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) + Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) + Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) + Do(ctx context.Context, op Op) (OpResponse, error) + Txn(ctx context.Context) Txn +} + +type OpResponse struct { + put *PutResponse + get *GetResponse + del *DeleteResponse + txn *TxnResponse +} + +func (op OpResponse) Put() *PutResponse { return op.put } +func (op OpResponse) Get() *GetResponse { return op.get } +func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} + +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} + +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} + +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} + +type kv struct { + remote pb.KVClient + callOpts []grpc.CallOption +} + +func NewKV(c *Client) KV { + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { + r, err := kv.Do(ctx, OpPut(key, val, opts...)) + return r.put, 
toErr(ctx, err) +} + +// Get etcdctl get +func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { + r, err := kv.Do(ctx, OpGet(key, opts...)) + return r.get, toErr(ctx, err) +} + +func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { + r, err := kv.Do(ctx, OpDelete(key, opts...)) + return r.del, toErr(ctx, err) +} + +func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*CompactResponse)(resp), err +} + +func (kv *kv) Txn(ctx context.Context) Txn { + return &txn{ + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, + } +} + +func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + var err error + switch op.t { + case tRange: + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) + if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + case tPut: + var resp *pb.PutResponse + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{put: (*PutResponse)(resp)}, nil + } + case tDeleteRange: + var resp *pb.DeleteRangeResponse + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{del: (*DeleteResponse)(resp)}, nil + } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } + default: + panic("未知的操作") + } + return OpResponse{}, toErr(ctx, err) +} diff --git a/client_sdk/v3/lease.go b/client_sdk/v3/lease.go new file mode 100644 index 00000000000..b4ff8bf4f8d --- /dev/null +++ b/client_sdk/v3/lease.go @@ -0,0 +1,570 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type ( + LeaseRevokeResponse pb.LeaseRevokeResponse + LeaseID int64 +) + +type LeaseGrantResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 + Error string +} + +type LeaseKeepAliveResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 +} + +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. 
+ Keys [][]byte `json:"keys"` +} + +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + +type LeaseLeasesResponse struct { + *pb.ResponseHeader + Leases []LeaseStatus `json:"leases"` +} + +const ( + // defaultTTL is the assumed lease TTL used for the first keepalive + // deadline before the actual TTL is known to the client. + defaultTTL = 5 * time.Second + // NoLease is a lease ID for the absence of a lease. + NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond +) + +// LeaseResponseChSize is the size of buffer to store unsent lease responses. +// WARNING: DO NOT UPDATE. +// Only for testing purposes. +var LeaseResponseChSize = 16 + +// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. +// +// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. +type ErrKeepAliveHalted struct { + Reason error +} + +func (e ErrKeepAliveHalted) Error() string { + s := "etcdclient: leases keep alive halted" + if e.Reason != nil { + s += ": " + e.Reason.Error() + } + return s +} + +type Lease interface { + Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) + Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + Leases(ctx context.Context) (*LeaseLeasesResponse, error) + KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) + KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) + Close() error +} + +type lessor struct { + mu sync.Mutex // guards all fields + + // donec is closed and loopErr is set when recvKeepAliveLoop stops + donec chan struct{} + loopErr error + + remote pb.LeaseClient + + stream pb.Lease_LeaseKeepAliveClient + streamCancel context.CancelFunc + + stopCtx 
context.Context + stopCancel context.CancelFunc + + keepAlives map[LeaseID]*keepAlive + + // firstKeepAliveTimeout is the timeout for the first keepalive request + // before the actual TTL is known to the lease client + firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. + firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +type keepAlive struct { + chs []chan<- *LeaseKeepAliveResponse + ctxs []context.Context + // deadline is the time the keep alive channels close if no response + deadline time.Time + // nextKeepAlive is when to send the next keep alive message + nextKeepAlive time.Time + // donec is closed on lease revoke, expiration, or cancel. + donec chan struct{} +} + +func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { + l := &lessor{ + donec: make(chan struct{}), + keepAlives: make(map[LeaseID]*keepAlive), + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, + lg: c.lg, + } + if l.firstKeepAliveTimeout == time.Second { + l.firstKeepAliveTimeout = defaultTTL + } + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + return l +} + +func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { + r := &pb.LeaseGrantRequest{TTL: ttl} + fmt.Println("lease--->:", *r) + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) 
+ if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, + } + return gresp, nil +} + +func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { + resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) 
+ if err == nil { + leases := make([]LeaseStatus, len(resp.Leases)) + for i := range resp.Leases { + leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} + } + return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil + } + return nil, toErr(ctx, err) +} + +// KeepAlive 尝试保持给定的租约永久alive +func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { + ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) + + l.mu.Lock() + // ensure that recvKeepAliveLoop is still running + select { + case <-l.donec: + err := l.loopErr + l.mu.Unlock() + close(ch) + return ch, ErrKeepAliveHalted{Reason: err} + default: + } + ka, ok := l.keepAlives[id] + if !ok { + // create fresh keep alive + ka = &keepAlive{ + chs: []chan<- *LeaseKeepAliveResponse{ch}, + ctxs: []context.Context{ctx}, + deadline: time.Now().Add(l.firstKeepAliveTimeout), + nextKeepAlive: time.Now(), + donec: make(chan struct{}), + } + l.keepAlives[id] = ka + } else { + // add channel and context to existing keep alive + ka.ctxs = append(ka.ctxs, ctx) + ka.chs = append(ka.chs, ch) + } + l.mu.Unlock() + + go l.keepAliveCtxCloser(ctx, id, ka.donec) + l.firstKeepAliveOnce.Do(func() { + // 500毫秒一次,不断的发送保持活动请求 + go l.recvKeepAliveLoop() + // 删除等待太久没反馈的租约 + go l.deadlineLoop() + }) + + return ch, nil +} + +func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + for { + resp, err := l.keepAliveOnce(ctx, id) + if err == nil { + if resp.TTL <= 0 { + err = rpctypes.ErrLeaseNotFound + } + return resp, err + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } + } +} + +func (l *lessor) Close() error { + l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) + <-l.donec + return nil +} + +func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { + select { + case <-donec: + return + 
case <-l.donec: + return + case <-ctx.Done(): + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[id] + if !ok { + return + } + + // close channel and remove context if still associated with keep alive + for i, c := range ka.ctxs { + if c == ctx { + close(ka.chs[i]) + ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) + ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) + break + } + } + // remove if no one more listeners + if len(ka.chs) == 0 { + delete(l.keepAlives, id) + } +} + +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. +func (l *lessor) closeRequireLeader() { + l.mu.Lock() + defer l.mu.Unlock() + for _, ka := range l.keepAlives { + reqIdxs := 0 + // find all required leader channels, close, mark as nil + for i, ctx := range ka.ctxs { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + continue + } + ks := md[rpctypes.MetadataRequireLeaderKey] + if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { + continue + } + close(ka.chs[i]) + ka.chs[i] = nil + reqIdxs++ + } + if reqIdxs == 0 { + continue + } + // remove all channels that required a leader from keepalive + newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) + newCtxs := make([]context.Context, len(newChs)) + newIdx := 0 + for i := range ka.chs { + if ka.chs[i] == nil { + continue + } + newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] + newIdx++ + } + ka.chs, ka.ctxs = newChs, newCtxs + } +} + +func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) 
+ if err != nil { + return nil, toErr(ctx, err) + } + + err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) + if err != nil { + return nil, toErr(ctx, err) + } + + resp, rerr := stream.Recv() + if rerr != nil { + return nil, toErr(ctx, rerr) + } + + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + return karesp, nil +} + +func (l *lessor) recvKeepAliveLoop() (gerr error) { + defer func() { + l.mu.Lock() + close(l.donec) + l.loopErr = gerr + for _, ka := range l.keepAlives { + ka.close() + } + l.keepAlives = make(map[LeaseID]*keepAlive) + l.mu.Unlock() + }() + + for { + stream, err := l.resetRecv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + } else { + for { + // 打开一个新的lease stream并开始发送保持活动请求. + resp, err := stream.Recv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + // 根据LeaseKeepAliveResponse更新租约 + // 如果租约过期删除所有alive channels + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): + case <-l.stopCtx.Done(): + return l.stopCtx.Err() + } + } +} + +// resetRecv opens a new lease stream and starts sending keep alive requests. +// 打开一个新的lease stream并开始发送保持活动请求. +func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { + sctx, cancel := context.WithCancel(l.stopCtx) + // 建立服务端和客户端连接的lease stream + stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
+ if err != nil { + cancel() + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stream != nil && l.streamCancel != nil { + l.streamCancel() + } + + l.streamCancel = cancel + l.stream = stream + + go l.sendKeepAliveLoop(stream) + return stream, nil +} + +// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse +func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[karesp.ID] + if !ok { + return + } + + if karesp.TTL <= 0 { + // lease expired; close all keep alive channels + delete(l.keepAlives, karesp.ID) + ka.close() + return + } + + // send update to all channels + nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) + ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) + for _, ch := range ka.chs { + select { + case ch <- karesp: + default: + if l.lg != nil { + l.lg.Warn("lease keepalive response queue is full; dropping response send", + zap.Int("queue-size", len(ch)), + zap.Int("queue-capacity", cap(ch)), + ) + } + } + // still advance in order to rate-limit keep-alive sends + ka.nextKeepAlive = nextKeepAlive + } +} + +// deadlineLoop reaps any keep alive channels that have not received a response +// within the lease TTL +// 获取在租约TTL中没有收到响应的任何保持活动的通道 +func (l *lessor) deadlineLoop() { + for { + select { + case <-time.After(time.Second): + case <-l.donec: + return + } + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.deadline.Before(now) { + // 等待响应太久;租约可能已过期 + // waited too long for response; lease may be expired + ka.close() + delete(l.keepAlives, id) + } + } + l.mu.Unlock() + } +} + +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. 
+// 在给定流的生命周期内发送保持活动请求 +func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { + for { + var tosend []LeaseID + + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.nextKeepAlive.Before(now) { + tosend = append(tosend, id) + } + } + l.mu.Unlock() + + for _, id := range tosend { + r := &pb.LeaseKeepAliveRequest{ID: int64(id)} + if err := stream.Send(r); err != nil { + // TODO do something with this error? + return + } + } + + select { + case <-time.After(retryConnWait): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } + } +} + +func (ka *keepAlive) close() { + close(ka.donec) + for _, ch := range ka.chs { + close(ch) + } +} diff --git a/client/v3/leasing/cache.go b/client_sdk/v3/leasing/cache.go similarity index 94% rename from client/v3/leasing/cache.go rename to client_sdk/v3/leasing/cache.go index 214ee2fc196..a4215c6b98a 100644 --- a/client/v3/leasing/cache.go +++ b/client_sdk/v3/leasing/cache.go @@ -20,9 +20,9 @@ import ( "sync" "time" - v3pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + v3pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" ) const revokeBackoff = 2 * time.Second @@ -144,7 +144,7 @@ func (lc *leaseCache) Update(key, val []byte, respHeader *v3pb.ResponseHeader) { cacheResp := li.response if len(cacheResp.Kvs) == 0 { kv := &mvccpb.KeyValue{ - Key: key, + Key: string(key), CreateRevision: respHeader.Revision, } cacheResp.Kvs = append(cacheResp.Kvs, kv) @@ -154,7 +154,7 @@ func (lc *leaseCache) Update(key, val []byte, respHeader *v3pb.ResponseHeader) { if cacheResp.Kvs[0].ModRevision < respHeader.Revision { cacheResp.Header = respHeader cacheResp.Kvs[0].ModRevision = respHeader.Revision - cacheResp.Kvs[0].Value = val + cacheResp.Kvs[0].Value = string(val) } } @@ -228,11 +228,9 @@ 
func (lk *leaseKey) get(op v3.Op) *v3.GetResponse { ret.Kvs = nil } else { kv := *ret.Kvs[0] - kv.Key = make([]byte, len(kv.Key)) - copy(kv.Key, ret.Kvs[0].Key) + kv.Key = ret.Kvs[0].Key if !op.IsKeysOnly() { - kv.Value = make([]byte, len(kv.Value)) - copy(kv.Value, ret.Kvs[0].Value) + kv.Value = ret.Kvs[0].Value } ret.Kvs = []*mvccpb.KeyValue{&kv} } @@ -297,7 +295,7 @@ func (lc *leaseCache) evalOps(ops []v3.Op) ([]*v3pb.ResponseOp, bool) { return nil, false } resps[i] = &v3pb.ResponseOp{ - Response: &v3pb.ResponseOp_ResponseRange{ + ResponseOp_ResponseRange: &v3pb.ResponseOp_ResponseRange{ ResponseRange: (*v3pb.RangeResponse)(resp), }, } diff --git a/client_sdk/v3/leasing/doc.go b/client_sdk/v3/leasing/doc.go new file mode 100644 index 00000000000..5ffb8b70c21 --- /dev/null +++ b/client_sdk/v3/leasing/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package leasing serves linearizable reads from a local cache by acquiring +// exclusive write access to keys through a client-side leasing protocol. This +// leasing layer can either directly wrap the etcd client or it can be exposed +// through the etcd grpc proxy server, granting multiple clients write access. 
+// +// First, create a leasing KV from a clientv3.Client 'cli': +// +// lkv, err := leasing.NewKV(cli, "leasing-prefix") +// if err != nil { +// // handle error +// } +// +// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's +// key locally. On the server, the leasing key is stored to "leasing-prefix/abc": +// +// resp, err := lkv.Get(context.TODO(), "abc") +// +// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime: +// +// resp, err = lkv.Get(context.TODO(), "abc") +// +// If another leasing client writes to a leased key, then the owner relinquishes its exclusive +// access, permitting the writer to modify the key: +// +// lkv2, err := leasing.NewKV(cli, "leasing-prefix") +// if err != nil { +// // handle error +// } +// lkv2.Put(context.TODO(), "abc", "456") +// resp, err = lkv.Get("abc") +// +package leasing diff --git a/client_sdk/v3/leasing/kv.go b/client_sdk/v3/leasing/kv.go new file mode 100644 index 00000000000..35022b1274d --- /dev/null +++ b/client_sdk/v3/leasing/kv.go @@ -0,0 +1,479 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package leasing + +import ( + "context" + "strings" + "sync" + "time" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type leasingKV struct { + cl *v3.Client + kv v3.KV + pfx string + leases leaseCache + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + sessionOpts []concurrency.SessionOption + session *concurrency.Session + sessionc chan struct{} +} + +var closedCh chan struct{} + +func init() { + closedCh = make(chan struct{}) + close(closedCh) +} + +// NewKV wraps a KV instance so that all requests are wired through a leasing protocol. +func NewKV(cl *v3.Client, pfx string, opts ...concurrency.SessionOption) (v3.KV, func(), error) { + cctx, cancel := context.WithCancel(cl.Ctx()) + lkv := &leasingKV{ + cl: cl, + kv: cl.KV, + pfx: pfx, + leases: leaseCache{revokes: make(map[string]time.Time)}, + ctx: cctx, + cancel: cancel, + sessionOpts: opts, + sessionc: make(chan struct{}), + } + lkv.wg.Add(2) + go func() { + defer lkv.wg.Done() + lkv.monitorSession() + }() + go func() { + defer lkv.wg.Done() + lkv.leases.clearOldRevokes(cctx) + }() + return lkv, lkv.Close, lkv.waitSession(cctx) +} + +func (lkv *leasingKV) Close() { + lkv.cancel() + lkv.wg.Wait() +} + +func (lkv *leasingKV) Get(ctx context.Context, key string, opts ...v3.OpOption) (*v3.GetResponse, error) { + return lkv.get(ctx, v3.OpGet(key, opts...)) +} + +func (lkv *leasingKV) Put(ctx context.Context, key, val string, opts ...v3.OpOption) (*v3.PutResponse, error) { + return lkv.put(ctx, v3.OpPut(key, val, opts...)) +} + +func (lkv *leasingKV) Delete(ctx context.Context, key string, opts ...v3.OpOption) (*v3.DeleteResponse, error) { + return lkv.delete(ctx, v3.OpDelete(key, opts...)) 
+} + +func (lkv *leasingKV) Do(ctx context.Context, op v3.Op) (v3.OpResponse, error) { + switch { + case op.IsGet(): + resp, err := lkv.get(ctx, op) + return resp.OpResponse(), err + case op.IsPut(): + resp, err := lkv.put(ctx, op) + return resp.OpResponse(), err + case op.IsDelete(): + resp, err := lkv.delete(ctx, op) + return resp.OpResponse(), err + case op.IsTxn(): + cmps, thenOps, elseOps := op.Txn() + resp, err := lkv.Txn(ctx).If(cmps...).Then(thenOps...).Else(elseOps...).Commit() + return resp.OpResponse(), err + } + return v3.OpResponse{}, nil +} + +func (lkv *leasingKV) Compact(ctx context.Context, rev int64, opts ...v3.CompactOption) (*v3.CompactResponse, error) { + return lkv.kv.Compact(ctx, rev, opts...) +} + +func (lkv *leasingKV) Txn(ctx context.Context) v3.Txn { + return &txnLeasing{Txn: lkv.kv.Txn(ctx), lkv: lkv, ctx: ctx} +} + +func (lkv *leasingKV) monitorSession() { + for lkv.ctx.Err() == nil { + if lkv.session != nil { + select { + case <-lkv.session.Done(): + case <-lkv.ctx.Done(): + return + } + } + lkv.leases.mu.Lock() + select { + case <-lkv.sessionc: + lkv.sessionc = make(chan struct{}) + default: + } + lkv.leases.entries = make(map[string]*leaseKey) + lkv.leases.mu.Unlock() + + s, err := concurrency.NewSession(lkv.cl, lkv.sessionOpts...) 
+ if err != nil { + continue + } + + lkv.leases.mu.Lock() + lkv.session = s + close(lkv.sessionc) + lkv.leases.mu.Unlock() + } +} + +func (lkv *leasingKV) monitorLease(ctx context.Context, key string, rev int64) { + cctx, cancel := context.WithCancel(lkv.ctx) + defer cancel() + for cctx.Err() == nil { + if rev == 0 { + resp, err := lkv.kv.Get(ctx, lkv.pfx+key) + if err != nil { + continue + } + rev = resp.Header.Revision + if len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) == "REVOKE" { + lkv.rescind(cctx, key, rev) + return + } + } + wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1)) + for resp := range wch { + for _, ev := range resp.Events { + if string(ev.Kv.Value) != "REVOKE" { + continue + } + if v3.LeaseID(ev.Kv.Lease) == lkv.leaseID() { + lkv.rescind(cctx, key, ev.Kv.ModRevision) + } + return + } + } + rev = 0 + } +} + +// rescind releases a lease from this client. +func (lkv *leasingKV) rescind(ctx context.Context, key string, rev int64) { + if lkv.leases.Evict(key) > rev { + return + } + cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev) + op := v3.OpDelete(lkv.pfx + key) + for ctx.Err() == nil { + if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil { + return + } + } +} + +func (lkv *leasingKV) waitRescind(ctx context.Context, key string, rev int64) error { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1)) + for resp := range wch { + for _, ev := range resp.Events { + if ev.Type == v3.EventTypeDelete { + return ctx.Err() + } + } + } + return ctx.Err() +} + +func (lkv *leasingKV) tryModifyOp(ctx context.Context, op v3.Op) (*v3.TxnResponse, chan<- struct{}, error) { + key := string(op.KeyBytes()) + wc, rev := lkv.leases.Lock(key) + cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1) + resp, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit() + switch { + case err != nil: + lkv.leases.Evict(key) + fallthrough + case !resp.Succeeded: + if wc != nil { + 
close(wc) + } + return nil, nil, err + } + return resp, wc, nil +} + +func (lkv *leasingKV) put(ctx context.Context, op v3.Op) (pr *v3.PutResponse, err error) { + if err := lkv.waitSession(ctx); err != nil { + return nil, err + } + for ctx.Err() == nil { + resp, wc, err := lkv.tryModifyOp(ctx, op) + if err != nil || wc == nil { + resp, err = lkv.revoke(ctx, string(op.KeyBytes()), op) + } + if err != nil { + return nil, err + } + if resp.Succeeded { + lkv.leases.mu.Lock() + lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), resp.Header) + lkv.leases.mu.Unlock() + pr = (*v3.PutResponse)(resp.Responses[0].GetResponsePut()) + pr.Header = resp.Header + } + if wc != nil { + close(wc) + } + if resp.Succeeded { + return pr, nil + } + } + return nil, ctx.Err() +} + +func (lkv *leasingKV) acquire(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) { + for ctx.Err() == nil { + if err := lkv.waitSession(ctx); err != nil { + return nil, err + } + lcmp := v3.Cmp{Key: key, Target: pb.Compare_LEASE} + resp, err := lkv.kv.Txn(ctx).If( + v3.Compare(v3.CreateRevision(lkv.pfx+key), "=", 0), + v3.Compare(lcmp, "=", 0)). + Then( + op, + v3.OpPut(lkv.pfx+key, "", v3.WithLease(lkv.leaseID()))). 
+ Else( + op, + v3.OpGet(lkv.pfx+key), + ).Commit() + if err == nil { + if !resp.Succeeded { + kvs := resp.Responses[1].GetResponseRange().Kvs + // if txn failed since already owner, lease is acquired + resp.Succeeded = len(kvs) > 0 && v3.LeaseID(kvs[0].Lease) == lkv.leaseID() + } + return resp, nil + } + // retry if transient error + if _, ok := err.(rpctypes.EtcdError); ok { + return nil, err + } + if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable { + return nil, err + } + } + return nil, ctx.Err() +} + +func (lkv *leasingKV) get(ctx context.Context, op v3.Op) (*v3.GetResponse, error) { + do := func() (*v3.GetResponse, error) { + r, err := lkv.kv.Do(ctx, op) + return r.Get(), err + } + if !lkv.readySession() { + return do() + } + + if resp, ok := lkv.leases.Get(ctx, op); resp != nil { + return resp, nil + } else if !ok || op.IsSerializable() { + // 必须是handled by etcd or can skip linearization + return do() + } + + key := string(op.KeyBytes()) + if !lkv.leases.MayAcquire(key) { + resp, err := lkv.kv.Do(ctx, op) + return resp.Get(), err + } + + resp, err := lkv.acquire(ctx, key, v3.OpGet(key)) + if err != nil { + return nil, err + } + getResp := (*v3.GetResponse)(resp.Responses[0].GetResponseRange()) + getResp.Header = resp.Header + if resp.Succeeded { + getResp = lkv.leases.Add(key, getResp, op) + lkv.wg.Add(1) + go func() { + defer lkv.wg.Done() + lkv.monitorLease(ctx, key, resp.Header.Revision) + }() + } + return getResp, nil +} + +func (lkv *leasingKV) deleteRangeRPC(ctx context.Context, maxLeaseRev int64, key, end string) (*v3.DeleteResponse, error) { + lkey, lend := lkv.pfx+key, lkv.pfx+end + resp, err := lkv.kv.Txn(ctx).If( + v3.Compare(v3.CreateRevision(lkey).WithRange(lend), "<", maxLeaseRev+1), + ).Then( + v3.OpGet(key, v3.WithRange(end), v3.WithKeysOnly()), + v3.OpDelete(key, v3.WithRange(end)), + ).Commit() + if err != nil { + lkv.leases.EvictRange(key, end) + return nil, err + } + if !resp.Succeeded { + return nil, nil + } + for 
_, kv := range resp.Responses[0].GetResponseRange().Kvs { + lkv.leases.Delete(string(kv.Key), resp.Header) + } + delResp := (*v3.DeleteResponse)(resp.Responses[1].GetResponseDeleteRange()) + delResp.Header = resp.Header + return delResp, nil +} + +func (lkv *leasingKV) deleteRange(ctx context.Context, op v3.Op) (*v3.DeleteResponse, error) { + key, end := string(op.KeyBytes()), string(op.RangeBytes()) + for ctx.Err() == nil { + maxLeaseRev, err := lkv.revokeRange(ctx, key, end) + if err != nil { + return nil, err + } + wcs := lkv.leases.LockRange(key, end) + delResp, err := lkv.deleteRangeRPC(ctx, maxLeaseRev, key, end) + closeAll(wcs) + if err != nil || delResp != nil { + return delResp, err + } + } + return nil, ctx.Err() +} + +func (lkv *leasingKV) delete(ctx context.Context, op v3.Op) (dr *v3.DeleteResponse, err error) { + if err := lkv.waitSession(ctx); err != nil { + return nil, err + } + if len(op.RangeBytes()) > 0 { + return lkv.deleteRange(ctx, op) + } + key := string(op.KeyBytes()) + for ctx.Err() == nil { + resp, wc, err := lkv.tryModifyOp(ctx, op) + if err != nil || wc == nil { + resp, err = lkv.revoke(ctx, key, op) + } + if err != nil { + // don't know if delete was processed + lkv.leases.Evict(key) + return nil, err + } + if resp.Succeeded { + dr = (*v3.DeleteResponse)(resp.Responses[0].GetResponseDeleteRange()) + dr.Header = resp.Header + lkv.leases.Delete(key, dr.Header) + } + if wc != nil { + close(wc) + } + if resp.Succeeded { + return dr, nil + } + } + return nil, ctx.Err() +} + +func (lkv *leasingKV) revoke(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) { + rev := lkv.leases.Rev(key) + txn := lkv.kv.Txn(ctx).If(v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)).Then(op) + resp, err := txn.Else(v3.OpPut(lkv.pfx+key, "REVOKE", v3.WithIgnoreLease())).Commit() + if err != nil || resp.Succeeded { + return resp, err + } + return resp, lkv.waitRescind(ctx, key, resp.Header.Revision) +} + +func (lkv *leasingKV) revokeRange(ctx 
context.Context, begin, end string) (int64, error) { + lkey, lend := lkv.pfx+begin, "" + if len(end) > 0 { + lend = lkv.pfx + end + } + leaseKeys, err := lkv.kv.Get(ctx, lkey, v3.WithRange(lend)) + if err != nil { + return 0, err + } + return lkv.revokeLeaseKvs(ctx, leaseKeys.Kvs) +} + +func (lkv *leasingKV) revokeLeaseKvs(ctx context.Context, kvs []*mvccpb.KeyValue) (int64, error) { + maxLeaseRev := int64(0) + for _, kv := range kvs { + if rev := kv.CreateRevision; rev > maxLeaseRev { + maxLeaseRev = rev + } + if v3.LeaseID(kv.Lease) == lkv.leaseID() { + // don't revoke own keys + continue + } + key := strings.TrimPrefix(string(kv.Key), lkv.pfx) + if _, err := lkv.revoke(ctx, key, v3.OpGet(key)); err != nil { + return 0, err + } + } + return maxLeaseRev, nil +} + +func (lkv *leasingKV) waitSession(ctx context.Context) error { + lkv.leases.mu.RLock() + sessionc := lkv.sessionc + lkv.leases.mu.RUnlock() + select { + case <-sessionc: + return nil + case <-lkv.ctx.Done(): + return lkv.ctx.Err() + case <-ctx.Done(): + return ctx.Err() + } +} + +func (lkv *leasingKV) readySession() bool { + lkv.leases.mu.RLock() + defer lkv.leases.mu.RUnlock() + if lkv.session == nil { + return false + } + select { + case <-lkv.session.Done(): + default: + return true + } + return false +} + +func (lkv *leasingKV) leaseID() v3.LeaseID { + lkv.leases.mu.RLock() + defer lkv.leases.mu.RUnlock() + return lkv.session.Lease() +} diff --git a/client_sdk/v3/leasing/txn.go b/client_sdk/v3/leasing/txn.go new file mode 100644 index 00000000000..299331e1f01 --- /dev/null +++ b/client_sdk/v3/leasing/txn.go @@ -0,0 +1,223 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package leasing + +import ( + "context" + "strings" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + v3pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +type txnLeasing struct { + v3.Txn + lkv *leasingKV + ctx context.Context + cs []v3.Cmp + opst []v3.Op + opse []v3.Op +} + +func (txn *txnLeasing) If(cs ...v3.Cmp) v3.Txn { + txn.cs = append(txn.cs, cs...) + txn.Txn = txn.Txn.If(cs...) + return txn +} + +func (txn *txnLeasing) Then(ops ...v3.Op) v3.Txn { + txn.opst = append(txn.opst, ops...) + txn.Txn = txn.Txn.Then(ops...) + return txn +} + +func (txn *txnLeasing) Else(ops ...v3.Op) v3.Txn { + txn.opse = append(txn.opse, ops...) + txn.Txn = txn.Txn.Else(ops...) + return txn +} + +func (txn *txnLeasing) Commit() (*v3.TxnResponse, error) { + if resp, err := txn.eval(); resp != nil || err != nil { + return resp, err + } + return txn.serverTxn() +} + +func (txn *txnLeasing) eval() (*v3.TxnResponse, error) { + // TODO: wait on keys in comparisons + thenOps, elseOps := gatherOps(txn.opst), gatherOps(txn.opse) + ops := make([]v3.Op, 0, len(thenOps)+len(elseOps)) + ops = append(ops, thenOps...) + ops = append(ops, elseOps...) 
+ + for _, ch := range txn.lkv.leases.NotifyOps(ops) { + select { + case <-ch: + case <-txn.ctx.Done(): + return nil, txn.ctx.Err() + } + } + + txn.lkv.leases.mu.RLock() + defer txn.lkv.leases.mu.RUnlock() + succeeded, ok := txn.lkv.leases.evalCmp(txn.cs) + if !ok || txn.lkv.leases.header == nil { + return nil, nil + } + if ops = txn.opst; !succeeded { + ops = txn.opse + } + + resps, ok := txn.lkv.leases.evalOps(ops) + if !ok { + return nil, nil + } + return &v3.TxnResponse{Header: copyHeader(txn.lkv.leases.header), Succeeded: succeeded, Responses: resps}, nil +} + +// fallback computes the ops to fetch all possible conflicting +// leasing keys for a list of ops. +func (txn *txnLeasing) fallback(ops []v3.Op) (fbOps []v3.Op) { + for _, op := range ops { + if op.IsGet() { + continue + } + lkey, lend := txn.lkv.pfx+string(op.KeyBytes()), "" + if len(op.RangeBytes()) > 0 { + lend = txn.lkv.pfx + string(op.RangeBytes()) + } + fbOps = append(fbOps, v3.OpGet(lkey, v3.WithRange(lend))) + } + return fbOps +} + +func (txn *txnLeasing) guardKeys(ops []v3.Op) (cmps []v3.Cmp) { + seen := make(map[string]bool) + for _, op := range ops { + key := string(op.KeyBytes()) + if op.IsGet() || len(op.RangeBytes()) != 0 || seen[key] { + continue + } + rev := txn.lkv.leases.Rev(key) + cmps = append(cmps, v3.Compare(v3.CreateRevision(txn.lkv.pfx+key), "<", rev+1)) + seen[key] = true + } + return cmps +} + +func (txn *txnLeasing) guardRanges(ops []v3.Op) (cmps []v3.Cmp, err error) { + for _, op := range ops { + if op.IsGet() || len(op.RangeBytes()) == 0 { + continue + } + + key, end := string(op.KeyBytes()), string(op.RangeBytes()) + maxRevLK, err := txn.lkv.revokeRange(txn.ctx, key, end) + if err != nil { + return nil, err + } + + opts := append(v3.WithLastRev(), v3.WithRange(end)) + getResp, err := txn.lkv.kv.Get(txn.ctx, key, opts...) 
+ if err != nil { + return nil, err + } + maxModRev := int64(0) + if len(getResp.Kvs) > 0 { + maxModRev = getResp.Kvs[0].ModRevision + } + + noKeyUpdate := v3.Compare(v3.ModRevision(key).WithRange(end), "<", maxModRev+1) + noLeaseUpdate := v3.Compare( + v3.CreateRevision(txn.lkv.pfx+key).WithRange(txn.lkv.pfx+end), + "<", + maxRevLK+1) + cmps = append(cmps, noKeyUpdate, noLeaseUpdate) + } + return cmps, nil +} + +func (txn *txnLeasing) guard(ops []v3.Op) ([]v3.Cmp, error) { + cmps := txn.guardKeys(ops) + rangeCmps, err := txn.guardRanges(ops) + return append(cmps, rangeCmps...), err +} + +func (txn *txnLeasing) commitToCache(txnResp *v3pb.TxnResponse, userTxn v3.Op) { + ops := gatherResponseOps(txnResp.Responses, []v3.Op{userTxn}) + txn.lkv.leases.mu.Lock() + for _, op := range ops { + key := string(op.KeyBytes()) + if op.IsDelete() && len(op.RangeBytes()) > 0 { + end := string(op.RangeBytes()) + for k := range txn.lkv.leases.entries { + if inRange(k, key, end) { + txn.lkv.leases.delete(k, txnResp.Header) + } + } + } else if op.IsDelete() { + txn.lkv.leases.delete(key, txnResp.Header) + } + if op.IsPut() { + txn.lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), txnResp.Header) + } + } + txn.lkv.leases.mu.Unlock() +} + +func (txn *txnLeasing) revokeFallback(fbResps []*v3pb.ResponseOp) error { + for _, resp := range fbResps { + _, err := txn.lkv.revokeLeaseKvs(txn.ctx, resp.GetResponseRange().Kvs) + if err != nil { + return err + } + } + return nil +} + +func (txn *txnLeasing) serverTxn() (*v3.TxnResponse, error) { + if err := txn.lkv.waitSession(txn.ctx); err != nil { + return nil, err + } + + userOps := gatherOps(append(txn.opst, txn.opse...)) + userTxn := v3.OpTxn(txn.cs, txn.opst, txn.opse) + fbOps := txn.fallback(userOps) + + defer closeAll(txn.lkv.leases.LockWriteOps(userOps)) + for { + cmps, err := txn.guard(userOps) + if err != nil { + return nil, err + } + resp, err := txn.lkv.kv.Txn(txn.ctx).If(cmps...).Then(userTxn).Else(fbOps...).Commit() + if err != nil 
{ + for _, cmp := range cmps { + txn.lkv.leases.Evict(strings.TrimPrefix(string(cmp.Key), txn.lkv.pfx)) + } + return nil, err + } + if resp.Succeeded { + txn.commitToCache((*v3pb.TxnResponse)(resp), userTxn) + userResp := resp.Responses[0].GetResponseTxn() + userResp.Header = resp.Header + return (*v3.TxnResponse)(userResp), nil + } + if err := txn.revokeFallback(resp.Responses); err != nil { + return nil, err + } + } +} diff --git a/client_sdk/v3/leasing/util.go b/client_sdk/v3/leasing/util.go new file mode 100644 index 00000000000..030ee59b020 --- /dev/null +++ b/client_sdk/v3/leasing/util.go @@ -0,0 +1,108 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package leasing + +import ( + "bytes" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + v3pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +func compareInt64(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +func evalCmp(resp *v3.GetResponse, tcmp v3.Cmp) bool { + var result int + if len(resp.Kvs) != 0 { + kv := resp.Kvs[0] + switch tcmp.Target { + case v3pb.Compare_VALUE: + if tcmp.Compare_Value != nil { + result = bytes.Compare([]byte(kv.Value), []byte(tcmp.Compare_Value.Value)) + } + case v3pb.Compare_CREATE: + if tcmp.Compare_CreateRevision != nil { + result = compareInt64(kv.CreateRevision, tcmp.Compare_CreateRevision.CreateRevision) + } + case v3pb.Compare_MOD: + if tcmp.Compare_ModRevision != nil { + result = compareInt64(kv.ModRevision, tcmp.Compare_ModRevision.ModRevision) + } + case v3pb.Compare_VERSION: + if tcmp.Compare_Version != nil { + result = compareInt64(kv.Version, tcmp.Compare_Version.Version) + } + } + } + switch tcmp.Result { + case v3pb.Compare_EQUAL: + return result == 0 + case v3pb.Compare_NOT_EQUAL: + return result != 0 + case v3pb.Compare_GREATER: + return result > 0 + case v3pb.Compare_LESS: + return result < 0 + } + return true +} + +func gatherOps(ops []v3.Op) (ret []v3.Op) { + for _, op := range ops { + if !op.IsTxn() { + ret = append(ret, op) + continue + } + _, thenOps, elseOps := op.Txn() + ret = append(ret, gatherOps(append(thenOps, elseOps...))...) + } + return ret +} + +func gatherResponseOps(resp []*v3pb.ResponseOp, ops []v3.Op) (ret []v3.Op) { + for i, op := range ops { + if !op.IsTxn() { + ret = append(ret, op) + continue + } + _, thenOps, elseOps := op.Txn() + if txnResp := resp[i].GetResponseTxn(); txnResp.Succeeded { + ret = append(ret, gatherResponseOps(txnResp.Responses, thenOps)...) + } else { + ret = append(ret, gatherResponseOps(txnResp.Responses, elseOps)...) 
+ } + } + return ret +} + +func copyHeader(hdr *v3pb.ResponseHeader) *v3pb.ResponseHeader { + h := *hdr + return &h +} + +func closeAll(chs []chan<- struct{}) { + for _, ch := range chs { + close(ch) + } +} diff --git a/client_sdk/v3/logger.go b/client_sdk/v3/logger.go new file mode 100644 index 00000000000..ba79ca36632 --- /dev/null +++ b/client_sdk/v3/logger.go @@ -0,0 +1,77 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "log" + "os" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zapgrpc" + "google.golang.org/grpc/grpclog" +) + +func init() { + // We override grpc logger only when the environment variable is set + // in order to not interfere by default with user's code or other libraries. + if os.Getenv("ETCD_CLIENT_DEBUG") != "" { + lg, err := CreateDefaultZapLogger() + if err != nil { + panic(err) + } + grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) + } +} + +// SetLogger sets grpc logger. +// +// Deprecated: use grpclog.SetLoggerV2 directly or grpc_zap.ReplaceGrpcLoggerV2. +func SetLogger(l grpclog.LoggerV2) { + grpclog.SetLoggerV2(l) +} + +// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level. 
+func etcdClientDebugLevel() zapcore.Level { + envLevel := os.Getenv("ETCD_CLIENT_DEBUG") + if envLevel == "" || envLevel == "true" { + return zapcore.InfoLevel + } + var l zapcore.Level + if err := l.Set(envLevel); err == nil { + log.Printf("Deprecated env ETCD_CLIENT_DEBUG value. Using default level: 'info'") + return zapcore.InfoLevel + } + return l +} + +// CreateDefaultZapLoggerConfig creates a logger config that is configurable using env variable: +// ETCD_CLIENT_DEBUG= debug|info|warn|error|dpanic|panic|fatal|true (true=info) +func CreateDefaultZapLoggerConfig() zap.Config { + lcfg := logutil.DefaultZapLoggerConfig + lcfg.Level = zap.NewAtomicLevelAt(etcdClientDebugLevel()) + return lcfg +} + +// CreateDefaultZapLogger creates a logger that is configurable using env variable: +// ETCD_CLIENT_DEBUG= debug|info|warn|error|dpanic|panic|fatal|true (true=info) +func CreateDefaultZapLogger() (*zap.Logger, error) { + c, err := CreateDefaultZapLoggerConfig().Build() + if err != nil { + return nil, err + } + return c.Named("etcd-client"), nil +} diff --git a/client_sdk/v3/maintenance.go b/client_sdk/v3/maintenance.go new file mode 100644 index 00000000000..a61456c8113 --- /dev/null +++ b/client_sdk/v3/maintenance.go @@ -0,0 +1,225 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "context" + "fmt" + "io" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type ( + DefragmentResponse pb.DefragmentResponse + AlarmResponse pb.AlarmResponse + AlarmMember pb.AlarmMember + StatusResponse pb.StatusResponse + HashKVResponse pb.HashKVResponse + MoveLeaderResponse pb.MoveLeaderResponse +) + +type Maintenance interface { + AlarmList(ctx context.Context) (*AlarmResponse, error) // 获取目前所有的警报 + AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) // 解除警报 + Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) // 碎片整理 + Status(ctx context.Context, endpoint string) (*StatusResponse, error) // 获取端点的状态 + HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) // + Snapshot(ctx context.Context) (io.ReadCloser, error) // 返回一个快照 + MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) // leader 转移 +} + +type maintenance struct { + lg *zap.Logger + dial func(endpoint string) (pb.MaintenanceClient, func(), error) + remote pb.MaintenanceClient + callOpts []grpc.CallOption +} + +func NewMaintenance(c *Client) Maintenance { + api := &maintenance{ + lg: c.lg, + dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { + conn, err := c.Dial(endpoint) + if err != nil { + return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) + } + + // get token with established connection + dctx := c.ctx + cancel := func() {} + if c.cfg.DialTimeout > 0 { + dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) + } + err = c.getToken(dctx) + cancel() + if err != nil { + return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err) + } + cancel = func() { conn.Close() } + return RetryMaintenanceClient(c, conn), cancel, nil + }, + remote: RetryMaintenanceClient(c, c.conn), + } + if c != nil { + 
api.callOpts = c.callOpts + } + return api +} + +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { + api := &maintenance{ + lg: c.lg, + dial: func(string) (pb.MaintenanceClient, func(), error) { + return remote, func() {}, nil + }, + remote: remote, + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +// AlarmList OK +func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_GET, + MemberID: 0, // all + Alarm: pb.AlarmType_NONE, // all + } + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) + if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_DEACTIVATE, + MemberID: am.MemberID, + Alarm: am.Alarm, + } + + if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { + ar, err := m.AlarmList(ctx) + if err != nil { + return nil, toErr(ctx, err) + } + ret := AlarmResponse{} + for _, am := range ar.Alarms { + dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) + if derr != nil { + return nil, toErr(ctx, derr) + } + ret.Alarms = append(ret.Alarms, dresp.Alarms...) + } + return &ret, nil + } + + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) + if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +// Defragment 碎片整理 +func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) 
+ if err != nil { + return nil, toErr(ctx, err) + } + return (*DefragmentResponse)(resp), nil +} + +func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*StatusResponse)(resp), nil +} + +func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*HashKVResponse)(resp), nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) + if err != nil { + return nil, toErr(ctx, err) + } + + m.lg.Info("打开快照流;下载ing") + pr, pw := io.Pipe() + go func() { + for { + resp, err := ss.Recv() + if err != nil { + switch err { + case io.EOF: + m.lg.Info("快照读取完成;关闭ing") + default: + m.lg.Warn("从快照流接收失败;关闭ing", zap.Error(err)) + } + pw.CloseWithError(err) + return + } + if _, werr := pw.Write(resp.Blob); werr != nil { + pw.CloseWithError(werr) + return + } + } + }() + return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil +} + +type snapshotReadCloser struct { + ctx context.Context + io.ReadCloser +} + +func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { + n, err = rc.ReadCloser.Read(p) + return n, toErr(rc.ctx, err) +} + +func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { + resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) 
+ return (*MoveLeaderResponse)(resp), toErr(ctx, err) +} diff --git a/client_sdk/v3/mirror/syncer.go b/client_sdk/v3/mirror/syncer.go new file mode 100644 index 00000000000..73981011967 --- /dev/null +++ b/client_sdk/v3/mirror/syncer.go @@ -0,0 +1,102 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mirror implements etcd mirroring operations. +package mirror + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" +) + +const ( + batchLimit = 1000 +) + +type Syncer interface { + SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) // 同步k-v 状态.通过返回的chan发送. + SyncUpdates(ctx context.Context) clientv3.WatchChan // 在同步base数据后,同步增量数据 +} + +// NewSyncer 同步器 +func NewSyncer(c *clientv3.Client, prefix string, rev int64) Syncer { + return &syncer{c: c, prefix: prefix, rev: rev} +} + +type syncer struct { + c *clientv3.Client + rev int64 + prefix string +} + +func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) { + respchan := make(chan clientv3.GetResponse, 1024) + errchan := make(chan error, 1) + + // 如果没有指定rev,我们将选择最近的修订. 
+ if s.rev == 0 { + resp, err := s.c.Get(ctx, "foo") + if err != nil { + errchan <- err + close(respchan) + close(errchan) + return respchan, errchan + } + s.rev = resp.Header.Revision + } + + go func() { + defer close(respchan) + defer close(errchan) + + var key string + + opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)} + + if len(s.prefix) == 0 { + // 同步所有kv + opts = append(opts, clientv3.WithFromKey()) + key = "\x00" + } else { + opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(s.prefix))) + key = s.prefix + } + + for { + resp, err := s.c.Get(ctx, key, opts...) + if err != nil { + errchan <- err + return + } + + respchan <- *resp + + if !resp.More { + return + } + // move to next key + key = string(append([]byte(resp.Kvs[len(resp.Kvs)-1].Key), 0)) + } + }() + + return respchan, errchan +} + +func (s *syncer) SyncUpdates(ctx context.Context) clientv3.WatchChan { + if s.rev == 0 { + panic("unexpected revision = 0. Calling SyncUpdates before SyncBase finishes?") + } + return s.c.Watch(ctx, s.prefix, clientv3.WithPrefix(), clientv3.WithRev(s.rev+1)) +} diff --git a/client_sdk/v3/mock/mockserver/doc.go b/client_sdk/v3/mock/mockserver/doc.go new file mode 100644 index 00000000000..00c044c3aab --- /dev/null +++ b/client_sdk/v3/mock/mockserver/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package mockserver provides mock implementations for etcdserver's etcd interface. +package mockserver diff --git a/client/v3/mock/mockserver/mockserver.go b/client_sdk/v3/mock/mockserver/mockserver.go similarity index 78% rename from client/v3/mock/mockserver/mockserver.go rename to client_sdk/v3/mock/mockserver/mockserver.go index 837d45db175..d219f188764 100644 --- a/client/v3/mock/mockserver/mockserver.go +++ b/client_sdk/v3/mock/mockserver/mockserver.go @@ -17,17 +17,18 @@ package mockserver import ( "context" "fmt" + "io/ioutil" "net" "os" "sync" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "google.golang.org/grpc" "google.golang.org/grpc/resolver" ) -// MockServer provides a mocked out grpc server of the etcdserver interface. +// MockServer provides a mocked out grpc etcd of the etcdserver interface. type MockServer struct { ln net.Listener Network string @@ -83,7 +84,7 @@ func startMockServersUnix(count int) (ms *MockServers, err error) { dir := os.TempDir() addrs := make([]string, 0, count) for i := 0; i < count; i++ { - f, err := os.CreateTemp(dir, "etcd-unix-so-") + f, err := ioutil.TempFile(dir, "etcd-unix-so-") if err != nil { return nil, fmt.Errorf("failed to allocate temp file for unix socket: %v", err) } @@ -118,7 +119,7 @@ func startMockServers(network string, addrs []string) (ms *MockServers, err erro return ms, nil } -// StartAt restarts mock server at given index. +// StartAt restarts mock etcd at given index. func (ms *MockServers) StartAt(idx int) (err error) { ms.mu.Lock() defer ms.mu.Unlock() @@ -132,7 +133,6 @@ func (ms *MockServers) StartAt(idx int) (err error) { svr := grpc.NewServer() pb.RegisterKVServer(svr, &mockKVServer{}) - pb.RegisterLeaseServer(svr, &mockLeaseServer{}) ms.Servers[idx].GrpcServer = svr ms.wg.Add(1) @@ -142,7 +142,7 @@ func (ms *MockServers) StartAt(idx int) (err error) { return nil } -// StopAt stops mock server at given index. 
+// StopAt stops mock etcd at given index. func (ms *MockServers) StopAt(idx int) { ms.mu.Lock() defer ms.mu.Unlock() @@ -157,7 +157,7 @@ func (ms *MockServers) StopAt(idx int) { ms.wg.Done() } -// Stop stops the mock server, immediately closing all open connections and listeners. +// Stop stops the mock etcd, immediately closing all open connections and listeners. func (ms *MockServers) Stop() { for idx := range ms.Servers { ms.StopAt(idx) @@ -186,29 +186,3 @@ func (m *mockKVServer) Txn(context.Context, *pb.TxnRequest) (*pb.TxnResponse, er func (m *mockKVServer) Compact(context.Context, *pb.CompactionRequest) (*pb.CompactionResponse, error) { return &pb.CompactionResponse{}, nil } - -func (m *mockKVServer) Lease(context.Context, *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return &pb.LeaseGrantResponse{}, nil -} - -type mockLeaseServer struct{} - -func (s mockLeaseServer) LeaseGrant(context.Context, *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return &pb.LeaseGrantResponse{}, nil -} - -func (s *mockLeaseServer) LeaseRevoke(context.Context, *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - return &pb.LeaseRevokeResponse{}, nil -} - -func (s *mockLeaseServer) LeaseKeepAlive(pb.Lease_LeaseKeepAliveServer) error { - return nil -} - -func (s *mockLeaseServer) LeaseTimeToLive(context.Context, *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - return &pb.LeaseTimeToLiveResponse{}, nil -} - -func (s *mockLeaseServer) LeaseLeases(context.Context, *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { - return &pb.LeaseLeasesResponse{}, nil -} diff --git a/client_sdk/v3/namespace/doc.go b/client_sdk/v3/namespace/doc.go new file mode 100644 index 00000000000..01849b150ab --- /dev/null +++ b/client_sdk/v3/namespace/doc.go @@ -0,0 +1,43 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package namespace is a clientv3 wrapper that translates all keys to begin +// with a given prefix. +// +// First, create a client: +// +// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) +// if err != nil { +// // handle error! +// } +// +// Next, override the client interfaces: +// +// unprefixedKV := cli.KV +// cli.KV = namespace.NewKV(cli.KV, "my-prefix/") +// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/") +// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/") +// +// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/": +// +// cli.Put(context.TODO(), "abc", "123") +// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc") +// fmt.Printf("%s\n", resp.Kvs[0].Value) +// // Output: 123 +// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456") +// resp, _ = cli.Get(context.TODO(), "abc") +// fmt.Printf("%s\n", resp.Kvs[0].Value) +// // Output: 456 +// +package namespace diff --git a/client_sdk/v3/namespace/kv.go b/client_sdk/v3/namespace/kv.go new file mode 100644 index 00000000000..483970fb58d --- /dev/null +++ b/client_sdk/v3/namespace/kv.go @@ -0,0 +1,213 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" +) + +type kvPrefix struct { + clientv3.KV + pfx string +} + +// NewKV wraps a KV instance so that all requests +// are prefixed with a given string. +func NewKV(kv clientv3.KV, prefix string) clientv3.KV { + return &kvPrefix{kv, prefix} +} + +func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) { + if len(key) == 0 { + return nil, rpctypes.ErrEmptyKey + } + op := kv.prefixOp(clientv3.OpPut(key, val, opts...)) + r, err := kv.KV.Do(ctx, op) + if err != nil { + return nil, err + } + put := r.Put() + kv.unprefixPutResponse(put) + return put, nil +} + +func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { + if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) { + return nil, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...))) + if err != nil { + return nil, err + } + get := r.Get() + kv.unprefixGetResponse(get) + return get, nil +} + +func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) { + if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) { + return nil, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...))) + if err != nil { + return 
nil, err + } + del := r.Del() + kv.unprefixDeleteResponse(del) + return del, nil +} + +func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) { + if len(op.KeyBytes()) == 0 && !op.IsTxn() { + return clientv3.OpResponse{}, rpctypes.ErrEmptyKey + } + r, err := kv.KV.Do(ctx, kv.prefixOp(op)) + if err != nil { + return r, err + } + switch { + case r.Get() != nil: + kv.unprefixGetResponse(r.Get()) + case r.Put() != nil: + kv.unprefixPutResponse(r.Put()) + case r.Del() != nil: + kv.unprefixDeleteResponse(r.Del()) + case r.Txn() != nil: + kv.unprefixTxnResponse(r.Txn()) + } + return r, nil +} + +type txnPrefix struct { + clientv3.Txn + kv *kvPrefix +} + +func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn { + return &txnPrefix{kv.KV.Txn(ctx), kv} +} + +func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn { + txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...) + return txn +} + +func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn { + txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...) + return txn +} + +func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn { + txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...) 
+ return txn +} + +func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) { + resp, err := txn.Txn.Commit() + if err != nil { + return nil, err + } + txn.kv.unprefixTxnResponse(resp) + return resp, nil +} + +func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op { + if !op.IsTxn() { + begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes()) + op.WithKeyBytes(begin) + op.WithRangeBytes(end) + return op + } + cmps, thenOps, elseOps := op.Txn() + return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps)) +} + +func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) { + for i := range resp.Kvs { + resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) { + if resp.PrevKv != nil { + resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) { + for i := range resp.PrevKvs { + resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):] + } +} + +func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) { + for _, r := range resp.Responses { + if r.ResponseOp_ResponseRange != nil { + tv := r.ResponseOp_ResponseRange + if tv.ResponseRange != nil { + kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange)) + } + } + if r.ResponseOp_ResponsePut != nil { + tv := r.ResponseOp_ResponsePut + if tv.ResponsePut != nil { + kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut)) + } + } + + if r.ResponseOp_ResponseDeleteRange != nil { + tv := r.ResponseOp_ResponseDeleteRange + if tv.ResponseDeleteRange != nil { + kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange)) + } + } + if r.ResponseOp_ResponseTxn != nil { + tv := r.ResponseOp_ResponseTxn + if tv.ResponseTxn != nil { + kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn)) + } + } + + } +} + +func (kv *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) 
{ + return prefixInterval(kv.pfx, key, end) +} + +func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp { + newCmps := make([]clientv3.Cmp, len(cs)) + for i := range cs { + newCmps[i] = cs[i] + pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), []byte(cs[i].RangeEnd)) + newCmps[i].WithKeyBytes(pfxKey) + if len(cs[i].RangeEnd) != 0 { + newCmps[i].RangeEnd = string(endKey) + } + } + return newCmps +} + +func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op { + newOps := make([]clientv3.Op, len(ops)) + for i := range ops { + newOps[i] = kv.prefixOp(ops[i]) + } + return newOps +} diff --git a/client_sdk/v3/namespace/lease.go b/client_sdk/v3/namespace/lease.go new file mode 100644 index 00000000000..8a9b4176a2b --- /dev/null +++ b/client_sdk/v3/namespace/lease.go @@ -0,0 +1,57 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package namespace + +import ( + "bytes" + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" +) + +type leasePrefix struct { + clientv3.Lease + pfx []byte +} + +// NewLease wraps a Lease interface to filter for only keys with a prefix +// and remove that prefix when fetching attached keys through TimeToLive. 
+func NewLease(l clientv3.Lease, prefix string) clientv3.Lease { + return &leasePrefix{l, []byte(prefix)} +} + +func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) { + resp, err := l.Lease.TimeToLive(ctx, id, opts...) + if err != nil { + return nil, err + } + if len(resp.Keys) > 0 { + var outKeys [][]byte + for i := range resp.Keys { + if len(resp.Keys[i]) < len(l.pfx) { + // too short + continue + } + if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) { + // doesn't match prefix + continue + } + // strip prefix + outKeys = append(outKeys, resp.Keys[i][len(l.pfx):]) + } + resp.Keys = outKeys + } + return resp, nil +} diff --git a/client/v3/namespace/util.go b/client_sdk/v3/namespace/util.go similarity index 100% rename from client/v3/namespace/util.go rename to client_sdk/v3/namespace/util.go diff --git a/client_sdk/v3/namespace/watch.go b/client_sdk/v3/namespace/watch.go new file mode 100644 index 00000000000..7836aba7bd2 --- /dev/null +++ b/client_sdk/v3/namespace/watch.go @@ -0,0 +1,84 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package namespace + +import ( + "context" + "sync" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" +) + +type watcherPrefix struct { + clientv3.Watcher + pfx string + + wg sync.WaitGroup + stopc chan struct{} + stopOnce sync.Once +} + +// NewWatcher wraps a Watcher instance so that all Watch requests +// are prefixed with a given string and all Watch responses have +// the prefix removed. +func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher { + return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})} +} + +// Watch ok +func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + // since OpOption is opaque, determine range for prefixing through an OpGet + op := clientv3.OpGet(key, opts...) + end := op.RangeBytes() + pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end) + if pfxEnd != nil { + opts = append(opts, clientv3.WithRange(string(pfxEnd))) + } + + wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...) 
+ + // 翻译watch事件从前缀到无前缀 + pfxWch := make(chan clientv3.WatchResponse) + w.wg.Add(1) + go func() { + defer func() { + close(pfxWch) + w.wg.Done() + }() + for wr := range wch { + for i := range wr.Events { + wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):] + if wr.Events[i].PrevKv != nil { + wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key + } + } + select { + case pfxWch <- wr: + case <-ctx.Done(): + return + case <-w.stopc: + return + } + } + }() + return pfxWch +} + +func (w *watcherPrefix) Close() error { + err := w.Watcher.Close() + w.stopOnce.Do(func() { close(w.stopc) }) + w.wg.Wait() + return err +} diff --git a/client_sdk/v3/naming/doc.go b/client_sdk/v3/naming/doc.go new file mode 100644 index 00000000000..f80116f8902 --- /dev/null +++ b/client_sdk/v3/naming/doc.go @@ -0,0 +1,60 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package naming provides: +// - subpackage endpoints: an abstraction layer to store and read endpoints +// information from etcd. 
+// - subpackage resolver: an etcd-backed gRPC resolver for discovering gRPC +// services based on the endpoints configuration +// +// To use, first import the packages: +// +// import ( +// "github.com/ls-2018/etcd_cn/client_sdk/v3" +// "github.com/ls-2018/etcd_cn/client_sdk/v3" +// "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints" +// "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/resolver" +// "google.golang.org/grpc" +// ) +// +// First, register new endpoint addresses for a service: +// +// func etcdAdd(c *clientv3.Client, service, addr string) error { +// em := endpoints.NewManager(c, service) +// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr}); +// } +// +// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer: +// +// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) { +// etcdResolver, err := resolver.NewBuilder(c); +// if err != nil { return nil, err } +// return grpc.Dial("etcd:///" + service, grpc.WithResolvers(etcdResolver)) +// } +// +// Optionally, force delete an endpoint: +// +// func etcdDelete(c *clientv3.Client, service, addr string) error { +// em := endpoints.NewManager(c, service) +// return em.DeleteEndpoint(c.Ctx(), service+"/"+addr) +// } +// +// Or register an expiring endpoint with a lease: +// +// func etcdAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error { +// em := endpoints.NewManager(c, service) +// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr}, clientv3.WithLease(lid)); +// } +// +package naming diff --git a/client/v3/naming/endpoints/endpoints.go b/client_sdk/v3/naming/endpoints/endpoints.go similarity index 79% rename from client/v3/naming/endpoints/endpoints.go rename to client_sdk/v3/naming/endpoints/endpoints.go index ffe77eff7b6..03b744e2b84 100644 --- a/client/v3/naming/endpoints/endpoints.go +++ b/client_sdk/v3/naming/endpoints/endpoints.go @@ -1,23 +1,9 @@ -// Copyright 2021 The etcd Authors -// 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package endpoints import ( "context" - clientv3 "go.etcd.io/etcd/client/v3" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" ) // Endpoint represents a single address the connection can be established with. @@ -25,7 +11,7 @@ import ( // Inspired by: https://pkg.go.dev/google.golang.org/grpc/resolver#Address. // Please document etcd version since which version each field is supported. type Endpoint struct { - // Addr is the server address on which a connection will be established. + // Addr is the etcd address on which a connection will be established. // Since etcd 3.1 Addr string diff --git a/client/v3/naming/endpoints/endpoints_impl.go b/client_sdk/v3/naming/endpoints/endpoints_impl.go similarity index 79% rename from client/v3/naming/endpoints/endpoints_impl.go rename to client_sdk/v3/naming/endpoints/endpoints_impl.go index f88a3eed13f..2460f6d1cfb 100644 --- a/client/v3/naming/endpoints/endpoints_impl.go +++ b/client_sdk/v3/naming/endpoints/endpoints_impl.go @@ -1,17 +1,3 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package endpoints // TODO: The API is not yet implemented. @@ -22,8 +8,8 @@ import ( "errors" "strings" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/naming/endpoints/internal" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints/internal" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -92,8 +78,7 @@ func (m *endpointManager) DeleteEndpoint(ctx context.Context, key string, opts . } func (m *endpointManager) NewWatchChannel(ctx context.Context) (WatchChannel, error) { - key := m.target + "/" - resp, err := m.client.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithSerializable()) + resp, err := m.client.Get(ctx, m.target, clientv3.WithPrefix(), clientv3.WithSerializable()) if err != nil { return nil, err } @@ -102,7 +87,7 @@ func (m *endpointManager) NewWatchChannel(ctx context.Context) (WatchChannel, er initUpdates := make([]*Update, 0, len(resp.Kvs)) for _, kv := range resp.Kvs { var iup internal.Update - if err := json.Unmarshal(kv.Value, &iup); err != nil { + if err := json.Unmarshal([]byte(kv.Value), &iup); err != nil { lg.Warn("unmarshal endpoint update failed", zap.String("key", string(kv.Key)), zap.Error(err)) continue } @@ -127,8 +112,7 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd lg := m.client.GetLogger() opts := []clientv3.OpOption{clientv3.WithRev(rev), clientv3.WithPrefix()} - key := m.target + "/" - wch := m.client.Watch(ctx, key, opts...) + wch := m.client.Watch(ctx, m.target, opts...) 
for { select { case <-ctx.Done(): @@ -150,7 +134,7 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd var op Operation switch e.Type { case clientv3.EventTypePut: - err = json.Unmarshal(e.Kv.Value, &iup) + err = json.Unmarshal([]byte(e.Kv.Value), &iup) op = Add if err != nil { lg.Warn("unmarshal endpoint update failed", zap.String("key", string(e.Kv.Key)), zap.Error(err)) @@ -173,8 +157,7 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd } func (m *endpointManager) List(ctx context.Context) (Key2EndpointMap, error) { - key := m.target + "/" - resp, err := m.client.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithSerializable()) + resp, err := m.client.Get(ctx, m.target, clientv3.WithPrefix(), clientv3.WithSerializable()) if err != nil { return nil, err } @@ -182,7 +165,7 @@ func (m *endpointManager) List(ctx context.Context) (Key2EndpointMap, error) { eps := make(Key2EndpointMap) for _, kv := range resp.Kvs { var iup internal.Update - if err := json.Unmarshal(kv.Value, &iup); err != nil { + if err := json.Unmarshal([]byte(kv.Value), &iup); err != nil { continue } diff --git a/client_sdk/v3/naming/endpoints/internal/update.go b/client_sdk/v3/naming/endpoints/internal/update.go new file mode 100644 index 00000000000..08d957f443e --- /dev/null +++ b/client_sdk/v3/naming/endpoints/internal/update.go @@ -0,0 +1,38 @@ +package internal + +// Operation describes action performed on endpoint (addition vs deletion). +// Must stay JSON-format compatible with: +// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Operation +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a persistent (JSON marshalled) format representing +// endpoint within the etcd storage. 
+// +// As the format can be persisted by one version of etcd client library and +// read by other the format must be kept backward compatible and +// in particular must be a superset of the grpc(<=1.29.1) naming.Update structure: +// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Update +// +// Please document since which version of etcd-client given property is supported. +// Please keep the naming consistent with e.g. https://pkg.go.dev/google.golang.org/grpc/resolver#Address. +// +// Notice that it is not valid having both empty string Addr and nil Metadata in an Update. +type Update struct { + // Op indicates the operation of the update. + // Since etcd 3.1. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + // Since etcd 3.1. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + // Since etcd 3.1. + Metadata interface{} +} diff --git a/client_sdk/v3/naming/resolver/resolver.go b/client_sdk/v3/naming/resolver/resolver.go new file mode 100644 index 00000000000..d208d35133b --- /dev/null +++ b/client_sdk/v3/naming/resolver/resolver.go @@ -0,0 +1,107 @@ +package resolver + +import ( + "context" + "sync" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints" + + "google.golang.org/grpc/codes" + gresolver "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" +) + +type builder struct { + c *clientv3.Client +} + +func (b builder) Build(target gresolver.Target, cc gresolver.ClientConn, opts gresolver.BuildOptions) (gresolver.Resolver, error) { + r := &resolver{ + c: b.c, + target: target.Endpoint, + cc: cc, + } + r.ctx, r.cancel = context.WithCancel(context.Background()) + + em, err := endpoints.NewManager(r.c, r.target) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "resolver: failed to new endpoint manager: %s", 
err) + } + r.wch, err = em.NewWatchChannel(r.ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "resolver: failed to new watch channer: %s", err) + } + + r.wg.Add(1) + go r.watch() + return r, nil +} + +func (b builder) Scheme() string { + return "etcd" +} + +// NewBuilder creates a resolver builder. +func NewBuilder(client *clientv3.Client) (gresolver.Builder, error) { + return builder{c: client}, nil +} + +type resolver struct { + c *clientv3.Client + target string + cc gresolver.ClientConn + wch endpoints.WatchChannel + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +func (r *resolver) watch() { + defer r.wg.Done() + + allUps := make(map[string]*endpoints.Update) + for { + select { + case <-r.ctx.Done(): + return + case ups, ok := <-r.wch: + if !ok { + return + } + + for _, up := range ups { + switch up.Op { + case endpoints.Add: + allUps[up.Key] = up + case endpoints.Delete: + delete(allUps, up.Key) + } + } + + addrs := convertToGRPCAddress(allUps) + r.cc.UpdateState(gresolver.State{Addresses: addrs}) + } + } +} + +func convertToGRPCAddress(ups map[string]*endpoints.Update) []gresolver.Address { + var addrs []gresolver.Address + for _, up := range ups { + addr := gresolver.Address{ + Addr: up.Endpoint.Addr, + Metadata: up.Endpoint.Metadata, + } + addrs = append(addrs, addr) + } + return addrs +} + +// ResolveNow is a no-op here. +// It's just a hint, resolver can ignore this if it's not necessary. 
+func (r *resolver) ResolveNow(gresolver.ResolveNowOptions) {} + +func (r *resolver) Close() { + r.cancel() + r.wg.Wait() +} diff --git a/client/v3/op.go b/client_sdk/v3/op.go similarity index 80% rename from client/v3/op.go rename to client_sdk/v3/op.go index 6193d090a91..d11a40f3ebd 100644 --- a/client/v3/op.go +++ b/client_sdk/v3/op.go @@ -14,7 +14,11 @@ package clientv3 -import pb "go.etcd.io/etcd/api/v3/etcdserverpb" +import ( + "fmt" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) type opType int @@ -28,12 +32,10 @@ const ( var noPrefixEnd = []byte{0} -// Op represents an Operation that kv can execute. type Op struct { t opType - key []byte - end []byte - + key string + end string // for range limit int64 sort *SortOption @@ -57,29 +59,22 @@ type Op struct { // "--max-request-bytes" flag value + 512-byte fragment bool - // for put ignoreValue bool ignoreLease bool - // progressNotify is for progress updates. - progressNotify bool - // createdNotify is for created event - createdNotify bool - // filters for watchers - filterPut bool - filterDelete bool + progressNotify bool // 处理更新 + createdNotify bool // 创建事件 + filterPut bool // 过滤掉put事件 + filterDelete bool // 过滤掉delete事件 // for put - val []byte + val string leaseID LeaseID // txn cmps []Cmp thenOps []Op elseOps []Op - - isOptsWithFromKey bool - isOptsWithPrefix bool } // accessors / mutators @@ -95,13 +90,13 @@ func (op Op) Txn() ([]Cmp, []Op, []Op) { } // KeyBytes returns the byte slice holding the Op's key. -func (op Op) KeyBytes() []byte { return op.key } +func (op Op) KeyBytes() []byte { return []byte(op.key) } // WithKeyBytes sets the byte slice for the Op's key. -func (op *Op) WithKeyBytes(key []byte) { op.key = key } +func (op *Op) WithKeyBytes(key []byte) { op.key = string(key) } // RangeBytes returns the byte slice holding with the Op's range end, if any. 
-func (op Op) RangeBytes() []byte { return op.end } +func (op Op) RangeBytes() []byte { return []byte(op.end) } // Rev returns the requested revision, if any. func (op Op) Rev() int64 { return op.rev } @@ -137,13 +132,15 @@ func (op Op) MinCreateRev() int64 { return op.minCreateRev } func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } // WithRangeBytes sets the byte slice for the Op's range end. -func (op *Op) WithRangeBytes(end []byte) { op.end = end } +func (op *Op) WithRangeBytes(end []byte) { + op.end = string(end) +} // ValueBytes returns the byte slice holding the Op's value, if any. -func (op Op) ValueBytes() []byte { return op.val } +func (op Op) ValueBytes() []byte { return []byte(op.val) } // WithValueBytes sets the byte slice for the Op's value. -func (op *Op) WithValueBytes(v []byte) { op.val = v } +func (op *Op) WithValueBytes(v []byte) { op.val = string(v) } func (op Op) toRangeRequest() *pb.RangeRequest { if op.t != tRange { @@ -188,15 +185,16 @@ func (op Op) toTxnRequest() *pb.TxnRequest { func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: - return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} + return &pb.RequestOp{RequestOp_RequestRange: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} case tPut: r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} - return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} + return &pb.RequestOp{RequestOp_RequestPut: &pb.RequestOp_RequestPut{RequestPut: r}} case tDeleteRange: r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + fmt.Println("----->", r) + return &pb.RequestOp{RequestOp_RequestDeleteRange: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} case tTxn: - return &pb.RequestOp{Request: 
&pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} + return &pb.RequestOp{RequestOp_RequestTxn: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} default: panic("Unknown Op") } @@ -219,17 +217,13 @@ func (op Op) isWrite() bool { return op.t != tRange } -func NewOp() *Op { - return &Op{key: []byte("")} -} - // OpGet returns "get" operation based on given key and operation options. func OpGet(key string, opts ...OpOption) Op { // WithPrefix and WithFromKey are not supported together if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) { panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") } - ret := Op{t: tRange, key: []byte(key)} + ret := Op{t: tRange, key: key} ret.applyOpts(opts) return ret } @@ -240,7 +234,7 @@ func OpDelete(key string, opts ...OpOption) Op { if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) { panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") } - ret := Op{t: tDeleteRange, key: []byte(key)} + ret := Op{t: tDeleteRange, key: key} ret.applyOpts(opts) switch { case ret.leaseID != 0: @@ -269,10 +263,10 @@ func OpDelete(key string, opts ...OpOption) Op { // OpPut returns "put" operation based on given key-value and operation options. 
func OpPut(key, val string, opts ...OpOption) Op { - ret := Op{t: tPut, key: []byte(key), val: []byte(val)} + ret := Op{t: tPut, key: key, val: val} ret.applyOpts(opts) switch { - case ret.end != nil: + case ret.end != "": panic("unexpected range in put") case ret.limit != 0: panic("unexpected limit in put") @@ -301,24 +295,25 @@ func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} } +// 检查watch请求 func opWatch(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} + ret := Op{t: tRange, key: key} ret.applyOpts(opts) switch { case ret.leaseID != 0: - panic("unexpected lease in watch") + panic("unexpected watch中不能有租约") case ret.limit != 0: - panic("unexpected limit in watch") + panic("unexpected watch中不能有limit") case ret.sort != nil: - panic("unexpected sort in watch") + panic("unexpected watch中不能有sort") case ret.serializable: - panic("unexpected serializable in watch") + panic("unexpected watch中不能有 serializable") case ret.countOnly: - panic("unexpected countOnly in watch") + panic("unexpected watch中不能有countOnly") case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in watch") + panic("unexpected watch中不能过滤修订版本") case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in watch") + panic("unexpected watch中不能过滤创建时的修订版本") } return ret } @@ -337,8 +332,7 @@ func WithLease(leaseID LeaseID) OpOption { return func(op *Op) { op.leaseID = leaseID } } -// WithLimit limits the number of results to return from 'Get' request. -// If WithLimit is given a 0 limit, it is treated as no limit. +// WithLimit 限制从'Get'请求返回的结果的数量.如果给WithLimit一个0限制,则它被视为没有限制. func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } // WithRev specifies the store revision for 'Get' request. 
@@ -352,9 +346,9 @@ func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } func WithSort(target SortTarget, order SortOrder) OpOption { return func(op *Op) { if target == SortByKey && order == SortAscend { - // If order != SortNone, server fetches the entire key-space, + // If order != SortNone, etcd fetches the entire key-space, // and then applies the sort and limit, if provided. - // Since by default the server returns results sorted by keys + // Since by default the etcd returns results sorted by keys // in lexicographically ascending order, the client should ignore // SortOrder if the target is SortByKey. order = SortNone @@ -363,25 +357,25 @@ func WithSort(target SortTarget, order SortOrder) OpOption { } } -// GetPrefixRangeEnd gets the range end of the prefix. -// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. +// GetPrefixRangeEnd 得到前缀的范围端. +// 例如 1 ---> [1,2) func GetPrefixRangeEnd(prefix string) string { - return string(getPrefix([]byte(prefix))) + return getPrefix(prefix) } -func getPrefix(key []byte) []byte { +func getPrefix(key string) string { end := make([]byte, len(key)) copy(end, key) for i := len(end) - 1; i >= 0; i-- { if end[i] < 0xff { end[i] = end[i] + 1 end = end[:i+1] - return end + return string(end) } } // next prefix does not exist (e.g., 0xffff); // default to WithFromKey policy - return noPrefixEnd + return string(noPrefixEnd) } // WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate @@ -389,9 +383,8 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { - op.isOptsWithPrefix = true if len(op.key) == 0 { - op.key, op.end = []byte{0}, []byte{0} + op.key, op.end = string([]byte{0}), string([]byte{0}) return } op.end = getPrefix(op.key) @@ -401,9 +394,9 @@ func WithPrefix() OpOption { // WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. 
// For example, 'Get' requests with 'WithRange(end)' returns // the keys in the range [key, end). -// endKey must be lexicographically greater than start key. +// endKey must be lexicographically greater than start key. func WithRange(endKey string) OpOption { - return func(op *Op) { op.end = []byte(endKey) } + return func(op *Op) { op.end = endKey } } // WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests @@ -411,10 +404,9 @@ func WithRange(endKey string) OpOption { func WithFromKey() OpOption { return func(op *Op) { if len(op.key) == 0 { - op.key = []byte{0} + op.key = string([]byte{0}) } - op.end = []byte("\x00") - op.isOptsWithFromKey = true + op.end = string([]byte("\x00")) } } @@ -448,13 +440,10 @@ func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRe // WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } -// WithFirstCreate gets the key with the oldest creation revision in the request range. func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } -// WithLastCreate gets the key with the latest creation revision in the request range. func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } -// WithFirstKey gets the lexically first key in the request range. func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } // WithLastKey gets the lexically last key in the request range. @@ -471,7 +460,7 @@ func withTop(target SortTarget, order SortOrder) []OpOption { return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} } -// WithProgressNotify makes watch server send periodic progress updates +// WithProgressNotify makes watch etcd send periodic progress updates // every 10 minutes when there is no incoming events. // Progress updates have zero events in WatchResponse. 
func WithProgressNotify() OpOption { @@ -480,7 +469,7 @@ func WithProgressNotify() OpOption { } } -// WithCreatedNotify makes watch server sends the created event. +// WithCreatedNotify makes watch etcd sends the created event. func WithCreatedNotify() OpOption { return func(op *Op) { op.createdNotify = true @@ -507,11 +496,11 @@ func WithPrevKV() OpOption { // WithFragment to receive raw watch response with fragmentation. // Fragmentation is disabled by default. If fragmentation is enabled, -// etcd watch server will split watch response before sending to clients -// when the total size of watch events exceed server-side request limit. -// The default server-side request limit is 1.5 MiB, which can be configured +// etcd watch etcd will split watch response before sending to clients +// when the total size of watch events exceed etcd-side request limit. +// The default etcd-side request limit is 1.5 MiB, which can be configured // as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. -// See "etcdserver/api/v3rpc/watch.go" for more details. +// See "etcdserver/api/v3rpc/over_watch.go" for more details. func WithFragment() OpOption { return func(op *Op) { op.fragment = true } } @@ -563,37 +552,7 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi } // IsOptsWithPrefix returns true if WithPrefix option is called in the given opts. -func IsOptsWithPrefix(opts []OpOption) bool { - ret := NewOp() - for _, opt := range opts { - opt(ret) - } - - return ret.isOptsWithPrefix -} +func IsOptsWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) } // IsOptsWithFromKey returns true if WithFromKey option is called in the given opts. 
-func IsOptsWithFromKey(opts []OpOption) bool { - ret := NewOp() - for _, opt := range opts { - opt(ret) - } - - return ret.isOptsWithFromKey -} - -func (op Op) IsSortOptionValid() bool { - if op.sort != nil { - sortOrder := int32(op.sort.Order) - sortTarget := int32(op.sort.Target) - - if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok { - return false - } - - if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok { - return false - } - } - return true -} +func IsOptsWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) } diff --git a/client_sdk/v3/options.go b/client_sdk/v3/options.go new file mode 100644 index 00000000000..d5916e9c210 --- /dev/null +++ b/client_sdk/v3/options.go @@ -0,0 +1,45 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "math" + "time" + + "google.golang.org/grpc" +) + +var ( + // client-side handling retrying of request failures where data was not written to the wire or + // where etcd indicates it did not process the data. gRPC default is default is "WaitForReady(false)" + // but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to + // transient failures. 
+ defaultWaitForReady = grpc.WaitForReady(true) + defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) + defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) + defaultUnaryMaxRetries uint = 100 + defaultStreamMaxRetries = ^uint(0) // max uint + defaultBackoffWaitBetween = 25 * time.Millisecond // 重试间隔 + + // client-side retry backoff default jitter fraction. + defaultBackoffJitterFraction = 0.10 +) + +// "clientv3.Config" 默认的 "gRPC.CallOption". +var defaultCallOpts = []grpc.CallOption{ + defaultWaitForReady, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, +} diff --git a/client_sdk/v3/ordering/doc.go b/client_sdk/v3/ordering/doc.go new file mode 100644 index 00000000000..856f3305801 --- /dev/null +++ b/client_sdk/v3/ordering/doc.go @@ -0,0 +1,42 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ordering is a clientv3 wrapper that caches response header revisions +// to detect ordering violations from stale responses. Users may define a +// policy on how to handle the ordering violation, but typically the client +// should connect to another endpoint and reissue the request. +// +// The most common situation where an ordering violation happens is a client +// reconnects to a partitioned member and issues a serializable read. 
Since the +// partitioned member is likely behind the last member, it may return a Get +// response based on a store revision older than the store revision used to +// service a prior Get on the former endpoint. +// +// First, create a client: +// +// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) +// if err != nil { +// // handle error! +// } +// +// Next, override the client interface with the ordering wrapper: +// +// vf := func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error { +// return fmt.Errorf("ordering: issued %+v, got %+v, expected rev=%v", op, resp, prevRev) +// } +// cli.KV = ordering.NewKV(cli.KV, vf) +// +// Now calls using 'cli' will reject order violations with an error. +// +package ordering diff --git a/client_sdk/v3/ordering/kv.go b/client_sdk/v3/ordering/kv.go new file mode 100644 index 00000000000..139b6c87021 --- /dev/null +++ b/client_sdk/v3/ordering/kv.go @@ -0,0 +1,149 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ordering + +import ( + "context" + "sync" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" +) + +// kvOrdering ensures that serialized requests do not return +// get with revisions less than the previous +// returned revision. 
+type kvOrdering struct { + clientv3.KV + orderViolationFunc OrderViolationFunc + prevRev int64 + revMu sync.RWMutex +} + +func NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering { + return &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}} +} + +func (kv *kvOrdering) getPrevRev() int64 { + kv.revMu.RLock() + defer kv.revMu.RUnlock() + return kv.prevRev +} + +func (kv *kvOrdering) setPrevRev(currRev int64) { + kv.revMu.Lock() + defer kv.revMu.Unlock() + if currRev > kv.prevRev { + kv.prevRev = currRev + } +} + +func (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { + // prevRev is stored in a local variable in order to record the prevRev + // at the beginning of the Get operation, because concurrent + // access to kvOrdering could change the prevRev field in the + // middle of the Get operation. + prevRev := kv.getPrevRev() + op := clientv3.OpGet(key, opts...) + for { + r, err := kv.KV.Do(ctx, op) + if err != nil { + return nil, err + } + resp := r.Get() + if resp.Header.Revision == prevRev { + return resp, nil + } else if resp.Header.Revision > prevRev { + kv.setPrevRev(resp.Header.Revision) + return resp, nil + } + err = kv.orderViolationFunc(op, r, prevRev) + if err != nil { + return nil, err + } + } +} + +func (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn { + return &txnOrdering{ + kv.KV.Txn(ctx), + kv, + ctx, + sync.Mutex{}, + []clientv3.Cmp{}, + []clientv3.Op{}, + []clientv3.Op{}, + } +} + +// txnOrdering ensures that serialized requests do not return +// txn responses with revisions less than the previous +// returned revision. +type txnOrdering struct { + clientv3.Txn + *kvOrdering + ctx context.Context + mu sync.Mutex + cmps []clientv3.Cmp + thenOps []clientv3.Op + elseOps []clientv3.Op +} + +func (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + txn.cmps = cs + txn.Txn.If(cs...) 
+ return txn +} + +func (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + txn.thenOps = ops + txn.Txn.Then(ops...) + return txn +} + +func (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + txn.elseOps = ops + txn.Txn.Else(ops...) + return txn +} + +func (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) { + // prevRev is stored in a local variable in order to record the prevRev + // at the beginning of the Commit operation, because concurrent + // access to txnOrdering could change the prevRev field in the + // middle of the Commit operation. + prevRev := txn.getPrevRev() + opTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps) + for { + opResp, err := txn.KV.Do(txn.ctx, opTxn) + if err != nil { + return nil, err + } + txnResp := opResp.Txn() + if txnResp.Header.Revision >= prevRev { + txn.setPrevRev(txnResp.Header.Revision) + return txnResp, nil + } + err = txn.orderViolationFunc(opTxn, opResp, prevRev) + if err != nil { + return nil, err + } + } +} diff --git a/client_sdk/v3/ordering/util.go b/client_sdk/v3/ordering/util.go new file mode 100644 index 00000000000..ce7f3deac44 --- /dev/null +++ b/client_sdk/v3/ordering/util.go @@ -0,0 +1,42 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ordering + +import ( + "errors" + "sync/atomic" + + "github.com/ls-2018/etcd_cn/client_sdk/v3" +) + +type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error + +var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision") + +func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFunc { + violationCount := int32(0) + return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error { + // Each request is assigned by round-robin load-balancer's picker to a different + // endpoints. If we cycled them 5 times (even with some level of concurrency), + // with high probability no endpoint points on a member with fresh data. + // TODO: Ideally we should track members (resp.opp.Header) that returned + // stale result and explicitly temporarily disable them in 'picker'. + if atomic.LoadInt32(&violationCount) > int32(5*len(c.Endpoints())) { + return ErrNoGreaterRev + } + atomic.AddInt32(&violationCount, 1) + return nil + } +} diff --git a/client/v3/retry.go b/client_sdk/v3/retry.go similarity index 98% rename from client/v3/retry.go rename to client_sdk/v3/retry.go index 69ecc631471..6f0616a4952 100644 --- a/client/v3/retry.go +++ b/client_sdk/v3/retry.go @@ -17,8 +17,8 @@ package clientv3 import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -46,14 +46,14 @@ func (rp retryPolicy) String() string { // isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry. // // immutable requests (e.g. Get) should be retried unless it's -// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge). +// an obvious etcd-side error (e.g. rpctypes.ErrRequestTooLarge). 
// // Returning "false" means retry should stop, since client cannot // handle itself even with retries. func isSafeRetryImmutableRPC(err error) bool { eErr := rpctypes.Error(err) if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { - // interrupted by non-transient server-side or gRPC-side error + // interrupted by non-transient etcd-side or gRPC-side error // client cannot handle itself (e.g. rpctypes.ErrCompacted) return false } @@ -101,6 +101,7 @@ func RetryKVClient(c *Client) pb.KVClient { kc: pb.NewKVClient(c.conn), } } + func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...) } diff --git a/client/v3/retry_interceptor.go b/client_sdk/v3/retry_interceptor.go similarity index 79% rename from client/v3/retry_interceptor.go rename to client_sdk/v3/retry_interceptor.go index 6f15a9c9739..508f77c4626 100644 --- a/client/v3/retry_interceptor.go +++ b/client_sdk/v3/retry_interceptor.go @@ -19,24 +19,23 @@ package clientv3 import ( "context" - "errors" + "fmt" "io" "sync" "time" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "github.com/ls-2018/etcd_cn/code_debug/conf" + + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" ) -// unaryClientInterceptor returns a new retrying unary client interceptor. -// -// The default configuration of the interceptor is to not retry *at all*. This behaviour can be -// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). 
func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { @@ -52,23 +51,23 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil { return err } - c.GetLogger().Debug( - "retrying of unary invoker", - zap.String("target", cc.Target()), - zap.String("method", method), - zap.Uint("attempt", attempt), - ) + c.GetLogger().Debug("重试调用", zap.String("target", cc.Target()), zap.Uint("attempt", attempt)) + if !conf.Perf { + switch v := req.(type) { + case *pb.TxnRequest: + d, _ := v.Marshal() + fmt.Println("--->:", string(d)) // key:"a" value:"b" + default: + fmt.Println("--->:", req) // key:"a" value:"b" + } + fmt.Println("--->:", method) // /etcdserverpb.KV/Put + } + lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) if lastErr == nil { return nil } - c.GetLogger().Warn( - "retrying of unary invoker failed", - zap.String("target", cc.Target()), - zap.String("method", method), - zap.Uint("attempt", attempt), - zap.Error(lastErr), - ) + c.GetLogger().Warn("重试一元调用失败", zap.String("target", cc.Target()), zap.Uint("attempt", attempt), zap.Error(lastErr)) if isContextError(lastErr) { if ctx.Err() != nil { // its the context deadline or cancellation. @@ -78,18 +77,24 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien continue } if c.shouldRefreshToken(lastErr, callOpts) { - gtErr := c.refreshToken(ctx) - if gtErr != nil { + // clear auth token before refreshing it. 
+ // call c.Auth.Authenticate with an invalid token will always fail the auth check on the etcd-side, + // if the etcd has not apply the patch of pr #12165 (https://github.com/etcd-io/etcd/pull/12165) + // and a rpctypes.ErrInvalidAuthToken will recursively call c.getToken until system run out of resource. + c.authTokenBundle.UpdateAuthToken("") + + gterr := c.getToken(ctx) + if gterr != nil { c.GetLogger().Warn( "retrying of unary invoker failed to fetch new auth token", zap.String("target", cc.Target()), - zap.Error(gtErr), + zap.Error(gterr), ) - return gtErr // lastErr must be invalid auth token + return gterr // lastErr必须是invalid auth token } continue } - if !isSafeRetry(c, lastErr, callOpts) { + if !isSafeRetry(c.lg, lastErr, callOpts) { return lastErr } } @@ -97,7 +102,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien } } -// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// streamClientInterceptor returns a new retrying stream client interceptor for etcd side streaming calls. // // The default configuration of the interceptor is to not retry *at all*. This behaviour can be // changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). @@ -109,12 +114,15 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { ctx = withVersion(ctx) - // getToken automatically. Otherwise, auth token may be invalid after watch reconnection because the token has expired - // (see https://github.com/etcd-io/etcd/issues/11954 for more). 
- err := c.getToken(ctx) - if err != nil { - c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err)) - return nil, err + // getToken automatically + // TODO(cfc4n): keep this code block, remove codes about getToken in client.go after pr #12165 merged. + if c.authTokenBundle != nil { + // equal to c.Username != "" && c.Password != "" + err := c.getToken(ctx) + if err != nil && rpctypes.Error(err) != rpctypes.ErrAuthNotEnabled { + c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err)) + return nil, err + } } grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) @@ -147,7 +155,7 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli // and returns a boolean value. func (c *Client) shouldRefreshToken(err error, callOpts *options) bool { if rpctypes.Error(err) == rpctypes.ErrUserEmpty { - // refresh the token when username, password is present but the server returns ErrUserEmpty + // refresh the token when username, password is present but the etcd returns ErrUserEmpty // which is possible when the client token is cleared somehow return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != "" } @@ -156,23 +164,6 @@ func (c *Client) shouldRefreshToken(err error, callOpts *options) bool { (rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision) } -func (c *Client) refreshToken(ctx context.Context) error { - if c.authTokenBundle == nil { - // c.authTokenBundle will be initialized only when - // c.Username != "" && c.Password != "". - // - // When users use the TLS CommonName based authentication, the - // authTokenBundle is always nil. But it's possible for the clients - // to get `rpctypes.ErrAuthOldRevision` response when the clients - // concurrently modify auth data (e.g, addUser, deleteUser etc.). 
- // In this case, there is no need to refresh the token; instead the - // clients just need to retry the operations (e.g. Put, Delete etc). - return nil - } - - return c.getToken(ctx) -} - // type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a // proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish // a new ClientStream according to the retry policy. @@ -271,15 +262,18 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return true, err } if s.client.shouldRefreshToken(err, s.callOpts) { - gtErr := s.client.refreshToken(s.ctx) - if gtErr != nil { - s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gtErr)) + // clear auth token to avoid failure when call getToken + s.client.authTokenBundle.UpdateAuthToken("") + + gterr := s.client.getToken(s.ctx) + if gterr != nil { + s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr)) return false, err // return the original error for simplicity } return true, err } - return isSafeRetry(s.client, err, s.callOpts), err + return isSafeRetry(s.client.lg, err, s.callOpts), err } func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { @@ -319,28 +313,17 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro } // isSafeRetry returns "true", if request is safe for retry with the given error. -func isSafeRetry(c *Client, err error, callOpts *options) bool { +func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool { if isContextError(err) { return false } - - // Situation when learner refuses RPC it is supposed to not serve is from the server - // perspective not retryable. - // But for backward-compatibility reasons we need to support situation that - // customer provides mix of learners (not yet voters) and voters with an - // expectation to pick voter in the next attempt. 
- // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability. - if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 { - return true - } - switch callOpts.retryPolicy { case repeatable: return isSafeRetryImmutableRPC(err) case nonRepeatable: return isSafeRetryMutableRPC(err) default: - c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) + lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) return false } } @@ -360,14 +343,12 @@ func contextErrToGrpcErr(err error) error { } } -var ( - defaultOptions = &options{ - retryPolicy: nonRepeatable, - max: 0, // disable - backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), - retryAuth: true, - } -) +var defaultOptions = &options{ + retryPolicy: nonRepeatable, + max: 0, // disable + backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), + retryAuth: true, +} // backoffFunc denotes a family of functions that control the backoff duration between call retries. // @@ -391,7 +372,7 @@ func withMax(maxRetries uint) retryOption { }} } -// WithBackoff sets the `BackoffFunc` used to control time between retries. +// WithBackoff sets the `BackoffFunc `used to control time between retries. 
func withBackoff(bf backoffFunc) retryOption { return retryOption{applyFunc: func(o *options) { o.backoffFunc = bf diff --git a/client/v3/snapshot/doc.go b/client_sdk/v3/snapshot/doc.go similarity index 100% rename from client/v3/snapshot/doc.go rename to client_sdk/v3/snapshot/doc.go diff --git a/client_sdk/v3/snapshot/v3_snapshot.go b/client_sdk/v3/snapshot/v3_snapshot.go new file mode 100644 index 00000000000..533fb511439 --- /dev/null +++ b/client_sdk/v3/snapshot/v3_snapshot.go @@ -0,0 +1,98 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package snapshot + +import ( + "context" + "crypto/sha256" + "fmt" + "io" + "os" + "time" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/dustin/go-humanize" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "go.uber.org/zap" +) + +// hasChecksum returns "true" if the file size "n" +// has appended sha256 hash digest. +func hasChecksum(n int64) bool { + // 512 is chosen because it's a minimum disk sector size + // smaller than (and multiplies to) OS page size in most systems + return (n % 512) == sha256.Size +} + +// Save 从远程etcd获取快照并将数据保存到目标路径.如果上下文 "ctx "被取消或超时, +// 快照保存流将出错(例如,context.Canceled,context.DeadlineExceeded). +// 请确保在客户端配置中只指定一个端点.必须向选定的节点请求快照API,而保存的快照是选定节点的时间点状态. 
+func Save(ctx context.Context, lg *zap.Logger, cfg clientv3.Config, dbPath string) error { + if lg == nil { + lg = zap.NewExample() + } + cfg.Logger = lg.Named("client") + if len(cfg.Endpoints) != 1 { + return fmt.Errorf("保存快照时,必须指定一个endpoint %v", cfg.Endpoints) + } + cli, err := clientv3.New(cfg) + if err != nil { + return err + } + defer cli.Close() + + partpath := dbPath + ".part" + defer os.RemoveAll(partpath) + + var f *os.File + f, err = os.OpenFile(partpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileutil.PrivateFileMode) + if err != nil { + return fmt.Errorf("不能打开 %s (%v)", partpath, err) + } + lg.Info("创建临时快照文件", zap.String("path", partpath)) + + now := time.Now() + var rd io.ReadCloser + rd, err = cli.Snapshot(ctx) + if err != nil { + return err + } + lg.Info("获取快照ing", zap.String("endpoint", cfg.Endpoints[0])) + var size int64 + size, err = io.Copy(f, rd) + if err != nil { + return err + } + if !hasChecksum(size) { + return fmt.Errorf("sha256校验和为发现 [bytes: %d]", size) + } + if err = fileutil.Fsync(f); err != nil { + return err + } + if err = f.Close(); err != nil { + return err + } + lg.Info("已获取快照数据", zap.String("endpoint", cfg.Endpoints[0]), + zap.String("size", humanize.Bytes(uint64(size))), + zap.String("took", humanize.Time(now)), + ) + + if err = os.Rename(partpath, dbPath); err != nil { + return fmt.Errorf("重命名失败 %s to %s (%v)", partpath, dbPath, err) + } + lg.Info("已保存", zap.String("path", dbPath)) + return nil +} diff --git a/client/v3/sort.go b/client_sdk/v3/sort.go similarity index 95% rename from client/v3/sort.go rename to client_sdk/v3/sort.go index 2bb9d9a13b7..9918ea927fe 100644 --- a/client/v3/sort.go +++ b/client_sdk/v3/sort.go @@ -14,8 +14,10 @@ package clientv3 -type SortTarget int -type SortOrder int +type ( + SortTarget int + SortOrder int +) const ( SortNone SortOrder = iota diff --git a/client_sdk/v3/txn.go b/client_sdk/v3/txn.go new file mode 100644 index 00000000000..156b5b91435 --- /dev/null +++ b/client_sdk/v3/txn.go @@ -0,0 
+1,140 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "sync" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "google.golang.org/grpc" +) + +// Txn is the interface that wraps mini-transactions. +// +// Txn(context.TODO()).If( +// Compare(Value(k1), ">", v1), +// Compare(Version(k1), "=", 2) +// ).Then( +// OpPut(k2,v2), OpPut(k3,v3) +// ).Else( +// OpPut(k4,v4), OpPut(k5,v5) +// ).Commit() +// +type Txn interface { + If(cs ...Cmp) Txn + Then(ops ...Op) Txn + Else(ops ...Op) Txn + Commit() (*TxnResponse, error) +} + +type txn struct { + kv *kv + ctx context.Context + + mu sync.Mutex + cif bool + cthen bool + celse bool + + isWrite bool + + cmps []*pb.Compare + + sus []*pb.RequestOp + fas []*pb.RequestOp + + callOpts []grpc.CallOption +} + +func (txn *txn) If(cs ...Cmp) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cif { + panic("cannot call If twice!") + } + + if txn.cthen { + panic("cannot call If after Then!") + } + + if txn.celse { + panic("cannot call If after Else!") + } + + txn.cif = true + + for i := range cs { + txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) + } + + return txn +} + +func (txn *txn) Then(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.cthen { + panic("cannot call Then twice!") + } + if txn.celse { + panic("cannot call Then after Else!") + } + + txn.cthen = true + + for _, op := range ops { + txn.isWrite 
= txn.isWrite || op.isWrite() + txn.sus = append(txn.sus, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Else(ops ...Op) Txn { + txn.mu.Lock() + defer txn.mu.Unlock() + + if txn.celse { + panic("cannot call Else twice!") + } + + txn.celse = true + + for _, op := range ops { + txn.isWrite = txn.isWrite || op.isWrite() + txn.fas = append(txn.fas, op.toRequestOp()) + } + + return txn +} + +func (txn *txn) Commit() (*TxnResponse, error) { + txn.mu.Lock() + defer txn.mu.Unlock() + + r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} + + var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) + if err != nil { + return nil, toErr(txn.ctx, err) + } + return (*TxnResponse)(resp), nil +} diff --git a/client_sdk/v3/utils.go b/client_sdk/v3/utils.go new file mode 100644 index 00000000000..b998c41b90f --- /dev/null +++ b/client_sdk/v3/utils.go @@ -0,0 +1,49 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "math/rand" + "reflect" + "runtime" + "strings" + "time" +) + +// jitterUp adds random jitter to the duration. +// +// This adds or subtracts time from the duration within a given jitter fraction. 
+// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) +// +// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils +func jitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// Check if the provided function is being called in the op options. +func isOpFuncCalled(op string, opts []OpOption) bool { + for _, opt := range opts { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Func { + if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil { + if strings.Contains(opFunc.Name(), op) { + return true + } + } + } + } + return false +} diff --git a/client_sdk/v3/watch.go b/client_sdk/v3/watch.go new file mode 100644 index 00000000000..5daf7a6eb66 --- /dev/null +++ b/client_sdk/v3/watch.go @@ -0,0 +1,964 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package clientv3 + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + v3rpc "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + EventTypeDelete = mvccpb.DELETE + EventTypePut = mvccpb.PUT + + closeSendErrTimeout = 250 * time.Millisecond +) + +type Event mvccpb.Event + +type WatchChan <-chan WatchResponse + +type Watcher interface { + Watch(ctx context.Context, key string, opts ...OpOption) WatchChan + // RequestProgress requests a progress notify response be sent in all watch channels. + RequestProgress(ctx context.Context) error + // Close closes the watcher and cancels all watch requests. + Close() error +} + +type WatchResponse struct { + Header pb.ResponseHeader + Events []*Event + + // CompactRevision is the minimum revision the watcher may receive. + CompactRevision int64 + + CreatedRevision int64 + + // Canceled is used to indicate watch failure. + // If the watch failed and the stream was about to close, before the channel is closed, + // the channel sends a final response that has Canceled set to true with a non-nil Err(). + Canceled bool + + // Created is used to indicate the creation of the watcher. + Created bool + + closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string +} + +// IsCreate returns true if the event tells that the key is newly created. +func (e *Event) IsCreate() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision +} + +// IsModify returns true if the event tells that a new value is put on existing key. +func (e *Event) IsModify() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision +} + +// Err is the error value if this WatchResponse holds an error. 
+func (wr *WatchResponse) Err() error { + switch { + case wr.closeErr != nil: + return v3rpc.Error(wr.closeErr) + case wr.CompactRevision != 0: + return v3rpc.ErrCompacted + case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) + } + return v3rpc.ErrFutureRev + } + return nil +} + +// IsProgressNotify returns true if the WatchResponse is progress notification. +func (wr *WatchResponse) IsProgressNotify() bool { + return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 +} + +type watcher struct { + remote pb.WatchClient // 可以与后端通信的客户端 + callOpts []grpc.CallOption // + mu sync.Mutex // + streams map[string]*watchGrpcStream // 持有CTX 键值对的所有活动的GRPC流. + lg *zap.Logger // +} + +// watchGrpcStream tracks all watch resources attached to a single grpc stream. +type watchGrpcStream struct { + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption + ctx context.Context // remote.Watch requests + ctxKey string // ctxKey 用来找流的上下文信息 + cancel context.CancelFunc + substreams map[int64]*watcherStream // 持有此 grpc 流上的所有活动的watchers + resuming []*watcherStream // 恢复保存此 grpc 流上的所有正在恢复的观察者 + reqc chan watchStreamRequest // reqc 从 Watch() 向主协程发送观察请求 + respc chan *pb.WatchResponse // respc 从 watch 客户端接收数据 + donec chan struct{} // donec 通知广播进行退出 + errc chan error + closingc chan *watcherStream // 获取关闭观察者的观察者流 + wg sync.WaitGroup // 当所有子流 goroutine 都退出时,wg 完成 + resumec chan struct{} // resumec 关闭以表示所有子流都应开始恢复 + closeErr error // closeErr 是关闭监视流的错误 + lg *zap.Logger +} + +// watchStreamRequest is a union of the supported watch request operation types +type watchStreamRequest interface { + toPB() *pb.WatchRequest +} + +type watchRequest struct { + ctx context.Context + key string + end string + rev int64 + createdNotify bool // 如果该字段为true,则发送创建的通知事件 + progressNotify bool // 进度更新 + fragment bool // 是否切分响应,当数据较大时 + filters 
[]pb.WatchCreateRequest_FilterType + prevKV bool + retc chan chan WatchResponse +} + +// progressRequest is issued by the subscriber to request watch progress +type progressRequest struct{} + +// watcherStream 代表注册的观察者 +// watch()时,构造watchgrpcstream时构造的watcherStream,用于封装一个watch rpc请求,包含订阅监听key,通知key变更通道,一些重要标志. +type watcherStream struct { + initReq watchRequest // initReq 是发起这个请求的请求 + outc chan WatchResponse // outc 向订阅者发布watch响应 + recvc chan *WatchResponse // recvc buffers watch responses before publishing + donec chan struct{} // 当 watcherStream goroutine 停止时 donec 关闭 + closing bool // 当应该安排流关闭时,closures 设置为 true. + id int64 // id 是在 grpc 流上注册的 watch id + buf []*WatchResponse // buf 保存从 etcd 收到但尚未被客户端消费的所有事件 +} + +func NewWatcher(c *Client) Watcher { + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) +} + +// NewWatchFromWatchClient watch客户端,已经建立链接 +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ + remote: wc, + streams: make(map[string]*watchGrpcStream), + } + if c != nil { + w.callOpts = c.callOpts + w.lg = c.lg + } + return w +} + +// never closes +var valCtxCh = make(chan struct{}) +var zeroTime = time.Unix(0, 0) + +// ctx with only the values; never Done +type valCtx struct{ context.Context } + +func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } +func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } +func (vc *valCtx) Err() error { return nil } + +// 与后端建立流 gRPC调用,请求放入serverWatchStream.recvLoop() +func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { + ctx, cancel := context.WithCancel(&valCtx{inctx}) + wgs := &watchGrpcStream{ + owner: w, + remote: w.remote, + callOpts: w.callOpts, + ctx: ctx, + ctxKey: streamKeyFromCtx(inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), + respc: make(chan *pb.WatchResponse), + reqc: make(chan watchStreamRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan 
*watcherStream), + resumec: make(chan struct{}), + lg: w.lg, + } + go wgs.run() + return wgs +} + +// Watch 提交watch请求,等待返回响应 +func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { + ow := opWatch(key, opts...) // 检查watch请求 + + var filters []pb.WatchCreateRequest_FilterType + if ow.filterPut { + filters = append(filters, pb.WatchCreateRequest_NOPUT) + } + if ow.filterDelete { + filters = append(filters, pb.WatchCreateRequest_NODELETE) + } + + wr := &watchRequest{ + ctx: ctx, + createdNotify: ow.createdNotify, + key: ow.key, + end: ow.end, + rev: ow.rev, + progressNotify: ow.progressNotify, + fragment: ow.fragment, + filters: filters, + prevKV: ow.prevKV, + retc: make(chan chan WatchResponse, 1), + } + + ok := false + ctxKey := streamKeyFromCtx(ctx) // map[hasleader:[true]] + + var closeCh chan WatchResponse + for { + // 找到或分配适当的GRPC表流 链接复用 + w.mu.Lock() + if w.streams == nil { // 初始化结构体 + // closed + w.mu.Unlock() + ch := make(chan WatchResponse) + close(ch) + return ch + } + // streams是一个map,保存所有由 ctx 值键控的活动 grpc 流 + // 如果该请求对应的流为空,则新建 + wgs := w.streams[ctxKey] + if wgs == nil { + // newWatcherGrpcStream new一个watch grpc stream来传输watch请求 + // 创建goroutine来处理监听key的watch各种事件 + wgs = w.newWatcherGrpcStream(ctx) // 客户端返回watch流 + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + // couldn't create channel; return closed channel + if closeCh == nil { + closeCh = make(chan WatchResponse, 1) + } + + // submit request + select { + case reqc <- wr: // reqc 从 Watch() 向主协程发送观察请求 + ok = true + case <-wr.ctx.Done(): + ok = false + case <-donec: + ok = false + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + continue + } + + // receive channel + if ok { + select { + case ret := <-wr.retc: + return ret + case <-ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, 
closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + continue + } + } + break + } + + close(closeCh) + return closeCh +} + +func (w *watcher) Close() (err error) { + w.mu.Lock() + streams := w.streams + w.streams = nil + w.mu.Unlock() + for _, wgs := range streams { + if werr := wgs.close(); werr != nil { + err = werr + } + } + // Consider context.Canceled as a successful close + if err == context.Canceled { + err = nil + } + return err +} + +// RequestProgress requests a progress notify response be sent in all watch channels. +func (w *watcher) RequestProgress(ctx context.Context) (err error) { + ctxKey := streamKeyFromCtx(ctx) + + w.mu.Lock() + if w.streams == nil { + w.mu.Unlock() + return fmt.Errorf("no stream found for context") + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) // 客户端建立watch流 + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + pr := &progressRequest{} + + select { + case reqc <- pr: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-donec: + if wgs.closeErr != nil { + return wgs.closeErr + } + // retry; may have dropped stream from no ctxs + return w.RequestProgress(ctx) + } +} + +func (w *watchGrpcStream) close() (err error) { + w.cancel() + <-w.donec + select { + case err = <-w.errc: + default: + } + return toErr(w.ctx, err) +} + +func (w *watcher) closeStream(wgs *watchGrpcStream) { + w.mu.Lock() + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + delete(w.streams, wgs.ctxKey) + } + w.mu.Unlock() +} + +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { + // check watch ID for backward compatibility (<= v3.3) + if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") { + w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) + // failed; no channel + close(ws.recvc) + return + } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} + +func (w *watchGrpcStream) 
sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): + } + close(ws.outc) +} + +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } + // close subscriber's channel + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr}) + } else if ws.outc != nil { + close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + } + } +} + +// run 用于管理watch client +func (w *watchGrpcStream) run() { + var wc pb.Watch_WatchClient + var closeErr error + + // 子流标记为关闭,但goroutine仍在运行;需要避免双关闭recvc在GRPC流拆卸 + closing := make(map[*watcherStream]struct{}) + + defer func() { + w.closeErr = closeErr + // 关闭子流并恢复子流 + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + w.joinSubstreams() + for range closing { + w.closeSubstream(<-w.closingc) + } + w.wg.Wait() + w.owner.closeStream(w) + }() + + // 与etcd开启grpc流 + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + + cancelSet := make(map[int64]struct{}) + + var cur *pb.WatchResponse + for { + select { + // Watch() requested + case req := <-w.reqc: + switch wreq := req.(type) { + case *watchRequest: + outc := make(chan WatchResponse, 1) + // TODO: pass custom watch ID? 
+ ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbuffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + if err := wc.Send(ws.initReq.toPB()); err != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + case *progressRequest: + if err := wc.Send(wreq.toPB()); err != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + + case pbresp := <-w.respc: // 来自watch client的新事件 + if cur == nil || pbresp.Created || pbresp.Canceled { + cur = pbresp + } else if cur != nil && cur.WatchId == pbresp.WatchId { + // 合并新事件 + cur.Events = append(cur.Events, pbresp.Events...) + // update "Fragment" field; last response with "Fragment" == false + cur.Fragment = pbresp.Fragment + } + + switch { + case pbresp.Created: // 表示是创建的请求 + // response to head of queue creation + if len(w.resuming) != 0 { + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + } + + if ws := w.nextResume(); ws != nil { + if err := wc.Send(ws.initReq.toPB()); err != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + + // reset for next iteration + cur = nil + + case pbresp.Canceled && pbresp.CompactRevision == 0: + delete(cancelSet, pbresp.WatchId) + if ws, ok := w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc + close(ws.recvc) + closing[ws] = struct{}{} + } + + // reset for next iteration + cur = nil + + case cur.Fragment: // 因为是流的方式传输,所以支持分片传输,遇到分片事件直接跳过 + continue + + default: + // dispatch to appropriate watch stream + ok := w.dispatchEvent(cur) + + // reset for next iteration + cur = nil + + if ok { + break + } + + // watch response on 
unexpected watch id; cancel id + if _, ok := cancelSet[pbresp.WatchId]; ok { + break + } + + cancelSet[pbresp.WatchId] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: pbresp.WatchId, + }, + } + req := &pb.WatchRequest{WatchRequest_CancelRequest: cr} + w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId)) + if err := wc.Send(req); err != nil { + w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err)) + } + } + + // 查看client Recv失败.如果可能,生成另一个,重新尝试发送watch请求 + // 证明发送watch请求失败,会创建watch client再次尝试发送 + + case err := <-w.errc: + if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + closeErr = err + return + } + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + if ws := w.nextResume(); ws != nil { + if err := wc.Send(ws.initReq.toPB()); err != nil { + w.lg.Debug("error when sending request", zap.Error(err)) + } + } + cancelSet = make(map[int64]struct{}) + + case <-w.ctx.Done(): + return + + case ws := <-w.closingc: + w.closeSubstream(ws) + delete(closing, ws) + // no more watchers on this stream, shutdown, skip cancellation + if len(w.substreams)+len(w.resuming) == 0 { + return + } + if ws.id != -1 { + // client is closing an established watch; close it on the etcd proactively instead of waiting + // to close when the next message arrives + cancelSet[ws.id] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: ws.id, + }, + } + req := &pb.WatchRequest{ + WatchRequest_CancelRequest: cr, + } + w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id)) + if err := wc.Send(req); err != nil { + w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err)) + } + } + } + } +} + +// nextResume chooses the next resuming to register with the grpc stream. 
Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. +func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] + } + w.resuming = w.resuming[1:len(w.resuming)] + } + return nil +} + +// dispatchEvent sends a WatchResponse to the appropriate watcher stream +func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { + events := make([]*Event, len(pbresp.Events)) + for i, ev := range pbresp.Events { + events[i] = (*Event)(ev) + } + // TODO: return watch ID? + wr := &WatchResponse{ + Header: *pbresp.Header, + Events: events, + CompactRevision: pbresp.CompactRevision, + Created: pbresp.Created, + Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + + // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to + // indicate they should be broadcast. + if wr.IsProgressNotify() && pbresp.WatchId == -1 { + return w.broadcastResponse(wr) + } + + return w.unicastResponse(wr, pbresp.WatchId) +} + +// broadcastResponse send a watch response to all watch substreams. +func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { + for _, ws := range w.substreams { + select { + case ws.recvc <- wr: + case <-ws.donec: + } + } + return true +} + +// unicastResponse sends a watch response to a specific watch substream. 
+func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { + ws, ok := w.substreams[watchId] + if !ok { + return false + } + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } + return true +} + +// serveWatchClient forwards messages from the grpc stream to run() +func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { + for { + resp, err := wc.Recv() + if err != nil { + select { + case w.errc <- err: + case <-w.donec: + } + return + } + select { + case w.respc <- resp: + case <-w.donec: + return + } + } +} + +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev + resuming := false + defer func() { + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws + } + w.wg.Done() + }() + + emptyWr := &WatchResponse{} + for { + curWr := emptyWr + outc := ws.outc + + if len(ws.buf) > 0 { + curWr = ws.buf[0] + } else { + outc = nil + } + select { + case outc <- *curWr: + if ws.buf[0].Err() != nil { + return + } + ws.buf[0] = nil + ws.buf = ws.buf[1:] + case wr, ok := <-ws.recvc: + if !ok { + // shutdown from closeSubstream + return + } + + if wr.Created { + if ws.initReq.retc != nil { + ws.initReq.retc <- ws.outc + // to prevent next write from taking the slot in buffered channel + // and posting duplicate create events + ws.initReq.retc = nil + + // send first creation event only if requested + if ws.initReq.createdNotify { + ws.outc <- *wr + } + // once the watch channel is returned, a current revision + // watch must resume at the store revision. 
This is necessary + // for the following case to work as expected: + // wch := m1.Watch("a") + // m2.Put("a", "b") + // <-wch + // If the revision is only bound on the first observed event, + // if wch is disconnected before the Put is issued, then reconnects + // after it is committed, it'll miss the Put. + if ws.initReq.rev == 0 { + nextRev = wr.Header.Revision + } + } + } else { + // current progress of watch; <= store revision + nextRev = wr.Header.Revision + } + + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 + } + ws.initReq.rev = nextRev + + // created event is already sent above, + // watcher should not post duplicate events + if wr.Created { + continue + } + + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + case <-w.ctx.Done(): + return + case <-ws.initReq.ctx.Done(): + return + case <-resumec: + resuming = true + return + } + } + // lazily send cancel message if events on missing id +} + +func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { + // 将所有子流标记为恢复 + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + // strip out nils, if any + var resuming []*watcherStream + for _, ws := range w.resuming { + if ws != nil { + resuming = append(resuming, ws) + } + } + w.resuming = resuming + w.substreams = make(map[int64]*watcherStream) + + // connect to grpc stream while accepting watcher cancelation + stopc := make(chan struct{}) + donec := w.waitCancelSubstreams(stopc) + wc, err := w.openWatchClient() + close(stopc) + <-donec + + // serve all non-closing streams, even if there's a client error + // so that the teardown path can shutdown the streams as expected. 
+ for _, ws := range w.resuming { + if ws.closing { + continue + } + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + } + + if err != nil { + return nil, v3rpc.Error(err) + } + + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil +} + +func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { + var wg sync.WaitGroup + wg.Add(len(w.resuming)) + donec := make(chan struct{}) + for i := range w.resuming { + go func(ws *watcherStream) { + defer wg.Done() + if ws.closing { + if ws.initReq.ctx.Err() != nil && ws.outc != nil { + close(ws.outc) + ws.outc = nil + } + return + } + select { + case <-ws.initReq.ctx.Done(): + // closed ws will be removed from resuming + ws.closing = true + close(ws.outc) + ws.outc = nil + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() + case <-stopc: + } + }(w.resuming[i]) + } + go func() { + defer close(donec) + wg.Wait() + }() + return donec +} + +// joinSubstreams 等待所有sub stream完成 +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec + } + } +} + +var maxBackoff = 100 * time.Millisecond + +// openWatchClient retries opening a watch client until success or halt. 
+// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false +func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { + backoff := time.Millisecond + for { + select { + case <-w.ctx.Done(): + if err == nil { + return nil, w.ctx.Err() + } + return nil, err + default: + } + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { + break + } + if isHaltErr(w.ctx, err) { + return nil, v3rpc.Error(err) + } + if isUnavailableErr(w.ctx, err) { + // retry, but backoff + if backoff < maxBackoff { + // 25% backoff factor + backoff = backoff + backoff/4 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + time.Sleep(backoff) + } + } + return ws, nil +} + +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. +func (wr *watchRequest) toPB() *pb.WatchRequest { + req := &pb.WatchCreateRequest{ + StartRevision: wr.rev, + Key: string([]byte(wr.key)), + RangeEnd: string([]byte(wr.end)), + ProgressNotify: wr.progressNotify, + Filters: wr.filters, + PrevKv: wr.prevKV, + Fragment: wr.fragment, + } + cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} + return &pb.WatchRequest{WatchRequest_CreateRequest: cr} +} + +// toPB converts an internal progress request structure to its protobuf WatchRequest structure. 
+func (pr *progressRequest) toPB() *pb.WatchRequest { + req := &pb.WatchProgressRequest{} + cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req} + return &pb.WatchRequest{WatchRequest_ProgressRequest: cr} +} + +// 将ctx转换成str +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/client_sdk/v3/yaml/config.go b/client_sdk/v3/yaml/config.go new file mode 100644 index 00000000000..480d00da408 --- /dev/null +++ b/client_sdk/v3/yaml/config.go @@ -0,0 +1,91 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml handles yaml-formatted clientv3 configuration data. +package yaml + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + + "sigs.k8s.io/yaml" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/tlsutil" + "github.com/ls-2018/etcd_cn/client_sdk/v3" +) + +type yamlConfig struct { + clientv3.Config + + InsecureTransport bool `json:"insecure-transport"` + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` + Certfile string `json:"cert-file"` + Keyfile string `json:"key-file"` + TrustedCAfile string `json:"trusted-ca-file"` + + // CAfile is being deprecated. Use 'TrustedCAfile' instead. + // TODO: deprecate this in v4 + CAfile string `json:"ca-file"` +} + +// NewConfig creates a new clientv3.Config from a yaml file. 
+func NewConfig(fpath string) (*clientv3.Config, error) { + b, err := ioutil.ReadFile(fpath) + if err != nil { + return nil, err + } + + yc := &yamlConfig{} + + err = yaml.Unmarshal(b, yc) + if err != nil { + return nil, err + } + + if yc.InsecureTransport { + return &yc.Config, nil + } + + var ( + cert *tls.Certificate + cp *x509.CertPool + ) + + if yc.Certfile != "" && yc.Keyfile != "" { + cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil) + if err != nil { + return nil, err + } + } + + if yc.TrustedCAfile != "" { + cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile}) + if err != nil { + return nil, err + } + } + + tlscfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: yc.InsecureSkipTLSVerify, + RootCAs: cp, + } + if cert != nil { + tlscfg.Certificates = []tls.Certificate{*cert} + } + yc.Config.TLS = tlscfg + + return &yc.Config, nil +} diff --git a/code-of-conduct.md b/code-of-conduct.md deleted file mode 100644 index d79cc5488d3..00000000000 --- a/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -## etcd Community Code of Conduct - -etcd follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
diff --git a/code_debug/conf/conf.go b/code_debug/conf/conf.go new file mode 100644 index 00000000000..7263e271ff8 --- /dev/null +++ b/code_debug/conf/conf.go @@ -0,0 +1,3 @@ +package conf + +var Perf = false diff --git a/code_debug/conn/addr.go b/code_debug/conn/addr.go new file mode 100644 index 00000000000..12be0d43587 --- /dev/null +++ b/code_debug/conn/addr.go @@ -0,0 +1,72 @@ +package conn + +import ( + "fmt" + "net" + "os/exec" + "strings" + + "github.com/ls-2018/etcd_cn/code_debug/conf" +) + +func PrintConn(line string, c net.Conn) { + if conf.Perf { + return + } + + _, port, _ := net.SplitHostPort(c.RemoteAddr().String()) + res := SubCommand([]string{"zsh", "-c", fmt.Sprintf("lsof -itcp -n|grep '%s->'", port)}) + if res == "" { + return + } + pid := SubCommand([]string{"zsh", "-c", fmt.Sprintf("echo '%s'| awk '{print $2}'", res)}) + if pid == "" { + return + } + commandRes := SubCommand([]string{"zsh", "-c", fmt.Sprintf("ps -ef|grep -v grep|grep -v zsh |grep '%s'", pid)}) + if commandRes == "" { + return + } + command := SubCommand([]string{"zsh", "-c", fmt.Sprintf("echo '%s'| awk '{$1=$2=$3=$4=$5=$6=$7=\"\"; print $0}'", commandRes)}) + if command == "" { + return + } + pr := fmt.Sprintf("%s RemoteAddr:%s-->localAddr:%s [%s]", line, c.RemoteAddr().String(), c.LocalAddr().String(), strings.Trim(command, " \n")) + Green(pr) +} + +func SubCommand(opt []string) (result string) { + cmd := exec.Command(opt[0], opt[1:]...) 
+ // 命令的错误输出和标准输出都连接到同一个管道 + stdout, err := cmd.StdoutPipe() + cmd.Stderr = cmd.Stdout + if err != nil { + panic(err) + } + if err = cmd.Start(); err != nil { + panic(err) + } + for { + tmp := make([]byte, 1024) + _, err := stdout.Read(tmp) + res := strings.Split(string(tmp), "\n") + for _, v := range res { + if len(v) > 0 && v[0] != '\u0000' { + // fmt.Println(v) + result += v + // log.Debug(v) + } + } + if err != nil { + break + } + } + return result + //if err = cmd.Wait(); err != nil { + // log.Fatal(err) + //} +} + +func Green(pr string) { + fmt.Println(fmt.Sprintf("\033[1;32;42m %s \033[0m", pr)) +} diff --git a/code_debug/db/main.go b/code_debug/db/main.go new file mode 100644 index 00000000000..16685d8f6a0 --- /dev/null +++ b/code_debug/db/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "log" + + bolt "go.etcd.io/bbolt" +) + +func main() { + f() +} + +func f() error { + // 打开boltdb文件,获取db对象 + db, err := bolt.Open("db", 0o600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + // 参数true表示创建一个写事务,false读事务 + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + // 使用事务对象创建key bucket + b, err := tx.CreateBucketIfNotExists([]byte("key")) + if err != nil { + return err + } + // 使用bucket对象更新一个key + if err := b.Put([]byte("r94"), []byte("world")); err != nil { + return err + } + // 提交事务 + if err := tx.Commit(); err != nil { + return err + } + return nil +} diff --git a/code_debug/host/host.go b/code_debug/host/host.go new file mode 100644 index 00000000000..ec4c8f292dc --- /dev/null +++ b/code_debug/host/host.go @@ -0,0 +1,17 @@ +package main + +import ( + "fmt" + "net" + + "go.uber.org/zap" +) + +func main() { + fmt.Println(net.ParseIP("www.baidu.com")) + fmt.Println(net.ParseIP("127.168.1.2")) + // + // 127.168.1.2 + zap.NewNop().Debug("asdasdasdasd") + zap.NewNop().Fatal("asdasdasdasd") +} diff --git a/code_debug/init.sh b/code_debug/init.sh new file mode 100644 index 00000000000..fbe28809807 --- /dev/null +++ 
b/code_debug/init.sh @@ -0,0 +1,7 @@ +#!/bin/bash +etcdctl put a 1 +etcdctl put b 1 +etcdctl put b1 1 +etcdctl put b2 1 +etcdctl put b3 1 +etcdctl put c 1 diff --git a/code_debug/main.go b/code_debug/main.go new file mode 100644 index 00000000000..860f8c9ef6b --- /dev/null +++ b/code_debug/main.go @@ -0,0 +1,104 @@ +package main + +import ( + "crypto/md5" + "encoding/json" + "fmt" + math_bits "math/bits" + "net" + "sort" + "strconv" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +func mai2n() { + fmt.Println(net.ParseIP("http://127.0.0.1:8080")) + fmt.Println(net.ParseIP("127.0.0.1:8080")) + fmt.Println(net.ParseIP("www.baidu.com")) + fmt.Println(strconv.Atoi("12h")) + fmt.Println(strconv.FormatUint(uint64(123456), 16)) + var ch chan int + ch = nil + select { + case a := <-ch: + fmt.Println("<-ch", a) + default: + + } + var x uint64 + x = 1 + fmt.Println((math_bits.Len64(x|1) + 6) / 7) + fmt.Println("over") + hash := md5.New() + hash.Write([]byte("hello")) + fmt.Println(fmt.Sprintf("%x", hash.Sum(nil))) + + a := `{"header":{"ID":7587861231285799685},"put":{"key":"YQ==","value":"Yg=="}}` + b := `{"ID":7587861231285799684,"Method":"PUT","Path":"/0/version","Val":"3.5.0","Dir":false,"PrevValue":"","PrevIndex":0,"Expiration":0,"Wait":false,"Since":0,"Recursive":false,"Sorted":false,"Quorum":false,"Time":0,"Stream":false}` + fmt.Println(json.Unmarshal([]byte(a), &etcdserverpb.InternalRaftRequest{})) + fmt.Println(json.Unmarshal([]byte(b), &etcdserverpb.InternalRaftRequest{})) + var c time.Time + fmt.Println(c.IsZero()) + var d JointConfig + fmt.Println(d[1]["a"]) +} + +type ( + Config map[string]string + JointConfig [2]Config +) + +func main3() { + fmt.Println(strings.Compare("a", "b")) + fmt.Println(strings.Compare("a", "a")) + fmt.Println(strings.Compare("b", "ab")) + a := []*A{ + {Key: "a"}, + {Key: "b"}, + {Key: "c"}, + {Key: "d"}, + } + sort.Sort(permSlice(a)) + for _, i := range a { + fmt.Println(i.Key) + } + + // 在已有的权限中, + idx := 
sort.Search(len(a), func(i int) bool { + // a,a 0 + // a b -1 + // b a 1 + // a,b,c,d,e + // c + return strings.Compare(a[i].Key, "gc") >= 0 + }) + fmt.Println(idx) +} + +type A struct { + Key string +} + +type permSlice []*A + +func (perms permSlice) Len() int { + return len(perms) +} + +func (perms permSlice) Less(i, j int) bool { + // a,a 0 + // a b -1 + // b a 1 + + return strings.Compare(perms[i].Key, perms[j].Key) < 0 +} + +func (perms permSlice) Swap(i, j int) { + perms[i], perms[j] = perms[j], perms[i] +} + +func main() { +} diff --git a/code_debug/txn/main.go b/code_debug/txn/main.go new file mode 100644 index 00000000000..d301ef0bb2f --- /dev/null +++ b/code_debug/txn/main.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "fmt" + "log" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" +) + +func main() { + endpoints := []string{"127.0.0.1:2379"} + cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints}) + if err != nil { + log.Fatal(err) + } + defer cli.Close() + + // 创建租约 + lease := clientv3.NewLease(cli) + + // 设置租约时间 + leaseResp, err := lease.Grant(context.TODO(), 10) // 秒 + if err != nil { + fmt.Printf("设置租约时间失败:%s\n", err.Error()) + } + _, err = cli.Put(context.Background(), "a", "x", clientv3.WithLease(leaseResp.ID)) + if err != nil { + log.Fatal(err) + } + resp, err := cli.Txn(context.TODO()).If( + clientv3.Compare(clientv3.LeaseValue("a"), "=", leaseResp.ID), + ).Then( + clientv3.OpPut("b", "v30"), + ).Commit() + if err != nil { + log.Fatal(err) + } + + if err != nil { + log.Fatal(err) + } + for _, rp := range resp.Responses { + res := rp.GetResponseRange() + if res == nil { + continue + } + for _, ev := range res.Kvs { + fmt.Printf("%s -> %s, create revision = %d\n", + ev.Key, + ev.Value, + ev.CreateRevision) + } + } +} diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index 5dfc7b9b934..00000000000 --- a/codecov.yml +++ /dev/null @@ -1,17 +0,0 @@ -codecov: - token: "6040de41-c073-4d6f-bbf8-d89256ef31e1" - 
disable_default_path_fixes: true - -fixes: - - "go.etcd.io/etcd/api/v3/::api/" - - "go.etcd.io/etcd/client/v3/::client/v3/" - - "go.etcd.io/etcd/client/v2/::client/v2/" - - "go.etcd.io/etcd/etcdctl/v3/::etcdctl/" - - "go.etcd.io/etcd/pkg/v3/::pkg/" - - "go.etcd.io/etcd/server/v3/::server/" - -ignore: - - "**/*.pb.go" - - "**/*.pb.gw.go" - - "tests/**/*" - - "go.etcd.io/etcd/tests/**/*" diff --git a/config.ini b/config.ini new file mode 100644 index 00000000000..e27c543aaea --- /dev/null +++ b/config.ini @@ -0,0 +1,17 @@ +# List of all opts: +# port +# debug +# offset +# should_check_version +# open_browser +# open_neat_window + +# Port for website +port=:500 +debug=false +# number of records on a single screen +offset=100 +should_check_version=true +open_browser=true +# has effect only if 'open browser' is true +open_neat_window=true diff --git a/contrib/README.md b/contrib/README.md deleted file mode 100644 index 33af884c1f1..00000000000 --- a/contrib/README.md +++ /dev/null @@ -1,7 +0,0 @@ -## Contrib - -Scripts and files which may be useful but aren't part of the core etcd project. - -* [systemd](systemd) - an example unit file for deploying etcd on systemd-based distributions -* [raftexample](raftexample) - an example distributed key-value store using raft -* [systemd/etcd3-multinode](systemd/etcd3-multinode) - multi-node cluster setup with systemd diff --git a/contrib/lock/README.md b/contrib/lock/README.md deleted file mode 100644 index d33630e25fa..00000000000 --- a/contrib/lock/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# What is this? -This directory provides an executable example of the scenarios described in [the article by Martin Kleppmann][fencing]. - -Generally speaking, a lease-based lock service cannot provide mutual exclusion to processes. This is because such a lease mechanism depends on the physical clock of both the lock service and client processes. Many factors (e.g. 
stop-the-world GC pause of a language runtime) can cause false expiration of a granted lease as depicted in the below figure: ![unsafe lock][unsafe-lock] - -As discussed in [notes on the usage of lock and lease][why], such a problem can be solved with a technique called version number validation or fencing tokens. With this technique a shared resource (storage in the figures) needs to validate requests from clients based on their tokens like this: ![fencing tokens][fencing-tokens] - -This directory contains two programs: `client` and `storage`. With `etcd`, you can reproduce the expired lease problem of distributed locking and a simple example solution of the validation technique which can avoid incorrect access from a client with an expired lease. - -`storage` works as a very simple key value in-memory store which is accessible through HTTP and a custom JSON protocol. `client` works as client processes which tries to write a key/value to `storage` with coordination of etcd locking. - -## How to build - -For building `client` and `storage`, just execute `go build` in each directory. - -## How to try - -At first, you need to start an etcd cluster, which works as lock service in the figures. On top of the etcd source directory, execute commands like below: -``` -$ make # build etcd -$ bin/etcd # start etcd -``` - -Then run `storage` command in `storage` directory: -``` -$ ./storage -``` - -Now client processes ("Client 1" and "Client 2" in the figures) can be started. At first, execute below command for starting a client process which corresponds to "Client 1": -``` -$ ./client 1 -``` -It will show an output like this: -``` -client 1 starts -created etcd client and session -acquired lock, version: 694d82254d5fa305 -please manually revoke the lease using 'etcdctl lease revoke 694d82254d5fa305' or wait for it to expire, then start executing client 2 and hit any key... 
-``` - -Verify the lease was created using: -``` -$ bin/etcdctl lease list -found 1 leases -694d82254d5fa305 -``` - -Then proceed to manually revoke the lease using: -``` -$ bin/etcdctl lease revoke 694d82254d5fa305 -lease 694d82254d5fa305 revoked -``` - -Now another client process can be started like this: -``` -$ ./client 2 -client 2 starts -created etcd client and session -acquired lock, version: 694d82254e18770a -this is client 2, continuing -``` -If things go well the second client process invoked as `./client 2` finishes soon. It successfully writes a key to `storage` process. - -After checking this, please hit any key for `./client 1` and resume the process. It will show an output like below: -``` -resuming client 1 -expected fail to write to storage with old lease version: error: given version (694d82254d5fa305) is different from the existing version (694d82254e18770a) -``` - -[fencing]: https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html -[fencing-tokens]: https://martin.kleppmann.com/2016/02/fencing-tokens.png -[unsafe-lock]: https://martin.kleppmann.com/2016/02/unsafe-lock.png -[why]: https://etcd.io/docs/next/learning/why/#notes-on-the-usage-of-lock-and-lease diff --git a/contrib/lock/client/.gitignore b/contrib/lock/client/.gitignore deleted file mode 100644 index 2a11f8b9558..00000000000 --- a/contrib/lock/client/.gitignore +++ /dev/null @@ -1 +0,0 @@ -client \ No newline at end of file diff --git a/contrib/lock/client/client.go b/contrib/lock/client/client.go deleted file mode 100644 index 066b70e51d2..00000000000 --- a/contrib/lock/client/client.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An example distributed locking with fencing in the case of etcd -// Based on https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html - -package main - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "log" - "net/http" - "os" - "strconv" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" -) - -type request struct { - Op string `json:"op"` - Key string `json:"key"` - Val string `json:"val"` - Version int64 `json:"version"` -} - -type response struct { - Val string `json:"val"` - Version int64 `json:"version"` - Err string `json:"err"` -} - -func write(key string, value string, version int64) error { - req := request{ - Op: "write", - Key: key, - Val: value, - Version: version, - } - - reqBytes, err := json.Marshal(&req) - if err != nil { - log.Fatalf("failed to marshal request: %s", err) - } - - httpResp, err := http.Post("http://localhost:8080", "application/json", bytes.NewReader(reqBytes)) - if err != nil { - log.Fatalf("failed to send a request to storage: %s", err) - } - - respBytes, err := io.ReadAll(httpResp.Body) - if err != nil { - log.Fatalf("failed to read request body: %s", err) - } - - resp := new(response) - err = json.Unmarshal(respBytes, resp) - if err != nil { - log.Fatalf("failed to unmarshal response json: %s", err) - } - - if resp.Err != "" { - return fmt.Errorf("error: %s", resp.Err) - } - - return nil -} - -func main() { - if len(os.Args) != 2 { - log.Fatalf("usage: %s <1 or 2>", os.Args[0]) - } - - mode, err := 
strconv.Atoi(os.Args[1]) - if err != nil || mode != 1 && mode != 2 { - log.Fatalf("mode should be 1 or 2 (given value is %s)", os.Args[1]) - } - - log.Printf("client %d starts\n", mode) - - client, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:22379", "http://127.0.0.1:32379"}, - }) - if err != nil { - log.Fatalf("failed to create an etcd client: %s", err) - } - - // do a connection check first, otherwise it will hang infinitely on newSession - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - _, err = client.MemberList(ctx) - if err != nil { - log.Fatalf("failed to reach etcd: %s", err) - } - - session, err := concurrency.NewSession(client, concurrency.WithTTL(1)) - if err != nil { - log.Fatalf("failed to create a session: %s", err) - } - - log.Print("created etcd client and session") - - locker := concurrency.NewLocker(session, "/lock") - locker.Lock() - defer locker.Unlock() - version := session.Lease() - log.Printf("acquired lock, version: %x", version) - - if mode == 1 { - log.Printf("please manually revoke the lease using 'etcdctl lease revoke %x' or wait for it to expire, then start executing client 2 and hit any key...", version) - reader := bufio.NewReader(os.Stdin) - _, _ = reader.ReadByte() - log.Print("resuming client 1") - } else { - log.Print("this is client 2, continuing\n") - } - - err = write("key0", fmt.Sprintf("value from client %x", mode), int64(version)) - if err != nil { - if mode == 1 { - log.Printf("expected fail to write to storage with old lease version: %s\n", err) // client 1 should show this message - } else { - log.Fatalf("unexpected fail to write to storage: %s\n", err) - } - } else { - log.Printf("successfully write a key to storage using lease %x\n", int64(version)) - } -} diff --git a/contrib/lock/storage/.gitignore b/contrib/lock/storage/.gitignore deleted file mode 100644 index 5d252d7c9fb..00000000000 --- 
a/contrib/lock/storage/.gitignore +++ /dev/null @@ -1 +0,0 @@ -storage \ No newline at end of file diff --git a/contrib/lock/storage/storage.go b/contrib/lock/storage/storage.go deleted file mode 100644 index 7e39e38f62d..00000000000 --- a/contrib/lock/storage/storage.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "strings" -) - -type value struct { - val string - version int64 -} - -var data = make(map[string]*value) - -type request struct { - Op string `json:"op"` - Key string `json:"key"` - Val string `json:"val"` - Version int64 `json:"version"` -} - -type response struct { - Val string `json:"val"` - Version int64 `json:"version"` - Err string `json:"err"` -} - -func writeResponse(resp response, w http.ResponseWriter) { - wBytes, err := json.Marshal(resp) - if err != nil { - fmt.Printf("failed to marshal json: %s\n", err) - os.Exit(1) - } - _, err = w.Write(wBytes) - if err != nil { - fmt.Printf("failed to write a response: %s\n", err) - os.Exit(1) - } -} - -func handler(w http.ResponseWriter, r *http.Request) { - rBytes, err := io.ReadAll(r.Body) - if err != nil { - fmt.Printf("failed to read http request: %s\n", err) - os.Exit(1) - } - - var req request - err = json.Unmarshal(rBytes, &req) - if err != nil { - fmt.Printf("failed to unmarshal json: %s\n", err) - os.Exit(1) - } - - if 
strings.Compare(req.Op, "read") == 0 { - if val, ok := data[req.Key]; ok { - writeResponse(response{val.val, val.version, ""}, w) - } else { - writeResponse(response{"", -1, "key not found"}, w) - } - } else if strings.Compare(req.Op, "write") == 0 { - if val, ok := data[req.Key]; ok { - if req.Version != val.version { - writeResponse(response{"", -1, fmt.Sprintf("given version (%x) is different from the existing version (%x)", req.Version, val.version)}, w) - } else { - data[req.Key].val = req.Val - data[req.Key].version = req.Version - writeResponse(response{req.Val, req.Version, ""}, w) - } - } else { - data[req.Key] = &value{req.Val, req.Version} - writeResponse(response{req.Val, req.Version, ""}, w) - } - } else { - fmt.Printf("unknown op: %s\n", escape(req.Op)) - return - } -} - -func escape(s string) string { - escaped := strings.ReplaceAll(s, "\n", " ") - escaped = strings.ReplaceAll(escaped, "\r", " ") - return escaped -} - -func main() { - http.HandleFunc("/", handler) - err := http.ListenAndServe(":8080", nil) - if err != nil { - fmt.Printf("failed to listen and serve: %s\n", err) - os.Exit(1) - } -} diff --git a/contrib/mixin/Makefile b/contrib/mixin/Makefile deleted file mode 100644 index 843215b00c4..00000000000 --- a/contrib/mixin/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -.PHONY: tools manifests test clean - -OS := linux -ARCH ?= amd64 -PROMETHEUS_VERSION := 2.33.1 - -tools: - go install github.com/google/go-jsonnet/cmd/jsonnet@latest - go install github.com/brancz/gojsontoyaml@latest - wget -qO- "https://github.com/prometheus/prometheus/releases/download/v${PROMETHEUS_VERSION}/prometheus-${PROMETHEUS_VERSION}.${OS}-${ARCH}.tar.gz" |\ - tar xvz --strip-components=1 -C "$$(go env GOPATH)/bin" prometheus-${PROMETHEUS_VERSION}.${OS}-${ARCH}/promtool - -manifests: manifests/etcd-prometheusRules.yaml - -manifests/etcd-prometheusRules.yaml: - mkdir -p manifests - jsonnet -e '(import "mixin.libsonnet").prometheusAlerts' | gojsontoyaml > 
manifests/etcd-prometheusRules.yaml - -test: manifests/etcd-prometheusRules.yaml - promtool test rules test.yaml - -clean: - rm -rf manifests/*.yaml diff --git a/contrib/mixin/README.md b/contrib/mixin/README.md deleted file mode 100644 index 2ec70004cda..00000000000 --- a/contrib/mixin/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Prometheus Monitoring Mixin for etcd - -> NOTE: This project is *alpha* stage. Flags, configuration, behaviour and design may change significantly in following releases. - -A set of customisable Prometheus alerts for etcd. - -Instructions for use are the same as the [kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin). - -## Background - -* For more information about monitoring mixins, see this [design doc](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/edit#). - -## Testing alerts - -Make sure to have [jsonnet](https://jsonnet.org/) and [gojsontoyaml](https://github.com/brancz/gojsontoyaml) installed. You can fetch it via - -``` -make tools -``` - -First compile the mixin to a YAML file, which the promtool will read: -``` -make manifests -``` - -Then run the unit test: -``` -promtool test rules test.yaml -``` diff --git a/contrib/mixin/mixin.libsonnet b/contrib/mixin/mixin.libsonnet deleted file mode 100644 index f220eab56de..00000000000 --- a/contrib/mixin/mixin.libsonnet +++ /dev/null @@ -1,1445 +0,0 @@ -{ - _config+:: { - etcd_selector: 'job=~".*etcd.*"', - // etcd_instance_labels are the label names that are uniquely - // identifying an instance and need to be aggreated away for alerts - // that are about an etcd cluster as a whole. For example, if etcd - // instances are deployed on K8s, you will likely want to change - // this to 'instance, pod'. - etcd_instance_labels: 'instance', - // scrape_interval_seconds is the global scrape interval which can be - // used to dynamically adjust rate windows as a function of the interval. 
- scrape_interval_seconds: 30, - // Dashboard variable refresh option on Grafana (https://grafana.com/docs/grafana/latest/datasources/prometheus/). - // 0 : Never (Will never refresh the Dashboard variables values) - // 1 : On Dashboard Load (Will refresh Dashboards variables when dashboard are loaded) - // 2 : On Time Range Change (Will refresh Dashboards variables when time range will be changed) - dashboard_var_refresh: 2, - // clusterLabel is used to identify a cluster. - clusterLabel: 'job', - }, - - prometheusAlerts+:: { - groups+: [ - { - name: 'etcd', - rules: [ - { - alert: 'etcdMembersDown', - expr: ||| - max without (endpoint) ( - sum without (%(etcd_instance_labels)s) (up{%(etcd_selector)s} == bool 0) - or - count without (To) ( - sum without (%(etcd_instance_labels)s) (rate(etcd_network_peer_sent_failures_total{%(etcd_selector)s}[%(network_failure_range)ss])) > 0.01 - ) - ) - > 0 - ||| % { etcd_instance_labels: $._config.etcd_instance_labels, etcd_selector: $._config.etcd_selector, network_failure_range: $._config.scrape_interval_seconds * 4 }, - 'for': '10m', - labels: { - severity: 'critical', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": members are down ({{ $value }}).' % $._config.clusterLabel, - summary: 'etcd cluster members are down.', - }, - }, - { - alert: 'etcdInsufficientMembers', - expr: ||| - sum(up{%(etcd_selector)s} == bool 1) without (%(etcd_instance_labels)s) < ((count(up{%(etcd_selector)s}) without (%(etcd_instance_labels)s) + 1) / 2) - ||| % $._config, - 'for': '3m', - labels: { - severity: 'critical', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": insufficient members ({{ $value }}).' 
% $._config.clusterLabel, - summary: 'etcd cluster has insufficient number of members.', - }, - }, - { - alert: 'etcdNoLeader', - expr: ||| - etcd_server_has_leader{%(etcd_selector)s} == 0 - ||| % $._config, - 'for': '1m', - labels: { - severity: 'critical', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": member {{ $labels.instance }} has no leader.' % $._config.clusterLabel, - summary: 'etcd cluster has no leader.', - }, - }, - { - alert: 'etcdHighNumberOfLeaderChanges', - expr: ||| - increase((max without (%(etcd_instance_labels)s) (etcd_server_leader_changes_seen_total{%(etcd_selector)s}) or 0*absent(etcd_server_leader_changes_seen_total{%(etcd_selector)s}))[15m:1m]) >= 4 - ||| % $._config, - 'for': '5m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": {{ $value }} leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.' % $._config.clusterLabel, - summary: 'etcd cluster has high number of leader changes.', - }, - }, - { - alert: 'etcdHighNumberOfFailedGRPCRequests', - expr: ||| - 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) - / - sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code) - > 1 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": {{ $value }}%% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' 
% $._config.clusterLabel, - summary: 'etcd cluster has high number of failed grpc requests.', - }, - }, - { - alert: 'etcdHighNumberOfFailedGRPCRequests', - expr: ||| - 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) - / - sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code) - > 5 - ||| % $._config, - 'for': '5m', - labels: { - severity: 'critical', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": {{ $value }}%% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel, - summary: 'etcd cluster has high number of failed grpc requests.', - }, - }, - { - alert: 'etcdGRPCRequestsSlow', - expr: ||| - histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{%(etcd_selector)s, grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type)) - > 0.15 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'critical', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": 99th percentile of gRPC requests is {{ $value }}s on etcd instance {{ $labels.instance }} for {{ $labels.grpc_method }} method.' % $._config.clusterLabel, - summary: 'etcd grpc requests are slow', - }, - }, - { - alert: 'etcdMemberCommunicationSlow', - expr: ||| - histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{%(etcd_selector)s}[5m])) - > 0.15 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": member communication with {{ $labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.' 
% $._config.clusterLabel, - summary: 'etcd cluster member communication is slow.', - }, - }, - { - alert: 'etcdHighNumberOfFailedProposals', - expr: ||| - rate(etcd_server_proposals_failed_total{%(etcd_selector)s}[15m]) > 5 - ||| % $._config, - 'for': '15m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": {{ $value }} proposal failures within the last 30 minutes on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel, - summary: 'etcd cluster has high number of proposal failures.', - }, - }, - { - alert: 'etcdHighFsyncDurations', - expr: ||| - histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m])) - > 0.5 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel, - summary: 'etcd cluster 99th percentile fsync durations are too high.', - }, - }, - { - alert: 'etcdHighFsyncDurations', - expr: ||| - histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m])) - > 1 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'critical', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel, - summary: 'etcd cluster 99th percentile fsync durations are too high.', - }, - }, - { - alert: 'etcdHighCommitDurations', - expr: ||| - histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{%(etcd_selector)s}[5m])) - > 0.25 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": 99th percentile commit durations {{ $value }}s on etcd instance {{ $labels.instance }}.' 
% $._config.clusterLabel, - summary: 'etcd cluster 99th percentile commit durations are too high.', - }, - }, - { - alert: 'etcdDatabaseQuotaLowSpace', - expr: ||| - (last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'critical', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.' % $._config.clusterLabel, - summary: 'etcd cluster database is running full.', - }, - }, - { - alert: 'etcdExcessiveDatabaseGrowth', - expr: ||| - predict_linear(etcd_mvcc_db_total_size_in_bytes[4h], 4*60*60) > etcd_server_quota_backend_bytes - ||| % $._config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive.' % $._config.clusterLabel, - summary: 'etcd cluster database growing very fast.', - }, - }, - { - alert: 'etcdDatabaseHighFragmentationRatio', - expr: ||| - (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5 - ||| % $._config, - 'for': '10m', - labels: { - severity: 'warning', - }, - annotations: { - description: 'etcd cluster "{{ $labels.%s }}": database size in use on instance {{ $labels.instance }} is {{ $value | humanizePercentage }} of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.' 
% $._config.clusterLabel, - summary: 'etcd database size in use is less than 50% of the actual allocated storage.', - runbook_url: 'https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation', - }, - }, - ], - }, - ], - }, - - grafanaDashboards+:: { - 'etcd.json': { - uid: std.md5('etcd.json'), - title: 'etcd', - description: 'etcd sample Grafana dashboard with Prometheus', - tags: ['etcd-mixin'], - style: 'dark', - timezone: 'browser', - editable: true, - hideControls: false, - sharedCrosshair: false, - rows: [ - { - collapse: false, - editable: true, - height: '250px', - panels: [ - { - cacheTimeout: null, - colorBackground: false, - colorValue: false, - colors: [ - 'rgba(245, 54, 54, 0.9)', - 'rgba(237, 129, 40, 0.89)', - 'rgba(50, 172, 45, 0.97)', - ], - datasource: '$datasource', - editable: true, - 'error': false, - format: 'none', - gauge: { - maxValue: 100, - minValue: 0, - show: false, - thresholdLabels: false, - thresholdMarkers: true, - }, - id: 28, - interval: null, - isNew: true, - links: [], - mappingType: 1, - mappingTypes: [ - { - name: 'value to text', - value: 1, - }, - { - name: 'range to text', - value: 2, - }, - ], - maxDataPoints: 100, - nullPointMode: 'connected', - nullText: null, - postfix: '', - postfixFontSize: '50%', - prefix: '', - prefixFontSize: '50%', - rangeMaps: [{ - from: 'null', - text: 'N/A', - to: 'null', - }], - span: 3, - sparkline: { - fillColor: 'rgba(31, 118, 189, 0.18)', - full: false, - lineColor: 'rgb(31, 120, 193)', - show: false, - }, - targets: [{ - expr: 'sum(etcd_server_has_leader{%s="$cluster"})' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: '', - metric: 'etcd_server_has_leader', - refId: 'A', - step: 20, - }], - thresholds: '', - title: 'Up', - type: 'singlestat', - valueFontSize: '200%', - valueMaps: [{ - op: '=', - text: 'N/A', - value: 'null', - }], - valueName: 'avg', - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 0, - 
id: 23, - isNew: true, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 5, - stack: false, - steppedLine: false, - targets: [ - { - expr: 'sum(rate(grpc_server_started_total{%s="$cluster",grpc_type="unary"}[$__rate_interval]))' % $._config.clusterLabel, - format: 'time_series', - intervalFactor: 2, - legendFormat: 'RPC Rate', - metric: 'grpc_server_started_total', - refId: 'A', - step: 2, - }, - { - expr: 'sum(rate(grpc_server_handled_total{%s="$cluster",grpc_type="unary",grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[$__rate_interval]))' % $._config.clusterLabel, - format: 'time_series', - intervalFactor: 2, - legendFormat: 'RPC Failed Rate', - metric: 'grpc_server_handled_total', - refId: 'B', - step: 2, - }, - ], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'RPC Rate', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'ops', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 0, - id: 41, - isNew: true, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 4, - stack: true, - steppedLine: 
false, - targets: [ - { - expr: 'sum(grpc_server_started_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})' % $._config, - intervalFactor: 2, - legendFormat: 'Watch Streams', - metric: 'grpc_server_handled_total', - refId: 'A', - step: 4, - }, - { - expr: 'sum(grpc_server_started_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})' % $._config, - intervalFactor: 2, - legendFormat: 'Lease Streams', - metric: 'grpc_server_handled_total', - refId: 'B', - step: 4, - }, - ], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Active Streams', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'short', - label: '', - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - }, - ], - showTitle: false, - title: 'Row', - }, - { - collapse: false, - editable: true, - height: '250px', - panels: [ - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - decimals: null, - editable: true, - 'error': false, - fill: 0, - grid: {}, - id: 1, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 4, - stack: false, - steppedLine: false, - targets: [{ - expr: 'etcd_mvcc_db_total_size_in_bytes{%s="$cluster"}' % $._config.clusterLabel, 
- hide: false, - interval: '', - intervalFactor: 2, - legendFormat: '{{instance}} DB Size', - metric: '', - refId: 'A', - step: 4, - }], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'DB Size', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'cumulative', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'bytes', - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - logBase: 1, - max: null, - min: null, - show: false, - }, - ], - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 0, - grid: {}, - id: 3, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 1, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 4, - stack: false, - steppedLine: true, - targets: [ - { - expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{%s="$cluster"}[$__rate_interval])) by (instance, le))' % $._config.clusterLabel, - hide: false, - intervalFactor: 2, - legendFormat: '{{instance}} WAL fsync', - metric: 'etcd_disk_wal_fsync_duration_seconds_bucket', - refId: 'A', - step: 4, - }, - { - expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{%s="$cluster"}[$__rate_interval])) by (instance, le))' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: '{{instance}} DB fsync', - metric: 'etcd_disk_backend_commit_duration_seconds_bucket', - refId: 'B', - step: 4, - }, - ], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Disk Sync Duration', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'cumulative', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: 
true, - values: [], - }, - yaxes: [ - { - format: 's', - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - logBase: 1, - max: null, - min: null, - show: false, - }, - ], - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 0, - id: 29, - isNew: true, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 4, - stack: false, - steppedLine: false, - targets: [{ - expr: 'process_resident_memory_bytes{%s="$cluster"}' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: '{{instance}} Resident Memory', - metric: 'process_resident_memory_bytes', - refId: 'A', - step: 4, - }], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Memory', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'bytes', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - }, - ], - title: 'New row', - }, - { - collapse: false, - editable: true, - height: '250px', - panels: [ - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 5, - id: 22, - isNew: true, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 3, - stack: true, - steppedLine: false, - 
targets: [{ - expr: 'rate(etcd_network_client_grpc_received_bytes_total{%s="$cluster"}[$__rate_interval])' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: '{{instance}} Client Traffic In', - metric: 'etcd_network_client_grpc_received_bytes_total', - refId: 'A', - step: 4, - }], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Client Traffic In', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'Bps', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 5, - id: 21, - isNew: true, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 3, - stack: true, - steppedLine: false, - targets: [{ - expr: 'rate(etcd_network_client_grpc_sent_bytes_total{%s="$cluster"}[$__rate_interval])' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: '{{instance}} Client Traffic Out', - metric: 'etcd_network_client_grpc_sent_bytes_total', - refId: 'A', - step: 4, - }], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Client Traffic Out', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'Bps', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - 
min: null, - show: true, - }, - ], - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 0, - id: 20, - isNew: true, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 3, - stack: false, - steppedLine: false, - targets: [{ - expr: 'sum(rate(etcd_network_peer_received_bytes_total{%s="$cluster"}[$__rate_interval])) by (instance)' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: '{{instance}} Peer Traffic In', - metric: 'etcd_network_peer_received_bytes_total', - refId: 'A', - step: 4, - }], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Peer Traffic In', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'Bps', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - decimals: null, - editable: true, - 'error': false, - fill: 0, - grid: {}, - id: 16, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 3, - stack: false, - steppedLine: false, - targets: [{ - expr: 'sum(rate(etcd_network_peer_sent_bytes_total{%s="$cluster"}[$__rate_interval])) by (instance)' % $._config.clusterLabel, - hide: false, - interval: '', - 
intervalFactor: 2, - legendFormat: '{{instance}} Peer Traffic Out', - metric: 'etcd_network_peer_sent_bytes_total', - refId: 'A', - step: 4, - }], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Peer Traffic Out', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'cumulative', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'Bps', - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - }, - ], - title: 'New row', - }, - { - collapse: false, - editable: true, - height: '250px', - panels: [ - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - editable: true, - 'error': false, - fill: 0, - id: 40, - isNew: true, - legend: { - avg: false, - current: false, - max: false, - min: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 6, - stack: false, - steppedLine: false, - targets: [ - { - expr: 'sum(rate(etcd_server_proposals_failed_total{%s="$cluster"}[$__rate_interval]))' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: 'Proposal Failure Rate', - metric: 'etcd_server_proposals_failed_total', - refId: 'A', - step: 2, - }, - { - expr: 'sum(etcd_server_proposals_pending{%s="$cluster"})' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: 'Proposal Pending Total', - metric: 'etcd_server_proposals_pending', - refId: 'B', - step: 2, - }, - { - expr: 'sum(rate(etcd_server_proposals_committed_total{%s="$cluster"}[$__rate_interval]))' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: 'Proposal Commit Rate', - metric: 'etcd_server_proposals_committed_total', - refId: 'C', - step: 2, - }, - { - expr: 
'sum(rate(etcd_server_proposals_applied_total{%s="$cluster"}[$__rate_interval]))' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: 'Proposal Apply Rate', - refId: 'D', - step: 2, - }, - ], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Raft Proposals', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'short', - label: '', - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - }, - { - aliasColors: {}, - bars: false, - datasource: '$datasource', - decimals: 0, - editable: true, - 'error': false, - fill: 0, - id: 19, - isNew: true, - legend: { - alignAsTable: false, - avg: false, - current: false, - max: false, - min: false, - rightSide: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - percentage: false, - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - span: 6, - stack: false, - steppedLine: false, - targets: [{ - expr: 'changes(etcd_server_leader_changes_seen_total{%s="$cluster"}[1d])' % $._config.clusterLabel, - intervalFactor: 2, - legendFormat: '{{instance}} Total Leader Elections Per Day', - metric: 'etcd_server_leader_changes_seen_total', - refId: 'A', - step: 2, - }], - thresholds: [], - timeFrom: null, - timeShift: null, - title: 'Total Leader Elections Per Day', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - { - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - 
}, - ], - }, - { - aliasColors: {}, - bars: false, - dashLength: 10, - dashes: false, - datasource: '$datasource', - decimals: 0, - editable: true, - 'error': false, - fieldConfig: { - defaults: { - custom: {}, - }, - overrides: [], - }, - fill: 0, - fillGradient: 0, - gridPos: { - h: 7, - w: 12, - x: 0, - y: 28, - }, - hiddenSeries: false, - id: 42, - isNew: true, - legend: { - alignAsTable: false, - avg: false, - current: false, - max: false, - min: false, - rightSide: false, - show: false, - total: false, - values: false, - }, - lines: true, - linewidth: 2, - links: [], - nullPointMode: 'connected', - options: { - alertThreshold: true, - }, - percentage: false, - pluginVersion: '7.4.3', - pointradius: 5, - points: false, - renderer: 'flot', - seriesOverrides: [], - spaceLength: 10, - stack: false, - steppedLine: false, - targets: [ - { - expr: 'histogram_quantile(0.99, sum by (instance, le) (rate(etcd_network_peer_round_trip_time_seconds_bucket{%s="$cluster"}[$__rate_interval])))' % $._config.clusterLabel, - interval: '', - intervalFactor: 2, - legendFormat: '{{instance}} Peer round trip time', - metric: 'etcd_network_peer_round_trip_time_seconds_bucket', - refId: 'A', - step: 2, - }, - ], - thresholds: [], - timeFrom: null, - timeRegions: [], - timeShift: null, - title: 'Peer round trip time', - tooltip: { - msResolution: false, - shared: true, - sort: 0, - value_type: 'individual', - }, - type: 'graph', - xaxis: { - buckets: null, - mode: 'time', - name: null, - show: true, - values: [], - }, - yaxes: [ - { - '$$hashKey': 'object:925', - decimals: null, - format: 's', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - { - '$$hashKey': 'object:926', - format: 'short', - label: null, - logBase: 1, - max: null, - min: null, - show: true, - }, - ], - yaxis: { - align: false, - alignLevel: null, - }, - }, - ], - title: 'New row', - }, - ], - time: { - from: 'now-15m', - to: 'now', - }, - timepicker: { - now: true, - refresh_intervals: [ - 
'5s', - '10s', - '30s', - '1m', - '5m', - '15m', - '30m', - '1h', - '2h', - '1d', - ], - time_options: [ - '5m', - '15m', - '1h', - '6h', - '12h', - '24h', - '2d', - '7d', - '30d', - ], - }, - templating: { - list: [ - { - current: { - text: 'Prometheus', - value: 'Prometheus', - }, - hide: 0, - label: 'Data Source', - name: 'datasource', - options: [], - query: 'prometheus', - refresh: 1, - regex: '', - type: 'datasource', - }, - { - allValue: null, - current: { - text: 'prod', - value: 'prod', - }, - datasource: '$datasource', - hide: 0, - includeAll: false, - label: 'cluster', - multi: false, - name: 'cluster', - options: [], - query: 'label_values(etcd_server_has_leader, %s)' % $._config.clusterLabel, - refresh: $._config.dashboard_var_refresh, - regex: '', - sort: 2, - tagValuesQuery: '', - tags: [], - tagsQuery: '', - type: 'query', - useTags: false, - }, - ], - }, - annotations: { - list: [], - }, - refresh: '10s', - schemaVersion: 13, - version: 215, - links: [], - gnetId: null, - }, - }, -} diff --git a/contrib/mixin/test.yaml b/contrib/mixin/test.yaml deleted file mode 100644 index 8cf18a0e080..00000000000 --- a/contrib/mixin/test.yaml +++ /dev/null @@ -1,164 +0,0 @@ -rule_files: - - manifests/etcd-prometheusRules.yaml - -evaluation_interval: 1m - -tests: - - interval: 1m - input_series: - - series: 'up{job="etcd",instance="10.10.10.0"}' - values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0' - - series: 'up{job="etcd",instance="10.10.10.1"}' - values: '1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0' - - series: 'up{job="etcd",instance="10.10.10.2"}' - values: '1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0' - alert_rule_test: - - eval_time: 3m - alertname: etcdInsufficientMembers - - eval_time: 5m - alertname: etcdInsufficientMembers - - eval_time: 12m - alertname: etcdMembersDown - - eval_time: 14m - alertname: etcdMembersDown - exp_alerts: - - exp_labels: - job: etcd - severity: critical - exp_annotations: - description: 'etcd cluster "etcd": members are down (3).' 
- summary: 'etcd cluster members are down.' - - eval_time: 7m - alertname: etcdInsufficientMembers - - eval_time: 11m - alertname: etcdInsufficientMembers - exp_alerts: - - exp_labels: - job: etcd - severity: critical - exp_annotations: - description: 'etcd cluster "etcd": insufficient members (1).' - summary: 'etcd cluster has insufficient number of members.' - - eval_time: 15m - alertname: etcdInsufficientMembers - exp_alerts: - - exp_labels: - job: etcd - severity: critical - exp_annotations: - description: 'etcd cluster "etcd": insufficient members (0).' - summary: 'etcd cluster has insufficient number of members.' - - - interval: 1m - input_series: - - series: 'up{job="etcd",instance="10.10.10.0"}' - values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0' - - series: 'up{job="etcd",instance="10.10.10.1"}' - values: '1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0' - - series: 'up{job="etcd",instance="10.10.10.2"}' - values: '1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' - alert_rule_test: - - eval_time: 14m - alertname: etcdMembersDown - exp_alerts: - - exp_labels: - job: etcd - severity: critical - exp_annotations: - description: 'etcd cluster "etcd": members are down (3).' - summary: 'etcd cluster members are down.' - - - interval: 1m - input_series: - - series: 'up{job="etcd",instance="10.10.10.0"}' - values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0' - - series: 'up{job="etcd",instance="10.10.10.1"}' - values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0' - - series: 'etcd_network_peer_sent_failures_total{To="member-1",job="etcd",endpoint="test"}' - values: '0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18' - alert_rule_test: - - eval_time: 13m - alertname: etcdMembersDown - exp_alerts: - - exp_labels: - job: etcd - severity: critical - exp_annotations: - description: 'etcd cluster "etcd": members are down (1).' - summary: 'etcd cluster members are down.' 
- - - interval: 1m - input_series: - - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}' - values: '0 0 2 0 0 1 0 0 0 0 0 0 0 0 0 0' - - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}' - values: '0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0' - - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}' - values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' - alert_rule_test: - - eval_time: 10m - alertname: etcdHighNumberOfLeaderChanges - exp_alerts: - - exp_labels: - job: etcd - severity: warning - exp_annotations: - description: 'etcd cluster "etcd": 4 leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.' - summary: 'etcd cluster has high number of leader changes.' - - interval: 1m - input_series: - - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}' - values: '0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0' - - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}' - values: '0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0' - - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}' - values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0' - alert_rule_test: - - eval_time: 10m - alertname: etcdHighNumberOfLeaderChanges - exp_alerts: - - - interval: 1m - input_series: - - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.0"}' - values: '0+8192x240' - - series: 'etcd_server_quota_backend_bytes{job="etcd",instance="10.10.10.0"}' - values: '524288+0x240' - - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.1"}' - values: '0+1024x240' - - series: 'etcd_server_quota_backend_bytes{job="etcd",instance="10.10.10.1"}' - values: '524288+0x240' - alert_rule_test: - - eval_time: 11m - alertname: etcdExcessiveDatabaseGrowth - exp_alerts: - - exp_labels: - instance: '10.10.10.0' - job: etcd - 
severity: warning - exp_annotations: - description: 'etcd cluster "etcd": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance 10.10.10.0, please check as it might be disruptive.' - summary: 'etcd cluster database growing very fast.' - - - interval: 1m - input_series: - - series: 'etcd_mvcc_db_total_size_in_use_in_bytes{job="etcd",instance="10.10.10.0"}' - values: '30000+0x10' - - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.0"}' - values: '100000+0x10' - - series: 'etcd_mvcc_db_total_size_in_use_in_bytes{job="etcd",instance="10.10.10.1"}' - values: '70000+0x10' - - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.1"}' - values: '100000+0x10' - alert_rule_test: - - eval_time: 11m - alertname: etcdDatabaseHighFragmentationRatio - exp_alerts: - - exp_labels: - instance: '10.10.10.0' - job: etcd - severity: warning - exp_annotations: - description: 'etcd cluster "etcd": database size in use on instance 10.10.10.0 is 30% of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.' - runbook_url: https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation - summary: 'etcd database size in use is less than 50% of the actual allocated storage.' 
diff --git a/contrib/raftexample/Procfile b/contrib/raftexample/Procfile deleted file mode 100644 index f6e87132693..00000000000 --- a/contrib/raftexample/Procfile +++ /dev/null @@ -1,4 +0,0 @@ -# Use goreman to run `go install github.com/mattn/goreman@latest` -raftexample1: ./raftexample --id 1 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 12380 -raftexample2: ./raftexample --id 2 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 22380 -raftexample3: ./raftexample --id 3 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 32380 diff --git a/contrib/raftexample/README.md b/contrib/raftexample/README.md deleted file mode 100644 index 2e73996a6a6..00000000000 --- a/contrib/raftexample/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# raftexample - -raftexample is an example usage of etcd's [raft library](../../raft). It provides a simple REST API for a key-value store cluster backed by the [Raft][raft] consensus algorithm. - -[raft]: http://raftconsensus.github.io/ - -## Getting Started - -### Building raftexample - -Clone `etcd` to `/src/go.etcd.io/etcd` - -```sh -export GOPATH= -cd /src/go.etcd.io/etcd/contrib/raftexample -go build -o raftexample -``` - -### Running single node raftexample - -First start a single-member cluster of raftexample: - -```sh -raftexample --id 1 --cluster http://127.0.0.1:12379 --port 12380 -``` - -Each raftexample process maintains a single raft instance and a key-value server. -The process's list of comma separated peers (--cluster), its raft ID index into the peer list (--id), and http key-value server port (--port) are passed through the command line. 
- -Next, store a value ("hello") to a key ("my-key"): - -``` -curl -L http://127.0.0.1:12380/my-key -XPUT -d hello -``` - -Finally, retrieve the stored key: - -``` -curl -L http://127.0.0.1:12380/my-key -``` - -### Running a local cluster - -First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications. - -The [Procfile script](./Procfile) will set up a local example cluster. Start it with: - -```sh -goreman start -``` - -This will bring up three raftexample instances. - -Now it's possible to write a key-value pair to any member of the cluster and likewise retrieve it from any member. - -### Fault Tolerance - -To test cluster recovery, first start a cluster and write a value "foo": -```sh -goreman start -curl -L http://127.0.0.1:12380/my-key -XPUT -d foo -``` - -Next, remove a node and replace the value with "bar" to check cluster availability: - -```sh -goreman run stop raftexample2 -curl -L http://127.0.0.1:12380/my-key -XPUT -d bar -curl -L http://127.0.0.1:32380/my-key -``` - -Finally, bring the node back up and verify it recovers with the updated value "bar": -```sh -goreman run start raftexample2 -curl -L http://127.0.0.1:22380/my-key -``` - -### Dynamic cluster reconfiguration - -Nodes can be added to or removed from a running cluster using requests to the REST API. 
- -For example, suppose we have a 3-node cluster that was started with the commands: -```sh -raftexample --id 1 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 12380 -raftexample --id 2 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 22380 -raftexample --id 3 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 32380 -``` - -A fourth node with ID 4 can be added by issuing a POST: -```sh -curl -L http://127.0.0.1:12380/4 -XPOST -d http://127.0.0.1:42379 -``` - -Then the new node can be started as the others were, using the --join option: -```sh -raftexample --id 4 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379,http://127.0.0.1:42379 --port 42380 --join -``` - -The new node should join the cluster and be able to service key/value requests. - -We can remove a node using a DELETE request: -```sh -curl -L http://127.0.0.1:12380/3 -XDELETE -``` - -Node 3 should shut itself down once the cluster has processed this request. - -## Design - -The raftexample consists of three components: a raft-backed key-value store, a REST API server, and a raft consensus server based on etcd's raft implementation. - -The raft-backed key-value store is a key-value map that holds all committed key-values. -The store bridges communication between the raft server and the REST server. -Key-value updates are issued through the store to the raft server. -The store updates its map once raft reports the updates are committed. - -The REST server exposes the current raft consensus by accessing the raft-backed key-value store. -A GET command looks up a key in the store and returns the value, if any. -A key-value PUT command issues an update proposal to the store. - -The raft server participates in consensus with its cluster peers. -When the REST server submits a proposal, the raft server transmits the proposal to its peers. 
-When raft reaches a consensus, the server publishes all committed updates over a commit channel. -For raftexample, this commit channel is consumed by the key-value store. - diff --git a/contrib/raftexample/doc.go b/contrib/raftexample/doc.go deleted file mode 100644 index b2dc8416037..00000000000 --- a/contrib/raftexample/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// raftexample is a simple KV store using the raft and rafthttp libraries. -package main diff --git a/contrib/raftexample/httpapi.go b/contrib/raftexample/httpapi.go deleted file mode 100644 index dbe226add33..00000000000 --- a/contrib/raftexample/httpapi.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "io" - "log" - "net/http" - "strconv" - - "go.etcd.io/raft/v3/raftpb" -) - -// Handler for a http based key-value store backed by raft -type httpKVAPI struct { - store *kvstore - confChangeC chan<- raftpb.ConfChange -} - -func (h *httpKVAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { - key := r.RequestURI - defer r.Body.Close() - switch r.Method { - case http.MethodPut: - v, err := io.ReadAll(r.Body) - if err != nil { - log.Printf("Failed to read on PUT (%v)\n", err) - http.Error(w, "Failed on PUT", http.StatusBadRequest) - return - } - - h.store.Propose(key, string(v)) - - // Optimistic-- no waiting for ack from raft. Value is not yet - // committed so a subsequent GET on the key may return old value - w.WriteHeader(http.StatusNoContent) - case http.MethodGet: - if v, ok := h.store.Lookup(key); ok { - w.Write([]byte(v)) - } else { - http.Error(w, "Failed to GET", http.StatusNotFound) - } - case http.MethodPost: - url, err := io.ReadAll(r.Body) - if err != nil { - log.Printf("Failed to read on POST (%v)\n", err) - http.Error(w, "Failed on POST", http.StatusBadRequest) - return - } - - nodeID, err := strconv.ParseUint(key[1:], 0, 64) - if err != nil { - log.Printf("Failed to convert ID for conf change (%v)\n", err) - http.Error(w, "Failed on POST", http.StatusBadRequest) - return - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: nodeID, - Context: url, - } - h.confChangeC <- cc - // As above, optimistic that raft will apply the conf change - w.WriteHeader(http.StatusNoContent) - case http.MethodDelete: - nodeID, err := strconv.ParseUint(key[1:], 0, 64) - if err != nil { - log.Printf("Failed to convert ID for conf change (%v)\n", err) - http.Error(w, "Failed on DELETE", http.StatusBadRequest) - return - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: nodeID, - } - h.confChangeC <- cc - - // As above, optimistic that raft will apply the conf change - 
w.WriteHeader(http.StatusNoContent) - default: - w.Header().Set("Allow", http.MethodPut) - w.Header().Add("Allow", http.MethodGet) - w.Header().Add("Allow", http.MethodPost) - w.Header().Add("Allow", http.MethodDelete) - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -// serveHTTPKVAPI starts a key-value server with a GET/PUT API and listens. -func serveHTTPKVAPI(kv *kvstore, port int, confChangeC chan<- raftpb.ConfChange, errorC <-chan error) { - srv := http.Server{ - Addr: ":" + strconv.Itoa(port), - Handler: &httpKVAPI{ - store: kv, - confChangeC: confChangeC, - }, - } - go func() { - if err := srv.ListenAndServe(); err != nil { - log.Fatal(err) - } - }() - - // exit when raft goes down - if err, ok := <-errorC; ok { - log.Fatal(err) - } -} diff --git a/contrib/raftexample/kvstore.go b/contrib/raftexample/kvstore.go deleted file mode 100644 index 22f8915fe1c..00000000000 --- a/contrib/raftexample/kvstore.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "log" - "sync" - - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/raft/v3/raftpb" -) - -// a key-value store backed by raft -type kvstore struct { - proposeC chan<- string // channel for proposing updates - mu sync.RWMutex - kvStore map[string]string // current committed key-value pairs - snapshotter *snap.Snapshotter -} - -type kv struct { - Key string - Val string -} - -func newKVStore(snapshotter *snap.Snapshotter, proposeC chan<- string, commitC <-chan *commit, errorC <-chan error) *kvstore { - s := &kvstore{proposeC: proposeC, kvStore: make(map[string]string), snapshotter: snapshotter} - snapshot, err := s.loadSnapshot() - if err != nil { - log.Panic(err) - } - if snapshot != nil { - log.Printf("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index) - if err := s.recoverFromSnapshot(snapshot.Data); err != nil { - log.Panic(err) - } - } - // read commits from raft into kvStore map until error - go s.readCommits(commitC, errorC) - return s -} - -func (s *kvstore) Lookup(key string) (string, bool) { - s.mu.RLock() - defer s.mu.RUnlock() - v, ok := s.kvStore[key] - return v, ok -} - -func (s *kvstore) Propose(k string, v string) { - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(kv{k, v}); err != nil { - log.Fatal(err) - } - s.proposeC <- buf.String() -} - -func (s *kvstore) readCommits(commitC <-chan *commit, errorC <-chan error) { - for commit := range commitC { - if commit == nil { - // signaled to load snapshot - snapshot, err := s.loadSnapshot() - if err != nil { - log.Panic(err) - } - if snapshot != nil { - log.Printf("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index) - if err := s.recoverFromSnapshot(snapshot.Data); err != nil { - log.Panic(err) - } - } - continue - } - - for _, data := range commit.data { - var dataKv kv - dec := gob.NewDecoder(bytes.NewBufferString(data)) - 
if err := dec.Decode(&dataKv); err != nil { - log.Fatalf("raftexample: could not decode message (%v)", err) - } - s.mu.Lock() - s.kvStore[dataKv.Key] = dataKv.Val - s.mu.Unlock() - } - close(commit.applyDoneC) - } - if err, ok := <-errorC; ok { - log.Fatal(err) - } -} - -func (s *kvstore) getSnapshot() ([]byte, error) { - s.mu.RLock() - defer s.mu.RUnlock() - return json.Marshal(s.kvStore) -} - -func (s *kvstore) loadSnapshot() (*raftpb.Snapshot, error) { - snapshot, err := s.snapshotter.Load() - if err == snap.ErrNoSnapshot { - return nil, nil - } - if err != nil { - return nil, err - } - return snapshot, nil -} - -func (s *kvstore) recoverFromSnapshot(snapshot []byte) error { - var store map[string]string - if err := json.Unmarshal(snapshot, &store); err != nil { - return err - } - s.mu.Lock() - defer s.mu.Unlock() - s.kvStore = store - return nil -} diff --git a/contrib/raftexample/kvstore_test.go b/contrib/raftexample/kvstore_test.go deleted file mode 100644 index 231f778f2ee..00000000000 --- a/contrib/raftexample/kvstore_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "reflect" - "testing" -) - -func Test_kvstore_snapshot(t *testing.T) { - tm := map[string]string{"foo": "bar"} - s := &kvstore{kvStore: tm} - - v, _ := s.Lookup("foo") - if v != "bar" { - t.Fatalf("foo has unexpected value, got %s", v) - } - - data, err := s.getSnapshot() - if err != nil { - t.Fatal(err) - } - s.kvStore = nil - - if err := s.recoverFromSnapshot(data); err != nil { - t.Fatal(err) - } - v, _ = s.Lookup("foo") - if v != "bar" { - t.Fatalf("foo has unexpected value, got %s", v) - } - if !reflect.DeepEqual(s.kvStore, tm) { - t.Fatalf("store expected %+v, got %+v", tm, s.kvStore) - } -} diff --git a/contrib/raftexample/listener.go b/contrib/raftexample/listener.go deleted file mode 100644 index d67e16f5dee..00000000000 --- a/contrib/raftexample/listener.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "errors" - "net" - "time" -) - -// stoppableListener sets TCP keep-alive timeouts on accepted -// connections and waits on stopc message -type stoppableListener struct { - *net.TCPListener - stopc <-chan struct{} -} - -func newStoppableListener(addr string, stopc <-chan struct{}) (*stoppableListener, error) { - ln, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - return &stoppableListener{ln.(*net.TCPListener), stopc}, nil -} - -func (ln stoppableListener) Accept() (c net.Conn, err error) { - connc := make(chan *net.TCPConn, 1) - errc := make(chan error, 1) - go func() { - tc, err := ln.AcceptTCP() - if err != nil { - errc <- err - return - } - connc <- tc - }() - select { - case <-ln.stopc: - return nil, errors.New("server stopped") - case err := <-errc: - return nil, err - case tc := <-connc: - tc.SetKeepAlive(true) - tc.SetKeepAlivePeriod(3 * time.Minute) - return tc, nil - } -} diff --git a/contrib/raftexample/main.go b/contrib/raftexample/main.go deleted file mode 100644 index 73f02787a35..00000000000 --- a/contrib/raftexample/main.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "flag" - "strings" - - "go.etcd.io/raft/v3/raftpb" -) - -func main() { - cluster := flag.String("cluster", "http://127.0.0.1:9021", "comma separated cluster peers") - id := flag.Int("id", 1, "node ID") - kvport := flag.Int("port", 9121, "key-value server port") - join := flag.Bool("join", false, "join an existing cluster") - flag.Parse() - - proposeC := make(chan string) - defer close(proposeC) - confChangeC := make(chan raftpb.ConfChange) - defer close(confChangeC) - - // raft provides a commit stream for the proposals from the http api - var kvs *kvstore - getSnapshot := func() ([]byte, error) { return kvs.getSnapshot() } - commitC, errorC, snapshotterReady := newRaftNode(*id, strings.Split(*cluster, ","), *join, getSnapshot, proposeC, confChangeC) - - kvs = newKVStore(<-snapshotterReady, proposeC, commitC, errorC) - - // the key-value http handler will propose updates to raft - serveHTTPKVAPI(kvs, *kvport, confChangeC, errorC) -} diff --git a/contrib/raftexample/raft.go b/contrib/raftexample/raft.go deleted file mode 100644 index 971141ae359..00000000000 --- a/contrib/raftexample/raft.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "context" - "fmt" - "log" - "net/http" - "net/url" - "os" - "strconv" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" - - "go.uber.org/zap" -) - -type commit struct { - data []string - applyDoneC chan<- struct{} -} - -// A key-value stream backed by raft -type raftNode struct { - proposeC <-chan string // proposed messages (k,v) - confChangeC <-chan raftpb.ConfChange // proposed cluster config changes - commitC chan<- *commit // entries committed to log (k,v) - errorC chan<- error // errors from raft session - - id int // client ID for raft session - peers []string // raft peer URLs - join bool // node is joining an existing cluster - waldir string // path to WAL directory - snapdir string // path to snapshot directory - getSnapshot func() ([]byte, error) - - confState raftpb.ConfState - snapshotIndex uint64 - appliedIndex uint64 - - // raft backing for the commit/error channel - node raft.Node - raftStorage *raft.MemoryStorage - wal *wal.WAL - - snapshotter *snap.Snapshotter - snapshotterReady chan *snap.Snapshotter // signals when snapshotter is ready - - snapCount uint64 - transport *rafthttp.Transport - stopc chan struct{} // signals proposal channel closed - httpstopc chan struct{} // signals http server to shutdown - httpdonec chan struct{} // signals http server shutdown complete - - logger *zap.Logger -} - -var defaultSnapshotCount uint64 = 10000 - -// newRaftNode initiates a raft instance and returns a committed log entry -// channel and error channel. Proposals for log updates are sent over the -// provided the proposal channel. 
All log entries are replayed over the -// commit channel, followed by a nil message (to indicate the channel is -// current), then new log entries. To shutdown, close proposeC and read errorC. -func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error), proposeC <-chan string, - confChangeC <-chan raftpb.ConfChange) (<-chan *commit, <-chan error, <-chan *snap.Snapshotter) { - - commitC := make(chan *commit) - errorC := make(chan error) - - rc := &raftNode{ - proposeC: proposeC, - confChangeC: confChangeC, - commitC: commitC, - errorC: errorC, - id: id, - peers: peers, - join: join, - waldir: fmt.Sprintf("raftexample-%d", id), - snapdir: fmt.Sprintf("raftexample-%d-snap", id), - getSnapshot: getSnapshot, - snapCount: defaultSnapshotCount, - stopc: make(chan struct{}), - httpstopc: make(chan struct{}), - httpdonec: make(chan struct{}), - - logger: zap.NewExample(), - - snapshotterReady: make(chan *snap.Snapshotter, 1), - // rest of structure populated after WAL replay - } - go rc.startRaft() - return commitC, errorC, rc.snapshotterReady -} - -func (rc *raftNode) saveSnap(snap raftpb.Snapshot) error { - walSnap := walpb.Snapshot{ - Index: snap.Metadata.Index, - Term: snap.Metadata.Term, - ConfState: &snap.Metadata.ConfState, - } - // save the snapshot file before writing the snapshot to the wal. - // This makes it possible for the snapshot file to become orphaned, but prevents - // a WAL snapshot entry from having no corresponding snapshot file. 
- if err := rc.snapshotter.SaveSnap(snap); err != nil { - return err - } - if err := rc.wal.SaveSnapshot(walSnap); err != nil { - return err - } - return rc.wal.ReleaseLockTo(snap.Metadata.Index) -} - -func (rc *raftNode) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) { - if len(ents) == 0 { - return ents - } - firstIdx := ents[0].Index - if firstIdx > rc.appliedIndex+1 { - log.Fatalf("first index of committed entry[%d] should <= progress.appliedIndex[%d]+1", firstIdx, rc.appliedIndex) - } - if rc.appliedIndex-firstIdx+1 < uint64(len(ents)) { - nents = ents[rc.appliedIndex-firstIdx+1:] - } - return nents -} - -// publishEntries writes committed log entries to commit channel and returns -// whether all entries could be published. -func (rc *raftNode) publishEntries(ents []raftpb.Entry) (<-chan struct{}, bool) { - if len(ents) == 0 { - return nil, true - } - - data := make([]string, 0, len(ents)) - for i := range ents { - switch ents[i].Type { - case raftpb.EntryNormal: - if len(ents[i].Data) == 0 { - // ignore empty messages - break - } - s := string(ents[i].Data) - data = append(data, s) - case raftpb.EntryConfChange: - var cc raftpb.ConfChange - cc.Unmarshal(ents[i].Data) - rc.confState = *rc.node.ApplyConfChange(cc) - switch cc.Type { - case raftpb.ConfChangeAddNode: - if len(cc.Context) > 0 { - rc.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)}) - } - case raftpb.ConfChangeRemoveNode: - if cc.NodeID == uint64(rc.id) { - log.Println("I've been removed from the cluster! 
Shutting down.") - return nil, false - } - rc.transport.RemovePeer(types.ID(cc.NodeID)) - } - } - } - - var applyDoneC chan struct{} - - if len(data) > 0 { - applyDoneC = make(chan struct{}, 1) - select { - case rc.commitC <- &commit{data, applyDoneC}: - case <-rc.stopc: - return nil, false - } - } - - // after commit, update appliedIndex - rc.appliedIndex = ents[len(ents)-1].Index - - return applyDoneC, true -} - -func (rc *raftNode) loadSnapshot() *raftpb.Snapshot { - if wal.Exist(rc.waldir) { - walSnaps, err := wal.ValidSnapshotEntries(rc.logger, rc.waldir) - if err != nil { - log.Fatalf("raftexample: error listing snapshots (%v)", err) - } - snapshot, err := rc.snapshotter.LoadNewestAvailable(walSnaps) - if err != nil && err != snap.ErrNoSnapshot { - log.Fatalf("raftexample: error loading snapshot (%v)", err) - } - return snapshot - } - return &raftpb.Snapshot{} -} - -// openWAL returns a WAL ready for reading. -func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot) *wal.WAL { - if !wal.Exist(rc.waldir) { - if err := os.Mkdir(rc.waldir, 0750); err != nil { - log.Fatalf("raftexample: cannot create dir for wal (%v)", err) - } - - w, err := wal.Create(zap.NewExample(), rc.waldir, nil) - if err != nil { - log.Fatalf("raftexample: create wal error (%v)", err) - } - w.Close() - } - - walsnap := walpb.Snapshot{} - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - log.Printf("loading WAL at term %d and index %d", walsnap.Term, walsnap.Index) - w, err := wal.Open(zap.NewExample(), rc.waldir, walsnap) - if err != nil { - log.Fatalf("raftexample: error loading wal (%v)", err) - } - - return w -} - -// replayWAL replays WAL entries into the raft instance. 
-func (rc *raftNode) replayWAL() *wal.WAL { - log.Printf("replaying WAL of member %d", rc.id) - snapshot := rc.loadSnapshot() - w := rc.openWAL(snapshot) - _, st, ents, err := w.ReadAll() - if err != nil { - log.Fatalf("raftexample: failed to read WAL (%v)", err) - } - rc.raftStorage = raft.NewMemoryStorage() - if snapshot != nil { - rc.raftStorage.ApplySnapshot(*snapshot) - } - rc.raftStorage.SetHardState(st) - - // append to storage so raft starts at the right place in log - rc.raftStorage.Append(ents) - - return w -} - -func (rc *raftNode) writeError(err error) { - rc.stopHTTP() - close(rc.commitC) - rc.errorC <- err - close(rc.errorC) - rc.node.Stop() -} - -func (rc *raftNode) startRaft() { - if !fileutil.Exist(rc.snapdir) { - if err := os.Mkdir(rc.snapdir, 0750); err != nil { - log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err) - } - } - rc.snapshotter = snap.New(zap.NewExample(), rc.snapdir) - - oldwal := wal.Exist(rc.waldir) - rc.wal = rc.replayWAL() - - // signal replay has finished - rc.snapshotterReady <- rc.snapshotter - - rpeers := make([]raft.Peer, len(rc.peers)) - for i := range rpeers { - rpeers[i] = raft.Peer{ID: uint64(i + 1)} - } - c := &raft.Config{ - ID: uint64(rc.id), - ElectionTick: 10, - HeartbeatTick: 1, - Storage: rc.raftStorage, - MaxSizePerMsg: 1024 * 1024, - MaxInflightMsgs: 256, - MaxUncommittedEntriesSize: 1 << 30, - } - - if oldwal || rc.join { - rc.node = raft.RestartNode(c) - } else { - rc.node = raft.StartNode(c, rpeers) - } - - rc.transport = &rafthttp.Transport{ - Logger: rc.logger, - ID: types.ID(rc.id), - ClusterID: 0x1000, - Raft: rc, - ServerStats: stats.NewServerStats("", ""), - LeaderStats: stats.NewLeaderStats(zap.NewExample(), strconv.Itoa(rc.id)), - ErrorC: make(chan error), - } - - rc.transport.Start() - for i := range rc.peers { - if i+1 != rc.id { - rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]}) - } - } - - go rc.serveRaft() - go rc.serveChannels() -} - -// stop closes http, closes all 
channels, and stops raft. -func (rc *raftNode) stop() { - rc.stopHTTP() - close(rc.commitC) - close(rc.errorC) - rc.node.Stop() -} - -func (rc *raftNode) stopHTTP() { - rc.transport.Stop() - close(rc.httpstopc) - <-rc.httpdonec -} - -func (rc *raftNode) publishSnapshot(snapshotToSave raftpb.Snapshot) { - if raft.IsEmptySnap(snapshotToSave) { - return - } - - log.Printf("publishing snapshot at index %d", rc.snapshotIndex) - defer log.Printf("finished publishing snapshot at index %d", rc.snapshotIndex) - - if snapshotToSave.Metadata.Index <= rc.appliedIndex { - log.Fatalf("snapshot index [%d] should > progress.appliedIndex [%d]", snapshotToSave.Metadata.Index, rc.appliedIndex) - } - rc.commitC <- nil // trigger kvstore to load snapshot - - rc.confState = snapshotToSave.Metadata.ConfState - rc.snapshotIndex = snapshotToSave.Metadata.Index - rc.appliedIndex = snapshotToSave.Metadata.Index -} - -var snapshotCatchUpEntriesN uint64 = 10000 - -func (rc *raftNode) maybeTriggerSnapshot(applyDoneC <-chan struct{}) { - if rc.appliedIndex-rc.snapshotIndex <= rc.snapCount { - return - } - - // wait until all committed entries are applied (or server is closed) - if applyDoneC != nil { - select { - case <-applyDoneC: - case <-rc.stopc: - return - } - } - - log.Printf("start snapshot [applied index: %d | last snapshot index: %d]", rc.appliedIndex, rc.snapshotIndex) - data, err := rc.getSnapshot() - if err != nil { - log.Panic(err) - } - snap, err := rc.raftStorage.CreateSnapshot(rc.appliedIndex, &rc.confState, data) - if err != nil { - panic(err) - } - if err := rc.saveSnap(snap); err != nil { - panic(err) - } - - compactIndex := uint64(1) - if rc.appliedIndex > snapshotCatchUpEntriesN { - compactIndex = rc.appliedIndex - snapshotCatchUpEntriesN - } - if err := rc.raftStorage.Compact(compactIndex); err != nil { - panic(err) - } - - log.Printf("compacted log at index %d", compactIndex) - rc.snapshotIndex = rc.appliedIndex -} - -func (rc *raftNode) serveChannels() { - snap, err := 
rc.raftStorage.Snapshot() - if err != nil { - panic(err) - } - rc.confState = snap.Metadata.ConfState - rc.snapshotIndex = snap.Metadata.Index - rc.appliedIndex = snap.Metadata.Index - - defer rc.wal.Close() - - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - // send proposals over raft - go func() { - confChangeCount := uint64(0) - - for rc.proposeC != nil && rc.confChangeC != nil { - select { - case prop, ok := <-rc.proposeC: - if !ok { - rc.proposeC = nil - } else { - // blocks until accepted by raft state machine - rc.node.Propose(context.TODO(), []byte(prop)) - } - - case cc, ok := <-rc.confChangeC: - if !ok { - rc.confChangeC = nil - } else { - confChangeCount++ - cc.ID = confChangeCount - rc.node.ProposeConfChange(context.TODO(), cc) - } - } - } - // client closed channel; shutdown raft if not already - close(rc.stopc) - }() - - // event loop on raft state machine updates - for { - select { - case <-ticker.C: - rc.node.Tick() - - // store raft entries to wal, then publish over commit channel - case rd := <-rc.node.Ready(): - // Must save the snapshot file and WAL snapshot entry before saving any other entries - // or hardstate to ensure that recovery after a snapshot restore is possible. - if !raft.IsEmptySnap(rd.Snapshot) { - rc.saveSnap(rd.Snapshot) - } - rc.wal.Save(rd.HardState, rd.Entries) - if !raft.IsEmptySnap(rd.Snapshot) { - rc.raftStorage.ApplySnapshot(rd.Snapshot) - rc.publishSnapshot(rd.Snapshot) - } - rc.raftStorage.Append(rd.Entries) - rc.transport.Send(rc.processMessages(rd.Messages)) - applyDoneC, ok := rc.publishEntries(rc.entriesToApply(rd.CommittedEntries)) - if !ok { - rc.stop() - return - } - rc.maybeTriggerSnapshot(applyDoneC) - rc.node.Advance() - - case err := <-rc.transport.ErrorC: - rc.writeError(err) - return - - case <-rc.stopc: - rc.stop() - return - } - } -} - -// When there is a `raftpb.EntryConfChange` after creating the snapshot, -// then the confState included in the snapshot is out of date. 
so We need -// to update the confState before sending a snapshot to a follower. -func (rc *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { - for i := 0; i < len(ms); i++ { - if ms[i].Type == raftpb.MsgSnap { - ms[i].Snapshot.Metadata.ConfState = rc.confState - } - } - return ms -} - -func (rc *raftNode) serveRaft() { - url, err := url.Parse(rc.peers[rc.id-1]) - if err != nil { - log.Fatalf("raftexample: Failed parsing URL (%v)", err) - } - - ln, err := newStoppableListener(url.Host, rc.httpstopc) - if err != nil { - log.Fatalf("raftexample: Failed to listen rafthttp (%v)", err) - } - - err = (&http.Server{Handler: rc.transport.Handler()}).Serve(ln) - select { - case <-rc.httpstopc: - default: - log.Fatalf("raftexample: Failed to serve rafthttp (%v)", err) - } - close(rc.httpdonec) -} - -func (rc *raftNode) Process(ctx context.Context, m raftpb.Message) error { - return rc.node.Step(ctx, m) -} -func (rc *raftNode) IsIDRemoved(id uint64) bool { return false } -func (rc *raftNode) ReportUnreachable(id uint64) { rc.node.ReportUnreachable(id) } -func (rc *raftNode) ReportSnapshot(id uint64, status raft.SnapshotStatus) { - rc.node.ReportSnapshot(id, status) -} diff --git a/contrib/raftexample/raft_test.go b/contrib/raftexample/raft_test.go deleted file mode 100644 index 5a0385be226..00000000000 --- a/contrib/raftexample/raft_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "reflect" - "testing" - - "go.etcd.io/raft/v3/raftpb" -) - -func TestProcessMessages(t *testing.T) { - cases := []struct { - name string - confState raftpb.ConfState - InputMessages []raftpb.Message - ExpectedMessages []raftpb.Message - }{ - { - name: "only one snapshot message", - confState: raftpb.ConfState{ - Voters: []uint64{2, 6, 8, 10}, - }, - InputMessages: []raftpb.Message{ - { - Type: raftpb.MsgSnap, - To: 8, - Snapshot: &raftpb.Snapshot{ - Metadata: raftpb.SnapshotMetadata{ - Index: 100, - Term: 3, - ConfState: raftpb.ConfState{ - Voters: []uint64{2, 6, 8}, - AutoLeave: true, - }, - }, - }, - }, - }, - ExpectedMessages: []raftpb.Message{ - { - Type: raftpb.MsgSnap, - To: 8, - Snapshot: &raftpb.Snapshot{ - Metadata: raftpb.SnapshotMetadata{ - Index: 100, - Term: 3, - ConfState: raftpb.ConfState{ - Voters: []uint64{2, 6, 8, 10}, - }, - }, - }, - }, - }, - }, - { - name: "one snapshot message and one other message", - confState: raftpb.ConfState{ - Voters: []uint64{2, 7, 8, 12}, - }, - InputMessages: []raftpb.Message{ - { - Type: raftpb.MsgSnap, - To: 8, - Snapshot: &raftpb.Snapshot{ - Metadata: raftpb.SnapshotMetadata{ - Index: 100, - Term: 3, - ConfState: raftpb.ConfState{ - Voters: []uint64{2, 6, 8}, - AutoLeave: true, - }, - }, - }, - }, - { - Type: raftpb.MsgApp, - From: 6, - To: 8, - }, - }, - ExpectedMessages: []raftpb.Message{ - { - Type: raftpb.MsgSnap, - To: 8, - Snapshot: &raftpb.Snapshot{ - Metadata: raftpb.SnapshotMetadata{ - Index: 100, - Term: 3, - ConfState: raftpb.ConfState{ - Voters: []uint64{2, 7, 8, 12}, - }, - }, - }, - }, - { - Type: raftpb.MsgApp, - From: 6, - To: 8, - }, - }, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - rn := &raftNode{ - confState: tc.confState, - } - - outputMessages := rn.processMessages(tc.InputMessages) - - if !reflect.DeepEqual(outputMessages, 
tc.ExpectedMessages) { - t.Fatalf("Unexpected messages, expected: %v, got %v", tc.ExpectedMessages, outputMessages) - } - }) - } -} diff --git a/contrib/raftexample/raftexample_test.go b/contrib/raftexample/raftexample_test.go deleted file mode 100644 index f7aa335eb04..00000000000 --- a/contrib/raftexample/raftexample_test.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/http/httptest" - "os" - "sync" - "testing" - "time" - - "go.etcd.io/raft/v3/raftpb" -) - -func getSnapshotFn() (func() ([]byte, error), <-chan struct{}) { - snapshotTriggeredC := make(chan struct{}) - return func() ([]byte, error) { - snapshotTriggeredC <- struct{}{} - return nil, nil - }, snapshotTriggeredC -} - -type cluster struct { - peers []string - commitC []<-chan *commit - errorC []<-chan error - proposeC []chan string - confChangeC []chan raftpb.ConfChange - snapshotTriggeredC []<-chan struct{} -} - -// newCluster creates a cluster of n nodes -func newCluster(n int) *cluster { - peers := make([]string, n) - for i := range peers { - peers[i] = fmt.Sprintf("http://127.0.0.1:%d", 10000+i) - } - - clus := &cluster{ - peers: peers, - commitC: make([]<-chan *commit, len(peers)), - errorC: make([]<-chan error, len(peers)), - proposeC: make([]chan string, len(peers)), - confChangeC: make([]chan raftpb.ConfChange, len(peers)), - snapshotTriggeredC: 
make([]<-chan struct{}, len(peers)), - } - - for i := range clus.peers { - os.RemoveAll(fmt.Sprintf("raftexample-%d", i+1)) - os.RemoveAll(fmt.Sprintf("raftexample-%d-snap", i+1)) - clus.proposeC[i] = make(chan string, 1) - clus.confChangeC[i] = make(chan raftpb.ConfChange, 1) - fn, snapshotTriggeredC := getSnapshotFn() - clus.snapshotTriggeredC[i] = snapshotTriggeredC - clus.commitC[i], clus.errorC[i], _ = newRaftNode(i+1, clus.peers, false, fn, clus.proposeC[i], clus.confChangeC[i]) - } - - return clus -} - -// Close closes all cluster nodes and returns an error if any failed. -func (clus *cluster) Close() (err error) { - for i := range clus.peers { - go func(i int) { - for range clus.commitC[i] { - // drain pending commits - } - }(i) - close(clus.proposeC[i]) - // wait for channel to close - if erri := <-clus.errorC[i]; erri != nil { - err = erri - } - // clean intermediates - os.RemoveAll(fmt.Sprintf("raftexample-%d", i+1)) - os.RemoveAll(fmt.Sprintf("raftexample-%d-snap", i+1)) - } - return err -} - -func (clus *cluster) closeNoErrors(t *testing.T) { - t.Log("closing cluster...") - if err := clus.Close(); err != nil { - t.Fatal(err) - } - t.Log("closing cluster [done]") -} - -// TestProposeOnCommit starts three nodes and feeds commits back into the proposal -// channel. The intent is to ensure blocking on a proposal won't block raft progress. 
-func TestProposeOnCommit(t *testing.T) { - clus := newCluster(3) - defer clus.closeNoErrors(t) - - donec := make(chan struct{}) - for i := range clus.peers { - // feedback for "n" committed entries, then update donec - go func(pC chan<- string, cC <-chan *commit, eC <-chan error) { - for n := 0; n < 100; n++ { - c, ok := <-cC - if !ok { - pC = nil - } - select { - case pC <- c.data[0]: - continue - case err := <-eC: - t.Errorf("eC message (%v)", err) - } - } - donec <- struct{}{} - for range cC { - // acknowledge the commits from other nodes so - // raft continues to make progress - } - }(clus.proposeC[i], clus.commitC[i], clus.errorC[i]) - - // one message feedback per node - go func(i int) { clus.proposeC[i] <- "foo" }(i) - } - - for range clus.peers { - <-donec - } -} - -// TestCloseProposerBeforeReplay tests closing the producer before raft starts. -func TestCloseProposerBeforeReplay(t *testing.T) { - clus := newCluster(1) - // close before replay so raft never starts - defer clus.closeNoErrors(t) -} - -// TestCloseProposerInflight tests closing the producer while -// committed messages are being published to the client. 
-func TestCloseProposerInflight(t *testing.T) { - clus := newCluster(1) - defer clus.closeNoErrors(t) - - var wg sync.WaitGroup - wg.Add(1) - - // some inflight ops - go func() { - defer wg.Done() - clus.proposeC[0] <- "foo" - clus.proposeC[0] <- "bar" - }() - - // wait for one message - if c, ok := <-clus.commitC[0]; !ok || c.data[0] != "foo" { - t.Fatalf("Commit failed") - } - - wg.Wait() -} - -func TestPutAndGetKeyValue(t *testing.T) { - clusters := []string{"http://127.0.0.1:9021"} - - proposeC := make(chan string) - defer close(proposeC) - - confChangeC := make(chan raftpb.ConfChange) - defer close(confChangeC) - - var kvs *kvstore - getSnapshot := func() ([]byte, error) { return kvs.getSnapshot() } - commitC, errorC, snapshotterReady := newRaftNode(1, clusters, false, getSnapshot, proposeC, confChangeC) - - kvs = newKVStore(<-snapshotterReady, proposeC, commitC, errorC) - - srv := httptest.NewServer(&httpKVAPI{ - store: kvs, - confChangeC: confChangeC, - }) - defer srv.Close() - - // wait server started - <-time.After(time.Second * 3) - - wantKey, wantValue := "test-key", "test-value" - url := fmt.Sprintf("%s/%s", srv.URL, wantKey) - body := bytes.NewBufferString(wantValue) - cli := srv.Client() - - req, err := http.NewRequest("PUT", url, body) - if err != nil { - t.Fatal(err) - } - req.Header.Set("Content-Type", "text/html; charset=utf-8") - _, err = cli.Do(req) - if err != nil { - t.Fatal(err) - } - - // wait for a moment for processing message, otherwise get would be failed. - <-time.After(time.Second) - - resp, err := cli.Get(url) - if err != nil { - t.Fatal(err) - } - - data, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - if gotValue := string(data); wantValue != gotValue { - t.Fatalf("expect %s, got %s", wantValue, gotValue) - } -} - -// TestAddNewNode tests adding new node to the existing cluster. 
-func TestAddNewNode(t *testing.T) { - clus := newCluster(3) - defer clus.closeNoErrors(t) - - os.RemoveAll("raftexample-4") - os.RemoveAll("raftexample-4-snap") - defer func() { - os.RemoveAll("raftexample-4") - os.RemoveAll("raftexample-4-snap") - }() - - newNodeURL := "http://127.0.0.1:10004" - clus.confChangeC[0] <- raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 4, - Context: []byte(newNodeURL), - } - - proposeC := make(chan string) - defer close(proposeC) - - confChangeC := make(chan raftpb.ConfChange) - defer close(confChangeC) - - newRaftNode(4, append(clus.peers, newNodeURL), true, nil, proposeC, confChangeC) - - go func() { - proposeC <- "foo" - }() - - if c, ok := <-clus.commitC[0]; !ok || c.data[0] != "foo" { - t.Fatalf("Commit failed") - } -} - -func TestSnapshot(t *testing.T) { - prevDefaultSnapshotCount := defaultSnapshotCount - prevSnapshotCatchUpEntriesN := snapshotCatchUpEntriesN - defaultSnapshotCount = 4 - snapshotCatchUpEntriesN = 4 - defer func() { - defaultSnapshotCount = prevDefaultSnapshotCount - snapshotCatchUpEntriesN = prevSnapshotCatchUpEntriesN - }() - - clus := newCluster(3) - defer clus.closeNoErrors(t) - - go func() { - clus.proposeC[0] <- "foo" - }() - - c := <-clus.commitC[0] - - select { - case <-clus.snapshotTriggeredC[0]: - t.Fatalf("snapshot triggered before applying done") - default: - } - close(c.applyDoneC) - <-clus.snapshotTriggeredC[0] -} diff --git a/contrib/systemd/etcd.service b/contrib/systemd/etcd.service deleted file mode 100644 index 8fc0570c6dd..00000000000 --- a/contrib/systemd/etcd.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=etcd key-value store -Documentation=https://github.com/etcd-io/etcd -After=network-online.target local-fs.target remote-fs.target time-sync.target -Wants=network-online.target local-fs.target remote-fs.target time-sync.target - -[Service] -User=etcd -Type=notify -Environment=ETCD_DATA_DIR=/var/lib/etcd -Environment=ETCD_NAME=%m -ExecStart=/usr/bin/etcd 
-Restart=always -RestartSec=10s -LimitNOFILE=40000 - -[Install] -WantedBy=multi-user.target diff --git a/contrib/systemd/etcd3-multinode/README.md b/contrib/systemd/etcd3-multinode/README.md deleted file mode 100644 index cab9ab4d4cb..00000000000 --- a/contrib/systemd/etcd3-multinode/README.md +++ /dev/null @@ -1,173 +0,0 @@ -# etcd3 multi-node cluster - -Here's how to deploy etcd cluster with systemd. - -## Set up data directory - -etcd needs data directory on host machine. Configure the data directory accessible to systemd as: - -``` -sudo mkdir -p /var/lib/etcd -sudo chown -R root:$(whoami) /var/lib/etcd -sudo chmod -R a+rw /var/lib/etcd -``` - -## Write systemd service file - -In each machine, write etcd systemd service files: - -``` -cat > /tmp/my-etcd-1.service < /tmp/my-etcd-2.service < /tmp/my-etcd-3.service <] + storageType: S3 + backupPolicy: + # 0 > enable periodic backup + backupIntervalInSecond: 125 + maxBackups: 4 + s3: + # The format of "path" must be: "/" + # e.g: "mybucket/etcd.backup" + path: + awsSecret: \ No newline at end of file diff --git a/etcd-io-chaos.yaml b/etcd-io-chaos.yaml new file mode 100644 index 00000000000..bc332c0466d --- /dev/null +++ b/etcd-io-chaos.yaml @@ -0,0 +1,17 @@ +apiVersion: chaos-mesh.org/v1alpha1 +kind: IoChaos +metadata: + name: io-delay-example +spec: + action: latency + mode: one + selector: + labelSelectors: + app: etcd + volumePath: /var/run/etcd + path: '/var/run/etcd/**/*' + delay: '100ms' + percent: 50 + duration: '400s' + scheduler: + cron: '@every 10m' \ No newline at end of file diff --git a/etcd-monitor.yaml b/etcd-monitor.yaml new file mode 100644 index 00000000000..8ae25bf3697 --- /dev/null +++ b/etcd-monitor.yaml @@ -0,0 +1,20 @@ +apiVersion: etcd.cloud.tencent.com/v1beta1 +kind: EtcdMonitor +metadata: + labels: + clusterName: gz-qcloud-etcd-03 + region: gz + name: gz-qcloud-etcd-03-etcd-node-key-diff + namespace: gz +spec: + clusterId: gz-qcloud-etcd-03 + metricName: etcd-node-key-diff + 
metricProviderName: cruiser + name: gz-qcloud-etcd-03 + productName: tke + region: gz + status: + records: + - endTime: "2021-02-25T11:22:26Z" + message: collectEtcdNodeKeyDiff,etcd cluster gz-qcloud-etcd-03,total key num is + 122143,nodeKeyDiff is 0 diff --git a/etcd.conf.yml.sample b/etcd.conf.yml.sample deleted file mode 100644 index 38d74bcb793..00000000000 --- a/etcd.conf.yml.sample +++ /dev/null @@ -1,140 +0,0 @@ -# This is the configuration file for the etcd server. - -# Human-readable name for this member. -name: 'default' - -# Path to the data directory. -data-dir: - -# Path to the dedicated wal directory. -wal-dir: - -# Number of committed transactions to trigger a snapshot to disk. -snapshot-count: 10000 - -# Time (in milliseconds) of a heartbeat interval. -heartbeat-interval: 100 - -# Time (in milliseconds) for an election to timeout. -election-timeout: 1000 - -# Raise alarms when backend size exceeds the given quota. 0 means use the -# default quota. -quota-backend-bytes: 0 - -# List of comma separated URLs to listen on for peer traffic. -listen-peer-urls: http://localhost:2380 - -# List of comma separated URLs to listen on for client traffic. -listen-client-urls: http://localhost:2379 - -# Maximum number of snapshot files to retain (0 is unlimited). -max-snapshots: 5 - -# Maximum number of wal files to retain (0 is unlimited). -max-wals: 5 - -# Comma-separated white list of origins for CORS (cross-origin resource sharing). -cors: - -# List of this member's peer URLs to advertise to the rest of the cluster. -# The URLs needed to be a comma-separated list. -initial-advertise-peer-urls: http://localhost:2380 - -# List of this member's client URLs to advertise to the public. -# The URLs needed to be a comma-separated list. -advertise-client-urls: http://localhost:2379 - -# Discovery URL used to bootstrap the cluster. -discovery: - -# Valid values include 'exit', 'proxy' -discovery-fallback: 'proxy' - -# HTTP proxy to use for traffic to discovery service. 
-discovery-proxy: - -# DNS domain used to bootstrap initial cluster. -discovery-srv: - -# Initial cluster configuration for bootstrapping. -initial-cluster: - -# Initial cluster token for the etcd cluster during bootstrap. -initial-cluster-token: 'etcd-cluster' - -# Initial cluster state ('new' or 'existing'). -initial-cluster-state: 'new' - -# Reject reconfiguration requests that would cause quorum loss. -strict-reconfig-check: false - -# Enable runtime profiling data via HTTP server -enable-pprof: true - -# Valid values include 'on', 'readonly', 'off' -proxy: 'off' - -# Time (in milliseconds) an endpoint will be held in a failed state. -proxy-failure-wait: 5000 - -# Time (in milliseconds) of the endpoints refresh interval. -proxy-refresh-interval: 30000 - -# Time (in milliseconds) for a dial to timeout. -proxy-dial-timeout: 1000 - -# Time (in milliseconds) for a write to timeout. -proxy-write-timeout: 5000 - -# Time (in milliseconds) for a read to timeout. -proxy-read-timeout: 0 - -client-transport-security: - # Path to the client server TLS cert file. - cert-file: - - # Path to the client server TLS key file. - key-file: - - # Enable client cert authentication. - client-cert-auth: false - - # Path to the client server TLS trusted CA cert file. - trusted-ca-file: - - # Client TLS using generated certificates - auto-tls: false - -peer-transport-security: - # Path to the peer server TLS cert file. - cert-file: - - # Path to the peer server TLS key file. - key-file: - - # Enable peer client cert authentication. - client-cert-auth: false - - # Path to the peer server TLS trusted CA cert file. - trusted-ca-file: - - # Peer TLS using generated certificates. - auto-tls: false - -# The validity period of the self-signed certificate, the unit is year. -self-signed-cert-validity: 1 - -# Enable debug-level logging for etcd. -log-level: debug - -logger: zap - -# Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd. 
-log-outputs: [stderr] - -# Force to create a new one member cluster. -force-new-cluster: false - -auto-compaction-mode: periodic -auto-compaction-retention: "1" diff --git a/server/auth/doc.go b/etcd/auth/doc.go similarity index 100% rename from server/auth/doc.go rename to etcd/auth/doc.go diff --git a/etcd/auth/jwt_token.go b/etcd/auth/jwt_token.go new file mode 100644 index 00000000000..e0dcc88e7a4 --- /dev/null +++ b/etcd/auth/jwt_token.go @@ -0,0 +1,163 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "context" + "crypto/ecdsa" + "crypto/rsa" + "errors" + "time" + + jwt "github.com/form3tech-oss/jwt-go" + "go.uber.org/zap" +) + +type tokenJWT struct { + lg *zap.Logger + signMethod jwt.SigningMethod + key interface{} + ttl time.Duration + verifyOnly bool +} + +func (t *tokenJWT) enable() {} +func (t *tokenJWT) disable() {} +func (t *tokenJWT) invalidateUser(string) {} +func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } + +// 从ctx中的token获取用户信息 +func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + // rev isn't used in JWT, it is only used in simple token + var ( + username string + revision uint64 + ) + + parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + if token.Method.Alg() != t.signMethod.Alg() { + return nil, errors.New("invalid signing method") + } + switch k := t.key.(type) { + case *rsa.PrivateKey: + return &k.PublicKey, nil + case *ecdsa.PrivateKey: + return &k.PublicKey, nil + default: + return t.key, nil + } + }) + if err != nil { + t.lg.Warn( + "failed to parse a JWT token", + zap.String("token", token), + zap.Error(err), + ) + return nil, false + } + + claims, ok := parsed.Claims.(jwt.MapClaims) + if !parsed.Valid || !ok { + t.lg.Warn("invalid JWT token", zap.String("token", token)) + return nil, false + } + + username = claims["username"].(string) + revision = uint64(claims["revision"].(float64)) + + return &AuthInfo{Username: username, Revision: revision}, true +} + +func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { + if t.verifyOnly { + return "", ErrVerifyOnly + } + + // Future work: let a jwt token include permission information would be useful for + // permission checking in proxy side. 
+ tk := jwt.NewWithClaims(t.signMethod, + jwt.MapClaims{ + "username": username, + "revision": revision, + "exp": time.Now().Add(t.ttl).Unix(), + }) + + token, err := tk.SignedString(t.key) + if err != nil { + t.lg.Debug( + "failed to sign a JWT token", + zap.String("user-name", username), + zap.Uint64("revision", revision), + zap.Error(err), + ) + return "", err + } + + t.lg.Debug( + "created/assigned a new JWT token", + zap.String("user-name", username), + zap.Uint64("revision", revision), + zap.String("token", token), + ) + return token, err +} + +func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, error) { + if lg == nil { + lg = zap.NewNop() + } + var err error + var opts jwtOptions + err = opts.ParseWithDefaults(optMap) + if err != nil { + lg.Error("problem loading JWT options", zap.Error(err)) + return nil, ErrInvalidAuthOpts + } + + keys := make([]string, 0, len(optMap)) + for k := range optMap { + if !knownOptions[k] { + keys = append(keys, k) + } + } + if len(keys) > 0 { + lg.Warn("unknown JWT options", zap.Strings("keys", keys)) + } + + key, err := opts.Key() + if err != nil { + return nil, err + } + + t := &tokenJWT{ + lg: lg, + ttl: opts.TTL, + signMethod: opts.SignMethod, + key: key, + } + + switch t.signMethod.(type) { + case *jwt.SigningMethodECDSA: + if _, ok := t.key.(*ecdsa.PublicKey); ok { + t.verifyOnly = true + } + case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: + if _, ok := t.key.(*rsa.PublicKey); ok { + t.verifyOnly = true + } + } + + return t, nil +} diff --git a/server/auth/nop.go b/etcd/auth/nop.go similarity index 99% rename from server/auth/nop.go rename to etcd/auth/nop.go index d4378747bd8..8ba3f8c893c 100644 --- a/server/auth/nop.go +++ b/etcd/auth/nop.go @@ -27,9 +27,11 @@ func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil } func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { return nil, false } + func (t *tokenNop) assign(ctx context.Context, 
username string, revision uint64) (string, error) { return "", ErrAuthFailed } + func newTokenProviderNop() (*tokenNop, error) { return &tokenNop{}, nil } diff --git a/etcd/auth/options.go b/etcd/auth/options.go new file mode 100644 index 00000000000..633df6635b5 --- /dev/null +++ b/etcd/auth/options.go @@ -0,0 +1,191 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "io/ioutil" + "time" + + jwt "github.com/form3tech-oss/jwt-go" +) + +const ( + optSignMethod = "sign-method" + optPublicKey = "pub-key" + optPrivateKey = "priv-key" + optTTL = "ttl" +) + +var knownOptions = map[string]bool{ + optSignMethod: true, + optPublicKey: true, + optPrivateKey: true, + optTTL: true, +} + +// DefaultTTL will be used when a 'ttl' is not specified +var DefaultTTL = 5 * time.Minute + +type jwtOptions struct { + SignMethod jwt.SigningMethod // jwt header里存储的签名方法 + PublicKey []byte + PrivateKey []byte + TTL time.Duration +} + +// ParseWithDefaults will load options from the specified map or set defaults where appropriate +func (opts *jwtOptions) ParseWithDefaults(optMap map[string]string) error { + if opts.TTL == 0 && optMap[optTTL] == "" { + opts.TTL = DefaultTTL + } + + return opts.Parse(optMap) +} + +// Parse will load options from the specified map +func (opts *jwtOptions) Parse(optMap map[string]string) error { + var err error + if ttl := optMap[optTTL]; ttl != "" { 
+ opts.TTL, err = time.ParseDuration(ttl) + if err != nil { + return err + } + } + + if file := optMap[optPublicKey]; file != "" { + opts.PublicKey, err = ioutil.ReadFile(file) + if err != nil { + return err + } + } + + if file := optMap[optPrivateKey]; file != "" { + opts.PrivateKey, err = ioutil.ReadFile(file) + if err != nil { + return err + } + } + + // signing method is a required field + method := optMap[optSignMethod] + opts.SignMethod = jwt.GetSigningMethod(method) + if opts.SignMethod == nil { + return ErrInvalidAuthMethod + } + + return nil +} + +// Key will parse and return the appropriately typed key for the selected signature method +// --auth-token jwt,ttl=30s,sign-method=HS256 +func (opts *jwtOptions) Key() (interface{}, error) { + switch opts.SignMethod.(type) { + case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: + return opts.rsaKey() + case *jwt.SigningMethodECDSA: // ES256、ES384、ES512 公钥、私钥至少一个 + return opts.ecKey() + case *jwt.SigningMethodHMAC: // HS256、 HS384、HS512 需要私钥 + return opts.hmacKey() + default: + return nil, fmt.Errorf("unsupported signing method: %T", opts.SignMethod) + } +} + +func (opts *jwtOptions) hmacKey() (interface{}, error) { + if len(opts.PrivateKey) == 0 { + return nil, ErrMissingKey + } + return opts.PrivateKey, nil +} + +func (opts *jwtOptions) rsaKey() (interface{}, error) { + var ( + priv *rsa.PrivateKey + pub *rsa.PublicKey + err error + ) + + if len(opts.PrivateKey) > 0 { + priv, err = jwt.ParseRSAPrivateKeyFromPEM(opts.PrivateKey) + if err != nil { + return nil, err + } + } + + if len(opts.PublicKey) > 0 { + pub, err = jwt.ParseRSAPublicKeyFromPEM(opts.PublicKey) + if err != nil { + return nil, err + } + } + + if priv == nil { + if pub == nil { + // Neither key given + return nil, ErrMissingKey + } + // Public key only, can verify tokens + return pub, nil + } + + // both keys provided, make sure they match + if pub != nil && pub.E != priv.E && pub.N.Cmp(priv.N) != 0 { + return nil, ErrKeyMismatch + } + + return 
priv, nil +} + +func (opts *jwtOptions) ecKey() (interface{}, error) { + var ( + priv *ecdsa.PrivateKey + pub *ecdsa.PublicKey + err error + ) + + if len(opts.PrivateKey) > 0 { + priv, err = jwt.ParseECPrivateKeyFromPEM(opts.PrivateKey) + if err != nil { + return nil, err + } + } + + if len(opts.PublicKey) > 0 { + pub, err = jwt.ParseECPublicKeyFromPEM(opts.PublicKey) + if err != nil { + return nil, err + } + } + + if priv == nil { + if pub == nil { + // Neither key given + return nil, ErrMissingKey + } + // Public key only, can verify tokens + return pub, nil + } + + // both keys provided, make sure they match + if pub != nil && pub.Curve != priv.Curve && + pub.X.Cmp(priv.X) != 0 && pub.Y.Cmp(priv.Y) != 0 { + return nil, ErrKeyMismatch + } + + return priv, nil +} diff --git a/etcd/auth/range_perm_cache.go b/etcd/auth/range_perm_cache.go new file mode 100644 index 00000000000..33d24d69162 --- /dev/null +++ b/etcd/auth/range_perm_cache.go @@ -0,0 +1,143 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/offical/api/v3/authpb" + "github.com/ls-2018/etcd_cn/pkg/adt" + + "go.uber.org/zap" +) + +func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions { + user := getUser(lg, tx, userName) + if user == nil { + return nil + } + + readPerms := adt.NewIntervalTree() + writePerms := adt.NewIntervalTree() + + for _, roleName := range user.Roles { + role := getRole(lg, tx, roleName) + if role == nil { + continue + } + + for _, perm := range role.KeyPermission { + var ivl adt.Interval + var rangeEnd []byte + + if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { + rangeEnd = []byte(perm.RangeEnd) + } + + if len(perm.RangeEnd) != 0 { + ivl = adt.NewBytesAffineInterval([]byte(perm.Key), rangeEnd) + } else { + ivl = adt.NewBytesAffinePoint([]byte(perm.Key)) + } + + switch perm.PermType { + case authpb.READWRITE: + readPerms.Insert(ivl, struct{}{}) + writePerms.Insert(ivl, struct{}{}) + + case authpb.READ: + readPerms.Insert(ivl, struct{}{}) + + case authpb.WRITE: + writePerms.Insert(ivl, struct{}{}) + } + } + } + + return &unifiedRangePermissions{ + readPerms: readPerms, + writePerms: writePerms, + } +} + +func checkKeyInterval( + lg *zap.Logger, + cachedPerms *unifiedRangePermissions, + key, rangeEnd []byte, + permtyp authpb.Permission_Type, +) bool { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + rangeEnd = nil + } + + ivl := adt.NewBytesAffineInterval(key, rangeEnd) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Contains(ivl) + case authpb.WRITE: + return cachedPerms.writePerms.Contains(ivl) + default: + lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) + } + return false +} + +func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { + pt := adt.NewBytesAffinePoint(key) + switch permtyp { + case authpb.READ: + return 
cachedPerms.readPerms.Intersects(pt) + case authpb.WRITE: + return cachedPerms.writePerms.Intersects(pt) + default: + lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) + } + return false +} + +func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + // assumption: tx is Lock()ed + _, ok := as.rangePermCache[userName] + if !ok { + perms := getMergedPerms(as.lg, tx, userName) + if perms == nil { + as.lg.Error( + "failed to create a merged permission", + zap.String("user-name", userName), + ) + return false + } + as.rangePermCache[userName] = perms + } + + if len(rangeEnd) == 0 { + return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp) + } + + return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp) +} + +func (as *authStore) clearCachedPerm() { + as.rangePermCache = make(map[string]*unifiedRangePermissions) +} + +// 清除缓存中的全新信息, 之后重新生成 +func (as *authStore) invalidateCachedPerm(userName string) { + delete(as.rangePermCache, userName) +} + +type unifiedRangePermissions struct { + readPerms adt.IntervalTree + writePerms adt.IntervalTree +} diff --git a/server/auth/simple_token.go b/etcd/auth/simple_token.go similarity index 91% rename from server/auth/simple_token.go rename to etcd/auth/simple_token.go index fb9485b4ff9..2788ddbc2a2 100644 --- a/server/auth/simple_token.go +++ b/etcd/auth/simple_token.go @@ -20,7 +20,6 @@ package auth import ( "context" "crypto/rand" - "errors" "fmt" "math/big" "strconv" @@ -75,7 +74,7 @@ func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) { } func (tm *simpleTokenTTLKeeper) run() { - tokenTicker := time.NewTicker(simpleTokenTTLResolution) + tokenTicker := time.NewTicker(simpleTokenTTLResolution) // 1s defer func() { tokenTicker.Stop() close(tm.donec) @@ -89,6 +88,7 @@ func (tm *simpleTokenTTLKeeper) run() { if nowtime.After(tokenendtime) { tm.deleteTokenFunc(t) 
delete(tm.tokens, t) + // 不过你要注意的是,Simple Token 字符串本身并未含任何有价值信息,因此 client 无法及时、准确获取到 Token 过期时间.所以 client 不容易提前去规避因 Token 失效导致的请求报错. } } tm.mu.Unlock() @@ -157,11 +157,6 @@ func (t *tokenSimple) invalidateUser(username string) { } func (t *tokenSimple) enable() { - t.simpleTokensMu.Lock() - defer t.simpleTokensMu.Unlock() - if t.simpleTokenKeeper != nil { // already enabled - return - } if t.simpleTokenTTL <= 0 { t.simpleTokenTTL = simpleTokenTTLDefault } @@ -184,7 +179,7 @@ func (t *tokenSimple) enable() { mu: &t.simpleTokensMu, simpleTokenTTL: t.simpleTokenTTL, } - go t.simpleTokenKeeper.run() + go t.simpleTokenKeeper.run() // 定时检查你的 Token 是否过期,若过期则从 map 数据结构中删除此 Token. } func (t *tokenSimple) disable() { @@ -213,11 +208,7 @@ func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) ( func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { // rev isn't used in simple token, it is only used in JWT - var index uint64 - var ok bool - if index, ok = ctx.Value(AuthenticateParamIndex{}).(uint64); !ok { - return "", errors.New("failed to assign") - } + index := ctx.Value(AuthenticateParamIndex{}).(uint64) simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string) token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index) t.assignSimpleTokenToUser(username, token) @@ -236,7 +227,7 @@ func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool } select { - case <-t.indexWaiter(index): + case <-t.indexWaiter(uint64(index)): return true case <-ctx.Done(): } diff --git a/etcd/auth/store.go b/etcd/auth/store.go new file mode 100644 index 00000000000..54c67fba2c3 --- /dev/null +++ b/etcd/auth/store.go @@ -0,0 +1,1229 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/authpb" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" + "golang.org/x/crypto/bcrypt" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +var ( + enableFlagKey = []byte("authEnabled") + authEnabled = []byte{1} + authDisabled = []byte{0} + + revisionKey = []byte("authRevision") // 鉴权版本号 + + ErrRootUserNotExist = errors.New("auth: root用户不存在") + ErrRootRoleNotExist = errors.New("auth: root用户没有root角色") + ErrUserAlreadyExist = errors.New("auth: 用户已存在") + ErrUserEmpty = errors.New("auth: 用户名是空的") + ErrUserNotFound = errors.New("auth: 没有找到该用户") + ErrRoleAlreadyExist = errors.New("auth: 角色已存在") + ErrRoleNotFound = errors.New("auth: 角色不存在") + ErrRoleEmpty = errors.New("auth: 角色名不能为空") + ErrPermissionNotGiven = errors.New("auth: permission not given") + ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") + ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user") + ErrPermissionDenied = errors.New("auth: permission denied") + ErrRoleNotGranted = errors.New("auth: role is not granted to the user") + ErrPermissionNotGranted = 
errors.New("auth: 角色没有权限") + ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") + ErrAuthOldRevision = errors.New("auth: 请求头里的修订版本是旧的") + ErrInvalidAuthToken = errors.New("auth: invalid auth token") + ErrInvalidAuthOpts = errors.New("auth: invalid auth options") + ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") + ErrInvalidAuthMethod = errors.New("auth: invalid auth signature method") + ErrMissingKey = errors.New("auth: missing key data") + ErrKeyMismatch = errors.New("auth: public and private keys don't match") + ErrVerifyOnly = errors.New("auth: token signing attempted with verify-only key") +) + +const ( + rootUser = "root" + rootRole = "root" + tokenTypeSimple = "simple" + tokenTypeJWT = "jwt" + revBytesLen = 8 +) + +type AuthInfo struct { + Username string + Revision uint64 +} + +// AuthenticateParamIndex is used for a key of context in the parameters of Authenticate() +type AuthenticateParamIndex struct{} + +// AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate() +type AuthenticateParamSimpleTokenPrefix struct{} + +type AuthStore interface { + AuthEnable() error + AuthDisable() + IsAuthEnabled() bool + Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) + // Recover recovers the state of auth store from the given backend + Recover(b backend.Backend) + UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + + RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + RoleGrantPermission(r 
*pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + + RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) + IsPutPermitted(authInfo *AuthInfo, key []byte) error + IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error // 检查用户的范围权限 + IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error // + IsAdminPermitted(authInfo *AuthInfo) error // + GenTokenPrefix() (string, error) // 在简单令牌的情况下生成一个随机字符串,在JWT的情况下,它生成一个空字符串 + Revision() uint64 // + CheckPassword(username, password string) (uint64, error) // 检查给定的一对用户名和密码是否正确 + Close() error // 清理AuthStore + AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) // 从grpc上下文获取认证信息 + AuthInfoFromTLS(ctx context.Context) *AuthInfo // 从grpc证书上下文获取认证信息 + WithRoot(ctx context.Context) context.Context // 生成并安装可作为根凭据使用的令牌 + UserHasRole(user, role string) bool // 检查用户是否有该角色 + BcryptCost() int // 获取加密认证密码的散列强度 +} + +type TokenProvider interface { + info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) + assign(ctx context.Context, username string, revision uint64) (string, error) + enable() + disable() + invalidateUser(string) + genTokenPrefix() (string, error) +} + +type authStore struct { + revision uint64 // 鉴权版本号 + lg *zap.Logger // + be backend.Backend // + enabled bool // 是否开启认证 + enabledMu sync.RWMutex // + rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions + tokenProvider TokenProvider // TODO + bcryptCost int // the algorithm cost / strength for hashing auth passwords +} + +func (as *authStore) AuthEnable() error { + as.enabledMu.Lock() + defer as.enabledMu.Unlock() + if 
as.enabled { + as.lg.Info("authentication is already enabled; ignored auth enable request") + return nil + } + b := as.be + tx := b.BatchTx() + tx.Lock() + defer func() { + tx.Unlock() + b.ForceCommit() + }() + + u := getUser(as.lg, tx, rootUser) + if u == nil { + return ErrRootUserNotExist + } + + if !hasRootRole(u) { + return ErrRootRoleNotExist + } + + tx.UnsafePut(buckets.Auth, enableFlagKey, authEnabled) + + as.enabled = true + as.tokenProvider.enable() + + as.rangePermCache = make(map[string]*unifiedRangePermissions) + + as.setRevision(getRevision(tx)) + + as.lg.Info("enabled authentication") + return nil +} + +func (as *authStore) AuthDisable() { + as.enabledMu.Lock() + defer as.enabledMu.Unlock() + if !as.enabled { + return + } + b := as.be + tx := b.BatchTx() + tx.Lock() + tx.UnsafePut(buckets.Auth, enableFlagKey, authDisabled) + as.commitRevision(tx) + tx.Unlock() + b.ForceCommit() + + as.enabled = false + as.tokenProvider.disable() + + as.lg.Info("disabled authentication") +} + +func (as *authStore) Close() error { + as.enabledMu.Lock() + defer as.enabledMu.Unlock() + if !as.enabled { + return nil + } + as.tokenProvider.disable() + return nil +} + +func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { + if !as.IsAuthEnabled() { + return nil, ErrAuthNotEnabled + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(as.lg, tx, username) + if user == nil { + return nil, ErrAuthFailed + } + + if user.Options != nil && user.Options.NoPassword { + return nil, ErrAuthFailed + } + + // 密码在API已经校验了,因此在这不用再校验 + token, err := as.tokenProvider.assign(ctx, username, as.Revision()) + if err != nil { + return nil, err + } + + as.lg.Debug("用户认证", zap.String("user-name", username), zap.String("token", token)) + return &pb.AuthenticateResponse{Token: token}, nil +} + +func (as *authStore) Recover(be backend.Backend) { + enabled := false + as.be = be + tx := be.BatchTx() + tx.Lock() + 
_, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + enabled = true + } + } + + as.setRevision(getRevision(tx)) + + tx.Unlock() + + as.enabledMu.Lock() + as.enabled = enabled + as.enabledMu.Unlock() +} + +func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { + return as.tokenProvider.info(ctx, token, as.Revision()) +} + +func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { + // 这个函数的开销很大,所以我们需要一个缓存机制 + if !as.IsAuthEnabled() { + return nil + } + + // only gets rev == 0 when passed AuthInfo{}; no user given + if revision == 0 { + return ErrUserEmpty + } + rev := as.Revision() + if revision < rev { + as.lg.Warn("请求认证的版本小于当前节点认证的版本", + zap.Uint64("current node auth revision", rev), + zap.Uint64("request auth revision", revision), + zap.ByteString("request key", key), + zap.Error(ErrAuthOldRevision)) + return ErrAuthOldRevision + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(as.lg, tx, userName) + if user == nil { + as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName)) + return ErrPermissionDenied + } + + // root role should have permission on all ranges + if hasRootRole(user) { + return nil + } + + if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) { + return nil + } + + return ErrPermissionDenied +} + +func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE) +} + +func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { + return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) // '' ,0 ,health,nil +} + +func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { + return 
as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE) +} + +func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { + if !as.IsAuthEnabled() { + return nil + } + if authInfo == nil || authInfo.Username == "" { + return ErrUserEmpty + } + + tx := as.be.BatchTx() + tx.Lock() + u := getUser(as.lg, tx, authInfo.Username) + tx.Unlock() + + if u == nil { + return ErrUserNotFound + } + + if !hasRootRole(u) { + return ErrPermissionDenied + } + + return nil +} + +// IsAuthEnabled 是否启用认证 +func (as *authStore) IsAuthEnabled() bool { + as.enabledMu.RLock() + defer as.enabledMu.RUnlock() + return as.enabled +} + +// NewAuthStore creates a new AuthStore. +func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore { + if lg == nil { + lg = zap.NewNop() + } + + if bcryptCost < bcrypt.MinCost || bcryptCost > bcrypt.MaxCost { + lg.Warn( + "使用默认的加密强度替换提供的加密强度", + zap.Int("min-cost", bcrypt.MinCost), + zap.Int("max-cost", bcrypt.MaxCost), + zap.Int("default-cost", bcrypt.DefaultCost), + zap.Int("given-cost", bcryptCost), + ) + bcryptCost = bcrypt.DefaultCost + } + + tx := be.BatchTx() + tx.Lock() + + tx.UnsafeCreateBucket(buckets.Auth) + tx.UnsafeCreateBucket(buckets.AuthUsers) + tx.UnsafeCreateBucket(buckets.AuthRoles) + + enabled := false + _, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + enabled = true + } + } + + as := &authStore{ + revision: getRevision(tx), + lg: lg, + be: be, + enabled: enabled, + rangePermCache: make(map[string]*unifiedRangePermissions), + tokenProvider: tp, + bcryptCost: bcryptCost, + } + + if enabled { + as.tokenProvider.enable() + } + + if as.Revision() == 0 { + as.commitRevision(tx) + } + + tx.Unlock() + be.ForceCommit() + + return as +} + +func hasRootRole(u *authpb.User) bool { + // u.Roles is sorted in UserGrantRole(), so we can use binary search. 
+	idx := sort.SearchStrings(u.Roles, rootRole)
+	return idx != len(u.Roles) && u.Roles[idx] == rootRole
+}
+
+// commitRevision bumps the in-memory auth revision and persists it to the auth bucket.
+func (as *authStore) commitRevision(tx backend.BatchTx) {
+	atomic.AddUint64(&as.revision, 1)
+	revBytes := make([]byte, revBytesLen)
+	binary.BigEndian.PutUint64(revBytes, as.Revision())
+	tx.UnsafePut(buckets.Auth, revisionKey, revBytes)
+}
+
+// getRevision reads the persisted auth revision; returns 0 when it has never been written.
+func getRevision(tx backend.BatchTx) uint64 {
+	_, vs := tx.UnsafeRange(buckets.Auth, revisionKey, nil, 0)
+	if len(vs) != 1 {
+		return 0
+	}
+	return binary.BigEndian.Uint64(vs[0])
+}
+
+// setRevision atomically stores the auth revision.
+func (as *authStore) setRevision(rev uint64) {
+	atomic.StoreUint64(&as.revision, rev)
+}
+
+// Revision returns the current auth revision.
+func (as *authStore) Revision() uint64 {
+	return atomic.LoadUint64(&as.revision)
+}
+
+// AuthInfoFromTLS derives AuthInfo from the CommonName of the client's
+// verified TLS certificate; returns nil when no usable certificate is present.
+func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) {
+	peer, ok := peer.FromContext(ctx)
+	if !ok || peer == nil || peer.AuthInfo == nil {
+		return nil
+	}
+
+	tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
+	for _, chains := range tlsInfo.State.VerifiedChains {
+		if len(chains) < 1 {
+			continue
+		}
+		ai = &AuthInfo{
+			Username: chains[0].Subject.CommonName,
+			Revision: as.Revision(),
+		}
+		md, ok := metadata.FromIncomingContext(ctx)
+		if !ok {
+			return nil
+		}
+
+		// gRPC-gateway proxy request to etcd server includes Grpcgateway-Accept
+		// header. The proxy uses etcd client server certificate. If the certificate
+		// has a CommonName we should never use this for authentication.
+	if gw := md["grpcgateway-accept"]; len(gw) > 0 {
+			as.lg.Warn(
+				"ignoring common name in gRPC-gateway proxy request",
+				zap.String("common-name", ai.Username),
+				zap.String("user-name", ai.Username),
+				zap.Uint64("revision", ai.Revision),
+			)
+			return nil
+		}
+		as.lg.Debug(
+			"found common name",
+			zap.String("common-name", ai.Username),
+			zap.String("user-name", ai.Username),
+			zap.Uint64("revision", ai.Revision),
+		)
+		break
+	}
+	return ai
+}
+
+// AuthInfoFromCtx extracts AuthInfo from the auth token attached to the
+// request metadata. Returns (nil, nil) when no token is attached, and
+// ErrInvalidAuthToken when a token is present but cannot be resolved.
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return nil, nil
+	}
+
+	// TODO(mitake|hexfusion) review unifying key names
+	ts, ok := md[rpctypes.TokenFieldNameGRPC]
+	if !ok {
+		ts, ok = md[rpctypes.TokenFieldNameSwagger]
+	}
+	// Guard len(ts) == 0 too: metadata may carry the key with zero values,
+	// and indexing ts[0] unconditionally would panic.
+	if !ok || len(ts) == 0 {
+		return nil, nil
+	}
+
+	token := ts[0]
+	authInfo, uok := as.authInfoFromToken(ctx, token)
+	if !uok {
+		as.lg.Warn("invalid auth token", zap.String("token", token))
+		return nil, ErrInvalidAuthToken
+	}
+
+	return authInfo, nil
+}
+
+// GenTokenPrefix delegates token-prefix generation to the token provider.
+func (as *authStore) GenTokenPrefix() (string, error) {
+	return as.tokenProvider.genTokenPrefix()
+}
+
+// decomposeOpts parses an "--auth-token" option string of the form
+// "<type>,k1=v1,k2=v2" into the token type and its type-specific options.
+// Malformed or duplicated options yield ErrInvalidAuthOpts.
+func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, error) {
+	opts := strings.Split(optstr, ",")
+	tokenType := opts[0]
+
+	typeSpecificOpts := make(map[string]string)
+	for i := 1; i < len(opts); i++ {
+		pair := strings.Split(opts[i], "=")
+
+		if len(pair) != 2 {
+			if lg != nil {
+				lg.Error("invalid token option", zap.String("option", optstr))
+			}
+			return "", nil, ErrInvalidAuthOpts
+		}
+
+		if _, ok := typeSpecificOpts[pair[0]]; ok {
+			if lg != nil {
+				lg.Error(
+					"invalid token option",
+					zap.String("option", optstr),
+					zap.String("duplicate-parameter", pair[0]),
+				)
+			}
+			return "", nil, ErrInvalidAuthOpts
+		}
+
+		typeSpecificOpts[pair[0]] = pair[1]
+	}
+
+	return tokenType, typeSpecificOpts, nil
+}
+
+func NewTokenProvider(lg *zap.Logger, tokenOpts string, indexWaiter func(uint64) <-chan struct{}, TokenTTL time.Duration)
(TokenProvider, error) { // token提供商 + tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts) // 认证格式 simple、jwt + if err != nil { + return nil, ErrInvalidAuthOpts + } + + switch tokenType { + case tokenTypeSimple: + if lg != nil { + lg.Warn("简单令牌没有经过加密签名") + } + return newTokenProviderSimple(lg, indexWaiter, TokenTTL), nil + + case tokenTypeJWT: + return newTokenProviderJWT(lg, typeSpecificOpts) + + case "": + return newTokenProviderNop() + + default: + if lg != nil { + lg.Warn( + "unknown token type", + zap.String("type", tokenType), + zap.Error(ErrInvalidAuthOpts), + ) + } + return nil, ErrInvalidAuthOpts + } +} + +func (as *authStore) WithRoot(ctx context.Context) context.Context { + if !as.IsAuthEnabled() { + return ctx + } + + var ctxForAssign context.Context + if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil { + ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0)) + prefix, err := ts.genTokenPrefix() + if err != nil { + as.lg.Error( + "failed to generate prefix of internally used token", + zap.Error(err), + ) + return ctx + } + ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix) + } else { + ctxForAssign = ctx + } + + token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision()) + if err != nil { + // this must not happen + as.lg.Error( + "failed to assign token for lease revoking", + zap.Error(err), + ) + return ctx + } + + mdMap := map[string]string{ + rpctypes.TokenFieldNameGRPC: token, + } + tokenMD := metadata.New(mdMap) + + // use "mdIncomingKey{}" since it's called from local etcdserver + return metadata.NewIncomingContext(ctx, tokenMD) +} + +func (as *authStore) BcryptCost() int { + return as.bcryptCost +} + +// ---------------------------------------------------------------------------------------------------v + +// RoleRevokePermission ok +func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { 
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	role := getRole(as.lg, tx, r.Role)
+	if role == nil {
+		return nil, ErrRoleNotFound
+	}
+
+	updatedRole := &authpb.Role{
+		Name: role.Name,
+	}
+
+	// Keep every permission that does not exactly match the (Key, RangeEnd)
+	// pair being revoked. NOTE: this must be an exact, case-sensitive match;
+	// the previous strings.EqualFold comparison would have revoked the
+	// permission for "Foo" when asked to revoke "foo".
+	for _, perm := range role.KeyPermission {
+		if perm.Key != r.Key || perm.RangeEnd != r.RangeEnd {
+			updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
+		}
+	}
+
+	// Nothing was removed: the permission was never granted.
+	if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
+		return nil, ErrPermissionNotGranted
+	}
+
+	putRole(as.lg, tx, updatedRole)
+
+	as.clearCachedPerm()
+	as.commitRevision(tx)
+
+	as.lg.Info("撤销对range的权限", zap.String("role-name", r.Role), zap.String("key", r.Key), zap.String("range-end", r.RangeEnd))
+	return &pb.AuthRoleRevokePermissionResponse{}, nil
+}
+
+// RoleGrantPermission grants a key-range permission to a role, or updates the
+// permission type when an identical (Key, RangeEnd) entry already exists.
+func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+	if r.Perm == nil {
+		return nil, ErrPermissionNotGiven
+	}
+
+	tx := as.be.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+
+	role := getRole(as.lg, tx, r.Name)
+	if role == nil {
+		return nil, ErrRoleNotFound
+	}
+	// KeyPermission is kept sorted by Key (see sort.Sort below), so binary-search
+	// for the first entry whose Key >= r.Perm.Key; idx == len(role.KeyPermission)
+	// when no such entry exists.
+	idx := sort.Search(len(role.KeyPermission), func(i int) bool {
+		return role.KeyPermission[i].Key >= r.Perm.Key
+	})
+
+	// Exact, case-sensitive match on both Key and RangeEnd. strings.EqualFold
+	// here would wrongly merge keys that differ only in letter case.
+	if idx < len(role.KeyPermission) && role.KeyPermission[idx].Key == r.Perm.Key && role.KeyPermission[idx].RangeEnd == r.Perm.RangeEnd {
+		// Update the existing permission in place.
+		role.KeyPermission[idx].PermType = r.Perm.PermType
+	} else {
+		newPerm := &authpb.Permission{
+			Key:      r.Perm.Key,
+			RangeEnd: r.Perm.RangeEnd,
+			PermType: r.Perm.PermType,
+		}
+
+		role.KeyPermission = append(role.KeyPermission, newPerm)
+		// Keep the slice sorted by Key (ascending) so the binary search above stays valid.
+		sort.Sort(permSlice(role.KeyPermission))
+	}
+
+	putRole(as.lg, tx, role)
+	// Currently a single role update invalidates the entire permission cache; this could be optimized.
+ as.clearCachedPerm() + + as.commitRevision(tx) + + as.lg.Info("授予/更新用户权限", zap.String("user-name", r.Name), zap.String("permission-name", authpb.PermissionTypeName[int32(r.Perm.PermType)])) + return &pb.AuthRoleGrantPermissionResponse{}, nil +} + +// RoleList ok +func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + roles := getAllRoles(as.lg, tx) + tx.Unlock() + + resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))} + for i := range roles { + resp.Roles[i] = string(roles[i].Name) + } + return resp, nil +} + +// RoleDelete OK +func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + if as.enabled && r.Role == rootRole { + as.lg.Error("不能删除 'root' 角色", zap.String("role-name", r.Role)) + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(as.lg, tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + + delRole(tx, r.Role) + + users := getAllUsers(as.lg, tx) // 获取所有用户 + for _, user := range users { + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + Options: user.Options, + } + for _, role := range user.Roles { + if role != r.Role { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + if len(updatedUser.Roles) == len(user.Roles) { + continue + } + putUser(as.lg, tx, updatedUser) + as.invalidateCachedPerm(user.Name) + } + + as.commitRevision(tx) + + as.lg.Info("删除了一个角色", zap.String("role-name", r.Role)) + return &pb.AuthRoleDeleteResponse{}, nil +} + +// RoleAdd OK +func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + if len(r.Name) == 0 { + return nil, ErrRoleEmpty + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + role := getRole(as.lg, tx, r.Name) + if role != nil { + return nil, ErrRoleAlreadyExist + } + + newRole := &authpb.Role{ + Name: r.Name, + } + + putRole(as.lg, 
tx, newRole) + + as.commitRevision(tx) + + as.lg.Info("创建了一个角色", zap.String("role-name", r.Name)) + return &pb.AuthRoleAddResponse{}, nil +} + +// RoleGet ok +func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + var resp pb.AuthRoleGetResponse + + role := getRole(as.lg, tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + resp.Perm = append(resp.Perm, role.KeyPermission...) + return &resp, nil +} + +func (as *authStore) UserHasRole(user, role string) bool { + tx := as.be.BatchTx() + tx.Lock() + u := getUser(as.lg, tx, user) + tx.Unlock() + + if u == nil { + as.lg.Warn("'has-role'请求不存在的用户", zap.String("user-name", user), zap.String("role-name", role)) + return false + } + + for _, r := range u.Roles { + if role == r { + return true + } + } + return false +} + +func getRole(lg *zap.Logger, tx backend.BatchTx, rolename string) *authpb.Role { + _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte(rolename), nil, 0) + if len(vs) == 0 { + return nil + } + + role := &authpb.Role{} + err := role.Unmarshal(vs[0]) + if err != nil { + lg.Panic("反序列化失败 'authpb.Role'", zap.Error(err)) + } + return role +} + +func getAllRoles(lg *zap.Logger, tx backend.BatchTx) []*authpb.Role { + _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + roles := make([]*authpb.Role, len(vs)) + for i := range vs { + role := &authpb.Role{} + err := role.Unmarshal(vs[i]) + if err != nil { + lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) + } + roles[i] = role + } + return roles +} + +// ok +func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) { + b, err := role.Marshal() + if err != nil { + lg.Panic("序列化失败'authpb.Role'", zap.String("role-name", role.Name), zap.Error(err)) + } + + tx.UnsafePut(buckets.AuthRoles, []byte(role.Name), b) +} + +// ok +func delRole(tx backend.BatchTx, rolename string) { + 
tx.UnsafeDelete(buckets.AuthRoles, []byte(rolename))
+}
+
+// permSlice sorts a []*authpb.Permission by Key in ascending order.
+type permSlice []*authpb.Permission
+
+func (perms permSlice) Len() int {
+	return len(perms)
+}
+
+func (perms permSlice) Less(i, j int) bool {
+	// Compare returns a negative value when perms[i].Key sorts before perms[j].Key.
+	return strings.Compare(perms[i].Key, perms[j].Key) < 0
+}
+
+func (perms permSlice) Swap(i, j int) {
+	perms[i], perms[j] = perms[j], perms[i]
+}
+
+// ---------------------------------------------------------------------------------------------------v
+
+// selectPassword returns the stored password hash: it bcrypt-hashes a plaintext
+// password when no pre-hashed value is supplied, and otherwise base64-decodes
+// the supplied hash.
+func (as *authStore) selectPassword(password string, hashedPassword string) ([]byte, error) {
+	if password != "" && hashedPassword == "" {
+		// Entry carries a plaintext password and no hash, so hash it here.
+		// NOTE(review): the original comment attributed this path to log entries
+		// from etcd versions "greater than 3.5"; upstream describes it as the
+		// pre-3.5 compatibility path -- confirm which is intended.
+		return bcrypt.GenerateFromPassword([]byte(password), as.bcryptCost)
+	}
+	return base64.StdEncoding.DecodeString(hashedPassword)
+}
+
+// CheckPassword verifies username/password and, on success, returns the auth
+// revision observed at lookup time.
+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+	if !as.IsAuthEnabled() {
+		return 0, ErrAuthNotEnabled
+	}
+
+	var user *authpb.User
+	// CompareHashAndPassword is very expensive, so we use closures
+	// to avoid putting it in the critical section of the tx lock.
+ revision, err := func() (uint64, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user = getUser(as.lg, tx, username) + if user == nil { + return 0, ErrAuthFailed + } + + if user.Options != nil && user.Options.NoPassword { + return 0, ErrNoPasswordUser + } + + return getRevision(tx), nil + }() + if err != nil { + return 0, err + } + + if bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)) != nil { + as.lg.Info("invalid password", zap.String("user-name", username)) + return 0, ErrAuthFailed + } + return revision, nil +} + +func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + if len(r.Name) == 0 { + return nil, ErrUserEmpty + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(as.lg, tx, r.Name) + if user != nil { + return nil, ErrUserAlreadyExist + } + + options := r.Options + if options == nil { + options = &authpb.UserAddOptions{ + NoPassword: false, + } + } + + var password []byte + var err error + + if !options.NoPassword { + password, err = as.selectPassword(r.Password, r.HashedPassword) + if err != nil { + return nil, ErrNoPasswordUser + } + } + + newUser := &authpb.User{ + Name: r.Name, + Password: string(password), + Options: options, + } + + putUser(as.lg, tx, newUser) + + as.commitRevision(tx) + + as.lg.Info("添加一个用户", zap.String("user-name", r.Name)) + return &pb.AuthUserAddResponse{}, nil +} + +func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + if as.enabled && r.Name == rootUser { + as.lg.Error("不能删除 'root' 用户", zap.String("user-name", r.Name)) + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(as.lg, tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + delUser(tx, r.Name) + + as.commitRevision(tx) + + as.invalidateCachedPerm(r.Name) + as.tokenProvider.invalidateUser(r.Name) + + as.lg.Info( + "删除了一个用户", + 
zap.String("user-name", r.Name), + zap.Strings("user-roles", user.Roles), + ) + return &pb.AuthUserDeleteResponse{}, nil +} + +func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(as.lg, tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + var password []byte + var err error + + if !user.Options.NoPassword { + password, err = as.selectPassword(r.Password, r.HashedPassword) + if err != nil { + return nil, ErrNoPasswordUser + } + } + + updatedUser := &authpb.User{ + Name: r.Name, + Roles: user.Roles, + Password: string(password), + Options: user.Options, + } + + putUser(as.lg, tx, updatedUser) + + as.commitRevision(tx) + + as.invalidateCachedPerm(r.Name) + as.tokenProvider.invalidateUser(r.Name) + + as.lg.Info( + "更该用户密码", + zap.String("user-name", r.Name), + zap.Strings("user-roles", user.Roles), + ) + return &pb.AuthUserChangePasswordResponse{}, nil +} + +func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(as.lg, tx, r.User) + if user == nil { + return nil, ErrUserNotFound + } + + if r.Role != rootRole { + role := getRole(as.lg, tx, r.Role) + if role == nil { + return nil, ErrRoleNotFound + } + } + + idx := sort.SearchStrings(user.Roles, r.Role) + if idx < len(user.Roles) && user.Roles[idx] == r.Role { + as.lg.Warn( + "ignored grant role request to a user", + zap.String("user-name", r.User), + zap.Strings("user-roles", user.Roles), + zap.String("duplicate-role-name", r.Role), + ) + return &pb.AuthUserGrantRoleResponse{}, nil + } + + user.Roles = append(user.Roles, r.Role) + sort.Strings(user.Roles) + + putUser(as.lg, tx, user) + + as.invalidateCachedPerm(r.User) + + as.commitRevision(tx) + + as.lg.Info( + "granted a role to a user", + zap.String("user-name", r.User), + 
zap.Strings("user-roles", user.Roles), + zap.String("added-role-name", r.Role), + ) + return &pb.AuthUserGrantRoleResponse{}, nil +} + +func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + user := getUser(as.lg, tx, r.Name) + tx.Unlock() + + if user == nil { + return nil, ErrUserNotFound + } + + var resp pb.AuthUserGetResponse + resp.Roles = append(resp.Roles, user.Roles...) + return &resp, nil +} + +func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + tx := as.be.BatchTx() + tx.Lock() + users := getAllUsers(as.lg, tx) + tx.Unlock() + + resp := &pb.AuthUserListResponse{Users: make([]string, len(users))} + for i := range users { + resp.Users[i] = users[i].Name + } + return resp, nil +} + +func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + if as.enabled && r.Name == rootUser && r.Role == rootRole { + as.lg.Error( + "'root'用户 不能移除 'root' 角色", + zap.String("user-name", r.Name), + zap.String("role-name", r.Role), + ) + return nil, ErrInvalidAuthMgmt + } + + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + user := getUser(as.lg, tx, r.Name) + if user == nil { + return nil, ErrUserNotFound + } + + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + Options: user.Options, + } + + for _, role := range user.Roles { + if role != r.Role { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + return nil, ErrRoleNotGranted + } + + putUser(as.lg, tx, updatedUser) + + as.invalidateCachedPerm(r.Name) + + as.commitRevision(tx) + + as.lg.Info( + "移除用户角色", + zap.String("user-name", r.Name), + zap.Strings("old-user-roles", user.Roles), + zap.Strings("new-user-roles", updatedUser.Roles), + zap.String("revoked-role-name", r.Role), + ) + return &pb.AuthUserRevokeRoleResponse{}, nil +} + +func getUser(lg *zap.Logger, tx 
backend.BatchTx, username string) *authpb.User { + _, vs := tx.UnsafeRange(buckets.AuthUsers, []byte(username), nil, 0) + if len(vs) == 0 { + return nil + } + + user := &authpb.User{} + err := user.Unmarshal(vs[0]) + if err != nil { + lg.Panic( + "failed to unmarshal 'authpb.User'", + zap.String("user-name", username), + zap.Error(err), + ) + } + return user +} + +// 获取所有用户 +func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User { + _, vs := tx.UnsafeRange(buckets.AuthUsers, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + users := make([]*authpb.User, len(vs)) + for i := range vs { + user := &authpb.User{} + err := user.Unmarshal(vs[i]) + if err != nil { + lg.Panic("不能反序列化 'authpb.User'", zap.Error(err)) + } + users[i] = user + } + return users +} + +// OK +func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) { + b, err := user.Marshal() + if err != nil { + lg.Panic("序列化失败 'authpb.User'", zap.Error(err)) + } + tx.UnsafePut(buckets.AuthUsers, []byte(user.Name), b) +} + +func delUser(tx backend.BatchTx, username string) { + tx.UnsafeDelete(buckets.AuthUsers, []byte(username)) +} diff --git a/etcd/config/over_config.go b/etcd/config/over_config.go new file mode 100644 index 00000000000..cdc26c1e9bb --- /dev/null +++ b/etcd/config/over_config.go @@ -0,0 +1,287 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/datadir" + "github.com/ls-2018/etcd_cn/pkg/netutil" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +// ServerConfig 持有从命令行或发现中获取的etcd的配置. +type ServerConfig struct { + Name string + DiscoveryURL string // 节点发现 + DiscoveryProxy string // discovery代理 + ClientURLs types.URLs + PeerURLs types.URLs + DataDir string + DedicatedWALDir string // 配置将使etcd把WAL写到WALDir 而不是dataDir/member/wal. + SnapshotCount uint64 // 触发一次磁盘快照的提交事务的次数 + SnapshotCatchUpEntries uint64 // 是slow follower在raft存储条目落后追赶的条目数量.我们希望follower与leader有一毫秒级的延迟.最大的吞吐量是10K左右.保持5K的条目就足以帮助follower赶上. + MaxSnapFiles uint + MaxWALFiles uint + BackendBatchInterval time.Duration // 提交后端事务前的最长时间 + BackendBatchLimit int // 提交后端事务前的最大操作量 + BackendFreelistType bolt.FreelistType // boltdb存储的类型 + InitialPeerURLsMap types.URLsMap // 节点 --- 【 通信地址】可能绑定了多块网卡 + InitialClusterToken string + NewCluster bool + PeerTLSInfo transport.TLSInfo + CORS map[string]struct{} + HostWhitelist map[string]struct{} // 列出了客户端请求中可接受的主机名.如果etcd是不安全的(没有TLS),etcd只接受其Host头值存在于此白名单的请求. 
+ + TickMs uint // tick计时器触发间隔 + ElectionTicks int // 返回选举权检查对应多少次tick触发次数 + + // InitialElectionTickAdvance 是否提前初始化选举时钟启动,以便更快的选举 + InitialElectionTickAdvance bool + + BootstrapTimeout time.Duration // 引导超时 + + AutoCompactionRetention time.Duration + AutoCompactionMode string + + CompactionBatchLimit int + QuotaBackendBytes int64 // bolt.db 存储上限 【字节】 + MaxTxnOps uint // 事务中允许的最大操作数 + + // MaxRequestBytes raft发送的最大数据量 + MaxRequestBytes uint + + WarningApplyDuration time.Duration + + StrictReconfigCheck bool // 严格配置变更检查 + ClientCertAuthEnabled bool // 验证客户端证书是不是服务器CA签署的 + AuthToken string // 认证格式 simple、jwt + BcryptCost uint // 为散列身份验证密码指定bcrypt算法的成本/强度默认10 + TokenTTL uint + + InitialCorruptCheck bool // 数据毁坏检测功能,运行之后,在开始服务之前 + CorruptCheckTime time.Duration + + PreVote bool // PreVote 是否启用PreVote + + // SocketOpts are socket options passed to listener config. + SocketOpts transport.SocketOpts + + // Logger logs etcd-side operations. + Logger *zap.Logger + + ForceNewCluster bool + + EnableLeaseCheckpoint bool // 允许leader定期向其他成员发送检查点,以防止leader变化时剩余TTL重置. + // LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints. + LeaseCheckpointInterval time.Duration + // LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. + LeaseCheckpointPersist bool + + EnableGRPCGateway bool // 启用grpc网关,将 http 转换成 grpc / true + + // ExperimentalEnableDistributedTracing 使用OpenTelemetry协议实现分布式跟踪. + ExperimentalEnableDistributedTracing bool // 默认false + // ExperimentalTracerOptions are options for OpenTelemetry gRPC interceptor. + ExperimentalTracerOptions []otelgrpc.Option + + WatchProgressNotifyInterval time.Duration + + // UnsafeNoFsync 禁用所有fsync的使用.设置这个是不安全的,会导致数据丢失. 
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"` + + DowngradeCheckTime time.Duration + + // ExperimentalMemoryMlock enables mlocking of etcd owned memory pages. + // The setting improves etcd tail latency in environments were: + // - memory pressure might lead to swapping pages to disk + // - disk latency might be unstable + // Currently all etcd memory gets mlocked, but in future the flag can + // be refined to mlock in-use area of bbolt only. + ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"` + + // ExperimentalTxnModeWriteWithSharedBuffer enable write transaction to use + // a shared buffer in its readonly check operations. + ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"` + + // ExperimentalBootstrapDefragThresholdMegabytes 是指在启动过程中 etcd考虑运行碎片整理所需释放的最小兆字节数.需要设置为非零值才能生效. + ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` + + // V2Deprecation defines a phase of v2store deprecation process. + V2Deprecation V2DeprecationEnum `json:"v2-deprecation"` +} + +// VerifyBootstrap 检查初始配置的引导情况,并对不应该发生的事情返回一个错误. +func (c *ServerConfig) VerifyBootstrap() error { + if err := c.hasLocalMember(); err != nil { // initial-cluster 集群至少包含本机节点 + return err + } + // 主要就是验证 这两个参数 --initial-advertise-peer-urls" and "--initial-cluster + if err := c.advertiseMatchesCluster(); err != nil { + return err + } + // 检查所有ip:port 有没有重复的,有就返回 true + if CheckDuplicateURL(c.InitialPeerURLsMap) { + return fmt.Errorf("初始集群有重复的网址%s", c.InitialPeerURLsMap) + } + if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" { + return fmt.Errorf("初始集群未设置,没有发现discovery的URL") + } + return nil +} + +// VerifyJoinExisting 检查加入现有集群的初始配置,并对不应该发生的事情返回一个错误. 
+func (c *ServerConfig) VerifyJoinExisting() error { + if err := c.hasLocalMember(); err != nil { + return err + } + if CheckDuplicateURL(c.InitialPeerURLsMap) { + return fmt.Errorf("初始集群 %s 有重复的地址", c.InitialPeerURLsMap) + } + if c.DiscoveryURL != "" { + return fmt.Errorf("discovery URL 不应该设置,当加入一个存在的初始集群") + } + return nil +} + +// hasLocalMember 集群至少包含本机节点 +func (c *ServerConfig) hasLocalMember() error { + if urls := c.InitialPeerURLsMap[c.Name]; urls == nil { + return fmt.Errorf("不能再集群配置中发现本机 %q", c.Name) + } + return nil +} + +// advertiseMatchesCluster 确认peer URL与集群cluster peer中的URL一致. +func (c *ServerConfig) advertiseMatchesCluster() error { + urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice() + urls.Sort() + sort.Strings(apurls) + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice()) + if ok { + return nil + } + + initMap, apMap := make(map[string]struct{}), make(map[string]struct{}) + for _, url := range c.PeerURLs { + apMap[url.String()] = struct{}{} + } + for _, url := range c.InitialPeerURLsMap[c.Name] { + initMap[url.String()] = struct{}{} + } + + var missing []string + for url := range initMap { + if _, ok := apMap[url]; !ok { + missing = append(missing, url) + } + } + if len(missing) > 0 { + for i := range missing { + missing[i] = c.Name + "=" + missing[i] + } + mstr := strings.Join(missing, ",") + apStr := strings.Join(apurls, ",") + return fmt.Errorf("--initial-cluster 有 %s但丢失了--initial-advertise-peer-urls=%s (%v)", mstr, apStr, err) + } + + for url := range apMap { + if _, ok := initMap[url]; !ok { + missing = append(missing, url) + } + } + if len(missing) > 0 { + mstr := strings.Join(missing, ",") + umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs}) + return fmt.Errorf("--initial-advertise-peer-urls 有 %s但丢失了--initial-cluster=%s", mstr, umap.String()) + } + + // resolved URLs from 
"--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed + apStr := strings.Join(apurls, ",") + umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs}) + return fmt.Errorf("无法解决 %s 匹配--initial-cluster=%s 的问题(%v)", apStr, umap.String(), err) +} + +// MemberDir default.etcd/member +func (c *ServerConfig) MemberDir() string { + return datadir.ToMemberDir(c.DataDir) +} + +// WALDir default.etcd/member/wal +func (c *ServerConfig) WALDir() string { + if c.DedicatedWALDir != "" { // "" + return c.DedicatedWALDir + } + return datadir.ToWalDir(c.DataDir) +} + +// SnapDir default.etcd/member/snap +func (c *ServerConfig) SnapDir() string { + return datadir.ToSnapDir(c.DataDir) +} + +func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } + +// ReqTimeout 返回请求完成的超时时间 +func (c *ServerConfig) ReqTimeout() time.Duration { + // 5用于队列等待,计算和磁盘IO延迟+ 2倍选举超时 + return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond +} + +// ElectionTimeout 选举超时 +func (c *ServerConfig) ElectionTimeout() time.Duration { + return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond +} + +func (c *ServerConfig) PeerDialTimeout() time.Duration { + return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond +} + +// CheckDuplicateURL 检查所有ip:port 有没有重复的,有就返回 true +func CheckDuplicateURL(urlsmap types.URLsMap) bool { + um := make(map[string]bool) + for _, urls := range urlsmap { + for _, url := range urls { + u := url.String() + if um[u] { + return true + } + um[u] = true + } + } + return false +} + +// BootstrapTimeoutEffective 有效的Bootstrap超时 +func (c *ServerConfig) BootstrapTimeoutEffective() time.Duration { + if c.BootstrapTimeout != 0 { + return c.BootstrapTimeout + } + return time.Second +} + +// BackendPath default.etcd/member/snap/db +func (c *ServerConfig) BackendPath() string { return datadir.ToBackendFileName(c.DataDir) } diff --git a/server/config/v2_deprecation.go 
b/etcd/config/v2_deprecation.go similarity index 93% rename from server/config/v2_deprecation.go rename to etcd/config/v2_deprecation.go index 862c3bb9343..828bd9a8f43 100644 --- a/server/config/v2_deprecation.go +++ b/etcd/config/v2_deprecation.go @@ -17,7 +17,7 @@ package config type V2DeprecationEnum string const ( - // No longer supported in v3.6 + // Default in v3.5. Issues a warning if v2store have meaningful content. V2_DEPR_0_NOT_YET = V2DeprecationEnum("not-yet") // Default in v3.6. Meaningful v2 state is not allowed. // The V2 files are maintained for v3.5 rollback. @@ -28,7 +28,7 @@ const ( // ability to rollback to etcd v3.5. V2_DEPR_2_GONE = V2DeprecationEnum("gone") - V2_DEPR_DEFAULT = V2_DEPR_1_WRITE_ONLY + V2_DEPR_DEFAULT = V2_DEPR_0_NOT_YET ) func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool { diff --git a/etcd/datadir/over_datadir.go b/etcd/datadir/over_datadir.go new file mode 100644 index 00000000000..4e5e3cb4082 --- /dev/null +++ b/etcd/datadir/over_datadir.go @@ -0,0 +1,41 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package datadir
+
+import "path/filepath"
+
+const (
+	memberDirSegment   = "member"
+	snapDirSegment     = "snap"
+	walDirSegment      = "wal"
+	backendFileSegment = "bolt.db"
+)
+
+// ToBackendFileName returns the backend database path: <dataDir>/member/snap/bolt.db.
+func ToBackendFileName(dataDir string) string {
+	return filepath.Join(ToSnapDir(dataDir), backendFileSegment) // default.etcd/member/snap/db
+}
+
+// ToSnapDir returns the snapshot directory: <dataDir>/member/snap.
+func ToSnapDir(dataDir string) string {
+	return filepath.Join(ToMemberDir(dataDir), snapDirSegment) // default.etcd/member/snap
+}
+
+// ToWalDir returns the write-ahead-log directory: <dataDir>/member/wal.
+func ToWalDir(dataDir string) string {
+	return filepath.Join(ToMemberDir(dataDir), walDirSegment) // default.etcd/member/wal
+}
+
+// ToMemberDir returns the member directory: <dataDir>/member.
+func ToMemberDir(dataDir string) string {
+	return filepath.Join(dataDir, memberDirSegment) // default.etcd/member
+}
diff --git a/etcd/embed/config.go b/etcd/embed/config.go
new file mode 100644
index 00000000000..559ea95a711
--- /dev/null
+++ b/etcd/embed/config.go
@@ -0,0 +1,935 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package embed + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/srv" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/tlsutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/config" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3compactor" + "github.com/ls-2018/etcd_cn/pkg/flags" + "github.com/ls-2018/etcd_cn/pkg/netutil" + + bolt "go.etcd.io/bbolt" + "go.uber.org/multierr" + "go.uber.org/zap" + "golang.org/x/crypto/bcrypt" + "google.golang.org/grpc" + "sigs.k8s.io/yaml" +) + +const ( + // 设置new为初始静态或DNS引导期间出现的所有成员.如果将此选项设置为existing.则etcd将尝试加入现有群集. + ClusterStateFlagNew = "new" + ClusterStateFlagExisting = "existing" + + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxTxnOps = uint(128) + DefaultWarningApplyDuration = 100 * time.Millisecond + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 + DefaultGRPCKeepAliveMinTime = 5 * time.Second + DefaultGRPCKeepAliveInterval = 2 * time.Hour + DefaultGRPCKeepAliveTimeout = 20 * time.Second + DefaultDowngradeCheckTime = 5 * time.Second + + DefaultListenPeerURLs = "http://localhost:2380" + DefaultListenClientURLs = "http://localhost:2379" + + DefaultLogOutput = "default" + JournalLogOutput = "systemd/journal" + StdErrLogOutput = "stderr" + StdOutLogOutput = "stdout" + + // DefaultLogRotationConfig 是用于日志轮换的默认配置. 默认情况下,日志轮换是禁用的. 
+ // MaxSize = 100 // MB + // MaxAge = 0 // days (no limit) + // MaxBackups = 0 // no limit + // LocalTime = false // use computers local time, UTC by default + // Compress = false // compress the rotated log in gzip format + DefaultLogRotationConfig = `{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}` + + // ExperimentalDistributedTracingAddress is the default collector address. + ExperimentalDistributedTracingAddress = "localhost:4317" + // ExperimentalDistributedTracingServiceName is the default etcd service name. + ExperimentalDistributedTracingServiceName = "etcd" + + // DefaultStrictReconfigCheck 拒绝可能导致仲裁丢失的重新配置请求 + DefaultStrictReconfigCheck = true + + // maxElectionMs specifies the maximum value of election timeout. + // More details are listed in ../Documentation/tuning.md#time-parameters. + maxElectionMs = 50000 + // backend freelist map type + freelistArrayType = "array" +) + +var ( + ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " + + "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"") + ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") + ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined") + + DefaultInitialAdvertisePeerURLs = "http://localhost:2380" + DefaultAdvertiseClientURLs = "http://localhost:2379" + + // netutil.GetDefaultHost() + defaultHostname string + defaultHostStatus error + + // indirection for testing + getCluster = srv.GetCluster +) + +var ( + // CompactorModePeriodic + // 周期性压缩 eg. 1h + CompactorModePeriodic = v3compactor.ModePeriodic + + // CompactorModeRevision "AutoCompactionRetention" is "1000", + // 当前版本为6000时,它将日志压缩到5000版本. + // 如果有足够多的日志,这将每5分钟运行一次. 
+ CompactorModeRevision = v3compactor.ModeRevision +) + +func init() { + defaultHostname, defaultHostStatus = netutil.GetDefaultHost() + fmt.Println("defaultHostname", defaultHostname) + fmt.Println("defaultHostStatus", defaultHostStatus) + // defaultHostname 172.17.0.2 + // defaultHostStatus +} + +// Config 保存配置etcd的参数etcd. +type Config struct { + Name string `json:"name"` // 节点的名字 + Dir string `json:"data-dir"` // 数据目录 + // 独立设置wal目录.etcd会将WAL文件写入 --wal-dir而不是--data-dir. 独立的wal路径.有助于避免日志记录和其他IO操作之间的竞争. + WalDir string `json:"wal-dir"` // 专用wal目录的路径. + + SnapshotCount uint64 `json:"snapshot-count"` // 触发一次磁盘快照的提交事务的次数 + + // SnapshotCatchUpEntries 是在压缩raft存储条目后,慢的follower要追赶的条目数.我们预计follower与leader之间有毫秒级的延迟. + // 最大吞吐量大约为10K.保持一个5K的条目就足够帮助follower赶上了. + SnapshotCatchUpEntries uint64 + + MaxSnapFiles uint `json:"max-snapshots"` // 最大快照数 + MaxWalFiles uint `json:"max-wals"` // 要保留的最大wal文件数(0表示不受限制). 5 + + // TickMs是心脏跳动间隔的毫秒数. + // TODO:将tickMs和心跳tick解耦(目前的心跳tick=1) + // 使tick成为集群范围内的配置. + TickMs uint `json:"heartbeat-interval"` // 定时器触发间隔 100ms + ElectionMs uint `json:"election-timeout"` // 选举权检查周期 1s + + // InitialElectionTickAdvance is true, then local member fast-forwards + // election ticks to speed up "initial" leader election trigger. This + // benefits the case of larger election ticks. For instance, cross + // datacenter deployment may require longer election timeout of 10-second. + // If true, local node does not need wait up to 10-second. Instead, + // forwards its election ticks to 8-second, and have only 2-second left + // before leader election. + // + // Major assumptions are that: + // - cluster has no active leader thus advancing ticks enables faster + // leader election, or + // - cluster already has an established leader, and rejoining follower + // is likely to receive heartbeats from the leader after tick advance + // and before election timeout. 
+ // + // However, when network from leader to rejoining follower is congested, + // and the follower does not receive leader heartbeat within left election + // ticks, disruptive election has to happen thus affecting cluster + // availabilities. + // + // Disabling this would slow down initial bootstrap process for cross + // datacenter deployments. Make your own tradeoffs by configuring + // --initial-election-tick-advance at the cost of slow initial bootstrap. + // + // If single-node, it advances ticks regardless. + // + // See https://github.com/etcd-io/etcd/issues/9333 for more detail. + // todo 是否在开机时快进初始选举点.以加快选举速度. + InitialElectionTickAdvance bool `json:"initial-election-tick-advance"` // 是否提前初始化选举时钟启动,以便更快的选举 + + BoltBackendBatchInterval time.Duration `json:"backend-batch-interval"` // BackendBatchInterval是提交后端事务前的最长时间 + BoltBackendBatchLimit int `json:"backend-batch-limit"` // BackendBatchLimit是提交后端事务前的最大操作数 + BackendFreelistType string `json:"backend-bbolt-freelist-type"` // BackendFreelistType指定boltdb后端使用的freelist的类型(array and map是支持的类型). + QuotaBackendBytes int64 `json:"quota-backend-bytes"` // 当后端大小超过给定配额时(0默认为低空间配额).引发警报. + MaxTxnOps uint `json:"max-txn-ops"` // 事务中允许的最大操作数. + MaxRequestBytes uint `json:"max-request-bytes"` // 服务器将接受的最大客户端请求大小(字节). + + LPUrls []url.URL // 和etcd server 成员之间通信的地址.用于监听其他etcd member的url + LCUrls []url.URL // 这个参数是etcd服务器自己监听时用的,也就是说,监听本机上的哪个网卡,哪个端口 + + APUrls []url.URL // 就是客户端(etcd server 等)跟etcd服务进行交互时请求的url + ACUrls []url.URL // 就是客户端(etcdctl/curl等)跟etcd服务进行交互时请求的url + + ClientTLSInfo transport.TLSInfo // 与 etcdctl 交互的客户端证书信息 + ClientAutoTLS bool + + PeerTLSInfo transport.TLSInfo + PeerAutoTLS bool // 节点之间使用生成的证书通信;默认false + // SelfSignedCertValidity 客户端证书和同级证书的有效期,单位为年 ;etcd自动生成的 如果指定了ClientAutoTLS and PeerAutoTLS, + SelfSignedCertValidity uint `json:"self-signed-cert-validity"` + + // CipherSuites is a list of supported TLS cipher suites between + // client/etcd and peers. If empty, Go auto-populates the list. 
+ // Note that cipher suites are prioritized in the given order. + CipherSuites []string `json:"cipher-suites"` + + ClusterState string `json:"initial-cluster-state"` + DNSCluster string `json:"discovery-srv"` // DNS srv域用于引导群集. + DNSClusterServiceName string `json:"discovery-srv-name"` // 使用DNS引导时查询的DNS srv名称的后缀. + Dproxy string `json:"discovery-proxy"` // 用于流量到发现服务的HTTP代理 + Durl string `json:"discovery"` // 用于引导群集的发现URL. + InitialCluster string `json:"initial-cluster"` // 集群中所有节点的信息. default=http://localhost:2380 + InitialClusterToken string `json:"initial-cluster-token"` // 此配置可使重新创建集群.即使配置和之前一样.也会再次生成新的集群和节点 uuid;否则会导致多个集群之间的冲突.造成未知的错误. + StrictReconfigCheck bool `json:"strict-reconfig-check"` // 严格配置变更检查 + + EnableV2 bool `json:"enable-v2"` + // AutoCompactionMode 基于时间保留模式 时间、修订版本 + AutoCompactionMode string `json:"auto-compaction-mode"` + + //--auto-compaction-mode=revision --auto-compaction-retention=1000 每5分钟自动压缩"latest revision" - 1000; + //--auto-compaction-mode=periodic --auto-compaction-retention=12h 每1小时自动压缩并保留12小时窗口. + + AutoCompactionRetention string `json:"auto-compaction-retention"` + + // GRPCKeepAliveMinTime 客户端在ping服务器之前应等待的最短持续时间间隔. + GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"` + + // GRPCKeepAliveInterval 服务器到客户端ping的频率持续时间.以检查连接是否处于活动状态(0表示禁用). + GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"` + // GRPCKeepAliveTimeout 关闭非响应连接之前的额外持续等待时间(0表示禁用).20s + GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` + + // SocketOpts are socket options passed to listener config. + SocketOpts transport.SocketOpts + + // PreVote 为真.以启用Raft预投票.如果启用.Raft会运行一个额外的选举阶段.以检查它是否会获得足够的票数来赢得选举.从而最大限度地减少干扰. + PreVote bool `json:"pre-vote"` // 默认false + + CORS map[string]struct{} + + // 列出可接受的来自HTTP客户端请求的主机名.客户端来源策略可以防止对不安全的etcd服务器的 "DNS重定向 "攻击. + // 也就是说.任何网站可以简单地创建一个授权的DNS名称.并将DNS指向 "localhost"(或任何其他地址). + // 然后.所有监听 "localhost "的etcd的HTTP端点都变得可以访问.从而容易受到DNS重定向攻击. 
+ HostWhitelist map[string]struct{} + + // UserHandlers 是用来注册用户处理程序的,只用于将etcd嵌入到其他应用程序中. + // map key 是处理程序的路径,你必须确保它不能与etcd的路径冲突. + UserHandlers map[string]http.Handler `json:"-"` + // ServiceRegister is for registering users' gRPC services. A simple usage example: + // cfg := embed.NewConfig() + // cfg.ServerRegister = func(s *grpc.Server) { + // pb.RegisterFooServer(s, &fooServer{}) + // pb.RegisterBarServer(s, &barServer{}) + // } + // embed.StartEtcd(cfg) + ServiceRegister func(*grpc.Server) `json:"-"` + + AuthToken string `json:"auth-token"` // 认证格式 simple、jwt + BcryptCost uint `json:"bcrypt-cost"` // 为散列身份验证密码指定bcrypt算法的成本/强度.有效值介于4和31之间.默认值:10 + + AuthTokenTTL uint `json:"auth-token-ttl"` // token 有效期 + + ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"` // 数据毁坏检测功能 + ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"` // 数据毁坏检测功能 + // ExperimentalEnableV2V3 configures URLs that expose deprecated V2 API working on V3 store. + // Deprecated in v3.5. + // TODO: Delete in v3.6 (https://github.com/etcd-io/etcd/issues/12913) + ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"` + ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` // 允许leader定期向其他成员发送检查点,以防止leader变化时剩余TTL重置. + // ExperimentalEnableLeaseCheckpointPersist + // 启用持续的剩余TTL,以防止长期租约的无限期自动续约.在v3.6中始终启用.应该用于确保从启用该功能的v3.5集群顺利升级. + // 需要启用 experimental-enable-lease-checkpoint + // Deprecated in v3.6. + // TODO: Delete in v3.7 + ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"` + ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"` + ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"` + // ExperimentalWarningApplyDuration 是时间长度.如果应用请求的时间超过这个值.就会产生一个警告. 
+ ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"` + // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd etcd to + // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect. + ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` + + // ForceNewCluster starts a new cluster even if previously started; unsafe. + ForceNewCluster bool `json:"force-new-cluster"` + + EnablePprof bool `json:"enable-pprof"` + Metrics string `json:"metrics"` // basic ;extensive + ListenMetricsUrls []url.URL + ListenMetricsUrlsJSON string `json:"listen-metrics-urls"` + + // ExperimentalEnableDistributedTracing 表示是否启用了使用OpenTelemetry的实验性追踪. + ExperimentalEnableDistributedTracing bool `json:"experimental-enable-distributed-tracing"` + // ExperimentalDistributedTracingAddress is the address of the OpenTelemetry Collector. + // Can only be set if ExperimentalEnableDistributedTracing is true. + ExperimentalDistributedTracingAddress string `json:"experimental-distributed-tracing-address"` + // ExperimentalDistributedTracingServiceName is the name of the service. + // Can only be used if ExperimentalEnableDistributedTracing is true. + ExperimentalDistributedTracingServiceName string `json:"experimental-distributed-tracing-service-name"` + // ExperimentalDistributedTracingServiceInstanceID is the ID key of the service. + // This ID必须是unique, as helps to distinguish instances of the same service + // that exist at the same time. + // Can only be used if ExperimentalEnableDistributedTracing is true. + ExperimentalDistributedTracingServiceInstanceID string `json:"experimental-distributed-tracing-instance-id"` + + // Logger 使用哪种logger + Logger string `json:"logger"` + // LogLevel 日志等级 debug, info, warn, error, panic, or fatal. Default 'info'. 
+ LogLevel string `json:"log-level"` + // LogOutputs is either: + // - "default" as os.Stderr, + // - "stderr" as os.Stderr, + // - "stdout" as os.Stdout, + // - file path to append etcd logs to. + // 当 logger是zap时,它可以是多个. + LogOutputs []string `json:"log-outputs"` + // EnableLogRotation 启用单个日志输出文件目标的日志旋转. + EnableLogRotation bool `json:"enable-log-rotation"` + // LogRotationConfigJSON is a passthrough allowing a log rotation JSON config to be passed directly. + LogRotationConfigJSON string `json:"log-rotation-config-json"` + // ZapLoggerBuilder 用于给自己构造一个zap logger + ZapLoggerBuilder func(*Config) error + + // logger logs etcd-side operations. The default is nil, + // and "setupLogging"必须是called before starting etcd. + // Do not set logger directly. + loggerMu *sync.RWMutex + logger *zap.Logger + // EnableGRPCGateway 启用grpc网关,将 http 转换成 grpc / true + EnableGRPCGateway bool `json:"enable-grpc-gateway"` + + // UnsafeNoFsync 禁用所有fsync的使用.设置这个是不安全的,会导致数据丢失. + UnsafeNoFsync bool `json:"unsafe-no-fsync"` // 默认false + // 两次降级状态检查之间的时间间隔. + ExperimentalDowngradeCheckTime time.Duration `json:"experimental-downgrade-check-time"` + + // ExperimentalMemoryMlock 启用对etcd拥有的内存页的锁定. 该设置改善了以下环境中的etcd尾部延迟. + // - 内存压力可能会导致将页面交换到磁盘上 + // - 磁盘延迟可能是不稳定的 + // 目前,所有的etcd内存都被锁住了,但在将来,这个标志可以改进为只锁住bbolt的使用区域. + ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"` + + // ExperimentalTxnModeWriteWithSharedBuffer 使得写事务在其只读检查操作中使用一个共享缓冲区. 
+ ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"` + + // V2Deprecation describes phase of API & Storage V2 support + V2Deprecation config.V2DeprecationEnum `json:"v2-deprecation"` +} + +// configYAML holds the config suitable for yaml parsing +type configYAML struct { + Config + configJSON +} + +// configJSON 有文件选项,被翻译成配置选项 +type configJSON struct { + LPUrlsJSON string `json:"listen-peer-urls"` // 集群节点之间通信监听的URL;如果指定的IP是0.0.0.0,那么etcd 会监昕所有网卡的指定端口 + LCUrlsJSON string `json:"listen-client-urls"` + APUrlsJSON string `json:"initial-advertise-peer-urls"` + ACUrlsJSON string `json:"advertise-client-urls"` + + CORSJSON string `json:"cors"` + HostWhitelistJSON string `json:"host-whitelist"` + + ClientSecurityJSON securityConfig `json:"client-transport-security"` + PeerSecurityJSON securityConfig `json:"peer-transport-security"` +} + +type securityConfig struct { + CertFile string `json:"cert-file"` + KeyFile string `json:"key-file"` + ClientCertFile string `json:"client-cert-file"` + ClientKeyFile string `json:"client-key-file"` + CertAuth bool `json:"client-cert-auth"` + TrustedCAFile string `json:"trusted-ca-file"` + AutoTLS bool `json:"auto-tls"` +} + +// NewConfig 创建一个用默认值填充的新配置. +func NewConfig() *Config { + lpurl, _ := url.Parse(DefaultListenPeerURLs) // "http://localhost:2380" + apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs) // "http://localhost:2380" + lcurl, _ := url.Parse(DefaultListenClientURLs) // "http://localhost:2379" + acurl, _ := url.Parse(DefaultAdvertiseClientURLs) // "http://localhost:2379" + cfg := &Config{ + MaxSnapFiles: DefaultMaxSnapshots, // 最大快照数 + MaxWalFiles: DefaultMaxWALs, // wal文件的最大保留数量(0不受限制). + + Name: DefaultName, // 节点的名字 + + SnapshotCount: etcdserver.DefaultSnapshotCount, // 快照数量 + SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries, // 触发快照到磁盘的已提交事务数. + + MaxTxnOps: DefaultMaxTxnOps, // 事务中允许的最大操作数. 
128 + MaxRequestBytes: DefaultMaxRequestBytes, // 最大请求体, 1.5M + ExperimentalWarningApplyDuration: DefaultWarningApplyDuration, // 是时间长度.如果应用请求的时间超过这个值.就会产生一个警告. 100ms + + GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, // 客户端在ping服务器之前应等待的最短持续时间间隔. 5s + GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, // 服务器到客户端ping的探活周期.以检查连接是否处于活动状态(0表示禁用).2h + GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, // 关闭非响应连接之前的额外持续等待时间(0表示禁用).20s + + SocketOpts: transport.SocketOpts{}, // 套接字配置 + + TickMs: 100, // 心跳间隔100ms + ElectionMs: 1000, // 选举超时 1s + InitialElectionTickAdvance: true, + + LPUrls: []url.URL{*lpurl}, // "http://localhost:2380" + LCUrls: []url.URL{*lcurl}, // "http://localhost:2380" + APUrls: []url.URL{*apurl}, // "http://localhost:2379" + ACUrls: []url.URL{*acurl}, // "http://localhost:2379" + + // 设置new为初始静态或DNS引导期间出现的所有成员.如果将此选项设置为existing.则etcd将尝试加入现有群集. + ClusterState: ClusterStateFlagNew, // 状态标志、默认new + InitialClusterToken: "etcd-cluster", + StrictReconfigCheck: DefaultStrictReconfigCheck, // 拒绝可能导致仲裁丢失的重新配置请求 + Metrics: "basic", // 基本的 + + CORS: map[string]struct{}{"*": {}}, // 跨域请求 + HostWhitelist: map[string]struct{}{"*": {}}, // 主机白名单 + + AuthToken: "simple", // 认证格式 simple、jwt + BcryptCost: uint(bcrypt.DefaultCost), // 为散列身份验证密码指定bcrypt算法的成本/强度 + AuthTokenTTL: 300, // token 有效期 + + PreVote: true, // Raft会运行一个额外的选举阶段.以检查它是否会获得足够的票数来赢得选举.从而最大限度地减少干扰. + + loggerMu: new(sync.RWMutex), + logger: nil, + Logger: "zap", + LogOutputs: []string{DefaultLogOutput}, // os.Stderr + LogLevel: logutil.DefaultLogLevel, // info + EnableLogRotation: false, // 默认不允许日志旋转 + LogRotationConfigJSON: DefaultLogRotationConfig, // 是用于日志轮换的默认配置. 默认情况下,日志轮换是禁用的. + EnableGRPCGateway: true, // 将http->grpc + // 实验性 + ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime, // 两次降级状态检查之间的时间间隔. + ExperimentalMemoryMlock: false, // 内存页锁定 + ExperimentalTxnModeWriteWithSharedBuffer: true, // 启用写事务在其只读检查操作中使用共享缓冲区. 
+ + V2Deprecation: config.V2_DEPR_DEFAULT, // not-yet + } + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + return cfg +} + +// ConfigFromFile OK +func ConfigFromFile(path string) (*Config, error) { + cfg := &configYAML{Config: *NewConfig()} + if err := cfg.configFromFile(path); err != nil { // ✅ + return nil, err + } + return &cfg.Config, nil +} + +// OK +func (cfg *configYAML) configFromFile(path string) error { + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + defaultInitialCluster := cfg.InitialCluster + + err = yaml.Unmarshal(b, cfg) + if err != nil { + return err + } + + if cfg.LPUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ",")) + if err != nil { + fmt.Fprintf(os.Stderr, "设置时出现意外错误 listen-peer-urls: %v\n", err) + os.Exit(1) + } + cfg.LPUrls = []url.URL(u) + } + + if cfg.LCUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ",")) + if err != nil { + fmt.Fprintf(os.Stderr, "设置时出现意外错误 listen-client-urls: %v\n", err) + os.Exit(1) + } + cfg.LCUrls = []url.URL(u) + } + + if cfg.APUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ",")) + if err != nil { + fmt.Fprintf(os.Stderr, "设置时出现意外错误 initial-advertise-peer-urls: %v\n", err) + os.Exit(1) + } + cfg.APUrls = []url.URL(u) + } + + if cfg.ACUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ",")) + if err != nil { + fmt.Fprintf(os.Stderr, "设置时出现意外错误 advertise-peer-urls: %v\n", err) + os.Exit(1) + } + cfg.ACUrls = []url.URL(u) + } + + if cfg.ListenMetricsUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ",")) + if err != nil { + fmt.Fprintf(os.Stderr, "设置时出现意外错误 listen-metrics-urls: %v\n", err) + os.Exit(1) + } + cfg.ListenMetricsUrls = []url.URL(u) + } + + if cfg.CORSJSON != "" { + uv := flags.NewUniqueURLsWithExceptions(cfg.CORSJSON, "*") + cfg.CORS = uv.Values + } + + if cfg.HostWhitelistJSON != "" { + uv := 
flags.NewUniqueStringsValue(cfg.HostWhitelistJSON) + cfg.HostWhitelist = uv.Values + } + + // 如果设置了discovery flag则清除由InitialClusterFromName设置的默认初始集群 + if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = "" + } + if cfg.ClusterState == "" { + cfg.ClusterState = ClusterStateFlagNew + } + + copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) { + tls.CertFile = ysc.CertFile + tls.KeyFile = ysc.KeyFile + tls.ClientCertFile = ysc.ClientCertFile + tls.ClientKeyFile = ysc.ClientKeyFile + tls.ClientCertAuth = ysc.CertAuth + tls.TrustedCAFile = ysc.TrustedCAFile + } + copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON) + copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON) + cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS + cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS + if cfg.SelfSignedCertValidity == 0 { + cfg.SelfSignedCertValidity = 1 + } + return cfg.Validate() // ✅ +} + +// 更新密码套件 +func updateCipherSuites(tls *transport.TLSInfo, ss []string) error { + if len(tls.CipherSuites) > 0 && len(ss) > 0 { + return fmt.Errorf("TLSInfo.CipherSuites已经指定(given %v)", ss) + } + if len(ss) > 0 { + cs := make([]uint16, len(ss)) + for i, s := range ss { + var ok bool + cs[i], ok = tlsutil.GetCipherSuite(s) + if !ok { + return fmt.Errorf("unexpected TLS cipher suite %q", s) + } + } + tls.CipherSuites = cs + } + return nil +} + +// Validate 确保 '*embed.Config' 字段是正确配置的. 
+func (cfg *Config) Validate() error { + if err := cfg.setupLogging(); err != nil { // ✅ + return err + } + if err := checkBindURLs(cfg.LPUrls); err != nil { + return err + } + if err := checkBindURLs(cfg.LCUrls); err != nil { + return err + } + if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil { + return err + } + if err := checkHostURLs(cfg.APUrls); err != nil { + addrs := cfg.getAPURLs() + return fmt.Errorf(`--initial-advertise-peer-urls %q 必须是 "host:port" (%v)`, strings.Join(addrs, ","), err) + } + if err := checkHostURLs(cfg.ACUrls); err != nil { + addrs := cfg.getACURLs() + return fmt.Errorf(`--advertise-client-urls %q 必须是 "host:port" (%v)`, strings.Join(addrs, ","), err) + } + // 检查是否有冲突的标志通过. + nSet := 0 + for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} { + if v { + nSet++ + } + } + + if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting { + return fmt.Errorf("意料之外的集群状态 %q", cfg.ClusterState) + } + + if nSet > 1 { + return ErrConflictBootstrapFlags + } + + if cfg.TickMs == 0 { + return fmt.Errorf("--heartbeat-interval必须是>0 (set to %dms)", cfg.TickMs) + } + if cfg.ElectionMs == 0 { + return fmt.Errorf("--election-timeout必须是>0 (set to %dms)", cfg.ElectionMs) + } + if 5*cfg.TickMs > cfg.ElectionMs { + return fmt.Errorf("--election-timeout[%vms] 必须是5倍 --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs) + } + if cfg.ElectionMs > maxElectionMs { + return fmt.Errorf("--election-timeout[%vms] 时间太长,应该小于 %vms", cfg.ElectionMs, maxElectionMs) + } + + // 最后检查一下,因为在etcdmain中代理可能会使这个问题得到解决. 
+ if cfg.LCUrls != nil && cfg.ACUrls == nil { + return ErrUnsetAdvertiseClientURLsFlag + } + + switch cfg.AutoCompactionMode { + case "": + case CompactorModeRevision, CompactorModePeriodic: + default: + return fmt.Errorf("未知的 auto-compaction-mode %q", cfg.AutoCompactionMode) + } + // false,false 不会走 + if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint { + cfg.logger.Warn("检测到启用了Checkpoint而没有持久性.考虑启用experimental-enable-le-checkpoint-persist") + } + if !cfg.ExperimentalEnableLeaseCheckpoint && !cfg.ExperimentalEnableLeaseCheckpointPersist { + // falsefalse 默认走这里 + return nil + } else if cfg.ExperimentalEnableLeaseCheckpoint && cfg.ExperimentalEnableLeaseCheckpointPersist { + return nil + } + return fmt.Errorf(" experimental-enable-lease-checkpoint-persist experimental-enable-lease-checkpoint 需要同时开启") +} + +// PeerURLsMapAndToken 设置一个初始的peer URLsMap 和token,用于启动或发现. +func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) { + token = cfg.InitialClusterToken + switch { + // todo 以下手动注释掉,一般不会使用以下的 + //case cfg.Durl != "": // 用于引导群集的发现URL + // urlsmap = types.URLsMap{} + // // 如果使用discovery,根据advertised peer URLs 生成一个临时的集群 + // urlsmap[cfg.Name] = cfg.APUrls + // token = cfg.Durl + // + //case cfg.DNSCluster != "": // DNS srv域用于引导群集. + // clusterStrs, cerr := cfg.GetDNSClusterNames() + // lg := cfg.logger + // if cerr != nil { + // lg.Warn("如法解析 SRV discovery", zap.Error(cerr)) + // } + // if len(clusterStrs) == 0 { + // return nil, "", cerr + // } + // for _, s := range clusterStrs { + // lg.Info("got bootstrap from DNS for etcd-etcd", zap.String("node", s)) + // } + // clusterStr := strings.Join(clusterStrs, ",") + // if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" { + // cfg.PeerTLSInfo.ServerName = cfg.DNSCluster + // } + // urlsmap, err = types.NewURLsMap(clusterStr) + // // only etcd member must belong to the discovered cluster. 
+ // // proxy does not need to belong to the discovered cluster. + // if which == "etcd" { + // if _, ok := urlsmap[cfg.Name]; !ok { + // return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name) + // } + // } + + default: + // 我们是静态配置的, + // infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380 + urlsmap, err = types.NewURLsMap(cfg.InitialCluster) // 仅仅是类型转换 + } + return urlsmap, token, err +} + +// GetDNSClusterNames 使用DNS SRV记录来获取集群启动的初始节点列表.这个函数将返回一个或多个节点的列表,以及在执行服务发现时遇到的任何错误. +// Note: Because this checks multiple sets of SRV records, discovery should only be considered to have +// failed if the returned node list is empty. +func (cfg *Config) GetDNSClusterNames() ([]string, error) { + var ( + clusterStrs []string + cerr error + serviceNameSuffix string + ) + if cfg.DNSClusterServiceName != "" { + serviceNameSuffix = "-" + cfg.DNSClusterServiceName + } + + lg := cfg.GetLogger() + + // Use both etcd-etcd-ssl and etcd-etcd for discovery. + // Combine the results if both are available. + clusterStrs, cerr = getCluster("https", "etcd-etcd-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) + if cerr != nil { + clusterStrs = make([]string, 0) + } + lg.Info( + "get cluster for etcd-etcd-ssl SRV", + zap.String("service-scheme", "https"), + zap.String("service-name", "etcd-etcd-ssl"+serviceNameSuffix), + zap.String("etcd-name", cfg.Name), + zap.String("discovery-srv", cfg.DNSCluster), + zap.Strings("advertise-peer-urls", cfg.getAPURLs()), + zap.Strings("found-cluster", clusterStrs), + zap.Error(cerr), + ) + + defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-etcd"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) + if httpCerr == nil { + clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...) 
+ } + lg.Info( + "get cluster for etcd-etcd SRV", + zap.String("service-scheme", "http"), + zap.String("service-name", "etcd-etcd"+serviceNameSuffix), + zap.String("etcd-name", cfg.Name), + zap.String("discovery-srv", cfg.DNSCluster), + zap.Strings("advertise-peer-urls", cfg.getAPURLs()), + zap.Strings("found-cluster", clusterStrs), + zap.Error(httpCerr), + ) + + return clusterStrs, multierr.Combine(cerr, httpCerr) +} + +// 初始化集群节点列表 default=http://localhost:2380 +func (cfg Config) InitialClusterFromName(name string) (ret string) { + if len(cfg.APUrls) == 0 { + return "" + } + n := name + if name == "" { + n = DefaultName + } + for i := range cfg.APUrls { + ret = ret + "," + n + "=" + cfg.APUrls[i].String() + } + return ret[1:] +} + +func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew } + +// ElectionTicks 返回选举权检查对应多少次tick触发次数 +func (cfg Config) ElectionTicks() int { + return int(cfg.ElectionMs / cfg.TickMs) +} + +func (cfg Config) V2DeprecationEffective() config.V2DeprecationEnum { + if cfg.V2Deprecation == "" { + return config.V2_DEPR_DEFAULT + } + return cfg.V2Deprecation +} + +func (cfg Config) defaultPeerHost() bool { + return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs +} + +func (cfg Config) defaultClientHost() bool { + return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs +} + +// ClientSelfCert etcd LCUrls客户端自签 +func (cfg *Config) ClientSelfCert() (err error) { + if !cfg.ClientAutoTLS { + return nil + } + if !cfg.ClientTLSInfo.Empty() { + cfg.logger.Warn("忽略客户端自动TLS,因为已经给出了证书") + return nil + } + chosts := make([]string, len(cfg.LCUrls)) + for i, u := range cfg.LCUrls { + chosts[i] = u.Host + } + cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity) + if err != nil { + return err + } + return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites) +} + +// PeerSelfCert 
etcd LPUrls客户端自签 +func (cfg *Config) PeerSelfCert() (err error) { + if !cfg.PeerAutoTLS { + return nil + } + if !cfg.PeerTLSInfo.Empty() { + cfg.logger.Warn("如果证书给出 则忽略peer自动TLS") + return nil + } + phosts := make([]string, len(cfg.LPUrls)) + for i, u := range cfg.LPUrls { + phosts[i] = u.Host + } + cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity) // ?年 + if err != nil { + return err + } + return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites) +} + +// UpdateDefaultClusterFromName 更新集群通信地址 +func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) { + // default=http://localhost:2380 + if defaultHostname == "" || defaultHostStatus != nil { + // 当 指定名称时,更新'initial-cluster'(例如,'etcd --name=abc'). + if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + } + return "", defaultHostStatus + } + + used := false + pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() + if cfg.defaultPeerHost() && pip == "0.0.0.0" { + cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} + used = true + } + // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc') + if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { + cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + } + + cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() + if cfg.defaultClientHost() && cip == "0.0.0.0" { + cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} + used = true + } + dhost := defaultHostname + if !used { + dhost = "" + } + return dhost, defaultHostStatus +} + +// checkBindURLs 如果任何URL使用域名,则返回错误. 
+func checkBindURLs(urls []url.URL) error { + for _, url := range urls { + if url.Scheme == "unix" || url.Scheme == "unixs" { + continue + } + host, _, err := net.SplitHostPort(url.Host) + if err != nil { + return err + } + if host == "localhost" { + // special case for local address + // TODO: support /etc/hosts ? + continue + } + if net.ParseIP(host) == nil { + return fmt.Errorf("expected IP in URL for binding (%s)", url.String()) + } + } + return nil +} + +func checkHostURLs(urls []url.URL) error { + for _, url := range urls { + host, _, err := net.SplitHostPort(url.Host) + if err != nil { + return err + } + if host == "" { + return fmt.Errorf("unexpected empty host (%s)", url.String()) + } + } + return nil +} + +func (cfg *Config) getAPURLs() (ss []string) { + ss = make([]string, len(cfg.APUrls)) + for i := range cfg.APUrls { + ss[i] = cfg.APUrls[i].String() + } + return ss +} + +func (cfg *Config) getLPURLs() (ss []string) { + ss = make([]string, len(cfg.LPUrls)) + for i := range cfg.LPUrls { + ss[i] = cfg.LPUrls[i].String() + } + return ss +} + +func (cfg *Config) getACURLs() (ss []string) { + ss = make([]string, len(cfg.ACUrls)) + for i := range cfg.ACUrls { + ss[i] = cfg.ACUrls[i].String() + } + return ss +} + +func (cfg *Config) getLCURLs() (ss []string) { + ss = make([]string, len(cfg.LCUrls)) + for i := range cfg.LCUrls { + ss[i] = cfg.LCUrls[i].String() + } + return ss +} + +func (cfg *Config) getMetricsURLs() (ss []string) { + ss = make([]string, len(cfg.ListenMetricsUrls)) + for i := range cfg.ListenMetricsUrls { + ss[i] = cfg.ListenMetricsUrls[i].String() + } + return ss +} + +// 返回boltdb存储的数据类型 +func parseBackendFreelistType(freelistType string) bolt.FreelistType { + if freelistType == freelistArrayType { + return bolt.FreelistArrayType + } + + return bolt.FreelistMapType +} diff --git a/etcd/embed/config_logging.go b/etcd/embed/config_logging.go new file mode 100644 index 00000000000..157dc31e890 --- /dev/null +++ b/etcd/embed/config_logging.go @@ 
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package embed

import (
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/url"
	"os"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zapgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
	"gopkg.in/natefinch/lumberjack.v2"
)

// GetLogger returns the logger currently installed on cfg.
// The read is guarded by loggerMu because the logger may be replaced
// concurrently via cfg.ZapLoggerBuilder (see setupLogging).
func (cfg Config) GetLogger() *zap.Logger {
	cfg.loggerMu.RLock()
	l := cfg.logger
	cfg.loggerMu.RUnlock()
	return l
}

// setupLogging initializes etcd logging. It must be called after flag
// parsing, or after the embed.Config has been fully configured.
// setupLogging builds the zap logger (or journald core) described by
// cfg.Logger / cfg.LogOutputs and installs it through cfg.ZapLoggerBuilder.
// It also wires a TLS-handshake-failure warning hook into both the client
// and peer TLS configurations. Only the "zap" logger is supported; any other
// value is rejected with an error.
func (cfg *Config) setupLogging() error {
	switch cfg.Logger {
	case "zap":
		if len(cfg.LogOutputs) == 0 {
			cfg.LogOutputs = []string{DefaultLogOutput}
		}
		// "default" may not be combined with other outputs.
		if len(cfg.LogOutputs) > 1 {
			for _, v := range cfg.LogOutputs {
				if v == DefaultLogOutput {
					return fmt.Errorf("目前还不支持%q的多重日志输出", DefaultLogOutput)
				}
			}
		}
		// TODO: log-rotation setup (registers the "rotate" zap sink below).
		if cfg.EnableLogRotation {
			if err := setupLogRotation(cfg.LogOutputs, cfg.LogRotationConfigJSON); err != nil {
				return err
			}
		}

		outputPaths, errOutputPaths := make([]string, 0), make([]string, 0)
		isJournal := false
		for _, v := range cfg.LogOutputs {
			switch v {
			case DefaultLogOutput:
				outputPaths = append(outputPaths, StdErrLogOutput)
				errOutputPaths = append(errOutputPaths, StdErrLogOutput)

			case JournalLogOutput:
				isJournal = true

			case StdErrLogOutput:
				outputPaths = append(outputPaths, StdErrLogOutput)
				errOutputPaths = append(errOutputPaths, StdErrLogOutput)

			case StdOutLogOutput:
				outputPaths = append(outputPaths, StdOutLogOutput)
				errOutputPaths = append(errOutputPaths, StdOutLogOutput)

			default:
				var path string
				if cfg.EnableLogRotation {
					// append rotate scheme to logs managed by lumberjack log rotation
					if v[0:1] == "/" {
						path = fmt.Sprintf("rotate:/%%2F%s", v[1:])
					} else {
						path = fmt.Sprintf("rotate:/%s", v)
					}
				} else {
					path = v
				}
				outputPaths = append(outputPaths, path)
				errOutputPaths = append(errOutputPaths, path)
			}
		}

		if !isJournal {
			// Plain zap logger built from the default config plus the
			// collected output paths.
			copied := logutil.DefaultZapLoggerConfig
			copied.OutputPaths = outputPaths
			copied.ErrorOutputPaths = errOutputPaths
			copied = logutil.MergeOutputPaths(copied)                                 // handles /dev/null ("discard") outputs
			copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) // AtomicLevel pre-set to the configured level
			if cfg.ZapLoggerBuilder == nil {
				lg, err := copied.Build() // build the logger from config and options
				if err != nil {
					return err
				}
				cfg.ZapLoggerBuilder = NewZapLoggerBuilder(lg)
			}
		} else {
			// journald output: "default" may not be mixed with other outputs.
			if len(cfg.LogOutputs) > 1 {
				for _, v := range cfg.LogOutputs {
					if v != DefaultLogOutput {
						return fmt.Errorf("运行systemd/journal,但其他 '--log-outputs' values (%q) 被配置为 'default'; 用其他的值重写 'default'", cfg.LogOutputs)
					}
				}
			}

			// journal write syncer; falls back to stderr on failure.
			syncer, lerr := getJournalWriteSyncer()
			if lerr != nil {
				return lerr
			}

			lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))

			// WARN: do not change the field names in the encoder config;
			// the journald log writer assumes the fields "level" and "caller".
			cr := zapcore.NewCore(
				zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig),
				syncer, lvl,
			)
			if cfg.ZapLoggerBuilder == nil {
				cfg.ZapLoggerBuilder = NewZapLoggerBuilder(zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer)))
			}
		}

		// Install the built logger onto cfg (takes loggerMu internally).
		err := cfg.ZapLoggerBuilder(cfg)
		if err != nil {
			return err
		}

		// Hook that logs rejected TLS handshakes, including peer certificate
		// details when a certificate was presented.
		logTLSHandshakeFailure := func(conn *tls.Conn, err error) {
			state := conn.ConnectionState()
			remoteAddr := conn.RemoteAddr().String()
			serverName := state.ServerName
			if len(state.PeerCertificates) > 0 {
				cert := state.PeerCertificates[0]
				ips := make([]string, len(cert.IPAddresses))
				for i := range cert.IPAddresses {
					ips[i] = cert.IPAddresses[i].String()
				}
				cfg.logger.Warn(
					"拒绝连接",
					zap.String("remote-addr", remoteAddr),
					zap.String("etcd-name", serverName),
					zap.Strings("ip-addresses", ips),
					zap.Strings("dns-names", cert.DNSNames),
					zap.Error(err),
				)
			} else {
				cfg.logger.Warn(
					"拒绝连接",
					zap.String("remote-addr", remoteAddr),
					zap.String("etcd-name", serverName),
					zap.Error(err),
				)
			}
		}
		cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
		cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure

	default:
		return fmt.Errorf("未知的Logger选项 %q", cfg.Logger)
	}

	return nil
}

// NewZapLoggerBuilder returns a zap logger builder that installs the given
// logger on the embedded etcd Config (write guarded by loggerMu).
func NewZapLoggerBuilder(lg *zap.Logger) func(*Config) error {
	return func(cfg *Config) error {
		cfg.loggerMu.Lock()
		defer cfg.loggerMu.Unlock()
		cfg.logger = lg
		return nil
	}
}

// SetupGlobalLoggers configures the process-global loggers (grpc, zap global)
// based on cfg. Since 3.5 embedded etcd does not call this by default, so
// grpc/zap global logging can be configured independently or across separate
// lifecycles (e.g. tests).
func (cfg *Config) SetupGlobalLoggers() {
	lg := cfg.GetLogger()
	if lg != nil {
		if cfg.LogLevel == "debug" {
			grpc.EnableTracing = true
			grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
		} else {
			// Non-debug: discard grpc info logs, keep warnings/errors on stderr.
			grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
		}
		zap.ReplaceGlobals(lg)
	}
}

// logRotationConfig wraps a lumberjack.Logger so it satisfies zap.Sink.
type logRotationConfig struct {
	*lumberjack.Logger
}

// Sync implements zap.Sink
func (logRotationConfig) Sync() error { return nil }

// setupLogRotation initializes log rotation for a single file-path target.
// Exactly one non-std output path is supported; it registers the "rotate://"
// zap sink used by setupLogging above.
func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
	var logRotationConfig logRotationConfig
	outputFilePaths := 0
	for _, v := range logOutputs {
		switch v {
		case DefaultLogOutput, StdErrLogOutput, StdOutLogOutput:
			continue
		default:
			outputFilePaths++
		}
	}
	// log rotation requires a file target
	if len(logOutputs) == 1 && outputFilePaths == 0 {
		return ErrLogRotationInvalidLogOutput
	}
	// support max 1 file target for log rotation
	if outputFilePaths > 1 {
		return ErrLogRotationInvalidLogOutput
	}

	if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationConfig); err != nil {
		var unmarshalTypeError *json.UnmarshalTypeError
		var syntaxError *json.SyntaxError
		switch {
		case errors.As(err, &syntaxError):
			return fmt.Errorf("improperly formatted log rotation config: %w", err)
		case errors.As(err, &unmarshalTypeError):
			return fmt.Errorf("invalid log rotation config: %w", err)
		}
		// NOTE(review): unmarshal errors other than syntax/type errors fall
		// through silently here — confirm this is intentional.
	}
	// NOTE(review): the error returned by RegisterSink is ignored; a second
	// registration of "rotate" would fail silently — confirm callers only
	// invoke this once.
	zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) {
		logRotationConfig.Filename = u.Path[1:]
		return &logRotationConfig, nil
	})
	return nil
}
a/server/embed/config_logging_journal_unix.go b/etcd/embed/config_logging_journal_unix.go similarity index 92% rename from server/embed/config_logging_journal_unix.go rename to etcd/embed/config_logging_journal_unix.go index 75d83ff2b55..e9bd844329a 100644 --- a/server/embed/config_logging_journal_unix.go +++ b/etcd/embed/config_logging_journal_unix.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !windows +// +build !windows package embed @@ -20,7 +21,7 @@ import ( "fmt" "os" - "go.etcd.io/etcd/client/pkg/v3/logutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil" "go.uber.org/zap/zapcore" ) diff --git a/server/embed/config_logging_journal_windows.go b/etcd/embed/config_logging_journal_windows.go similarity index 97% rename from server/embed/config_logging_journal_windows.go rename to etcd/embed/config_logging_journal_windows.go index 90dfad944e4..58ed08631bb 100644 --- a/server/embed/config_logging_journal_windows.go +++ b/etcd/embed/config_logging_journal_windows.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build windows +// +build windows package embed diff --git a/etcd/embed/doc.go b/etcd/embed/doc.go new file mode 100644 index 00000000000..735f8da00a7 --- /dev/null +++ b/etcd/embed/doc.go @@ -0,0 +1,45 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package embed provides bindings for embedding an etcd etcd in a program. 
+ +Launch an embedded etcd etcd using the configuration defaults: + + import ( + "log" + "time" + + "github.com/ls-2018/etcd_cn/etcd/embed" + ) + + func main() { + cfg := embed.NewConfig() + cfg.Dir = "default.etcd" + e, err := embed.StartEtcd(cfg) + if err != nil { + log.Fatal(err) + } + defer e.Close() + select { + case <-e.Server.ReadyNotify(): + log.Printf("Server is ready!") + case <-time.After(60 * time.Second): + e.Server.Stop() // trigger a shutdown + log.Printf("Server took too long to start!") + } + log.Fatal(<-e.Err()) + } +*/ +package embed diff --git a/etcd/embed/etcd.go b/etcd/embed/etcd.go new file mode 100644 index 00000000000..ee911216888 --- /dev/null +++ b/etcd/embed/etcd.go @@ -0,0 +1,848 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package embed + +import ( + "context" + "crypto/tls" + "fmt" + "io/ioutil" + defaultLog "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2v3" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3client" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/config" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc" + "github.com/ls-2018/etcd_cn/etcd/verify" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + "github.com/ls-2018/etcd_cn/pkg/debugutil" + runtimeutil "github.com/ls-2018/etcd_cn/pkg/runtime" + + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/soheilhy/cmux" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel/exporters/otlp" + "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/semconv" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +const ( + // internal fd usage includes disk usage and transport usage. + // To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs + // at most 2 to read/lock/write WALs. One case that it needs to 2 is to + // read all logs after some snapshot index, which locates at the end of + // the second last and the head of the last. For purging, it needs to read + // directory, so it needs 1. For fd monitor, it needs 1. 
+ // For transport, rafthttp builds two long-polling connections and at most + // four temporary connections with each member. There are at most 9 members + // in a cluster, so it should reserve 96. + // For the safety, we set the total reserved number to 150. + reservedInternalFDNum = 150 +) + +// Etcd 包含一个正在运行的etcd etcd和它的监听器. +type Etcd struct { + Peers []*peerListener + Clients []net.Listener + // 本机节点监听本地网卡的map 例如 localhost:2379 127.0.0.1:2379 0.0.0.0:2379 等等 + sctxs map[string]*serveCtx + metricsListeners []net.Listener + tracingExporterShutdown func() + Server *etcdserver.EtcdServer + cfg Config + stopc chan struct{} // raft 停止,消息通道 + errc chan error // 接收运行过程中产生的err + closeOnce sync.Once +} + +// 每个server的Listener +type peerListener struct { + net.Listener + serve func() error + close func(context.Context) error // 替换为net.Listener.Close() +} + +// StartEtcd 启动 用于客户端/etcd通信的 `etcd和HTTP处理程序` .不保证返回的Etcd.Server已经加入集群. +// 等待Etcd.Server.ReadyNotify()通道,以了解它何时完成并可以使用. +func StartEtcd(inCfg *Config) (e *Etcd, err error) { + if err = inCfg.Validate(); err != nil { + return nil, err + } + serving := false + e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})} + cfg := &e.cfg + defer func() { + if e == nil || err == nil { + return + } + if !serving { + // 在为serveCtx.servicesC启动gRPC etcd之前出现错误. 
+ for _, sctx := range e.sctxs { + close(sctx.serversC) + } + } + e.Close() // 启动失败时, 优雅关闭 + e = nil + }() + + if !cfg.SocketOpts.Empty() { + cfg.logger.Info("配置socket选项", zap.Bool("reuse-address", cfg.SocketOpts.ReuseAddress), zap.Bool("reuse-port", cfg.SocketOpts.ReusePort)) + } + e.cfg.logger.Info("", zap.Strings("listen-peer-urls", e.cfg.getLPURLs())) + // 设置每个server listener 的超时时间、证书、socket选项 + if e.Peers, err = configurePeerListeners(cfg); err != nil { + return e, err + } + + e.cfg.logger.Info("配置peer listener", zap.Strings("listen-client-urls", e.cfg.getLCURLs())) + // 设置每个client listener 的超时时间、证书、socket选项 + if e.sctxs, err = configureClientListeners(cfg); err != nil { + return e, err + } + + for _, sctx := range e.sctxs { + e.Clients = append(e.Clients, sctx.l) + } + + var ( + urlsmap types.URLsMap + token string + ) + // 成员初始化 + memberInitialized := true + if !isMemberInitialized(cfg) { // 判断wal目录存不存在 + memberInitialized = false + urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd") // token {name:urls[]} + if err != nil { + return e, fmt.Errorf("设置初始化集群出错: %v", err) + } + } + // 自动压缩配置 + if len(cfg.AutoCompactionRetention) == 0 { // 没有设置 + cfg.AutoCompactionRetention = "0" + } + // 根据压缩类型、压缩配置 返回时间、或条数 + autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention) + if err != nil { + return e, err + } + // 返回boltdb存储的数据类型,array \ map + backendFreelistType := parseBackendFreelistType(cfg.BackendFreelistType) + + srvcfg := config.ServerConfig{ + Name: cfg.Name, + ClientURLs: cfg.ACUrls, + PeerURLs: cfg.APUrls, + DataDir: cfg.Dir, + DedicatedWALDir: cfg.WalDir, + SnapshotCount: cfg.SnapshotCount, // 触发一次磁盘快照的提交事务的次数 + SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries, // 快照追赶数据量 + MaxSnapFiles: cfg.MaxSnapFiles, + MaxWALFiles: cfg.MaxWalFiles, // 要保留的最大wal文件数(0表示不受限制). 
5 + InitialPeerURLsMap: urlsmap, // 节点--> url + InitialClusterToken: token, + DiscoveryURL: cfg.Durl, + DiscoveryProxy: cfg.Dproxy, + NewCluster: cfg.IsNewCluster(), // new existing + PeerTLSInfo: cfg.PeerTLSInfo, // server 证书信息 + TickMs: cfg.TickMs, // tick计时器触发间隔 + ElectionTicks: cfg.ElectionTicks(), // 返回选举权检查对应多少次tick触发次数 + InitialElectionTickAdvance: cfg.InitialElectionTickAdvance, // 是否提前初始化选举时钟启动,以便更快的选举 + AutoCompactionRetention: autoCompactionRetention, // 自动压缩值 + AutoCompactionMode: cfg.AutoCompactionMode, // 自动压缩模式 + QuotaBackendBytes: cfg.QuotaBackendBytes, // 资源存储阈值 + BackendBatchLimit: cfg.BoltBackendBatchLimit, // BackendBatchLimit是提交后端事务前的最大操作数 + BackendFreelistType: backendFreelistType, // 返回boltdb存储的数据类型 + BackendBatchInterval: cfg.BoltBackendBatchInterval, // BackendBatchInterval是提交后端事务前的最长时间. + MaxTxnOps: cfg.MaxTxnOps, + MaxRequestBytes: cfg.MaxRequestBytes, // 服务器将接受的最大客户端请求大小(字节). + SocketOpts: cfg.SocketOpts, + StrictReconfigCheck: cfg.StrictReconfigCheck, // 严格配置变更检查 + ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, + AuthToken: cfg.AuthToken, // 认证格式 simple、jwt + BcryptCost: cfg.BcryptCost, // 为散列身份验证密码指定bcrypt算法的成本/强度 + TokenTTL: cfg.AuthTokenTTL, + CORS: cfg.CORS, + HostWhitelist: cfg.HostWhitelist, + InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck, // 数据毁坏检测功能 + CorruptCheckTime: cfg.ExperimentalCorruptCheckTime, + PreVote: cfg.PreVote, // PreVote 是否启用PreVote + Logger: cfg.logger, + ForceNewCluster: cfg.ForceNewCluster, + EnableGRPCGateway: cfg.EnableGRPCGateway, // 启用grpc网关,将 http 转换成 grpc / true + ExperimentalEnableDistributedTracing: cfg.ExperimentalEnableDistributedTracing, // 默认false + UnsafeNoFsync: cfg.UnsafeNoFsync, + EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint, // 允许leader定期向其他成员发送检查点,以防止leader变化时剩余TTL重置. 
+ LeaseCheckpointPersist: cfg.ExperimentalEnableLeaseCheckpointPersist, + CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit, + WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval, + DowngradeCheckTime: cfg.ExperimentalDowngradeCheckTime, // 两次降级状态检查之间的时间间隔. + WarningApplyDuration: cfg.ExperimentalWarningApplyDuration, // 是时间长度.如果应用请求的时间超过这个值.就会产生一个警告. + ExperimentalMemoryMlock: cfg.ExperimentalMemoryMlock, + ExperimentalTxnModeWriteWithSharedBuffer: cfg.ExperimentalTxnModeWriteWithSharedBuffer, + ExperimentalBootstrapDefragThresholdMegabytes: cfg.ExperimentalBootstrapDefragThresholdMegabytes, + } + + if srvcfg.ExperimentalEnableDistributedTracing { // 使用OpenTelemetry协议实现分布式跟踪.默认false + tctx := context.Background() + tracingExporter, opts, err := e.setupTracing(tctx) + if err != nil { + return e, err + } + if tracingExporter == nil || len(opts) == 0 { + return e, fmt.Errorf("error setting up distributed tracing") + } + e.tracingExporterShutdown = func() { tracingExporter.Shutdown(tctx) } + srvcfg.ExperimentalTracerOptions = opts + } + + print(e.cfg.logger, *cfg, srvcfg, memberInitialized) + + // TODO 在看 + if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { + return e, err + } + + // buffer channel so goroutines on closed connections won't wait forever + e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) + + // newly started member ("memberInitialized==false") + // does not need corruption check + if memberInitialized { + if err = e.Server.CheckInitialHashKV(); err != nil { + // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()" + // (nothing to close since rafthttp transports have not been started) + + e.cfg.logger.Error("checkInitialHashKV failed", zap.Error(err)) + e.Server.Cleanup() + e.Server = nil + return e, err + } + } + e.Server.Start() + + if err = e.servePeers(); err != nil { + return e, err + } + if err = e.serveClients(); err != nil { + return e, err + } + if err = 
e.serveMetrics(); err != nil { // ✅ + return e, err + } + + e.cfg.logger.Info( + "启动服务 peer/client/metrics", + zap.String("local-member-id", e.Server.ID().String()), + zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()), + zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), // 集群节点之间通信监听的URL;如果指定的IP是0.0.0.0,那么etcd 会监昕所有网卡的指定端口 + zap.Strings("advertise-client-urls", e.cfg.getACURLs()), + zap.Strings("listen-client-urls", e.cfg.getLCURLs()), + zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()), + ) + serving = true + return e, nil +} + +func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized bool) { + cors := make([]string, 0, len(ec.CORS)) + for v := range ec.CORS { + cors = append(cors, v) + } + sort.Strings(cors) + + hss := make([]string, 0, len(ec.HostWhitelist)) + for v := range ec.HostWhitelist { + hss = append(hss, v) + } + sort.Strings(hss) + + quota := ec.QuotaBackendBytes + if quota == 0 { + quota = etcdserver.DefaultQuotaBytes + } + + fmt.Println("------->", + zap.String("etcd-version", version.Version), + zap.String("git-sha", version.GitSHA), + zap.String("go-version", runtime.Version()), + zap.String("go-os", runtime.GOOS), + zap.String("go-arch", runtime.GOARCH), + zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)), + zap.Int("max-cpu-available", runtime.NumCPU()), + zap.Bool("member-initialized", memberInitialized), + zap.String("name", sc.Name), + zap.String("data-dir", sc.DataDir), + zap.String("wal-dir", ec.WalDir), + zap.String("wal-dir-dedicated", sc.DedicatedWALDir), + zap.String("member-dir", sc.MemberDir()), + zap.Bool("force-new-cluster", sc.ForceNewCluster), + zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)), + zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)), + zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance), // 是否提前初始化选举时钟启动,以便更快的选举 + zap.Uint64("snapshot-count", 
sc.SnapshotCount), // 触发一次磁盘快照的提交事务的次数 + zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries), + zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()), + zap.Strings("listen-peer-urls", ec.getLPURLs()), // 集群节点之间通信监听的URL;如果指定的IP是0.0.0.0,那么etcd 会监昕所有网卡的指定端口 + zap.Strings("advertise-client-urls", ec.getACURLs()), + zap.Strings("listen-client-urls", ec.getLCURLs()), + zap.Strings("listen-metrics-urls", ec.getMetricsURLs()), + zap.Strings("cors", cors), + zap.Strings("host-whitelist", hss), + zap.String("initial-cluster", sc.InitialPeerURLsMap.String()), + zap.String("initial-cluster-state", ec.ClusterState), + zap.String("initial-cluster-token", sc.InitialClusterToken), + zap.Int64("quota-size-bytes", quota), + zap.Bool("pre-vote", sc.PreVote), + zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck), + zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()), + zap.String("auto-compaction-mode", sc.AutoCompactionMode), + zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention), + zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()), + zap.String("discovery-url", sc.DiscoveryURL), + zap.String("discovery-proxy", sc.DiscoveryProxy), + zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()), + ) +} + +// Config returns the current configuration. +func (e *Etcd) Config() Config { + return e.cfg +} + +// Close 优雅关闭server 以及所有链接 +// 客户端请求在超时之后会终止,之后会被关闭 +func (e *Etcd) Close() { + fields := []zap.Field{ + zap.String("name", e.cfg.Name), + zap.String("data-dir", e.cfg.Dir), + zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()), + zap.Strings("advertise-client-urls", e.cfg.getACURLs()), + } + lg := e.GetLogger() + lg.Info("关闭etcd ing...", fields...) + defer func() { + lg.Info("关闭etcd", fields...) 
+ verify.MustVerifyIfEnabled(verify.Config{Logger: lg, DataDir: e.cfg.Dir, ExactIndex: false}) + lg.Sync() // log都刷到磁盘 + }() + + e.closeOnce.Do(func() { + close(e.stopc) + }) + + // 使用请求超时关闭客户端请求 + timeout := 2 * time.Second + if e.Server != nil { + timeout = e.Server.Cfg.ReqTimeout() + } + for _, sctx := range e.sctxs { + for ss := range sctx.serversC { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + stopServers(ctx, ss) + cancel() + } + } + + for _, sctx := range e.sctxs { + sctx.cancel() + } + + for i := range e.Clients { + if e.Clients[i] != nil { + e.Clients[i].Close() + } + } + + for i := range e.metricsListeners { + e.metricsListeners[i].Close() + } + + // shutdown tracing exporter + if e.tracingExporterShutdown != nil { + e.tracingExporterShutdown() + } + + // 关闭 rafthttp transports + if e.Server != nil { + e.Server.Stop() + } + + // close all idle connections in peer handler (wait up to 1-second) + for i := range e.Peers { + if e.Peers[i] != nil && e.Peers[i].close != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + e.Peers[i].close(ctx) + cancel() + } + } + if e.errc != nil { + close(e.errc) + } +} + +func stopServers(ctx context.Context, ss *servers) { + // first, close the http.Server + ss.http.Shutdown(ctx) + // do not grpc.Server.GracefulStop with TLS enabled etcd etcd + // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 + // and https://github.com/etcd-io/etcd/issues/8916 + if ss.secure { + ss.grpc.Stop() + return + } + + ch := make(chan struct{}) + go func() { + defer close(ch) + // close listeners to stop accepting new connections, + // will block on any existing transports + ss.grpc.GracefulStop() + }() + + // wait until all pending RPCs are finished + select { + case <-ch: + case <-ctx.Done(): + // took too long, manually close open transports + // e.g. 
watch streams + ss.grpc.Stop() + + // concurrent GracefulStop should be interrupted + <-ch + } +} + +// Err - return channel used to report errors during etcd run/shutdown. +// Since etcd 3.5 the channel is being closed when the etcd is over. +func (e *Etcd) Err() <-chan error { + return e.errc +} + +// 配置 peer listeners +func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) { + // 更新密码套件 + if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil { + return nil, err + } + + if err = cfg.PeerSelfCert(); err != nil { + cfg.logger.Fatal("未能获得peer的自签名证书", zap.Error(err)) + } + if !cfg.PeerTLSInfo.Empty() { + cfg.logger.Info( + "从peer的TLS开始", + zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)), + zap.Strings("cipher-suites", cfg.CipherSuites), + ) + } + + peers = make([]*peerListener, len(cfg.LPUrls)) + defer func() { + if err == nil { + return + } + for i := range peers { + if peers[i] != nil && peers[i].close != nil { + cfg.logger.Warn( + "关闭节点listener", + zap.String("address", cfg.LPUrls[i].String()), + zap.Error(err), + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + peers[i].close(ctx) + cancel() + } + } + }() + + for i, u := range cfg.LPUrls { + if u.Scheme == "http" { + if !cfg.PeerTLSInfo.Empty() { + cfg.logger.Warn("在钥匙和证书文件存在的情况下,方案为HTTP;忽略钥匙和证书文件", zap.String("peer-url", u.String())) + } + if cfg.PeerTLSInfo.ClientCertAuth { + cfg.logger.Warn("方案为HTTP;当启用 --peer-client-cert-auth;忽略钥匙和证书文件", zap.String("peer-url", u.String())) + } + } + // 构造peerListener对象 监听2380 作为服务端模式 + peers[i] = &peerListener{close: func(context.Context) error { return nil }} + // 调用接口,创建listener对象,返回来之后, + // socket套接字已经完成listener监听流程 + peers[i].Listener, err = transport.NewListenerWithOpts(u.Host, u.Scheme, + transport.WithTLSInfo(&cfg.PeerTLSInfo), + transport.WithSocketOpts(&cfg.SocketOpts), + transport.WithTimeout(rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout), + ) + if err != nil { + return nil, err 
+ } + // + peers[i].close = func(context.Context) error { + return peers[i].Listener.Close() + } + } + return peers, nil +} + +// 在rafthttp.Transport启动后配置对等处理程序 +func (e *Etcd) servePeers() (err error) { + // 生成http.hander 用于处理peer请求 + httpHandler := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server) + var peerTLScfg *tls.Config + if !e.cfg.PeerTLSInfo.Empty() { + if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil { + return err + } + } + + for _, p := range e.Peers { + + u := p.Listener.Addr().String() + grpcServer := v3rpc.Server(e.Server, peerTLScfg, nil) + m := cmux.New(p.Listener) + go grpcServer.Serve(m.Match(cmux.HTTP2())) // 基于http2 tcp://127.0.0.1:2380 + + httpServer := &http.Server{ + Handler: grpcHandlerFunc(grpcServer, httpHandler), + ReadTimeout: 5 * time.Minute, + ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + } + go httpServer.Serve(m.Match(cmux.Any())) // http1 + + p.serve = func() error { + // 回调函数,激活服务,主要是Accept方法 + e.cfg.logger.Info("cmux::serve", zap.String("address", u)) + return m.Serve() + } + + p.close = func(ctx context.Context) error { + // 优雅关闭 http.Server、打开的listeners、空闲的connections 直到超时或上下文关闭 + e.cfg.logger.Info("开始停止服务", zap.String("address", u)) + stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: grpcServer, http: httpServer}) + e.cfg.logger.Info("已停止服务", zap.String("address", u)) + m.Close() + return nil + } + } + + // start peer servers in a goroutine + for _, pl := range e.Peers { + go func(l *peerListener) { + u := l.Addr().String() + e.cfg.logger.Info( + "serving peer traffic", + zap.String("address", u), + ) + e.errHandler(l.serve()) + }(pl) + } + return nil +} + +// 配置与etcdctl客户端的listener选项 +func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { + // 更新密码套件 + if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil { + return nil, err + } + // LCURLS 自签证书 + if err = cfg.ClientSelfCert(); err != nil { + 
cfg.logger.Fatal("未能获得客户自签名的证书", zap.Error(err)) + } + if cfg.EnablePprof { + cfg.logger.Info("允许性能分析", zap.String("path", debugutil.HTTPPrefixPProf)) + } + + sctxs = make(map[string]*serveCtx) + for _, u := range cfg.LCUrls { + sctx := newServeCtx(cfg.logger) + if u.Scheme == "http" || u.Scheme == "unix" { + if !cfg.ClientTLSInfo.Empty() { + cfg.logger.Warn("在钥匙和证书文件存在的情况下,方案为HTTP;忽略钥匙和证书文件", zap.String("client-url", u.String())) + } + if cfg.ClientTLSInfo.ClientCertAuth { + cfg.logger.Warn("方案是HTTP,同时启用了-客户证书认证;该URL忽略了客户证书认证.", zap.String("client-url", u.String())) + } + } + if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { + return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file)必须提供,当协议是%q", u.String()) + } + + network := "tcp" + addr := u.Host + if u.Scheme == "unix" || u.Scheme == "unixs" { + network = "unix" + addr = u.Host + u.Path + } + sctx.network = network + + sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" + sctx.insecure = !sctx.secure // 在处理etcdctl 请求上,是不是启用证书 + if oldctx := sctxs[addr]; oldctx != nil { + oldctx.secure = oldctx.secure || sctx.secure + oldctx.insecure = oldctx.insecure || sctx.insecure + continue + } + + if sctx.l, err = transport.NewListenerWithOpts(addr, u.Scheme, + transport.WithSocketOpts(&cfg.SocketOpts), + transport.WithSkipTLSInfoCheck(true), + ); err != nil { + return nil, err + } + // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking + // hosts that disable ipv6. So, use the address given by the user. 
+ sctx.addr = addr + + if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { + if fdLimit <= reservedInternalFDNum { + cfg.logger.Fatal( + "file descriptor limit of etcd process is too low; please set higher", + zap.Uint64("limit", fdLimit), + zap.Int("recommended-limit", reservedInternalFDNum), + ) + } + sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) + } + + if network == "tcp" { + if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil { + return nil, err + } + } + + defer func(u url.URL) { + if err == nil { + return + } + sctx.l.Close() + cfg.logger.Warn("关闭peer listener", zap.String("address", u.Host), zap.Error(err)) + }(u) + for k := range cfg.UserHandlers { + sctx.userHandlers[k] = cfg.UserHandlers[k] + } + sctx.serviceRegister = cfg.ServiceRegister + if cfg.EnablePprof || cfg.LogLevel == "debug" { + sctx.registerPprof() + } + if cfg.LogLevel == "debug" { + sctx.registerTrace() + } + sctxs[addr] = sctx + } + return sctxs, nil +} + +// OK +func (e *Etcd) serveClients() (err error) { + if !e.cfg.ClientTLSInfo.Empty() { + e.cfg.logger.Info( + "使用证书启动client", + zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)), + zap.Strings("cipher-suites", e.cfg.CipherSuites), + ) + } + + // Start a client server goroutine for each listen address + var h http.Handler + if e.Config().EnableV2 { + if e.Config().V2DeprecationEffective().IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) { + return fmt.Errorf("--enable-v2 and --v2-deprecation=%s are mutually exclusive", e.Config().V2DeprecationEffective()) + } + e.cfg.logger.Warn("Flag `enable-v2` is deprecated and will get removed in etcd 3.6.") + if len(e.Config().ExperimentalEnableV2V3) > 0 { + e.cfg.logger.Warn("Flag `experimental-enable-v2v3` is deprecated and will get removed in etcd 3.6.") + srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3) + h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout()) + } 
else { + h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout()) + } + } else { + mux := http.NewServeMux() + etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server) + etcdhttp.HandleMetricsHealthForV3(e.cfg.logger, mux, e.Server) + h = mux + } + + mux := http.NewServeMux() // ✅ + etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server) // ✅ + h = mux + + var gopts []grpc.ServerOption + if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: e.cfg.GRPCKeepAliveMinTime, + PermitWithoutStream: false, // 默认false + // 如果是true,即使没有活动流(RPCs),服务器也允许keepalive pings.如果是假的,客户端在没有活动流的情况下发送ping 流,服务器将发送GOAWAY并关闭连接. + })) + } + if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && e.cfg.GRPCKeepAliveTimeout > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: e.cfg.GRPCKeepAliveInterval, + Timeout: e.cfg.GRPCKeepAliveTimeout, + })) + } + + // 启动每一个监听网卡的程序 + for _, sctx := range e.sctxs { + go func(s *serveCtx) { + e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...)) + }(sctx) + } + return nil +} + +func (e *Etcd) serveMetrics() (err error) { + if e.cfg.Metrics == "extensive" { // basic + grpc_prometheus.EnableHandlingTimeHistogram() + } + // 长度为0, 监听etcd ctl客户端请求 + if len(e.cfg.ListenMetricsUrls) > 0 { + for _, murl := range e.cfg.ListenMetricsUrls { + tlsInfo := &e.cfg.ClientTLSInfo + if murl.Scheme == "http" { + tlsInfo = nil + } + ml, err := transport.NewListenerWithOpts(murl.Host, murl.Scheme, + transport.WithTLSInfo(tlsInfo), + transport.WithSocketOpts(&e.cfg.SocketOpts), + ) + if err != nil { + return err + } + e.metricsListeners = append(e.metricsListeners, ml) + go func(u url.URL, ln net.Listener) { + e.cfg.logger.Info( + "serving metrics", + zap.String("address", u.String()), + ) + }(murl, ml) + } + } + return nil +} + +// 处理err +func (e *Etcd) errHandler(err error) { + select { + case 
<-e.stopc:
+		return
+	default:
+	}
+	// Typically blocks here until either etcd stops or the error is consumed.
+	select {
+	case <-e.stopc:
+	case e.errc <- err:
+	}
+}
+
+// GetLogger returns the logger.
+func (e *Etcd) GetLogger() *zap.Logger {
+	e.cfg.loggerMu.RLock()
+	l := e.cfg.logger
+	e.cfg.loggerMu.RUnlock()
+	return l
+}
+
+// parseCompactionRetention parses the --auto-compaction-retention value.
+// For an integer value, "revision" mode smuggles the raw revision count in a
+// time.Duration (Duration(h) nanoseconds), while "periodic" mode treats it as
+// hours; otherwise the value is parsed as a duration string (e.g. "12h").
+func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
+	h, err := strconv.Atoi(retention)
+	if err == nil && h >= 0 {
+		switch mode {
+		case CompactorModeRevision:
+			// NOTE: the revision count, not a real duration.
+			ret = time.Duration(int64(h))
+		case CompactorModePeriodic:
+			ret = time.Duration(int64(h)) * time.Hour
+		}
+	} else {
+		// Not a plain integer: interpret as a duration for periodic compaction.
+		ret, err = time.ParseDuration(retention)
+		if err != nil {
+			return 0, fmt.Errorf("解析失败CompactionRetention: %v", err)
+		}
+	}
+	return ret, nil
+}
+
+// setupTracing configures the OTLP gRPC exporter for distributed tracing.
+func (e *Etcd) setupTracing(ctx context.Context) (exporter tracesdk.SpanExporter, options []otelgrpc.Option, err error) {
+	exporter, err = otlp.NewExporter(ctx,
+		otlpgrpc.NewDriver(
+			otlpgrpc.WithEndpoint(e.cfg.ExperimentalDistributedTracingAddress),
+			otlpgrpc.WithInsecure(),
+		))
+	if err != nil {
+		return nil, nil, err
+	}
+	res := resource.NewWithAttributes(
+		semconv.ServiceNameKey.String(e.cfg.ExperimentalDistributedTracingServiceName),
+	)
+	// As the tracing service instance ID must be unique, it should
+	// never use the empty default string value, so we only set it
+	// if it's a non empty string.
+	if e.cfg.ExperimentalDistributedTracingServiceInstanceID != "" {
+		resWithIDKey := resource.NewWithAttributes(
+			(semconv.ServiceInstanceIDKey.String(e.cfg.ExperimentalDistributedTracingServiceInstanceID)),
+		)
+		// Merge resources to combine into a new
+		// resource in case of duplicates.
+ res = resource.Merge(res, resWithIDKey) + } + + options = append(options, + otelgrpc.WithPropagators( + propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + ), + ), + otelgrpc.WithTracerProvider( + tracesdk.NewTracerProvider( + tracesdk.WithBatcher(exporter), + tracesdk.WithResource(res), + ), + ), + ) + + e.cfg.logger.Info( + "distributed tracing enabled", + zap.String("distributed-tracing-address", e.cfg.ExperimentalDistributedTracingAddress), + zap.String("distributed-tracing-service-name", e.cfg.ExperimentalDistributedTracingServiceName), + zap.String("distributed-tracing-service-instance-id", e.cfg.ExperimentalDistributedTracingServiceInstanceID), + ) + + return exporter, options, err +} diff --git a/etcd/embed/inter.go b/etcd/embed/inter.go new file mode 100644 index 00000000000..1a24e6d7b60 --- /dev/null +++ b/etcd/embed/inter.go @@ -0,0 +1,15 @@ +package embed + +import ( + "net/http" + + gw "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" +) + +func mux() { + var _ etcdserver.EtcdServer + var _ gw.ServeMux + var _ http.ServeMux + var _ http.Handler // ServeHTTP方法 +} diff --git a/etcd/embed/serve.go b/etcd/embed/serve.go new file mode 100644 index 00000000000..f6d9744fd93 --- /dev/null +++ b/etcd/embed/serve.go @@ -0,0 +1,422 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package embed + +import ( + "context" + "fmt" + "io/ioutil" + defaultLog "log" + "math" + "net" + "net/http" + "strings" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/v3/credentials" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3client" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb" + v3electiongw "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb/gw" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb" + v3lockgw "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb/gw" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc" + etcdservergw "github.com/ls-2018/etcd_cn/offical/etcdserverpb/gw" + "github.com/ls-2018/etcd_cn/pkg/debugutil" + "github.com/ls-2018/etcd_cn/pkg/httputil" + + gw "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/soheilhy/cmux" + "github.com/tmc/grpc-websocket-proxy/wsproxy" + "go.uber.org/zap" + "golang.org/x/net/trace" + "google.golang.org/grpc" +) + +// 监听一个端口,提供服务, http, rpc +type serveCtx struct { + lg *zap.Logger + l net.Listener // 单个监听本地网卡2379端口的listener + addr string + network string // tcp unix + secure bool // 安全的 + insecure bool // 不安全的 // 在处理etcdctl 请求上,是不是启用证书 由 lcurl 的协议决定, 与secure相反 + + ctx context.Context + cancel context.CancelFunc + + userHandlers map[string]http.Handler + serviceRegister func(*grpc.Server) // 预置的服务注册函数扩展 + serversC chan *servers +} + +type servers struct { + secure bool + grpc *grpc.Server + http *http.Server +} + +// OK +func newServeCtx(lg *zap.Logger) *serveCtx { + ctx, cancel := context.WithCancel(context.Background()) + if lg == nil { + lg = zap.NewNop() // 不会输出的logger + } + return &serveCtx{ + lg: lg, + ctx: ctx, + cancel: cancel, + userHandlers: make(map[string]http.Handler), + serversC: 
make(chan *servers, 2), // in case sctx.insecure,sctx.secure true + } +} + +// serve 为接收入站请求创建一个goroutine +func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlsinfo *transport.TLSInfo, handler http.Handler, errHandler func(error), + gopts ...grpc.ServerOption, +) (err error) { + logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) + <-s.ReadyNotify() // 准备好了,该channel会被关闭 + + sctx.lg.Info("随时准备为客户的要求提供服务") + // 实例化 连接多路复用器.可以同时解析不同的协议,都跑在一个listener上 + m := cmux.New(sctx.l) + v3c := v3client.New(s) // server的客户端,可以直接操作server + servElection := v3election.NewElectionServer(v3c) + servLock := v3lock.NewLockServer(v3c) + + var gs *grpc.Server + defer func() { + if err != nil && gs != nil { + gs.Stop() + } + }() + // 不安全 + if sctx.insecure { + gs = v3rpc.Server(s, nil, nil, gopts...) // 注册服务、链接参数 + v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + grpcListener := m.Match(cmux.HTTP2()) // + + go func() { errHandler(gs.Serve(grpcListener)) }() + + var gwmux *gw.ServeMux + // 启用grpc网关,将 http 转换成 grpc / true + if s.Cfg.EnableGRPCGateway { + gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()}) // ✅ + if err != nil { + return err + } + } + // 该handler + httpmux := sctx.createMux(gwmux, handler) // http->grpc + + srvhttp := &http.Server{ + Handler: createAccessController(sctx.lg, s, httpmux), // ✅ + ErrorLog: logger, + } + httpl := m.Match(cmux.HTTP1()) + go func() { errHandler(srvhttp.Serve(httpl)) }() + + sctx.serversC <- &servers{grpc: gs, http: srvhttp} + sctx.lg.Info("以不安全的方式为客户流量提供服务;这是被强烈反对的.", zap.String("address", sctx.l.Addr().String())) + } + + if sctx.secure { + tlscfg, tlsErr := tlsinfo.ServerConfig() + if tlsErr != nil { + return tlsErr + } + gs = v3rpc.Server(s, tlscfg, nil, gopts...) 
+ v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + handler = grpcHandlerFunc(gs, handler) + + var gwmux *gw.ServeMux + if s.Cfg.EnableGRPCGateway { + dtls := tlscfg.Clone() + // trust local etcd + dtls.InsecureSkipVerify = true + bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls}) + opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())} + gwmux, err = sctx.registerGateway(opts) + if err != nil { + return err + } + } + + var tlsl net.Listener + tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo) + if err != nil { + return err + } + // TODO: add debug flag; enable logging when debug flag is set + httpmux := sctx.createMux(gwmux, handler) + + srv := &http.Server{ + Handler: createAccessController(sctx.lg, s, httpmux), + TLSConfig: tlscfg, + ErrorLog: logger, // do not log user error + } + go func() { errHandler(srv.Serve(tlsl)) }() + + sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} + sctx.lg.Info( + "serving client traffic securely", + zap.String("address", sctx.l.Addr().String()), + ) + } + + close(sctx.serversC) + return m.Serve() +} + +type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error + +// 注册网关 http 转换成 grpc / true +func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { + ctx := sctx.ctx + + addr := sctx.addr + // tcp unix + if network := sctx.network; network == "unix" { + // 明确定义unix网络以支持gRPC套接字 + addr = fmt.Sprintf("%s://%s", network, addr) + } + + opts = append(opts, grpc.WithDefaultCallOptions([]grpc.CallOption{ + grpc.MaxCallRecvMsgSize(math.MaxInt32), + }...)) + // 与etcd 建立grpc连接 + conn, err := grpc.DialContext(ctx, addr, opts...) 
+ if err != nil { + return nil, err + } + gwmux := gw.NewServeMux() + + handlers := []registerHandlerFunc{ + etcdservergw.RegisterKVHandler, // 将grpc转换成了http + etcdservergw.RegisterWatchHandler, + etcdservergw.RegisterLeaseHandler, + etcdservergw.RegisterClusterHandler, + etcdservergw.RegisterMaintenanceHandler, + etcdservergw.RegisterAuthHandler, + v3lockgw.RegisterLockHandler, + v3electiongw.RegisterElectionHandler, + } + for _, h := range handlers { + if err := h(ctx, gwmux, conn); err != nil { + return nil, err + } + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + sctx.lg.Warn("关闭连接", zap.String("address", sctx.l.Addr().String()), zap.Error(cerr)) + } + }() + + return gwmux, nil +} + +// OK 将http转换成grpc +func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { + httpmux := http.NewServeMux() // mux 数据选择器 + for path, h := range sctx.userHandlers { + httpmux.Handle(path, h) + } + + if gwmux != nil { + httpmux.Handle( + "/v3/", + wsproxy.WebsocketProxy( + gwmux, + wsproxy.WithRequestMutator( + // 默认为流的POST方法 + func(_ *http.Request, outgoing *http.Request) *http.Request { + outgoing.Method = "POST" + return outgoing + }, + ), + wsproxy.WithMaxRespBodyBufferSize(0x7fffffff), + ), + ) + } + if handler != nil { + httpmux.Handle("/", handler) + } + return httpmux +} + +// createAccessController包装了HTTP多路复用器. 
+// - 突变gRPC 网关请求路径 +// - 检查主机名白名单 +// 客户端HTTP请求首先在这里进行 +func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler { + if lg == nil { + lg = zap.NewNop() + } + return &accessController{lg: lg, s: s, mux: mux} +} + +type accessController struct { + lg *zap.Logger + s *etcdserver.EtcdServer // + mux *http.ServeMux +} + +func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if req == nil { + http.Error(rw, "请求是空的", http.StatusBadRequest) + return + } + // 重定向以实现向后兼容 + if req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") { + req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1) + } + + if req.TLS == nil { // 如果客户端连接不安全,则检查origin + host := httputil.GetHostname(req) // 请求的主机名、域名、IP + if !ac.s.AccessController.IsHostWhitelisted(host) { + ac.lg.Warn("拒绝HTTP请求,以防止DNS重新绑定攻击", zap.String("host", host)) + http.Error(rw, errCVE20185702(host), http.StatusMisdirectedRequest) + return + } + } else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway && + ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") { + // TODO 待看 + for _, chains := range req.TLS.VerifiedChains { + if len(chains) < 1 { + continue + } + if len(chains[0].Subject.CommonName) != 0 { + http.Error(rw, "对网关发送请求的客户端的CommonName将被忽略,不按预期使用.", http.StatusBadRequest) + return + } + } + } + + // 写Origin头 + // 允不允许跨域 + if ac.s.AccessController.OriginAllowed("*") { + addCORSHeader(rw, "*") + } else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) { + addCORSHeader(rw, origin) + } + + if req.Method == "OPTIONS" { + rw.WriteHeader(http.StatusOK) + return + } + + ac.mux.ServeHTTP(rw, req) +} + +// addCORSHeader 在给定Origin的情况下,添加正确的cors头信息. 
+func addCORSHeader(w http.ResponseWriter, origin string) { + w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") + w.Header().Add("Access-Control-Allow-Origin", origin) + w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization") +} + +// https://github.com/transmission/transmission/pull/468 +func errCVE20185702(host string) string { + return fmt.Sprintf(` +etcd received your request, but the Host header was unrecognized. + +To fix this, choose one of the following options: +- Enable TLS, then any HTTPS request will be allowed. +- Add the hostname you want to use to the whitelist in settings. + - e.g. etcd --host-whitelist %q + +This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702). +`, host) +} + +// WrapCORS wraps existing handler with CORS. +// TODO: deprecate this after v2 proxy deprecate +func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler { + return &corsHandler{ + ac: &etcdserver.AccessController{CORS: cors}, + h: h, + } +} + +type corsHandler struct { + ac *etcdserver.AccessController + h http.Handler +} + +func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if ch.ac.OriginAllowed("*") { + addCORSHeader(rw, "*") + } else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) { + addCORSHeader(rw, origin) + } + + if req.Method == "OPTIONS" { + rw.WriteHeader(http.StatusOK) + return + } + + ch.h.ServeHTTP(rw, req) +} + +func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { + if sctx.userHandlers[s] != nil { + sctx.lg.Warn("路径已被用户处理程序注册", zap.String("path", s)) + return + } + sctx.userHandlers[s] = h +} + +func (sctx *serveCtx) registerPprof() { + for p, h := range debugutil.PProfHandlers() { + sctx.registerUserHandler(p, h) + } +} + +func (sctx *serveCtx) registerTrace() { + reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } + 
sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) + evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } + sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) +} + +// ---------------------------------------- OVER -------------------------------------------------------------- + +// grpcHandlerFunc 返回一个http.Handler,该Handler在接收到gRPC连接时委托给grpcServer,否则返回otherHandler.在gRPC文档中给出. +func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { + if otherHandler == nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + grpcServer.ServeHTTP(w, r) + }) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + grpcServer.ServeHTTP(w, r) + } else { + otherHandler.ServeHTTP(w, r) + } + }) +} diff --git a/etcd/embed/util.go b/etcd/embed/util.go new file mode 100644 index 00000000000..7c61b92ab3a --- /dev/null +++ b/etcd/embed/util.go @@ -0,0 +1,30 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package embed
+
+import (
+	"path/filepath"
+
+	"github.com/ls-2018/etcd_cn/etcd/wal"
+)
+
+// isMemberInitialized reports whether the member's WAL directory already
+// exists, i.e. this node has been bootstrapped before. When --wal-dir is
+// unset it falls back to <data-dir>/member/wal.
+func isMemberInitialized(cfg *Config) bool {
+	waldir := cfg.WalDir
+	if waldir == "" {
+		waldir = filepath.Join(cfg.Dir, "member", "wal")
+	}
+	return wal.Exist(waldir)
+}
diff --git a/etcd/etcdmain/config.go b/etcd/etcdmain/config.go
new file mode 100644
index 00000000000..70a49fd00c8
--- /dev/null
+++ b/etcd/etcdmain/config.go
@@ -0,0 +1,440 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Every change should be reflected on help.go as well.
+ +package etcdmain + +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "runtime" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil" + cconfig "github.com/ls-2018/etcd_cn/etcd/config" + "github.com/ls-2018/etcd_cn/etcd/embed" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + "github.com/ls-2018/etcd_cn/pkg/flags" + + "go.uber.org/zap" + "sigs.k8s.io/yaml" +) + +var ( + proxyFlagOff = "off" + proxyFlagReadonly = "readonly" + proxyFlagOn = "on" + + fallbackFlagExit = "exit" + fallbackFlagProxy = "proxy" + + ignored = []string{ + "cluster-active-size", + "cluster-remove-delay", + "cluster-sync-interval", + "config", + "force", + "max-result-buffer", + "max-retry-attempts", + "peer-heartbeat-interval", + "peer-election-timeout", + "retry-interval", + "snapshot", + "v", + "vv", + // for coverage testing + "test.coverprofile", + "test.outputdir", + } +) + +type configProxy struct { + ProxyFailureWaitMs uint `json:"proxy-failure-wait"` // 在重新考虑代理请求之前.endpoints 将处于失败状态的时间(以毫秒为单位). + ProxyRefreshIntervalMs uint `json:"proxy-refresh-interval"` + ProxyDialTimeoutMs uint `json:"proxy-dial-timeout"` + ProxyWriteTimeoutMs uint `json:"proxy-write-timeout"` + ProxyReadTimeoutMs uint `json:"proxy-read-timeout"` + Fallback string + Proxy string + ProxyJSON string `json:"proxy"` + FallbackJSON string `json:"discovery-fallback"` +} + +// configFlags 是否有一组标志用于命令行解析配置 +type configFlags struct { + flagSet *flag.FlagSet + clusterState *flags.SelectiveStringValue // todo 设置new为初始静态或DNS引导期间出现的所有成员.如果将此选项设置为existing.则etcd将尝试加入现有群集. + fallback *flags.SelectiveStringValue + proxy *flags.SelectiveStringValue + v2deprecation *flags.SelectiveStringsValue +} + +// config 保存etcd命令行调用的配置 +type config struct { + ec embed.Config + cp configProxy // 代理配置 + cf configFlags // 是否有一组标志用于命令行解析配置 + configFile string // 从文件加载服务器配置. 
+ printVersion bool // 打印版本并退出 + ignored []string +} + +// OK +func newConfig() *config { + cfg := &config{ + ec: *embed.NewConfig(), + cp: configProxy{ + Proxy: proxyFlagOff, // off + ProxyFailureWaitMs: 5000, + ProxyRefreshIntervalMs: 30000, + ProxyDialTimeoutMs: 1000, + ProxyWriteTimeoutMs: 5000, + }, + ignored: ignored, + } + cfg.cf = configFlags{ + flagSet: flag.NewFlagSet("etcd", flag.ContinueOnError), + clusterState: flags.NewSelectiveStringValue( + embed.ClusterStateFlagNew, + embed.ClusterStateFlagExisting, + ), + fallback: flags.NewSelectiveStringValue( + fallbackFlagProxy, + fallbackFlagExit, + ), + proxy: flags.NewSelectiveStringValue( + proxyFlagOff, // off + proxyFlagReadonly, // readonly + proxyFlagOn, // on + ), + v2deprecation: flags.NewSelectiveStringsValue( + string(cconfig.V2_DEPR_0_NOT_YET), + string(cconfig.V2_DEPR_1_WRITE_ONLY), + string(cconfig.V2_DEPR_1_WRITE_ONLY_DROP), + string(cconfig.V2_DEPR_2_GONE)), + } + + fs := cfg.cf.flagSet + fs.Usage = func() { + fmt.Fprintln(os.Stderr, usageline) + } + + fs.StringVar(&cfg.configFile, "config-file", "", "从文件加载服务器配置.") + + // member + fs.StringVar(&cfg.ec.Dir, "data-dir", cfg.ec.Dir, "服务运行数据保存的路径. ${name}.etcd") + fs.StringVar(&cfg.ec.WalDir, "wal-dir", cfg.ec.WalDir, "专用wal目录的路径.默认值:--data-dir的路径下") + fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultListenPeerURLs, ""), "listen-peer-urls", "和成员之间通信的地址.用于监听其他etcd member的url") + fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultListenClientURLs, ""), "listen-client-urls", "对外提供服务的地址") + fs.Var(flags.NewUniqueURLsWithExceptions("", ""), "listen-metrics-urls", "要监听指标和运行状况端点的url列表.") + fs.UintVar(&cfg.ec.MaxSnapFiles, "max-snapshots", cfg.ec.MaxSnapFiles, "要保留的最大快照文件数(0表示不受限制).5") + fs.UintVar(&cfg.ec.MaxWalFiles, "max-wals", cfg.ec.MaxWalFiles, "要保留的最大wal文件数(0表示不受限制). 
5") + fs.StringVar(&cfg.ec.Name, "name", cfg.ec.Name, "本节点.人类可读的名字") + // 作用:此配置值作为此节点在--initial-cluster标志中列出的条目(例如.default=http://localhost:2380)引用.若使用静态引导.则需要匹配标志中使用的密钥.使用发现时.每个成员必须具有唯一的名称.建议使用Hostname或者machine-id. + fs.Uint64Var(&cfg.ec.SnapshotCount, "snapshot-count", cfg.ec.SnapshotCount, "// 触发一次磁盘快照的提交事务的次数.") + fs.UintVar(&cfg.ec.TickMs, "heartbeat-interval", cfg.ec.TickMs, "心跳间隔 100ms") + fs.UintVar(&cfg.ec.ElectionMs, "election-timeout", cfg.ec.ElectionMs, "选举超时") + fs.BoolVar(&cfg.ec.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.ec.InitialElectionTickAdvance, "是否提前初始化选举时钟启动,以便更快的选举.") + fs.Int64Var(&cfg.ec.QuotaBackendBytes, "quota-backend-bytes", cfg.ec.QuotaBackendBytes, "当后端大小超过给定配额时(0默认为低空间配额).引发警报.") + fs.StringVar(&cfg.ec.BackendFreelistType, "backend-bbolt-freelist-type", cfg.ec.BackendFreelistType, "BackendFreelistType指定boltdb后端使用的freelist的类型(array and map是支持的类型). map ") + fs.DurationVar(&cfg.ec.BoltBackendBatchInterval, "backend-batch-interval", cfg.ec.BoltBackendBatchInterval, "BackendBatchInterval是提交后端事务前的最长时间.") + fs.IntVar(&cfg.ec.BoltBackendBatchLimit, "backend-batch-limit", cfg.ec.BoltBackendBatchLimit, "BackendBatchLimit是提交后端事务前的最大操作数.") + fs.UintVar(&cfg.ec.MaxTxnOps, "max-txn-ops", cfg.ec.MaxTxnOps, "事务中允许的最大操作数.") + fs.UintVar(&cfg.ec.MaxRequestBytes, "max-request-bytes", cfg.ec.MaxRequestBytes, "服务器将接受的最大客户端请求大小(字节).") + fs.DurationVar(&cfg.ec.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.ec.GRPCKeepAliveMinTime, "客户端在ping服务器之前应等待的最短持续时间间隔.") + fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "服务器到客户端ping的频率持续时间.以检查连接是否处于活动状态(0表示禁用).") + fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "关闭非响应连接之前的额外持续等待时间(0表示禁用).20s") + fs.BoolVar(&cfg.ec.SocketOpts.ReusePort, "socket-reuse-port", cfg.ec.SocketOpts.ReusePort, "启用在listener上设置套接字选项SO_REUSEPORT.允许重新绑定一个已经在使用的端口.false") + 
fs.BoolVar(&cfg.ec.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.ec.SocketOpts.ReuseAddress, "启用在listener上设置套接字选项SO_REUSEADDR 允许重新绑定一个已经在使用的端口 在`TIME_WAIT` 状态.") + + // raft 连接超时 + fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "在每个rafthttp连接上设置的读取超时 5s") + fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "在每个rafthttp连接上设置写入超时 5s") + + // 集群 + fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultInitialAdvertisePeerURLs, ""), "initial-advertise-peer-urls", "集群成员的 URL地址.且会通告群集的其余成员节点.") + fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultAdvertiseClientURLs, ""), "advertise-client-urls", "就是客户端(etcdctl/curl等)跟etcd服务进行交互时请求的url") + // 注意,不能写http://localhost:237,这样就是通知其他节点,可以用localhost访问,将导致ectd的客户端用localhost访问本地,导致访问不通.还有一个更可怕情况,ectd布置了代理层,代理层将一直通过locahost访问自己的代理接口,导致无限循环 + fs.StringVar(&cfg.ec.Durl, "discovery", cfg.ec.Durl, "用于引导群集的发现URL.") + fs.Var(cfg.cf.fallback, "discovery-fallback", fmt.Sprintf(`发现服务失败时的预期行为("退出"或"代理")."proxy"仅支持v2 API. 
%q`, cfg.cf.fallback.Valids())) + + fs.StringVar(&cfg.ec.Dproxy, "discovery-proxy", cfg.ec.Dproxy, "用于流量到发现服务的HTTP代理.") + fs.StringVar(&cfg.ec.DNSCluster, "discovery-srv", cfg.ec.DNSCluster, "DNS srv域用于引导群集.") + fs.StringVar(&cfg.ec.DNSClusterServiceName, "discovery-srv-name", cfg.ec.DNSClusterServiceName, "使用DNS引导时查询的DNS srv名称的后缀.") + fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "用于引导初始集群配置,集群中所有节点的信息..") + fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "创建集群的 token.这个值每个集群保持唯一.") + fs.Var(cfg.cf.clusterState, "initial-cluster-state", "初始集群状态 ('new' or 'existing').") + + fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "拒绝可能导致仲裁丢失的重新配置请求.true") + + fs.BoolVar(&cfg.ec.PreVote, "pre-vote", cfg.ec.PreVote, "是否启用PreVote扩展,解决分区恢复选举bug") + + fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state. Deprecated in 3.5. Will be decomissioned in 3.6.") + fs.Var(cfg.cf.v2deprecation, "v2-deprecation", fmt.Sprintf("v2store deprecation stage: %q. 
", cfg.cf.proxy.Valids())) // off readonly on + + // proxy + fs.Var(cfg.cf.proxy, "proxy", fmt.Sprintf("代理模式设置 %q", cfg.cf.proxy.Valids())) + fs.UintVar(&cfg.cp.ProxyFailureWaitMs, "proxy-failure-wait", cfg.cp.ProxyFailureWaitMs, "在重新考虑代理请求之前.endpoints 将处于失败状态的时间(以毫秒为单位).") + fs.UintVar(&cfg.cp.ProxyRefreshIntervalMs, "proxy-refresh-interval", cfg.cp.ProxyRefreshIntervalMs, "endpoints 刷新间隔的时间(以毫秒为单位).") + fs.UintVar(&cfg.cp.ProxyDialTimeoutMs, "proxy-dial-timeout", cfg.cp.ProxyDialTimeoutMs, "拨号超时的时间(以毫秒为单位)或0表示禁用超时") + fs.UintVar(&cfg.cp.ProxyWriteTimeoutMs, "proxy-write-timeout", cfg.cp.ProxyWriteTimeoutMs, "写入超时的时间(以毫秒为单位)或0以禁用超时.") + fs.UintVar(&cfg.cp.ProxyReadTimeoutMs, "proxy-read-timeout", cfg.cp.ProxyReadTimeoutMs, "读取超时的时间(以毫秒为单位)或0以禁用超时.") + + // etcdctl通信的证书配置 + fs.StringVar(&cfg.ec.ClientTLSInfo.CertFile, "cert-file", "", "客户端证书") + fs.StringVar(&cfg.ec.ClientTLSInfo.KeyFile, "key-file", "", "客户端私钥") + + fs.StringVar(&cfg.ec.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "验证client客户端时使用的 证书文件路径,否则在需要客户认证时将使用cert-file文件") + fs.StringVar(&cfg.ec.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "验证client客户端时使用的 密钥文件路径,否则在需要客户认证时将使用key-file文件.") + fs.BoolVar(&cfg.ec.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "启用客户端证书验证;默认false") + fs.StringVar(&cfg.ec.ClientTLSInfo.CRLFile, "client-crl-file", "", "客户端证书吊销列表文件的路径.") + fs.StringVar(&cfg.ec.ClientTLSInfo.AllowedHostname, "client-cert-allowed-hostname", "", "允许客户端证书认证使用TLS主机名.") + fs.StringVar(&cfg.ec.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "客户端etcd通信 的可信CA证书文件") + fs.BoolVar(&cfg.ec.ClientAutoTLS, "auto-tls", false, "客户端TLS使用自动生成的证书") + // etcd通信之间的证书配置 + fs.StringVar(&cfg.ec.PeerTLSInfo.CertFile, "peer-cert-file", "", "证书路径") + fs.StringVar(&cfg.ec.PeerTLSInfo.KeyFile, "peer-key-file", "", "私钥路径") + + fs.StringVar(&cfg.ec.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "验证server客户端时使用的 证书文件路径,否则在需要客户认证时将使用cert-file文件") + 
fs.StringVar(&cfg.ec.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "验证server客户端时使用的 密钥文件路径,否则在需要客户认证时将使用key-file文件.") + + fs.BoolVar(&cfg.ec.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "启用server客户端证书验证;默认false") + fs.StringVar(&cfg.ec.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "服务器端ca证书") + fs.BoolVar(&cfg.ec.PeerAutoTLS, "peer-auto-tls", false, "节点之间使用生成的证书通信;默认false") + fs.UintVar(&cfg.ec.SelfSignedCertValidity, "self-signed-cert-validity", 1, "客户端证书和同级证书的有效期,单位为年 ;etcd自动生成的 如果指定了ClientAutoTLS and PeerAutoTLS,") + fs.StringVar(&cfg.ec.PeerTLSInfo.CRLFile, "peer-crl-file", "", "服务端证书吊销列表文件的路径.") + fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedCN, "peer-cert-allowed-cn", "", "允许的server客户端证书CommonName") + fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedHostname, "peer-cert-allowed-hostname", "", "允许的server客户端证书hostname") + fs.Var(flags.NewStringsValue(""), "cipher-suites", "客户端/etcds之间支持的TLS加密套件的逗号分隔列表(空将由Go自动填充).") + fs.BoolVar(&cfg.ec.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "跳过server 客户端证书中SAN字段的验证.默认false") + + fs.Var(flags.NewUniqueURLsWithExceptions("*", "*"), "cors", "逗号分隔的CORS白名单.或跨来源资源共享.(空或*表示允许所有)") + fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "如果etcd是不安全的(空意味着允许所有).用逗号分隔HTTP客户端请求中的可接受主机名.") + + // 日志 + fs.StringVar(&cfg.ec.Logger, "logger", "zap", "当前只支持zap,结构化数据") + fs.Var(flags.NewUniqueStringsValue(embed.DefaultLogOutput), "log-outputs", "指定'stdout'或'stderr'以跳过日志记录,即使在systemd或逗号分隔的输出目标列表下运行也是如此.") + fs.StringVar(&cfg.ec.LogLevel, "log-level", logutil.DefaultLogLevel, "日志等级,只支持 debug, info, warn, error, panic, or fatal. Default 'info'.") + fs.BoolVar(&cfg.ec.EnableLogRotation, "enable-log-rotation", false, "启用单个日志输出文件目标的日志旋转.") + fs.StringVar(&cfg.ec.LogRotationConfigJSON, "log-rotation-config-json", embed.DefaultLogRotationConfig, "是用于日志轮换的默认配置. 
默认情况下,日志轮换是禁用的.") + + // 版本 + fs.BoolVar(&cfg.printVersion, "version", false, "打印版本并退出.") + //--auto-compaction-mode=revision --auto-compaction-retention=1000 每5分钟自动压缩"latest revision" - 1000; + //--auto-compaction-mode=periodic --auto-compaction-retention=12h 每1小时自动压缩并保留12小时窗口. + fs.StringVar(&cfg.ec.AutoCompactionRetention, "auto-compaction-retention", "0", "在一个小时内为mvcc键值存储的自动压缩.0表示禁用自动压缩.") + fs.StringVar(&cfg.ec.AutoCompactionMode, "auto-compaction-mode", "periodic", "基于时间保留的三种模式:periodic, revision") + + // 性能分析器 通过 HTTP + fs.BoolVar(&cfg.ec.EnablePprof, "enable-pprof", false, `通过HTTP服务器启用运行时分析数据.地址位于客户端URL +/debug/pprof/`) + + // additional metrics + fs.StringVar(&cfg.ec.Metrics, "metrics", cfg.ec.Metrics, `设置导出的指标的详细程度,指定"扩展"以包括直方图指标(extensive,basic)`) + + // experimental distributed tracing + fs.BoolVar(&cfg.ec.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing.") + fs.StringVar(&cfg.ec.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", embed.ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).") + fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", embed.ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. 
Use the same service name for all instances of etcd.") + fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. This ID必须是unique per etcd instance.") + + // auth + fs.StringVar(&cfg.ec.AuthToken, "auth-token", cfg.ec.AuthToken, "指定验证令牌的具体选项. ('simple' or 'jwt')") + fs.UintVar(&cfg.ec.BcryptCost, "bcrypt-cost", cfg.ec.BcryptCost, "为散列身份验证密码指定bcrypt算法的成本/强度.有效值介于4和31之间.") + fs.UintVar(&cfg.ec.AuthTokenTTL, "auth-token-ttl", cfg.ec.AuthTokenTTL, "token过期时间") + + // gateway + fs.BoolVar(&cfg.ec.EnableGRPCGateway, "enable-grpc-gateway", cfg.ec.EnableGRPCGateway, "Enable GRPC gateway.") + + // experimental + fs.BoolVar(&cfg.ec.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ec.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.") + fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.") + + fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", true, "允许leader定期向其他成员发送检查点,以防止leader变化时剩余TTL重置") + // TODO: delete in v3.7 + fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", true, "启用持续的剩余TTL,以防止长期租赁的无限期自动续约.在v3.6中始终启用.应使用该功能以确保从启用该功能的v3.5集群顺利升级.需要启用experimental-enable-lease-checkpoint.") + fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.") + fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, 
"experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.") + fs.DurationVar(&cfg.ec.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ec.ExperimentalDowngradeCheckTime, "两次降级状态检查之间的时间间隔.") + fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "时间长度.如果应用请求的时间超过这个值.就会产生一个警告.") + fs.BoolVar(&cfg.ec.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ec.ExperimentalMemoryMlock, "启用强制执行etcd页面(特别是bbolt)留在RAM中.") + fs.BoolVar(&cfg.ec.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "启用写事务在其只读检查操作中使用共享缓冲区.") + fs.UintVar(&cfg.ec.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd etcd bootstrap on condition that it will free at least the provided threshold of disk space. 
Needs to be set to non-zero value to take effect.") + + // 非安全 + fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "禁用fsync,不安全,会导致数据丢失.") + fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "强制创建新的单成员群集.它提交配置更改,强制删除集群中的所有现有成员并添加自身.需要将其设置为还原备份.") + + // ignored + for _, f := range cfg.ignored { + fs.Var(&flags.IgnoredFlag{Name: f}, f, "") + } + return cfg +} + +// OK +func (cfg *config) parse(arguments []string) error { + perr := cfg.cf.flagSet.Parse(arguments) + switch perr { + case nil: + case flag.ErrHelp: + fmt.Println(flagsline) + os.Exit(0) + default: + os.Exit(2) + } + if len(cfg.cf.flagSet.Args()) != 0 { + return fmt.Errorf("'%s'不是一个有效的标志 ", cfg.cf.flagSet.Arg(0)) + } + + if cfg.printVersion { + fmt.Printf("etcd Version: %s\n", version.Version) + fmt.Printf("Git SHA: %s\n", version.GitSHA) + fmt.Printf("Go Version: %s\n", runtime.Version()) + fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) + os.Exit(0) + } + + var err error + + // 这个env变量必须被单独解析,因为我们需要根据配置文件是否被设置,来决定是使用还是忽略env变量. + if cfg.configFile == "" { + cfg.configFile = os.Getenv(flags.FlagToEnv("ETCD", "config-file")) // ETCD_CONFIG_FILE + } + + if cfg.configFile != "" { + err = cfg.configFromFile(cfg.configFile) + if lg := cfg.ec.GetLogger(); lg != nil { + lg.Info("加载的etcd配置,其他配置的命令行标志和环境变量将被忽略,如果提供了", zap.String("path", cfg.configFile)) + } + } else { + err = cfg.configFromCmdLine() + } + if runtime.GOOS == "windows" { + fmt.Println(os.RemoveAll(fmt.Sprintf("E:\\etcd_cn\\%s.etcd", cfg.ec.Name))) + } else { + fmt.Println(os.RemoveAll(fmt.Sprintf("/Users/liushuo/Desktop/source_code/etcd_cn/%s.etcd", cfg.ec.Name))) + } + return err +} + +// OK +func (cfg *config) configFromCmdLine() error { + // 用户指定的记录器尚未设置,在标志解析过程中使用此记录器 + lg, err := zap.NewProduction() + if err != nil { + return err + } + err = flags.SetFlagsFromEnv(lg, "ETCD", cfg.cf.flagSet) // 解析给定flagset中的所有注册标志,如果它们还没有被设置,则尝试从环境变量中设置其值. 
+ if err != nil { + return err + } + + if rafthttp.ConnReadTimeout < rafthttp.DefaultConnReadTimeout { + rafthttp.ConnReadTimeout = rafthttp.DefaultConnReadTimeout + lg.Info(fmt.Sprintf("raft-read-timeout : %v", rafthttp.DefaultConnReadTimeout)) + } + if rafthttp.ConnWriteTimeout < rafthttp.DefaultConnWriteTimeout { + rafthttp.ConnWriteTimeout = rafthttp.DefaultConnWriteTimeout + lg.Info(fmt.Sprintf("raft-write-timeout increased to minimum value: %v", rafthttp.DefaultConnWriteTimeout)) + } + // 集群节点之间通信监听的URL;如果指定的IP是0.0.0.0,那么etcd 会监昕所有网卡的指定端口 + cfg.ec.LPUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls") + cfg.ec.APUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls") + cfg.ec.LCUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls") + cfg.ec.ACUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls") + cfg.ec.ListenMetricsUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-metrics-urls") + + cfg.ec.CORS = flags.UniqueURLsMapFromFlag(cfg.cf.flagSet, "cors") + cfg.ec.HostWhitelist = flags.UniqueStringsMapFromFlag(cfg.cf.flagSet, "host-whitelist") + + cfg.ec.CipherSuites = flags.StringsFromFlag(cfg.cf.flagSet, "cipher-suites") + + cfg.ec.LogOutputs = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-outputs") + + cfg.ec.ClusterState = cfg.cf.clusterState.String() + cfg.cp.Fallback = cfg.cf.fallback.String() // proxy + cfg.cp.Proxy = cfg.cf.proxy.String() // off + + // 如果设置了lcurls,则禁用默认的 advertise-client-urls + fmt.Println(`flags.IsSet(cfg.cf.flagSet, "listen-client-urls")`, flags.IsSet(cfg.cf.flagSet, "listen-client-urls")) + fmt.Println(`flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")`, flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")) + missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls") + // todo 没看懂 + if !cfg.mayBeProxy() && missingAC { + cfg.ec.ACUrls = nil + } + + // 如果设置了discovery则禁用默认初始集群 + if (cfg.ec.Durl != 
"" || cfg.ec.DNSCluster != "" || cfg.ec.DNSClusterServiceName != "") && !flags.IsSet(cfg.cf.flagSet, "initial-cluster") { + cfg.ec.InitialCluster = "" + } + + return cfg.validate() // √ +} + +// OK +func (cfg *config) configFromFile(path string) error { + eCfg, err := embed.ConfigFromFile(path) + if err != nil { + return err + } + cfg.ec = *eCfg + + // 加载额外的配置信息 + b, rerr := ioutil.ReadFile(path) + if rerr != nil { + return rerr + } + if yerr := yaml.Unmarshal(b, &cfg.cp); yerr != nil { + return yerr + } + + if cfg.cp.FallbackJSON != "" { + if err := cfg.cf.fallback.Set(cfg.cp.FallbackJSON); err != nil { + log.Fatalf("设置时出现意外错误 discovery-fallback flag: %v", err) + } + cfg.cp.Fallback = cfg.cf.fallback.String() + } + + if cfg.cp.ProxyJSON != "" { + if err := cfg.cf.proxy.Set(cfg.cp.ProxyJSON); err != nil { + log.Fatalf("设置时出现意外错误 proxyFlag: %v", err) + } + cfg.cp.Proxy = cfg.cf.proxy.String() + } + return nil +} + +func (cfg *config) mayBeProxy() bool { + mayFallbackToProxy := cfg.ec.Durl != "" && cfg.cp.Fallback == fallbackFlagProxy + return cfg.cp.Proxy != proxyFlagOff || mayFallbackToProxy +} + +func (cfg *config) validate() error { + err := cfg.ec.Validate() + // TODO(yichengq): 通过 discovery service case加入,请检查这一点. 
+ if err == embed.ErrUnsetAdvertiseClientURLsFlag && cfg.mayBeProxy() { + return nil + } + return err +} + +// 是否开启代理模式 +func (cfg config) isProxy() bool { return cfg.cf.proxy.String() != proxyFlagOff } +func (cfg config) isReadonlyProxy() bool { return cfg.cf.proxy.String() == proxyFlagReadonly } +func (cfg config) shouldFallbackToProxy() bool { return cfg.cf.fallback.String() == fallbackFlagProxy } diff --git a/server/etcdmain/doc.go b/etcd/etcdmain/doc.go similarity index 100% rename from server/etcdmain/doc.go rename to etcd/etcdmain/doc.go diff --git a/etcd/etcdmain/etcd.go b/etcd/etcdmain/etcd.go new file mode 100644 index 00000000000..42415ca7cf7 --- /dev/null +++ b/etcd/etcdmain/etcd.go @@ -0,0 +1,445 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package etcdmain

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"time"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
	"github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
	"github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
	"github.com/ls-2018/etcd_cn/etcd/embed"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2discovery"
	"github.com/ls-2018/etcd_cn/etcd/proxy/httpproxy"
	pkgioutil "github.com/ls-2018/etcd_cn/pkg/ioutil"
	"github.com/ls-2018/etcd_cn/pkg/osutil"

	"go.uber.org/zap"
	"google.golang.org/grpc"
)

// dirType names the kind of subdirectory found under the data directory.
type dirType string

// Only one of the "member" / "proxy" subdirectories may exist at a time;
// when neither exists (initial node start) the directory is classified as empty.
var (
	dirMember = dirType("member")
	dirProxy  = dirType("proxy")
	dirEmpty  = dirType("empty")
)

// startEtcdOrProxy parses the command-line configuration and then either starts an
// embedded etcd member or a v2 HTTP proxy, depending on the existing data-dir layout
// and the proxy flags. It never returns on fatal configuration or startup errors
// (os.Exit / lg.Fatal), and blocks until the server stops or a listener fails.
func startEtcdOrProxy(args []string) {
	grpc.EnableTracing = false

	cfg := newConfig()
	defaultInitialCluster := cfg.ec.InitialCluster

	err := cfg.parse(args[1:])
	lg := cfg.ec.GetLogger()
	// If parsing the full configuration failed, prefer the logger already resolved
	// from the config to report the error; if none exists, build a temporary one.
	if lg == nil {
		var zapError error
		lg, zapError = zap.NewProduction()
		if zapError != nil {
			fmt.Printf("创建zap logger失败%v", zapError)
			os.Exit(1)
		}
	}
	lg.Info("运行中:", zap.Strings("args", args))
	if err != nil {
		lg.Warn("未能验证标志", zap.Error(err))
		switch err {
		case embed.ErrUnsetAdvertiseClientURLsFlag:
			lg.Warn("advertise client URLs are not set", zap.Error(err))
		}
		os.Exit(1)
	}
	// err := cfg.ZapLoggerBuilder(cfg)
	cfg.ec.SetupGlobalLoggers()

	// Flush any buffered log entries on exit.
	defer func() {
		logger := cfg.ec.GetLogger()
		if logger != nil {
			logger.Sync()
		}
	}()
	// TODO(review): intent unclear — appears to guard against a changed Name
	// leaving InitialCluster stale; confirm against UpdateDefaultClusterFromName.
	defaultHost, dhErr := (&cfg.ec).UpdateDefaultClusterFromName(defaultInitialCluster)
	if defaultHost != "" {
		lg.Info("检测到默认的advertise主机", zap.String("host", defaultHost))
	}
	if dhErr != nil {
		lg.Info("未能检测到默认主机", zap.Error(dhErr))
	}

	if cfg.ec.Dir == "" {
		cfg.ec.Dir = fmt.Sprintf("%v.etcd", cfg.ec.Name)
		lg.Warn("'data-dir'是空的,使用默认的default", zap.String("data-dir", cfg.ec.Dir))
	}

	var stopped <-chan struct{}
	var errc <-chan error
	// Classify the existing data directory to decide member vs proxy startup.
	which := identifyDataDirOrDie(cfg.ec.GetLogger(), cfg.ec.Dir)
	if which != dirEmpty {
		lg.Info("etcd数据已经被初始化了", zap.String("data-dir", cfg.ec.Dir), zap.String("dir-type", string(which)))
		switch which {
		case dirMember:
			stopped, errc, err = startEtcd(&cfg.ec)
		case dirProxy:
			err = startProxy(cfg)
		default:
			lg.Panic("未知目录类型", zap.String("dir-type", string(which)))
		}
	} else {
		shouldProxy := cfg.isProxy() // proxy mode enabled?
		if !shouldProxy {            // typically not enabled
			stopped, errc, err = startEtcd(&cfg.ec)
			// TODO(review): discovery fallback path not fully reviewed.
			if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == v2discovery.ErrFullCluster {
				if cfg.shouldFallbackToProxy() {
					lg.Warn("discovery cluster is full, falling back to proxy", zap.String("fallback-proxy", fallbackFlagProxy), zap.Error(err))
					shouldProxy = true
				}
			} else if err != nil {
				lg.Warn("failed to start etcd", zap.Error(err))
			}
		}
		if shouldProxy {
			err = startProxy(cfg)
		}
	}

	if err != nil {
		// Discovery-specific failures get detailed operator guidance before exiting.
		if derr, ok := err.(*etcdserver.DiscoveryError); ok {
			switch derr.Err {
			case v2discovery.ErrDuplicateID:
				lg.Warn("member has been registered with discovery service", zap.String("name", cfg.ec.Name), zap.String("discovery-token", cfg.ec.Durl), zap.Error(derr.Err))
				lg.Warn("but could not find valid cluster configuration", zap.String("data-dir", cfg.ec.Dir))
				lg.Warn("check data dir if previous bootstrap succeeded")
				lg.Warn("or use a new discovery token if previous bootstrap failed")

			case v2discovery.ErrDuplicateName:
				lg.Warn("member with duplicated name has already been registered", zap.String("discovery-token", cfg.ec.Durl), zap.Error(derr.Err))
				lg.Warn("cURL the discovery token URL for details")
				lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster")

			default:
				lg.Warn("failed to bootstrap; discovery token was already used", zap.String("discovery-token", cfg.ec.Durl), zap.Error(err))
				lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster")
			}
			os.Exit(1)
		}

		// Heuristic: error text mentioning --initial-cluster suggests misconfiguration.
		if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") {
			lg.Warn("failed to start", zap.Error(err))
			if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) {
				lg.Warn("forgot to set --initial-cluster?")
			}
			if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
				lg.Warn("forgot to set --initial-advertise-peer-urls?")
			}
			if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 {
				lg.Warn("--discovery flag is not set")
			}
			os.Exit(1)
		}
		lg.Fatal("discovery failed", zap.Error(err))
	}

	osutil.HandleInterrupts(lg)

	// At this point, the initialization of etcd is done.
	// The listeners are listening on the TCP ports and ready
	// for accepting connections. The etcd instance should be
	// joined with the cluster and ready to serve incoming
	// connections.
	notifySystemd(lg)

	select {
	case lerr := <-errc:
		// fatal out on listener errors
		lg.Fatal("listener failed", zap.Error(lerr))
	case <-stopped:
	}

	osutil.Exit(0)
}

// startEtcd launches the embedded etcd server asynchronously and blocks until the
// member has either joined the cluster (ReadyNotify) or stopped with an error
// (StopNotify). Returns the stop channel, the server error channel, and any
// immediate startup error.
func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
	e, err := embed.StartEtcd(cfg) // starts etcd (and its HTTP serving) asynchronously
	if err != nil {
		return nil, nil, err
	}
	osutil.RegisterInterruptHandler(e.Close) // register interrupt handler; not invoked here
	select {
	case <-e.Server.ReadyNotify(): // this member has joined the cluster
	case <-e.Server.StopNotify(): // server stopped with an error
	}
	return e.Server.StopNotify(), e.Err(), nil
}

// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy(cfg *config) error {
	lg := cfg.ec.GetLogger()
	lg.Info("v2 API proxy starting")

	clientTLSInfo := cfg.ec.ClientTLSInfo
	if clientTLSInfo.Empty() {
		// Support old proxy behavior of defaulting to PeerTLSInfo
		// for both client and peer connections.
		clientTLSInfo = cfg.ec.PeerTLSInfo
	}
	clientTLSInfo.InsecureSkipVerify = cfg.ec.ClientAutoTLS
	cfg.ec.PeerTLSInfo.InsecureSkipVerify = cfg.ec.PeerAutoTLS

	// Transport for forwarding client requests to cluster members.
	pt, err := transport.NewTimeoutTransport(
		clientTLSInfo,
		time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond,
		time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond,
		time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond,
	)
	if err != nil {
		return err
	}
	pt.MaxIdleConnsPerHost = httpproxy.DefaultMaxIdleConnsPerHost

	if err = cfg.ec.PeerSelfCert(); err != nil {
		lg.Fatal("failed to get self-signed certs for peer", zap.Error(err))
	}
	// Transport for talking to peers (cluster membership discovery).
	tr, err := transport.NewTimeoutTransport(
		cfg.ec.PeerTLSInfo,
		time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond,
		time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond,
		time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond,
	)
	if err != nil {
		return err
	}

	cfg.ec.Dir = filepath.Join(cfg.ec.Dir, "proxy")
	err = fileutil.TouchDirAll(cfg.ec.Dir)
	if err != nil {
		return err
	}

	var peerURLs []string
	clusterfile := filepath.Join(cfg.ec.Dir, "cluster")

	// Prefer peer URLs cached in the cluster file; fall back to flags/discovery.
	b, err := ioutil.ReadFile(clusterfile)
	switch {
	case err == nil:
		if cfg.ec.Durl != "" {
			lg.Warn(
				"discovery token ignored since the proxy has already been initialized; valid cluster file found",
				zap.String("cluster-file", clusterfile),
			)
		}
		if cfg.ec.DNSCluster != "" {
			lg.Warn(
				"DNS SRV discovery ignored since the proxy has already been initialized; valid cluster file found",
				zap.String("cluster-file", clusterfile),
			)
		}
		urls := struct{ PeerURLs []string }{}
		err = json.Unmarshal(b, &urls)
		if err != nil {
			return err
		}
		peerURLs = urls.PeerURLs
		lg.Info(
			"proxy using peer URLS from cluster file",
			zap.Strings("peer-urls", peerURLs),
			zap.String("cluster-file", clusterfile),
		)

	case os.IsNotExist(err):
		var urlsmap types.URLsMap
		urlsmap, _, err = cfg.ec.PeerURLsMapAndToken("proxy")
		if err != nil {
			return fmt.Errorf("error setting up initial cluster: %v", err)
		}

		if cfg.ec.Durl != "" {
			var s string
			s, err = v2discovery.GetCluster(lg, cfg.ec.Durl, cfg.ec.Dproxy)
			if err != nil {
				return err
			}
			if urlsmap, err = types.NewURLsMap(s); err != nil {
				return err
			}
		}
		peerURLs = urlsmap.URLs()
		lg.Info("proxy using peer URLS", zap.Strings("peer-urls", peerURLs))

	default:
		return err
	}

	clientURLs := []string{}
	// uf refreshes the client URL list from the remote peers and persists the
	// updated peer URLs to the cluster file (write-to-.bak then rename).
	uf := func() []string {
		gcls, gerr := etcdserver.GetClusterFromRemotePeers(lg, peerURLs, tr)
		if gerr != nil {
			lg.Warn(
				"failed to get cluster from remote peers",
				zap.Strings("peer-urls", peerURLs),
				zap.Error(gerr),
			)
			return []string{}
		}

		clientURLs = gcls.ClientURLs()
		urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
		b, jerr := json.Marshal(urls)
		if jerr != nil {
			lg.Warn("proxy failed to marshal peer URLs", zap.Error(jerr))
			return clientURLs
		}

		err = pkgioutil.WriteAndSyncFile(clusterfile+".bak", b, 0o600)
		if err != nil {
			lg.Warn("proxy failed to write cluster file", zap.Error(err))
			return clientURLs
		}
		err = os.Rename(clusterfile+".bak", clusterfile)
		if err != nil {
			lg.Warn(
				"proxy failed to rename cluster file",
				zap.String("path", clusterfile),
				zap.Error(err),
			)
			return clientURLs
		}
		if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
			lg.Info(
				"proxy updated peer URLs",
				zap.Strings("from", peerURLs),
				zap.Strings("to", gcls.PeerURLs()),
			)
		}
		peerURLs = gcls.PeerURLs()

		return clientURLs
	}
	ph := httpproxy.NewHandler(lg, pt, uf, time.Duration(cfg.cp.ProxyFailureWaitMs)*time.Millisecond, time.Duration(cfg.cp.ProxyRefreshIntervalMs)*time.Millisecond)
	ph = embed.WrapCORS(cfg.ec.CORS, ph)

	if cfg.isReadonlyProxy() {
		ph = httpproxy.NewReadonlyHandler(ph)
	}

	// setup self signed certs when serving https
	cHosts, cTLS := []string{}, false
	for _, u := range cfg.ec.LCUrls {
		cHosts = append(cHosts, u.Host)
		cTLS = cTLS || u.Scheme == "https"
	}
	for _, u := range cfg.ec.ACUrls {
		cHosts = append(cHosts, u.Host)
		cTLS = cTLS || u.Scheme == "https"
	}
	listenerTLS := cfg.ec.ClientTLSInfo
	if cfg.ec.ClientAutoTLS && cTLS {
		listenerTLS, err = transport.SelfCert(cfg.ec.GetLogger(), filepath.Join(cfg.ec.Dir, "clientCerts"), cHosts, cfg.ec.SelfSignedCertValidity)
		if err != nil {
			lg.Fatal("failed to initialize self-signed client cert", zap.Error(err))
		}
	}

	// Start a proxy etcd goroutine for each listen address
	for _, u := range cfg.ec.LCUrls {
		l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS)
		if err != nil {
			return err
		}

		host := u.String()
		go func() {
			lg.Info("v2 proxy started listening on client requests", zap.String("host", host))
			mux := http.NewServeMux()
			etcdhttp.HandlePrometheus(mux) // v2 proxy just uses the same port
			mux.Handle("/", ph)
			lg.Fatal("done serving", zap.Error(http.Serve(l, mux)))
		}()
	}
	return nil
}

// identifyDataDirOrDie inspects the data directory and returns its type
// (member, proxy, or empty); dies if the layout is invalid.
func identifyDataDirOrDie(lg *zap.Logger, dir string) dirType {
	names, err := fileutil.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			// A missing directory means a fresh node: classify as empty.
			return dirEmpty
		}
		lg.Fatal("未能列出数据目录", zap.String("dir", dir), zap.Error(err))
	}

	// Scan entries for the "member" and/or "proxy" subdirectories.
	var m, p bool
	for _, name := range names {
		switch dirType(name) {
		case dirMember:
			m = true
		case dirProxy:
			p = true
		default:
			// Unexpected entries are tolerated but flagged.
			lg.Warn("在数据目录下发现无效的文件", zap.String("filename", name), zap.String("data-dir", dir))
		}
	}

	// Having both member and proxy state is an unrecoverable layout error.
	if m && p {
		lg.Fatal("无效的数据目录,成员目录和代理目录都存在")
	}
	if m {
		return dirMember
	}
	if p {
		return dirProxy
	}
	return dirEmpty
}

// checkSupportArch verifies that the current GOARCH is supported; for other
// architectures it only proceeds when ETCD_UNSUPPORTED_ARCH explicitly names
// the running architecture, otherwise it exits the process.
func checkSupportArch() {
	if runtime.GOARCH == "amd64" ||
		runtime.GOARCH == "arm64" ||
		runtime.GOARCH == "ppc64le" ||
		runtime.GOARCH == "s390x" {
		return
	}
	// Unsupported architectures are opted into only via the environment
	// variable, so unset it here rather than parsing a flag.
	defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH")
	if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH {
		fmt.Printf("在不支持的体系结构上运行etcd%q 当 ETCD_UNSUPPORTED_ARCH 设置了\n", env)
		return
	}

	fmt.Printf("etcd在不支持ETCD_UNSUPPORTED_ARCH的平台上=%s set\n", runtime.GOARCH)
	os.Exit(1)
}
diff --git a/server/etcdmain/gateway.go b/etcd/etcdmain/gateway.go
similarity index 92%
rename from server/etcdmain/gateway.go
rename to etcd/etcdmain/gateway.go
index 64fb90df2c1..830f3e771fd 100644
--- a/server/etcdmain/gateway.go
+++ b/etcd/etcdmain/gateway.go
@@ -21,8 +21,7 @@ import (
 	"os"
 	"time"
 
-	"go.etcd.io/etcd/client/pkg/v3/logutil"
-	"go.etcd.io/etcd/server/v3/proxy/tcpproxy"
+	"github.com/ls-2018/etcd_cn/etcd/proxy/tcpproxy"
 
 	"github.com/spf13/cobra"
 	"go.uber.org/zap"
@@ -38,13 +37,11 @@ var (
 	gatewayCA string
 )
 
-var (
-	rootCmd = &cobra.Command{
-		Use:        "etcd",
-		Short:      "etcd server",
-		SuggestFor: []string{"etcd"},
-	}
-)
+var rootCmd = &cobra.Command{
+	Use:        "etcd",
+	Short:      "etcd etcd",
+	SuggestFor: []string{"etcd"},
+}
 
 func init() {
 	rootCmd.AddCommand(newGatewayCommand())
@@ -72,7 +69,7 @@ func newGatewayStartCommand()
*cobra.Command { cmd.Flags().StringVar(&gatewayDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster") cmd.Flags().StringVar(&gatewayDNSClusterServiceName, "discovery-srv-name", "", "service name to query when using DNS discovery") cmd.Flags().BoolVar(&gatewayInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records") - cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client server TLS CA file for verifying the discovered endpoints when discovery-srv is provided.") + cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client etcd TLS CA file for verifying the discovered endpoints when discovery-srv is provided.") cmd.Flags().StringSliceVar(&gatewayEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints") @@ -93,7 +90,8 @@ func stripSchema(eps []string) []string { } func startGateway(cmd *cobra.Command, args []string) { - lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel) + var lg *zap.Logger + lg, err := zap.NewProduction() if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) diff --git a/etcd/etcdmain/grpc_proxy.go b/etcd/etcdmain/grpc_proxy.go new file mode 100644 index 00000000000..a07c59e6dc0 --- /dev/null +++ b/etcd/etcdmain/grpc_proxy.go @@ -0,0 +1,493 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdmain + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "math" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/v3/leasing" + "github.com/ls-2018/etcd_cn/client_sdk/v3/namespace" + "github.com/ls-2018/etcd_cn/client_sdk/v3/ordering" + "github.com/ls-2018/etcd_cn/etcd/embed" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb" + "github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/debugutil" + "go.uber.org/zap/zapgrpc" + + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/soheilhy/cmux" + "github.com/spf13/cobra" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/keepalive" +) + +var ( + grpcProxyListenAddr string + grpcProxyMetricsListenAddr string + grpcProxyEndpoints []string + grpcProxyDNSCluster string + grpcProxyDNSClusterServiceName string + grpcProxyInsecureDiscovery bool + grpcProxyDataDir string + grpcMaxCallSendMsgSize int + grpcMaxCallRecvMsgSize int + + // tls for connecting to etcd + + grpcProxyCA string + grpcProxyCert string + grpcProxyKey string + grpcProxyInsecureSkipTLSVerify bool + + // tls for clients connecting to proxy + + grpcProxyListenCA string + grpcProxyListenCert string + grpcProxyListenKey string + grpcProxyListenAutoTLS bool + grpcProxyListenCRL string + selfSignedCertValidity uint + + grpcProxyAdvertiseClientURL string + grpcProxyResolverPrefix string + grpcProxyResolverTTL int + + grpcProxyNamespace string + grpcProxyLeasing string + + grpcProxyEnablePprof bool + grpcProxyEnableOrdering bool + + grpcProxyDebug bool + + // GRPC keep alive 
related options. + grpcKeepAliveMinTime time.Duration + grpcKeepAliveTimeout time.Duration + grpcKeepAliveInterval time.Duration +) + +const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024 + +func init() { + rootCmd.AddCommand(newGRPCProxyCommand()) +} + +// newGRPCProxyCommand returns the cobra command for "grpc-proxy". +func newGRPCProxyCommand() *cobra.Command { + lpc := &cobra.Command{ + Use: "grpc-proxy ", + Short: "grpc-proxy related command", + } + lpc.AddCommand(newGRPCProxyStartCommand()) + + return lpc +} + +func newGRPCProxyStartCommand() *cobra.Command { + cmd := cobra.Command{ + Use: "start", + Short: "start the grpc proxy", + Run: startGRPCProxy, + } + + cmd.Flags().StringVar(&grpcProxyListenAddr, "listen-addr", "127.0.0.1:23790", "listen address") + cmd.Flags().StringVar(&grpcProxyDNSCluster, "discovery-srv", "", "domain name to query for SRV records describing cluster endpoints") + cmd.Flags().StringVar(&grpcProxyDNSClusterServiceName, "discovery-srv-name", "", "service name to query when using DNS discovery") + cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for endpoint /metrics requests on an additional interface") + cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records") + cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints") + cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", "127.0.0.1:23790", "advertise address to register (must be reachable by client)") + cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)") + cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints") + cmd.Flags().StringVar(&grpcProxyNamespace, "namespace", "", "string to prefix to all keys for namespacing requests") + 
cmd.Flags().BoolVar(&grpcProxyEnablePprof, "enable-pprof", false, `Enable runtime profiling data via HTTP etcd. Address is at client URL + "/debug/pprof/"`) + cmd.Flags().StringVar(&grpcProxyDataDir, "data-dir", "default.proxy", "Data directory for persistent data") + cmd.Flags().IntVar(&grpcMaxCallSendMsgSize, "max-send-bytes", defaultGRPCMaxCallSendMsgSize, "message send limits in bytes (default value is 1.5 MiB)") + cmd.Flags().IntVar(&grpcMaxCallRecvMsgSize, "max-recv-bytes", math.MaxInt32, "message receive limits in bytes (default value is math.MaxInt32)") + cmd.Flags().DurationVar(&grpcKeepAliveMinTime, "grpc-keepalive-min-time", embed.DefaultGRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging proxy.") + cmd.Flags().DurationVar(&grpcKeepAliveInterval, "grpc-keepalive-interval", embed.DefaultGRPCKeepAliveInterval, "Frequency duration of etcd-to-client ping to check if a connection is alive (0 to disable).") + cmd.Flags().DurationVar(&grpcKeepAliveTimeout, "grpc-keepalive-timeout", embed.DefaultGRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).") + + // client TLS for connecting to etcd + cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file") + cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file") + cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle") + cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd etcd TLS certificates (CAUTION: this option should be enabled only for testing purposes)") + + // client TLS for connecting to proxy + cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file") + 
cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file") + cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle") + cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates") + cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.") + cmd.Flags().UintVar(&selfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the proxy certificates, unit is year") + + // experimental flags + cmd.Flags().BoolVar(&grpcProxyEnableOrdering, "experimental-serializable-ordering", false, "Ensure serializable reads have monotonically increasing store revisions across endpoints.") + cmd.Flags().StringVar(&grpcProxyLeasing, "experimental-leasing-prefix", "", "leasing metadata prefix for disconnected linearized reads.") + + cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.") + + return &cmd +} + +func startGRPCProxy(cmd *cobra.Command, args []string) { + checkArgs() + + lcfg := logutil.DefaultZapLoggerConfig + if grpcProxyDebug { + lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + grpc.EnableTracing = true + } + + lg, err := lcfg.Build() + if err != nil { + log.Fatal(err) + } + defer lg.Sync() + + grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) + + // The proxy itself (ListenCert) can have not-empty CN. + // The empty CN is required for grpcProxyCert. + // Please see https://github.com/etcd-io/etcd/issues/11970#issuecomment-687875315 for more context. 
+ tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey, false) + + if tlsinfo == nil && grpcProxyListenAutoTLS { + host := []string{"https://" + grpcProxyListenAddr} + dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy") + autoTLS, err := transport.SelfCert(lg, dir, host, selfSignedCertValidity) + if err != nil { + log.Fatal(err) + } + tlsinfo = &autoTLS + } + if tlsinfo != nil { + lg.Info("gRPC proxy etcd TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsinfo))) + } + m := mustListenCMux(lg, tlsinfo) + grpcl := m.Match(cmux.HTTP2()) + defer func() { + grpcl.Close() + lg.Info("stop listening gRPC proxy client requests", zap.String("address", grpcProxyListenAddr)) + }() + + client := mustNewClient(lg) + + // The proxy client is used for self-healthchecking. + // TODO: The mechanism should be refactored to use internal connection. + var proxyClient *clientv3.Client + if grpcProxyAdvertiseClientURL != "" { + proxyClient = mustNewProxyClient(lg, tlsinfo) + } + + srvhttp, httpl := mustHTTPListener(lg, m, tlsinfo, client, proxyClient) + errc := make(chan error, 3) + go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }() + go func() { errc <- srvhttp.Serve(httpl) }() + go func() { errc <- m.Serve() }() + if len(grpcProxyMetricsListenAddr) > 0 { + mhttpl := mustMetricsListener(lg, tlsinfo) + go func() { + mux := http.NewServeMux() + grpcproxy.HandleHealth(lg, mux, client) + grpcproxy.HandleProxyHealth(lg, mux, proxyClient) + lg.Info("gRPC proxy etcd metrics URL serving") + herr := http.Serve(mhttpl, mux) + if herr != nil { + lg.Fatal("gRPC proxy etcd metrics URL returned", zap.Error(herr)) + } else { + lg.Info("gRPC proxy etcd metrics URL returned") + } + }() + } + + lg.Info("started gRPC proxy", zap.String("address", grpcProxyListenAddr)) + + // grpc-proxy is initialized, ready to serve + notifySystemd(lg) + + fmt.Fprintln(os.Stderr, <-errc) + os.Exit(1) +} + +func checkArgs() { + if grpcProxyResolverPrefix != "" && 
grpcProxyResolverTTL < 1 { + fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-ttl %d", grpcProxyResolverTTL)) + os.Exit(1) + } + if grpcProxyResolverPrefix == "" && grpcProxyResolverTTL > 0 { + fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-prefix %q", grpcProxyResolverPrefix)) + os.Exit(1) + } + if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL > 0 && grpcProxyAdvertiseClientURL == "" { + fmt.Fprintln(os.Stderr, fmt.Errorf("invalid advertise-client-url %q", grpcProxyAdvertiseClientURL)) + os.Exit(1) + } + if grpcProxyListenAutoTLS && selfSignedCertValidity == 0 { + fmt.Fprintln(os.Stderr, fmt.Errorf("selfSignedCertValidity is invalid,it should be greater than 0")) + os.Exit(1) + } +} + +func mustNewClient(lg *zap.Logger) *clientv3.Client { + srvs := discoverEndpoints(lg, grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery, grpcProxyDNSClusterServiceName) + eps := srvs.Endpoints + if len(eps) == 0 { + eps = grpcProxyEndpoints + } + cfg, err := newClientCfg(lg, eps) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + cfg.DialOptions = append(cfg.DialOptions, + grpc.WithUnaryInterceptor(grpcproxy.AuthUnaryClientInterceptor)) + cfg.DialOptions = append(cfg.DialOptions, + grpc.WithStreamInterceptor(grpcproxy.AuthStreamClientInterceptor)) + cfg.Logger = lg.Named("client") + client, err := clientv3.New(*cfg) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + return client +} + +func mustNewProxyClient(lg *zap.Logger, tls *transport.TLSInfo) *clientv3.Client { + eps := []string{grpcProxyAdvertiseClientURL} + cfg, err := newProxyClientCfg(lg.Named("client"), eps, tls) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + client, err := clientv3.New(*cfg) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + lg.Info("create proxy client", zap.String("grpcProxyAdvertiseClientURL", grpcProxyAdvertiseClientURL)) + return client +} + +func newProxyClientCfg(lg *zap.Logger, eps []string, tls 
*transport.TLSInfo) (*clientv3.Config, error) { + cfg := clientv3.Config{ + Endpoints: eps, + DialTimeout: 5 * time.Second, + Logger: lg, + } + if tls != nil { + clientTLS, err := tls.ClientConfig() + if err != nil { + return nil, err + } + cfg.TLS = clientTLS + } + return &cfg, nil +} + +func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) { + // set tls if any one tls option set + cfg := clientv3.Config{ + Endpoints: eps, + DialTimeout: 5 * time.Second, + } + + if grpcMaxCallSendMsgSize > 0 { + cfg.MaxCallSendMsgSize = grpcMaxCallSendMsgSize + } + if grpcMaxCallRecvMsgSize > 0 { + cfg.MaxCallRecvMsgSize = grpcMaxCallRecvMsgSize + } + + tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey, true) + if tls == nil && grpcProxyInsecureSkipTLSVerify { + tls = &transport.TLSInfo{} + } + if tls != nil { + clientTLS, err := tls.ClientConfig() + if err != nil { + return nil, err + } + clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify + if clientTLS.InsecureSkipVerify { + lg.Warn("--insecure-skip-tls-verify was given, this grpc proxy process skips authentication of etcd etcd TLS certificates. 
This option should be enabled only for testing purposes.") + } + cfg.TLS = clientTLS + lg.Info("gRPC proxy client TLS", zap.String("tls-info", fmt.Sprintf("%+v", tls))) + } + return &cfg, nil +} + +func newTLS(ca, cert, key string, requireEmptyCN bool) *transport.TLSInfo { + if ca == "" && cert == "" && key == "" { + return nil + } + return &transport.TLSInfo{TrustedCAFile: ca, CertFile: cert, KeyFile: key, EmptyCN: requireEmptyCN} +} + +func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux { + l, err := net.Listen("tcp", grpcProxyListenAddr) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if tlsinfo != nil { + tlsinfo.CRLFile = grpcProxyListenCRL + if l, err = transport.NewTLSListener(l, tlsinfo); err != nil { + lg.Fatal("failed to create TLS listener", zap.Error(err)) + } + } + + lg.Info("listening for gRPC proxy client requests", zap.String("address", grpcProxyListenAddr)) + return cmux.New(l) +} + +func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server { + if grpcProxyEnableOrdering { + vf := ordering.NewOrderViolationSwitchEndpointClosure(client) + client.KV = ordering.NewKV(client.KV, vf) + lg.Info("waiting for linearized read from cluster to recover ordering") + for { + _, err := client.KV.Get(context.TODO(), "_", clientv3.WithKeysOnly()) + if err == nil { + break + } + lg.Warn("ordering recovery failed, retrying in 1s", zap.Error(err)) + time.Sleep(time.Second) + } + } + + if len(grpcProxyNamespace) > 0 { + client.KV = namespace.NewKV(client.KV, grpcProxyNamespace) + client.Watcher = namespace.NewWatcher(client.Watcher, grpcProxyNamespace) + client.Lease = namespace.NewLease(client.Lease, grpcProxyNamespace) + } + + if len(grpcProxyLeasing) > 0 { + client.KV, _, _ = leasing.NewKV(client, grpcProxyLeasing) + } + + kvp, _ := grpcproxy.NewKvProxy(client) + watchp, _ := 
grpcproxy.NewWatchProxy(client.Ctx(), lg, client) + if grpcProxyResolverPrefix != "" { + grpcproxy.Register(lg, client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL) + } + clusterp, _ := grpcproxy.NewClusterProxy(lg, client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix) + leasep, _ := grpcproxy.NewLeaseProxy(client.Ctx(), client) + + mainp := grpcproxy.NewMaintenanceProxy(client) + authp := grpcproxy.NewAuthProxy(client) + electionp := grpcproxy.NewElectionProxy(client) + lockp := grpcproxy.NewLockProxy(client) + + gopts := []grpc.ServerOption{ + grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), + grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), + grpc.MaxConcurrentStreams(math.MaxUint32), + } + if grpcKeepAliveMinTime > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: grpcKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if grpcKeepAliveInterval > time.Duration(0) || + grpcKeepAliveTimeout > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: grpcKeepAliveInterval, + Timeout: grpcKeepAliveTimeout, + })) + } + + server := grpc.NewServer(gopts...) 
+ + pb.RegisterKVServer(server, kvp) + pb.RegisterWatchServer(server, watchp) + pb.RegisterClusterServer(server, clusterp) + pb.RegisterLeaseServer(server, leasep) + pb.RegisterMaintenanceServer(server, mainp) + pb.RegisterAuthServer(server, authp) + v3electionpb.RegisterElectionServer(server, electionp) + v3lockpb.RegisterLockServer(server, lockp) + + return server +} + +func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client, proxy *clientv3.Client) (*http.Server, net.Listener) { + httpmux := http.NewServeMux() + httpmux.HandleFunc("/", http.NotFound) + grpcproxy.HandleHealth(lg, httpmux, c) + grpcproxy.HandleProxyHealth(lg, httpmux, proxy) + if grpcProxyEnablePprof { + for p, h := range debugutil.PProfHandlers() { + httpmux.Handle(p, h) + } + lg.Info("gRPC proxy enabled pprof", zap.String("path", debugutil.HTTPPrefixPProf)) + } + srvhttp := &http.Server{ + Handler: httpmux, + ErrorLog: log.New(ioutil.Discard, "net/http", 0), + } + + if tlsinfo == nil { + return srvhttp, m.Match(cmux.HTTP1()) + } + + srvTLS, err := tlsinfo.ServerConfig() + if err != nil { + lg.Fatal("failed to set up TLS", zap.Error(err)) + } + srvhttp.TLSConfig = srvTLS + return srvhttp, m.Match(cmux.Any()) +} + +func mustMetricsListener(lg *zap.Logger, tlsinfo *transport.TLSInfo) net.Listener { + murl, err := url.Parse(grpcProxyMetricsListenAddr) + if err != nil { + fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr) + os.Exit(1) + } + ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsinfo) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + lg.Info("gRPC proxy listening for metrics", zap.String("address", murl.String())) + return ml +} diff --git a/etcd/etcdmain/help.go b/etcd/etcdmain/help.go new file mode 100644 index 00000000000..540f8e644d0 --- /dev/null +++ b/etcd/etcdmain/help.go @@ -0,0 +1,256 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdmain + +import ( + "fmt" + "strconv" + + cconfig "github.com/ls-2018/etcd_cn/etcd/config" + "github.com/ls-2018/etcd_cn/etcd/embed" + "golang.org/x/crypto/bcrypt" +) + +var ( + usageline = `Usage: + + etcd [flags] + Start an etcd etcd. + + etcd --version + Show the version of etcd. + + etcd -h | --help + Show the help information about etcd. + + etcd --config-file + Path to the etcd configuration file. Note that if a configuration file is provided, other command line flags and environment variables will be ignored. + + etcd gateway + 启动 L4 TCP网关代理 + + etcd grpc-proxy + L7 grpc 代理 +` + flagsline = ` +Member: + --name 'default' + 本节点.人类可读的名字 + --data-dir '${name}.etcd' + 服务运行数据保存的路径. ${name}.etcd + --wal-dir '' + 专用wal目录的路径.默认值:--data-dir的路径下 + --snapshot-count '100000' + 触发快照到磁盘的已提交事务数. + --heartbeat-interval '100' + 心跳间隔 100ms + --election-timeout '1000' + 选举超时 + --initial-election-tick-advance 'true' + 是否提前初始化选举时钟启动,以便更快的选举 + --listen-peer-urls 'http://localhost:2380' + 和成员之间通信的地址.用于监听其他etcd member的url + --listen-client-urls 'http://localhost:2379' + List of URLs to listen on for client traffic. + --max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `' + 要保留的最大快照文件数(0表示不受限制).5 + --max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `' + 要保留的最大wal文件数(0表示不受限制). 5 + --quota-backend-bytes '0' + 当后端大小超过给定配额时(0默认为低空间配额).引发警报. + --backend-bbolt-freelist-type 'map' + BackendFreelistType指定boltdb后端使用的freelist的类型(array and map是支持的类型). 
map + --backend-batch-interval '' + BackendBatchInterval是提交后端事务前的最长时间. + --backend-batch-limit '0' + BackendBatchLimit是提交后端事务前的最大操作数. + --max-txn-ops '128' + 事务中允许的最大操作数. + --max-request-bytes '1572864' + 服务器将接受的最大客户端请求大小(字节). + --grpc-keepalive-min-time '5s' + 客户端在ping服务器之前应等待的最短持续时间间隔. + --grpc-keepalive-interval '2h' + 服务器到客户端ping的频率持续时间.以检查连接是否处于活动状态(0表示禁用). + --grpc-keepalive-timeout '20s' + 关闭非响应连接之前的额外持续等待时间(0表示禁用).20s + --socket-reuse-port 'false' + 启用在listener上设置套接字选项SO_REUSEPORT.允许重新绑定一个已经在使用的端口.false + --socket-reuse-address 'false' + 启用在listener上设置套接字选项SO_REUSEADDR 允许重新绑定一个已经在使用的端口 在TIME_WAIT 状态. + +Clustering: + --initial-advertise-peer-urls 'http://localhost:2380' + 集群成员的 URL地址.且会通告群集的其余成员节点. + --initial-cluster 'default=http://localhost:2380' + 集群中所有节点的信息. + --initial-cluster-state 'new' + 初始集群状态 ('new' or 'existing'). + --initial-cluster-token 'etcd-cluster' + 创建集群的 token.这个值每个集群保持唯一. + --advertise-client-urls 'http://localhost:2379' + 监听client的请求 + The client URLs advertised should be accessible to machines that talk to etcd cluster. etcd client libraries parse these URLs to connect to the cluster. + --discovery '' + 用于引导群集的发现URL. + --discovery-fallback 'proxy' + 发现服务失败时的预期行为("退出"或"代理")."proxy"仅支持v2 API. %q + --discovery-proxy '' + 用于流量到发现服务的HTTP代理. + --discovery-srv '' + DNS srv域用于引导群集. + --discovery-srv-name '' + 使用DNS引导时查询的DNS srv名称的后缀. + --strict-reconfig-check '` + strconv.FormatBool(embed.DefaultStrictReconfigCheck) + `' + 拒绝可能导致仲裁丢失的重新配置请求.true + --pre-vote 'true' + Enable to run an additional Raft election phase. + --auto-compaction-retention '0' + Auto compaction retention length. 0 means disable auto compaction. + --auto-compaction-mode 'periodic' + Interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention. 
+ --v2-deprecation '` + string(cconfig.V2_DEPR_DEFAULT) + `' + Phase of v2store deprecation. Allows to opt-in for higher compatibility mode. + Supported values: + 'not-yet' // Issues a warning if v2store have meaningful content (default in v3.5) + 'write-only' // Custom v2 state is not allowed (planned default in v3.6) + 'write-only-drop-data' // Custom v2 state will get DELETED ! + 'gone' // v2store is not maintained any longer. (planned default in v3.7) + +Security: + --cert-file '' + 客户端证书 + --key-file '' + 客户端私钥 + --client-cert-auth 'false' + 启用客户端证书验证;默认false + --client-crl-file '' + 客户端证书吊销列表文件的路径. + --client-cert-allowed-hostname '' + 允许客户端证书认证使用TLS主机名 + --trusted-ca-file '' + 客户端etcd通信 的可信CA证书文件 + --auto-tls 'false' + 节点之间使用生成的证书通信;默认false + --peer-cert-file '' + 证书路径 + --peer-key-file '' + 私钥路径 + --peer-client-cert-auth 'false' + 启用server客户端证书验证;默认false + --peer-trusted-ca-file '' + 服务器端ca证书 + --peer-cert-allowed-cn '' + 允许的server客户端证书CommonName + --peer-cert-allowed-hostname '' + 允许的server客户端证书hostname + --peer-auto-tls 'false' + 节点之间使用生成的证书通信;默认false + --self-signed-cert-validity '1' + 客户端证书和同级证书的有效期,单位为年 ;etcd自动生成的 如果指定了ClientAutoTLS and PeerAutoTLS, + --peer-crl-file '' + 服务端证书吊销列表文件的路径. + --cipher-suites '' + 客户端/etcds之间支持的TLS加密套件的逗号分隔列表(空将由Go自动填充). + --cors '*' + Comma-separated whitelist of origins for CORS, or cross-origin resource sharing, (empty or * means allow all). + --host-whitelist '*' + Acceptable hostnames from HTTP client requests, if etcd is not secure (empty or * means allow all). + +Auth: + --auth-token 'simple' + 指定验证令牌的具体选项. ('simple' or 'jwt') + --bcrypt-cost ` + fmt.Sprintf("%d", bcrypt.DefaultCost) + ` + 为散列身份验证密码指定bcrypt算法的成本/强度.有效值介于4和31之间. 
+ --auth-token-ttl 300 + token过期时间 + +Profiling and Monitoring: + --enable-pprof 'false' + 通过HTTP服务器启用运行时分析数据.地址位于客户端URL +"/ debug / pprof /" + --metrics 'basic' + 设置导出的指标的详细程度,指定"扩展"以包括直方图指标(extensive,basic) + --listen-metrics-urls '' + List of URLs to listen on for the metrics and health endpoints. + +Logging: + --logger 'zap' + Currently only supports 'zap' for structured logging. + --log-outputs 'default' + 指定'stdout'或'stderr'以跳过日志记录,即使在systemd或逗号分隔的输出目标列表下运行也是如此. + --log-level 'info' + 日志等级,只支持 debug, info, warn, error, panic, or fatal. Default 'info'. + --enable-log-rotation 'false' + 启用单个日志输出文件目标的日志旋转. + --log-rotation-config-json '{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}' + 是用于日志轮换的默认配置. 默认情况下,日志轮换是禁用的. MaxSize(MB), MaxAge(days,0=no limit), MaxBackups(0=no limit), LocalTime(use computers local time), Compress(gzip)". + +Experimental distributed tracing: + --experimental-enable-distributed-tracing 'false' + Enable experimental distributed tracing. + --experimental-distributed-tracing-address 'localhost:4317' + Distributed tracing collector address. + --experimental-distributed-tracing-service-name 'etcd' + Distributed tracing service name,必须是same across all etcd instances. + --experimental-distributed-tracing-instance-id '' + Distributed tracing instance ID,必须是unique per each etcd instance. + +v2 Proxy (to be deprecated in v3.6): + --proxy 'off' + 代理模式设置 ('off', 'readonly' or 'on'). + --proxy-failure-wait 5000 + 在重新考虑代理请求之前.endpoints 将处于失败状态的时间(以毫秒为单位). + --proxy-refresh-interval 30000 + endpoints 刷新间隔的时间(以毫秒为单位). + --proxy-dial-timeout 1000 + 拨号超时的时间(以毫秒为单位)或0表示禁用超时 + --proxy-write-timeout 5000 + 写入超时的时间(以毫秒为单位)或0以禁用超时. + --proxy-read-timeout 0 + 读取超时的时间(以毫秒为单位)或0以禁用超时. + +Experimental feature: + --experimental-initial-corrupt-check 'false' + Enable to check data corruption before serving any client/peer traffic. + --experimental-corrupt-check-time '0s' + Duration of time between cluster corruption check passes. 
+ --experimental-enable-v2v3 '' + Serve v2 requests through the v3 backend under a given prefix. Deprecated and to be decommissioned in v3.6. + --experimental-enable-lease-checkpoint 'false' + ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases. + --experimental-compaction-batch-limit 1000 + ExperimentalCompactionBatchLimit sets the maximum revisions deleted in each compaction batch. + --experimental-peer-skip-client-san-verification 'false' + 跳过server 客户端证书中SAN字段的验证.默认false + --experimental-watch-progress-notify-interval '10m' + Duration of periodic watch progress notifications. + --experimental-warning-apply-duration '100ms' + 时间长度.如果应用请求的时间超过这个值.就会产生一个警告. + --experimental-txn-mode-write-with-shared-buffer 'true' + 启用写事务在其只读检查操作中使用共享缓冲区. + --experimental-bootstrap-defrag-threshold-megabytes + Enable the defrag during etcd etcd bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect. + +Unsafe feature: + --force-new-cluster 'false' + 强制创建新的单成员群集.它提交配置更改,强制删除集群中的所有现有成员并添加自身.需要将其设置为还原备份. + --unsafe-no-fsync 'false' + 禁用fsync,不安全,会导致数据丢失. + +CAUTIOUS with unsafe flag! It may break the guarantees given by the consensus protocol! +` +) + +// Add back "TO BE DEPRECATED" section if needed diff --git a/etcd/etcdmain/main.go b/etcd/etcdmain/main.go new file mode 100644 index 00000000000..9399620a594 --- /dev/null +++ b/etcd/etcdmain/main.go @@ -0,0 +1,54 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdmain + +import ( + "fmt" + "os" + + "github.com/coreos/go-systemd/v22/daemon" + "go.uber.org/zap" +) + +func Main(args []string) { + checkSupportArch() // 检查系统是否支持 + + if len(args) > 1 { + cmd := args[1] + switch cmd { + case "gateway", "grpc-proxy": + if err := rootCmd.Execute(); err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + return + } + } + + startEtcdOrProxy(args) +} + +func notifySystemd(lg *zap.Logger) { + if lg == nil { + lg = zap.NewExample() + } + lg.Info("通知init守护进程") + _, err := daemon.SdNotify(false, daemon.SdNotifyReady) + if err != nil { + lg.Error("未能通知 systemd 准备就绪", zap.Error(err)) + return + } + lg.Info("成功地通知了init守护程序") +} diff --git a/etcd/etcdmain/util.go b/etcd/etcdmain/util.go new file mode 100644 index 00000000000..68b42ebeb57 --- /dev/null +++ b/etcd/etcdmain/util.go @@ -0,0 +1,97 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdmain + +import ( + "fmt" + "os" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/srv" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + + "go.uber.org/zap" +) + +func discoverEndpoints(lg *zap.Logger, dns string, ca string, insecure bool, serviceName string) (s srv.SRVClients) { + if dns == "" { + return s + } + srvs, err := srv.GetClient("etcd-client", dns, serviceName) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + endpoints := srvs.Endpoints + + if lg != nil { + lg.Info( + "discovered cluster from SRV", + zap.String("srv-etcd", dns), + zap.Strings("endpoints", endpoints), + ) + } + + if insecure { + return *srvs + } + // confirm TLS connections are good + tlsInfo := transport.TLSInfo{ + TrustedCAFile: ca, + ServerName: dns, + } + + if lg != nil { + lg.Info( + "validating discovered SRV endpoints", + zap.String("srv-etcd", dns), + zap.Strings("endpoints", endpoints), + ) + } + + endpoints, err = transport.ValidateSecureEndpoints(tlsInfo, endpoints) + if err != nil { + if lg != nil { + lg.Warn( + "failed to validate discovered endpoints", + zap.String("srv-etcd", dns), + zap.Strings("endpoints", endpoints), + zap.Error(err), + ) + } + } else { + if lg != nil { + lg.Info( + "using validated discovered SRV endpoints", + zap.String("srv-etcd", dns), + zap.Strings("endpoints", endpoints), + ) + } + } + + // map endpoints back to SRVClients struct with SRV data + eps := make(map[string]struct{}) + for _, ep := range endpoints { + eps[ep] = struct{}{} + } + for i := range srvs.Endpoints { + if _, ok := eps[srvs.Endpoints[i]]; !ok { + continue + } + s.Endpoints = append(s.Endpoints, srvs.Endpoints[i]) + s.SRVs = append(s.SRVs, srvs.SRVs[i]) + } + + return s +} diff --git a/etcd/etcdserver/api/capability.go b/etcd/etcdserver/api/capability.go new file mode 100644 index 00000000000..95bec6aef4c --- /dev/null +++ b/etcd/etcdserver/api/capability.go @@ -0,0 +1,88 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package api + +import ( + "sync" + + "github.com/coreos/go-semver/semver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + "go.uber.org/zap" +) + +type Capability string + +const ( + AuthCapability Capability = "auth" + V3rpcCapability Capability = "v3rpc" +) + +var ( + // capabilityMaps is a static map of version to capability map. + capabilityMaps = map[string]map[Capability]bool{ + "3.0.0": {AuthCapability: true, V3rpcCapability: true}, + "3.1.0": {AuthCapability: true, V3rpcCapability: true}, + "3.2.0": {AuthCapability: true, V3rpcCapability: true}, + "3.3.0": {AuthCapability: true, V3rpcCapability: true}, + "3.4.0": {AuthCapability: true, V3rpcCapability: true}, + "3.5.0": {AuthCapability: true, V3rpcCapability: true}, + } + + enableMapMu sync.RWMutex + // enabledMap points to a map in capabilityMaps + enabledMap map[Capability]bool + + curVersion *semver.Version +) + +func init() { + enabledMap = map[Capability]bool{ + AuthCapability: true, // auth + V3rpcCapability: true, // v3rpc + } +} + +// UpdateCapability 当集群的版本增加时,更新enabledMap. 
+func UpdateCapability(lg *zap.Logger, v *semver.Version) { + if v == nil { + // if recovered but version was never set by cluster + return + } + enableMapMu.Lock() + if curVersion != nil && !membership.IsValidVersionChange(v, curVersion) { + enableMapMu.Unlock() + return + } + curVersion = v + enabledMap = capabilityMaps[curVersion.String()] + enableMapMu.Unlock() + + if lg != nil { + lg.Info( + "enabled capabilities for version", + zap.String("cluster-version", version.Cluster(v.String())), + ) + } +} + +func IsCapabilityEnabled(c Capability) bool { + enableMapMu.RLock() + defer enableMapMu.RUnlock() + if enabledMap == nil { + return false + } + return enabledMap[c] +} diff --git a/etcd/etcdserver/api/etcdhttp/a.go b/etcd/etcdserver/api/etcdhttp/a.go new file mode 100644 index 00000000000..b764db7ba9d --- /dev/null +++ b/etcd/etcdserver/api/etcdhttp/a.go @@ -0,0 +1,205 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdhttp + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/raft" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" +) + +const ( + PathMetrics = "/metrics" + PathHealth = "/health" + PathProxyMetrics = "/proxy/metrics" + PathProxyHealth = "/proxy/health" +) + +// HandleMetricsHealth registers metrics and health handlers. +func HandleMetricsHealth(lg *zap.Logger, mux *http.ServeMux, srv etcdserver.ServerV2) { + mux.Handle(PathMetrics, promhttp.Handler()) + mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health { return checkV2Health(lg, srv, excludedAlarms) })) +} + +// HandleMetricsHealthForV3 registers metrics and health handlers. it checks health by using v3 range request +// and its corresponding timeout. +func HandleMetricsHealthForV3(lg *zap.Logger, mux *http.ServeMux, srv *etcdserver.EtcdServer) { + mux.Handle(PathMetrics, promhttp.Handler()) + mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health { + return checkV3Health(lg, srv, excludedAlarms, true) + })) +} + +// HandlePrometheus registers prometheus handler on '/metrics'. +func HandlePrometheus(mux *http.ServeMux) { + mux.Handle(PathMetrics, promhttp.Handler()) +} + +// NewHealthHandler handles '/health' requests. 
+func NewHealthHandler(lg *zap.Logger, hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.Header().Set("Allow", http.MethodGet) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + lg.Warn("/health error", zap.Int("status-code", http.StatusMethodNotAllowed)) + return + } + excludedAlarms := getExcludedAlarms(r) + // Passing the query parameter "serializable=true" ensures that the + // health of the local etcd is checked vs the health of the cluster. + // This is useful for probes attempting to validate the liveness of + // the etcd process vs readiness of the cluster to serve requests. + // serializableFlag := getSerializableFlag(r) + h := hfunc(excludedAlarms) + defer func() { + if h.Health == "true" { + healthSuccess.Inc() + } else { + healthFailed.Inc() + } + }() + d, _ := json.Marshal(h) + if h.Health != "true" { + http.Error(w, string(d), http.StatusServiceUnavailable) + lg.Warn("/health error", zap.String("output", string(d)), zap.Int("status-code", http.StatusServiceUnavailable)) + return + } + w.WriteHeader(http.StatusOK) + w.Write(d) + lg.Debug("/health OK", zap.Int("status-code", http.StatusOK)) + } +} + +var ( + healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "health_success", + Help: "The total number of successful health checks", + }) + healthFailed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "health_failures", + Help: "The total number of failed health checks", + }) +) + +func init() { + prometheus.MustRegister(healthSuccess) + prometheus.MustRegister(healthFailed) +} + +// Health defines etcd server health status. 
// TODO: remove manual parsing in etcdctl cluster-health
type Health struct {
	// Health is "true" when the server considers itself healthy, "false" otherwise.
	Health string `json:"health"`
	// Reason explains an unhealthy status; empty when healthy.
	Reason string `json:"reason"`
}

// AlarmSet is a set of alarm names to be excluded from health evaluation.
type AlarmSet map[string]struct{}

// getExcludedAlarms parses the repeated "exclude" query parameters of the
// request into an AlarmSet; empty values are skipped.
func getExcludedAlarms(r *http.Request) (alarms AlarmSet) {
	alarms = make(map[string]struct{}, 2)
	alms, found := r.URL.Query()["exclude"]
	if found {
		for _, alm := range alms {
			if len(alm) == 0 {
				continue
			}
			alarms[alm] = struct{}{}
		}
	}
	return alarms
}

// checkHealth evaluates the conditions shared by the v2 and v3 checks:
// any active (non-excluded) alarm makes the server unhealthy, and — unless a
// serializable (local-only) check was requested — so does a missing leader.
func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet, serializable bool) Health {
	h := Health{}
	h.Health = "true"
	as := srv.Alarms()
	if len(as) > 0 {
		for _, v := range as {
			alarmName := v.Alarm.String()
			if _, found := excludedAlarms[alarmName]; found {
				lg.Debug("/health excluded alarm", zap.String("alarm", v.String()))
				continue
			}

			// First non-excluded alarm decides the verdict.
			h.Health = "false"
			switch v.Alarm {
			case etcdserverpb.AlarmType_NOSPACE:
				h.Reason = "ALARM NOSPACE"
			case etcdserverpb.AlarmType_CORRUPT:
				h.Reason = "ALARM CORRUPT"
			default:
				h.Reason = "ALARM UNKNOWN"
			}
			lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String()))
			return h
		}
	}

	// A cluster-level (linearizable) check fails when no leader is known.
	if !serializable && (uint64(srv.Leader()) == raft.None) {
		h.Health = "false"
		h.Reason = "RAFT NO LEADER"
		lg.Warn("serving /health false; no leader")
		return h
	}
	return h
}

// checkV2Health runs the common checks and then verifies the v2 store serves
// reads by issuing a QGET with a one-second timeout.
func checkV2Health(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet) (h Health) {
	if h = checkHealth(lg, srv, excludedAlarms, false); h.Health != "true" {
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	_, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
	cancel()
	if err != nil {
		h.Health = "false"
		h.Reason = fmt.Sprintf("QGET ERROR:%s", err)
		lg.Warn("serving /health false; QGET fails", zap.Error(err))
		return
	}
	lg.Debug("serving /health true")
	return
}

// checkV3Health runs the common checks and then verifies the v3 KV store
// serves reads with a one-key Range. Auth errors are tolerated: they prove
// the server is responsive even though this caller lacks permissions.
func checkV3Health(lg *zap.Logger, srv *etcdserver.EtcdServer, excludedAlarms AlarmSet,
	serializable bool) (h Health) {
	if h = checkHealth(lg, srv, excludedAlarms, serializable); h.Health != "true" {
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), srv.Cfg.ReqTimeout())
	_, err := srv.Range(ctx, &etcdserverpb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable})
	cancel()
	if err != nil && err != auth.ErrUserEmpty && err != auth.ErrPermissionDenied {
		h.Health = "false"
		h.Reason = fmt.Sprintf("RANGE ERROR:%s", err)
		lg.Warn("serving /health false; Range fails", zap.Error(err))
		return
	}
	lg.Debug("serving /health true")
	return
}
+ +package etcdhttp + +import ( + "net/http" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp" + "github.com/ls-2018/etcd_cn/etcd/lease/leasehttp" + + "go.uber.org/zap" +) + +const ( + peerMembersPath = "/members" + peerMemberPromotePrefix = "/members/promote/" +) + +// NewPeerHandler 生成 http.Handler 处理客户端请求 +func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeerV2) http.Handler { + return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler(), s.HashKVHandler(), s.DowngradeEnabledHandler()) +} + +func newPeerHandler(lg *zap.Logger, s etcdserver.Server, raftHandler http.Handler, + leaseHandler http.Handler, hashKVHandler http.Handler, downgradeEnabledHandler http.Handler, +) http.Handler { + if lg == nil { + lg = zap.NewNop() + } + peerMembersHandler := newPeerMembersHandler(lg, s.Cluster()) // ✅ + peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s) // ✅ + + mux := http.NewServeMux() + mux.HandleFunc("/", http.NotFound) + mux.Handle(rafthttp.RaftPrefix, raftHandler) // /raft + mux.Handle(rafthttp.RaftPrefix+"/", raftHandler) // + mux.Handle(peerMembersPath, peerMembersHandler) // /members + mux.Handle(peerMemberPromotePrefix, peerMemberPromoteHandler) // /members/promote + if leaseHandler != nil { + mux.Handle(leasehttp.LeasePrefix, leaseHandler) // /leases + mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler) // /leases/internal + } + if downgradeEnabledHandler != nil { + mux.Handle(etcdserver.DowngradeEnabledPath, downgradeEnabledHandler) // /downgrade/enabled + } + if hashKVHandler != nil { + mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler) // /members/hashkv + } + mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion)) + return mux +} diff --git a/etcd/etcdserver/api/etcdhttp/over_base.go b/etcd/etcdserver/api/etcdhttp/over_base.go new file mode 100644 index 00000000000..e1fe8f338e7 --- /dev/null +++ b/etcd/etcdserver/api/etcdhttp/over_base.go @@ -0,0 +1,144 @@ 
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdhttp

import (
	"encoding/json"
	"expvar"
	"fmt"
	"net/http"

	"github.com/ls-2018/etcd_cn/etcd/etcdserver"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
	"github.com/ls-2018/etcd_cn/offical/api/v3/version"
	"go.uber.org/zap"
)

const (
	varsPath    = "/debug/vars"
	versionPath = "/version"
)

// HandleBasic adds handlers to mux serving the JSON etcd client requests
// that do not touch the v2 store.
func HandleBasic(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerPeer) {
	mux.HandleFunc(varsPath, serveVars)
	mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) // e.g. {"etcdserver":"3.5.2","etcdcluster":"3.5.0"}
}

// serveVersion writes the server and cluster versions as a JSON object.
func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
	if !allowMethod(w, r, "GET") {
		return
	}
	vs := version.Versions{
		Server:  version.Version,
		Cluster: clusterV,
	}
	// clusterV = server.Cluster().String()
	// e.g. {"etcdserver":"3.5.2","etcdcluster":"3.5.0"}

	w.Header().Set("Content-Type", "application/json")
	b, err := json.Marshal(&vs)
	if err != nil {
		panic(fmt.Sprintf("序列化失败 (%v)", err))
	}
	w.Write(b)
}

// serveVars dumps all expvar-published variables as one JSON object.
func serveVars(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r, "GET") {
		return
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprintf(w, "{\n")
	first := true
	expvar.Do(func(kv expvar.KeyValue) { // expvar.Do serializes access to the variable map
		if !first {
			fmt.Fprintf(w, ",\n")
		}
		first = false
		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
	})
	fmt.Fprintf(w, "\n}\n")
}

// allowMethod reports whether the request method equals m; when it does not,
// it writes 405 with an Allow header and returns false.
func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
	if m == r.Method {
		return true
	}
	w.Header().Set("Allow", m)
	http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
	return false
}

// WriteError maps an internal error to the appropriate v2 HTTP response:
// typed v2/http errors write themselves; a short list of well-known server
// errors is logged as expected; everything else becomes a 500.
func WriteError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
	if err == nil {
		return
	}
	switch e := err.(type) {
	case *v2error.Error:
		e.WriteTo(w)

	case *httptypes.HTTPError:
		if et := e.WriteTo(w); et != nil {
			if lg != nil {
				lg.Debug(
					"写失败 v2 HTTP",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-etcd-error", e.Error()),
					zap.Error(et),
				)
			}
		}

	default:
		switch err {
		case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers,
			etcdserver.ErrUnhealthy:
			if lg != nil {
				lg.Warn(
					"v2 响应错误",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-etcd-error", err.Error()),
				)
			}

		default:
			if lg != nil {
				lg.Warn(
					"未知的v2响应错误",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-etcd-error", err.Error()),
				)
			}
		}

		herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
		if et := herr.WriteTo(w); et != nil {
			if lg != nil {
				lg.Debug(
					"写失败 v2 HTTP",
					zap.String("remote-addr", r.RemoteAddr),
					zap.String("internal-etcd-error", err.Error()),
					zap.Error(et),
				)
			}
		}
	}
}

package etcdhttp

import (
	"encoding/json"
	"net/http"

	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
	"go.uber.org/zap"
)

// newPeerMembersHandler returns the handler serving GET /members to peers.
func newPeerMembersHandler(lg *zap.Logger, cluster api.Cluster) http.Handler {
	return &peerMembersHandler{
		lg:      lg,
		cluster: cluster,
	}
}

// ServeHTTP replies with the JSON-encoded list of current cluster members.
func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r, "GET") {
		return
	}
	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())

	if r.URL.Path != peerMembersPath {
		http.Error(w, "bad path", http.StatusBadRequest)
		return
	}
	ms := h.cluster.Members()
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(ms); err != nil {
		h.lg.Warn("编码成员信息失败", zap.Error(err))
	}
}

package etcdhttp

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
	"go.uber.org/zap"
)

// peerMembersHandler serves the /members peer endpoint.
type peerMembersHandler struct {
	lg      *zap.Logger
	cluster api.Cluster
}

// newPeerMemberPromoteHandler returns the handler serving
// POST /members/promote/<id> to peers.
func newPeerMemberPromoteHandler(lg *zap.Logger, s etcdserver.Server) http.Handler {
	return &peerMemberPromoteHandler{
		lg:      lg,
		cluster: s.Cluster(),
		server:  s,
	}
}

type peerMemberPromoteHandler struct {
	lg      *zap.Logger
	cluster api.Cluster
	server  etcdserver.Server
}

// ServeHTTP promotes the learner member whose decimal ID follows the
// /members/promote/ prefix, mapping well-known promotion failures onto
// specific HTTP status codes.
func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r, "POST") {
		return
	}
	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())

	if !strings.HasPrefix(r.URL.Path, peerMemberPromotePrefix) {
		http.Error(w, "bad path", http.StatusBadRequest)
		return
	}
	idStr := strings.TrimPrefix(r.URL.Path, peerMemberPromotePrefix)
	id, err := strconv.ParseUint(idStr, 10, 64)
	if err != nil {
		http.Error(w, fmt.Sprintf("成员 %s 不在集群中", idStr), http.StatusNotFound)
		return
	}

	resp, err := h.server.PromoteMember(r.Context(), id)
	if err != nil {
		switch err {
		case membership.ErrIDNotFound:
			http.Error(w, err.Error(), http.StatusNotFound)
		case membership.ErrMemberNotLearner:
			http.Error(w, err.Error(), http.StatusPreconditionFailed)
		case etcdserver.ErrLearnerNotReady:
			http.Error(w, err.Error(), http.StatusPreconditionFailed)
		default:
			WriteError(h.lg, w, r, err)
		}
		h.lg.Warn("提升成员失败", zap.String("member-id", types.ID(id).String()), zap.Error(err))
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		h.lg.Warn("编码成员信息失败", zap.Error(err))
	}
}

package etcdhttp

import (
	"net/http"

	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
)

// versionHandler wraps fn, passing it the decided cluster version string, or
// "not_decided" while the cluster version is not yet known.
func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		v := c.Version()
		if v != nil {
			fn(w, r, v.String())
		} else {
			fn(w, r, "not_decided")
		}
	}
}
+ +package membership + +import ( + "errors" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" +) + +var ( + ErrIDRemoved = errors.New("membership: ID 已移除") + ErrIDExists = errors.New("membership: ID 存在") + ErrIDNotFound = errors.New("membership: ID 没有找到") + ErrPeerURLexists = errors.New("membership: peerURL 已存在") + ErrMemberNotLearner = errors.New("membership: 只能提升一个learner成员") + ErrTooManyLearners = errors.New("membership: 集群中成员太多") +) + +func isKeyNotFound(err error) bool { + e, ok := err.(*v2error.Error) + return ok && e.ErrorCode == v2error.EcodeKeyNotFound +} diff --git a/etcd/etcdserver/api/membership/over_cluster.go b/etcd/etcdserver/api/membership/over_cluster.go new file mode 100644 index 00000000000..8eaf6be6a53 --- /dev/null +++ b/etcd/etcdserver/api/membership/over_cluster.go @@ -0,0 +1,661 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package membership + +import ( + "bytes" + "context" + "crypto/sha1" + "encoding/binary" + "encoding/json" + "fmt" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + "github.com/ls-2018/etcd_cn/pkg/netutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +const maxLearners = 1 + +// RaftCluster raft集群成员 +type RaftCluster struct { + lg *zap.Logger + localID types.ID // 本机节点ID + cid types.ID // 集群ID,根据所有初始 memberID hash 得到的 + v2store v2store.Store // 内存里面的一个树形node结构 + be backend.Backend // + sync.Mutex // 守住下面的字段 + version *semver.Version // + members map[types.ID]*Member // + removed map[types.ID]bool // 记录被删除的节点ID,删除后的节点无法重用 + downgradeInfo *DowngradeInfo // 降级信息 +} + +type ConfigChangeContext struct { + Member + IsPromote bool `json:"isPromote"` // 是否提升learner +} + +type ShouldApplyV3 bool + +const ( + ApplyBoth = ShouldApplyV3(true) + ApplyV2storeOnly = ShouldApplyV3(false) +) + +// Recover 接收到快照之后,会调用此函数 +func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { + c.Lock() + defer c.Unlock() + + if c.v2store != nil { + c.version = clusterVersionFromStore(c.lg, c.v2store) + c.members, c.removed = membersFromStore(c.lg, c.v2store) + } else { + c.version = clusterVersionFromBackend(c.lg, c.be) + c.members, c.removed = membersFromBackend(c.lg, c.be) + } + + if c.be != nil { + c.downgradeInfo = downgradeInfoFromBackend(c.lg, c.be) + } + d := &DowngradeInfo{Enabled: false} + if c.downgradeInfo != nil { + d = &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} + } + mustDetectDowngrade(c.lg, c.version, d) // 检测版本降级 + onSet(c.lg, c.version) + + for _, m 
:= range c.members { + c.lg.Info( + "从store中恢复/增加成员", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("recovered-remote-peer-id", m.ID.String()), zap.Strings("recovered-remote-peer-urls", m.PeerURLs), + ) + } + if c.version != nil { + c.lg.Info("从store获取version,并设置", zap.String("cluster-version", version.Cluster(c.version.String()))) + } +} + +// NewClusterFromMembers 从远端节点获取到的集群节点信息 +func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCluster { + c := NewCluster(lg) + c.cid = id + for _, m := range membs { + c.members[m.ID] = m + } + return c +} + +// UpdateAttributes 更新成员属性 +func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApplyV3 ShouldApplyV3) { + c.Lock() + defer c.Unlock() + + if m, ok := c.members[id]; ok { + m.Attributes = attr + if c.v2store != nil { + mustUpdateMemberAttrInStore(c.lg, c.v2store, m) + } + if c.be != nil && shouldApplyV3 { + unsafeSaveMemberToBackend(c.lg, c.be, m) + } + return + } + + _, ok := c.removed[id] + if !ok { + c.lg.Panic("更新失败", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("unknown-remote-peer-id", id.String())) + } + + c.lg.Warn("移除的成员 不进行属性更新", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("updated-peer-id", id.String())) +} + +// ValidateClusterAndAssignIDs 通过匹配PeerURLs来验证本地集群与现有集群是否匹配.如果验证成功,它将把现有集群的IDs归入本地集群. +// 如果验证失败,将返回一个错误. 
func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *RaftCluster) error {
	ems := existing.Members()
	lms := local.Members()
	if len(ems) != len(lms) {
		return fmt.Errorf("成员数量不一致")
	}

	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	defer cancel()
	for i := range ems {
		var err error
		ok := false
		for j := range lms {
			if ok, err = netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[j].PeerURLs); ok {
				lms[j].ID = ems[i].ID
				break
			}
		}
		if !ok {
			return fmt.Errorf("PeerURLs: 没有找到匹配的现有成员(%v, %v),最后的解析器错误(%v).", ems[i].ID, ems[i].PeerURLs, err)
		}
	}
	local.members = make(map[types.ID]*Member)
	for _, m := range lms {
		local.members[m.ID] = m
	}
	return nil
}

// ID returns the cluster ID.
func (c *RaftCluster) ID() types.ID { return c.cid }

// Members returns a defensive, ID-sorted copy of all members.
func (c *RaftCluster) Members() []*Member {
	c.Lock()
	defer c.Unlock()
	var ms MembersByID
	for _, m := range c.members {
		ms = append(ms, m.Clone())
	}
	sort.Sort(ms)
	return []*Member(ms)
}

// Member returns a clone of the member with the given ID.
// NOTE(review): relies on Clone handling a nil receiver when id is absent — confirm.
func (c *RaftCluster) Member(id types.ID) *Member {
	c.Lock()
	defer c.Unlock()
	return c.members[id].Clone()
}

// membersFromStore loads all cluster members and the removed-member set from
// the v2 store.
func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) {
	members := make(map[types.ID]*Member)
	removed := make(map[types.ID]bool)
	e, err := st.Get(StoreMembersPrefix, true, true) // read /0/members
	if err != nil {
		if isKeyNotFound(err) { // /0/members does not exist yet
			return members, removed
		}
		lg.Panic("从store获取成员失败", zap.String("path", StoreMembersPrefix), zap.Error(err))
	}
	for _, n := range e.NodeExtern.ExternNodes {
		var m *Member
		m, err = nodeToMember(lg, n)
		if err != nil {
			lg.Panic("node--->member失败", zap.Error(err))
		}
		members[m.ID] = m
	}

	e, err = st.Get(storeRemovedMembersPrefix, true, true) // read /0/removed_members
	if err != nil {
		if isKeyNotFound(err) {
			return members, removed
		}
		lg.Panic("从store中获取移除节点失败", zap.String("path", storeRemovedMembersPrefix), zap.Error(err))
	}
	for _, n := range e.NodeExtern.ExternNodes {
		removed[MustParseMemberIDFromKey(lg, n.Key)] = true
	}
	return members, removed
}

// IsIDRemoved reports whether the given member ID has been removed.
func (c *RaftCluster) IsIDRemoved(id types.ID) bool {
	c.Lock()
	defer c.Unlock()
	return c.removed[id]
}

// String renders the cluster ID, members and removed member IDs.
func (c *RaftCluster) String() string {
	c.Lock()
	defer c.Unlock()
	b := &bytes.Buffer{}
	fmt.Fprintf(b, "{ClusterID:%s ", c.cid)
	var ms []string
	for _, m := range c.members {
		ms = append(ms, fmt.Sprintf("%+v", m))
	}
	fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
	var ids []string
	for id := range c.removed {
		ids = append(ids, id.String())
	}
	fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
	return b.String()
}

// genID derives the cluster ID from the sha1 hash of all (sorted) member IDs.
func (c *RaftCluster) genID() {
	mIDs := c.MemberIDs()
	b := make([]byte, 8*len(mIDs))
	// layout: [id,id,id,...] — each ID big-endian encoded into 8 bytes
	for i, id := range mIDs {
		binary.BigEndian.PutUint64(b[8*i:], uint64(id))
	}
	hash := sha1.Sum(b)
	c.cid = types.ID(binary.BigEndian.Uint64(hash[:8]))
}

// UpdateRaftAttributes updates the raft-related attributes of member id and
// persists them to the v2 store and, when requested, the v3 backend.
func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, shouldApplyV3 ShouldApplyV3) {
	c.Lock()
	defer c.Unlock()

	c.members[id].RaftAttributes = raftAttr
	if c.v2store != nil {
		mustUpdateMemberInStore(c.lg, c.v2store, c.members[id])
	}
	if c.be != nil && shouldApplyV3 {
		unsafeSaveMemberToBackend(c.lg, c.be, c.members[id])
	}

	c.lg.Info("更新成员属性", zap.String("cluster-id", c.cid.String()),
		zap.String("local-member-id", c.localID.String()),
		zap.String("updated-remote-peer-id", id.String()),
		zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
	)
}

// MemberByName returns the member with the given name; it panics when two
// members share the same name.
func (c *RaftCluster) MemberByName(name string) *Member {
	c.Lock()
	defer c.Unlock()
	var memb *Member
	for _, m := range c.members {
		if m.Name == name {
			if memb != nil {
				c.lg.Panic("发现了两个相同名称的成员", zap.String("name", name))
			}
			memb = m
		}
	}
	return memb.Clone()
}

// MemberIDs returns the sorted IDs of all members.
func (c *RaftCluster) MemberIDs() []types.ID {
	c.Lock()
	defer c.Unlock()
	var ids []types.ID
	for _, m := range c.members {
		ids = append(ids, m.ID)
	}
	sort.Sort(types.IDSlice(ids))
	return ids
}

// SetID sets the local member ID and the cluster ID.
func (c *RaftCluster) SetID(localID, cid types.ID) {
	c.localID = localID
	c.cid = cid
}

// SetStore sets the backing v2 store.
func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st }

// SetBackend sets the v3 backend and ensures its buckets exist.
func (c *RaftCluster) SetBackend(be backend.Backend) {
	c.be = be
	mustCreateBackendBuckets(c.be)
}

// VotingMembers returns clones of all non-learner (voting) members, sorted by ID.
func (c *RaftCluster) VotingMembers() []*Member {
	c.Lock()
	defer c.Unlock()
	var ms MembersByID
	for _, m := range c.members {
		if !m.IsLearner {
			ms = append(ms, m.Clone())
		}
	}
	sort.Sort(ms)
	return []*Member(ms)
}

// Version returns a copy of the cluster version, or nil when not yet decided.
func (c *RaftCluster) Version() *semver.Version {
	c.Lock()
	defer c.Unlock()
	if c.version == nil {
		return nil
	}
	return semver.Must(semver.NewVersion(c.version.String()))
}

// SetVersion sets the cluster version, re-checks downgrade validity and
// persists the version to the v2 store and/or the v3 backend.
func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version), shouldApplyV3 ShouldApplyV3) {
	c.Lock()
	defer c.Unlock()
	if c.version != nil {
		c.lg.Info("更新集群版本",
			zap.String("cluster-id", c.cid.String()),
			zap.String("local-member-id", c.localID.String()),
			zap.String("from", version.Cluster(c.version.String())),
			zap.String("to", version.Cluster(ver.String())),
		)
	} else {
		c.lg.Info("设置初始集群版本",
			zap.String("cluster-id", c.cid.String()),
			zap.String("local-member-id", c.localID.String()),
			zap.String("cluster-version", version.Cluster(ver.String())),
		)
	}
	c.version = ver
	mustDetectDowngrade(c.lg, c.version, c.downgradeInfo)
	if c.v2store != nil {
		mustSaveClusterVersionToStore(c.lg, c.v2store, ver)
	}
	if c.be != nil && shouldApplyV3 {
		mustSaveClusterVersionToBackend(c.be, ver)
	}
	onSet(c.lg, ver)
}

// NewClusterFromURLsMap
使用提供的url映射创建一个新的raft集群.目前,该算法不支持使用raft learner成员创建集群. +func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) { + c := NewCluster(lg) // RaftCluster struct + for name, urls := range urlsmap { + m := NewMember(name, urls, token, nil) + if _, ok := c.members[m.ID]; ok { + return nil, fmt.Errorf(" %v", m) + } + if uint64(m.ID) == raft.None { + return nil, fmt.Errorf("不能使用 %x作为成员ID", raft.None) + } + c.members[m.ID] = m + } + c.genID() // 生成集群ID + return c, nil +} + +// PeerURLs 返回所有成员的通信地址 +func (c *RaftCluster) PeerURLs() []string { + c.Lock() + defer c.Unlock() + urls := make([]string, 0) + for _, p := range c.members { + urls = append(urls, p.PeerURLs...) + } + sort.Strings(urls) + return urls +} + +func NewCluster(lg *zap.Logger) *RaftCluster { + if lg == nil { + lg = zap.NewNop() + } + return &RaftCluster{ + lg: lg, + members: make(map[types.ID]*Member), + removed: make(map[types.ID]bool), + downgradeInfo: &DowngradeInfo{Enabled: false}, + } +} + +func clusterVersionFromBackend(lg *zap.Logger, be backend.Backend) *semver.Version { + ckey := backendClusterVersionKey() + tx := be.ReadTx() + tx.RLock() + defer tx.RUnlock() + keys, vals := tx.UnsafeRange(buckets.Cluster, ckey, nil, 0) // 从集群获取 获取 clusterVersion + if len(keys) == 0 { + return nil + } + if len(keys) != 1 { + lg.Panic("从后端获取集群版本时,键的数量超出预期", zap.Int("number-of-key", len(keys))) + } + return semver.Must(semver.NewVersion(string(vals[0]))) +} + +func downgradeInfoFromBackend(lg *zap.Logger, be backend.Backend) *DowngradeInfo { + dkey := backendDowngradeKey() + tx := be.ReadTx() + tx.Lock() + defer tx.Unlock() + keys, vals := tx.UnsafeRange(buckets.Cluster, dkey, nil, 0) // 从集群获取 获取 downgrade + + if len(keys) == 0 { + return nil + } + + if len(keys) != 1 { + lg.Panic( + "unexpected number of keys when getting cluster version from backend", + zap.Int("number-of-key", len(keys)), + ) + } + var d DowngradeInfo + if err := json.Unmarshal([]byte(vals[0]), &d); err 
!= nil { + lg.Panic("反序列化失败", zap.Error(err)) + } + if d.Enabled { + if _, err := semver.NewVersion(d.TargetVersion); err != nil { + lg.Panic( + "降级目标版本的版本格式出乎意料", + zap.String("target-version", d.TargetVersion), + ) + } + } + return &d +} + +func (c *RaftCluster) IsMemberExist(id types.ID) bool { + c.Lock() + defer c.Unlock() + _, ok := c.members[id] + return ok +} + +func (c *RaftCluster) VotingMemberIDs() []types.ID { + c.Lock() + defer c.Unlock() + var ids []types.ID + for _, m := range c.members { + if !m.IsLearner { + ids = append(ids, m.ID) + } + } + sort.Sort(types.IDSlice(ids)) + return ids +} + +func (c *RaftCluster) IsLocalMemberLearner() bool { + c.Lock() + defer c.Unlock() + localMember, ok := c.members[c.localID] + if !ok { + c.lg.Panic("无法查找到本地节点", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String())) + } + return localMember.IsLearner +} + +func (c *RaftCluster) DowngradeInfo() *DowngradeInfo { + c.Lock() + defer c.Unlock() + if c.downgradeInfo == nil { + return &DowngradeInfo{Enabled: false} + } + d := &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} + return d +} + +func (c *RaftCluster) SetDowngradeInfo(d *DowngradeInfo, shouldApplyV3 ShouldApplyV3) { + c.Lock() + defer c.Unlock() + + if c.be != nil && shouldApplyV3 { + mustSaveDowngradeToBackend(c.lg, c.be, d) + } + + c.downgradeInfo = d + + if d.Enabled { + c.lg.Info( + "The etcd is ready to downgrade", + zap.String("target-version", d.TargetVersion), + zap.String("etcd-version", version.Version), + ) + } +} + +// PushMembershipToStorage 是覆盖集群成员的存储信息,使其完全反映RaftCluster的内部存储. 
func (c *RaftCluster) PushMembershipToStorage() {
	if c.be != nil {
		TrimMembershipFromBackend(c.lg, c.be)
		for _, m := range c.members {
			unsafeSaveMemberToBackend(c.lg, c.be, m)
		}
	}
	if c.v2store != nil {
		TrimMembershipFromV2Store(c.lg, c.v2store)
		for _, m := range c.members {
			mustSaveMemberToStore(c.lg, c.v2store, m)
		}
	}
}

// clusterVersionFromStore reads the cluster version from the v2 store; nil
// when it has not been set yet.
func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version {
	e, err := st.Get(path.Join(storePrefix, "version"), false, false)
	if err != nil {
		if isKeyNotFound(err) {
			return nil
		}
		lg.Panic("从store获取集群版本信息失败", zap.String("path", path.Join(storePrefix, "version")), zap.Error(err))
	}
	return semver.Must(semver.NewVersion(*e.NodeExtern.Value))
}

// IsValidVersionChange checks whether a cluster version change is valid:
// 1. Downgrade: the cluster version is one minor version higher than the
//    local version; the cluster version should change.
// 2. Cluster start: when not all member versions are available the cluster
//    version is set to MinVersion (3.0); once every member runs a higher
//    version and the cluster version is lower than the local version, the
//    cluster version should change.
func IsValidVersionChange(cv *semver.Version, lv *semver.Version) bool {
	// cluster version, reduced to major.minor
	cv = &semver.Version{Major: cv.Major, Minor: cv.Minor}
	// local version, reduced to major.minor
	lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}

	if isValidDowngrade(cv, lv) || (cv.Major == lv.Major && cv.LessThan(*lv)) {
		return true
	}
	return false
}

// ValidateConfigurationChange checks that a proposed ConfChange is still
// valid against the current membership as recorded in the v2 store.
func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChangeV1) error {
	members, removed := membersFromStore(c.lg, c.v2store) // all members: leader, followers, learners, candidates
	id := types.ID(cc.NodeID)
	if removed[id] { // a removed ID may never rejoin
		return ErrIDRemoved
	}
	_ = cc.Context // serialized ConfigChangeContext / Member payload
	switch cc.Type {
	case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
		confChangeContext := new(ConfigChangeContext)
		if err := json.Unmarshal([]byte(cc.Context), confChangeContext); err != nil {
			c.lg.Panic("发序列化失败confChangeContext", zap.Error(err))
		}
		if confChangeContext.IsPromote { // promoting a learner to voter: it must exist and be a learner
			if members[id] == nil {
				return ErrIDNotFound
			}
			if !members[id].IsLearner {
				return ErrMemberNotLearner
			}
		} else { // adding a brand-new member
			if members[id] != nil {
				return ErrIDExists
			}

			urls := make(map[string]bool) // peer URLs of every current member
			for _, m := range members {
				for _, u := range m.PeerURLs {
					urls[u] = true
				}
			}
			// reject peer URLs that already exist in the cluster
			for _, u := range confChangeContext.Member.PeerURLs {
				if urls[u] {
					return ErrPeerURLexists
				}
			}

			if confChangeContext.Member.IsLearner { // enforce the learner cap
				numLearners := 0
				for _, m := range members {
					if m.IsLearner {
						numLearners++
					}
				}
				if numLearners+1 > maxLearners {
					return ErrTooManyLearners
				}
			}
		}

	case raftpb.ConfChangeRemoveNode:
		if members[id] == nil {
			return ErrIDNotFound
		}

	case raftpb.ConfChangeUpdateNode:
		// the member must exist and its new peer URLs must not collide with
		// any other member's URLs
		if members[id] == nil {
			return ErrIDNotFound
		}
		urls := make(map[string]bool)
		for _, m := range members {
			if m.ID == id {
				continue
			}
			for _, u := range m.PeerURLs {
				urls[u] = true
			}
		}
		m := new(Member)
		if err := json.Unmarshal([]byte(cc.Context), m); err != nil {
			c.lg.Panic("反序列化成员失败", zap.Error(err))
		}
		for _, u := range m.PeerURLs {
			if urls[u] {
				return ErrPeerURLexists
			}
		}

	default:
		c.lg.Panic("未知的 ConfChange type", zap.String("type", cc.Type.String()))
	}
	return nil
}

// ClientURLs returns the sorted client-facing URLs of all members.
func (c *RaftCluster) ClientURLs() []string {
	c.Lock()
	defer c.Unlock()
	urls := make([]string, 0)
	for _, p := range c.members {
		urls = append(urls, p.ClientURLs...)
	}
	sort.Strings(urls)
	return urls
}
+ +package membership + +import ( + "encoding/json" + "log" + + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/raft/raftpb" + "go.uber.org/zap" +) + +var confStateKey = []byte("confState") + +// MustUnsafeSaveConfStateToBackend confstate ---> bolt.db/meta/confState +func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.BatchTx, confState *raftpb.ConfState) { + confStateBytes, err := json.Marshal(confState) + if err != nil { + lg.Panic("不能序列化raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err)) + } + + tx.UnsafePut(buckets.Meta, confStateKey, confStateBytes) +} + +// UnsafeConfStateFromBackend confstate <--- bolt.db/meta/confState +func UnsafeConfStateFromBackend(lg *zap.Logger, tx backend.ReadTx) *raftpb.ConfState { + keys, vals := tx.UnsafeRange(buckets.Meta, confStateKey, nil, 0) + if len(keys) == 0 { + return nil + } + + if len(keys) != 1 { + lg.Panic("不期待的key: "+string(confStateKey)+" 当从bolt获取集群版本", zap.Int("number-of-key", len(keys))) + } + var confState raftpb.ConfState + if err := json.Unmarshal(vals[0], &confState); err != nil { + log.Panic("从bolt.db获取到的值无法反序列化", + zap.ByteString("conf-state-json", []byte(vals[0])), + zap.Error(err)) + } + return &confState +} diff --git a/etcd/etcdserver/api/membership/over_downgrade.go b/etcd/etcdserver/api/membership/over_downgrade.go new file mode 100644 index 00000000000..6ac61c5295e --- /dev/null +++ b/etcd/etcdserver/api/membership/over_downgrade.go @@ -0,0 +1,62 @@ +// Copyright 2020 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package membership + +import ( + "github.com/coreos/go-semver/semver" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + "go.uber.org/zap" +) + +type DowngradeInfo struct { + TargetVersion string `json:"target-version"` // 是目标降级版本,如果集群不在降级中,targetVersion将是一个空字符串. + Enabled bool `json:"enabled"` // 表示集群是否启用了降级功能 +} + +func (d *DowngradeInfo) GetTargetVersion() *semver.Version { + return semver.Must(semver.NewVersion(d.TargetVersion)) +} + +// mustDetectDowngrade 检测版本降级. +func mustDetectDowngrade(lg *zap.Logger, cv *semver.Version, d *DowngradeInfo) { + lv := semver.Must(semver.NewVersion(version.Version)) + lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} + + // 如果集群启用了降级功能,请对照降级目标版本检查本地版本. 
+ if d != nil && d.Enabled && d.TargetVersion != "" { + if lv.Equal(*d.GetTargetVersion()) { + if cv != nil { + lg.Info("集群正在降级到目标版本", zap.String("target-cluster-version", d.TargetVersion), zap.String("determined-cluster-version", version.Cluster(cv.String())), zap.String("current-etcd-version", version.Version)) + } + return + } + lg.Fatal("无效的降级;当降级被启用时,etcd版本不允许加入", zap.String("current-etcd-version", version.Version), zap.String("target-cluster-version", d.TargetVersion)) + } + + // 如果集群禁止降级,则根据确定的集群版本检查本地版本,如果本地版本不低于集群版本,则验证通过 + if cv != nil && lv.LessThan(*cv) { + lg.Fatal("无效的降级;etcd版本低于确定的集群版本", zap.String("current-etcd-version", version.Version), zap.String("determined-cluster-version", version.Cluster(cv.String()))) + } +} + +// AllowedDowngradeVersion 允许版本降级 +func AllowedDowngradeVersion(ver *semver.Version) *semver.Version { + return &semver.Version{Major: ver.Major, Minor: ver.Minor - 1} +} + +// isValidDowngrade 验证集群是否可以从verFrom降级到verTo 小版本差1 +func isValidDowngrade(verFrom *semver.Version, verTo *semver.Version) bool { + return verTo.Equal(*AllowedDowngradeVersion(verFrom)) +} diff --git a/etcd/etcdserver/api/membership/over_node_change.go b/etcd/etcdserver/api/membership/over_node_change.go new file mode 100644 index 00000000000..a9ded70b82d --- /dev/null +++ b/etcd/etcdserver/api/membership/over_node_change.go @@ -0,0 +1,293 @@ +package membership + +import ( + "crypto/sha1" + "encoding/binary" + "errors" + "fmt" + "math/rand" + "sort" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" + "go.uber.org/zap" +) + +type Member struct { + ID types.ID `json:"id"` // hash得到的, 本节点ID + RaftAttributes // 与raft相关的etcd成员属性 + Attributes // 代表一个etcd成员的所有非raft的相关属性 +} + +// RaftAttributes 与raft相关的etcd成员属性 +type RaftAttributes struct { + PeerURLs []string `json:"peerURLs"` // 是raft集群中的对等体列表. 
+ IsLearner bool `json:"isLearner,omitempty"` // 表示该成员是否是raft Learner. +} + +// Attributes 代表一个etcd成员的所有非raft的相关属性. +type Attributes struct { + Name string `json:"name,omitempty"` // 节点创建时设置的name 默认default + ClientURLs []string `json:"clientURLs,omitempty"` // 当接受到来自该Name的请求时,会 +} + +// NewMember 创建一个没有ID的成员,并根据集群名称、peer的URLS 和时间生成一个ID.这是用来引导/添加新成员的. +func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { + memberId := computeMemberId(peerURLs, clusterName, now) + return newMember(name, peerURLs, memberId, false) +} + +// NewMemberAsLearner 创建一个没有ID的成员,并根据集群名称、peer的URLS 和时间生成一个ID.这是用来引导新learner成员的. +func NewMemberAsLearner(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { + memberId := computeMemberId(peerURLs, clusterName, now) + return newMember(name, peerURLs, memberId, true) +} + +// IsReadyToAddVotingMember OK +func (c *RaftCluster) IsReadyToAddVotingMember() bool { + nmembers := 1 // 新添加的节点 先置1 + nstarted := 0 + + for _, member := range c.VotingMembers() { + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + if nstarted == 1 && nmembers == 2 { + // 在一个成员集群中添加一个新节点,用于恢复集群数据 + c.lg.Debug("启动成员数为1;是否可以接受添加成员的请求") + return true + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + c.lg.Warn("拒绝添加成员;启动的成员将少于法定人数", zap.Int("number-of-started-member", nstarted), zap.Int("quorum", nquorum), zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String())) + return false + } + + return true +} + +func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool { + nmembers := 0 + nstarted := 0 + + for _, member := range c.VotingMembers() { + if uint64(member.ID) == id { + continue + } + + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + c.lg.Warn( + "rejecting member remove; started member will be less than quorum", + zap.Int("number-of-started-member", nstarted), + zap.Int("quorum", 
nquorum), + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + return false + } + + return true +} + +// IsReadyToPromoteMember 是否准备好提升节点角色, 提升以后现有成员是否可以达到大多数 +func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool { + nmembers := 1 // 我们为未来的法定人数计算被提拔的学习者 + nstarted := 1 + + for _, member := range c.VotingMembers() { + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + c.lg.Warn("拒绝成员晋升;启动成员将少于法定人数", + zap.Int("number-of-started-member", nstarted), + zap.Int("quorum", nquorum), + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + return false + } + + return true +} + +// ------------------------------------------------ over ------------------------------------------------ + +// PromoteMember 将该成员的IsLearner属性标记为false. +func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) { + c.Lock() + defer c.Unlock() + + c.members[id].RaftAttributes.IsLearner = false + if c.v2store != nil { + // 内存里面的一个树形node结构 + mustUpdateMemberInStore(c.lg, c.v2store, c.members[id]) + } + if c.be != nil && shouldApplyV3 { + unsafeSaveMemberToBackend(c.lg, c.be, c.members[id]) + } + + c.lg.Info("成员角色提升", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String())) +} + +// AddMember 在集群中添加一个新的成员,并将给定成员的raftAttributes保存到存储空间.给定的成员应该有空的属性. 一个具有匹配id的成员必须不存在. 
+func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) { + c.Lock() + defer c.Unlock() + + var v2Err, beErr error + if c.v2store != nil { + v2Err = unsafeSaveMemberToStore(c.lg, c.v2store, m) + if v2Err != nil { + if e, ok := v2Err.(*v2error.Error); !ok || e.ErrorCode != v2error.EcodeNodeExist { + c.lg.Panic("保存member到v2store失败", zap.String("member-id", m.ID.String()), zap.Error(v2Err)) + } + } + } + _ = backend.MyBackend{} + if c.be != nil && shouldApplyV3 { + beErr = unsafeSaveMemberToBackend(c.lg, c.be, m) // 保存到bolt.db members + if beErr != nil && !errors.Is(beErr, errMemberAlreadyExist) { + c.lg.Panic("保存member到backend失败", zap.String("member-id", m.ID.String()), zap.Error(beErr)) + } + } + if v2Err != nil && (beErr != nil || c.be == nil) { + c.lg.Panic("保存member到store失败", zap.String("member-id", m.ID.String()), zap.Error(v2Err)) + } + + c.members[m.ID] = m + + c.lg.Info("添加成员", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("added-peer-id", m.ID.String()), zap.Strings("added-peer-peer-urls", m.PeerURLs)) +} + +// RemoveMember store中必须存在该ID,否则会panic +func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) { + c.Lock() + defer c.Unlock() + var v2Err, beErr error + if c.v2store != nil { + v2Err = unsafeDeleteMemberFromStore(c.v2store, id) + if v2Err != nil { + if e, ok := v2Err.(*v2error.Error); !ok || e.ErrorCode != v2error.EcodeKeyNotFound { + c.lg.Panic("从v2store删除节点失败", zap.String("member-id", id.String()), zap.Error(v2Err)) + } + } + } + if c.be != nil && shouldApplyV3 { + beErr = unsafeDeleteMemberFromBackend(c.be, id) + if beErr != nil && !errors.Is(beErr, errMemberNotFound) { + c.lg.Panic("从backend bolt 删除节点失败", zap.String("member-id", id.String()), zap.Error(beErr)) + } + } + if v2Err != nil && (beErr != nil || c.be == nil) { + c.lg.Panic("从store中删除节点失败", zap.String("member-id", id.String()), zap.Error(v2Err)) + } + + m, ok := c.members[id] + delete(c.members, 
id) + c.removed[id] = true + + if ok { + c.lg.Info("移除成员", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("removed-remote-peer-id", id.String()), zap.Strings("removed-remote-peer-urls", m.PeerURLs)) + } else { + c.lg.Warn("该成员已经移除", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("removed-remote-peer-id", id.String())) + } +} + +// 计算成员ID +func computeMemberId(peerURLs types.URLs, clusterName string, now *time.Time) types.ID { + peerURLstrs := peerURLs.StringSlice() + sort.Strings(peerURLstrs) + joinedPeerUrls := strings.Join(peerURLstrs, "") + b := []byte(joinedPeerUrls) + + b = append(b, []byte(clusterName)...) + if now != nil { + b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...) + } + + hash := sha1.Sum(b) + return types.ID(binary.BigEndian.Uint64(hash[:8])) +} + +func newMember(name string, peerURLs types.URLs, memberId types.ID, isLearner bool) *Member { + m := &Member{ + RaftAttributes: RaftAttributes{ + PeerURLs: peerURLs.StringSlice(), + IsLearner: isLearner, + }, + Attributes: Attributes{Name: name}, + ID: memberId, + } + return m +} + +// PickPeerURL 随机从 Member's PeerURLs 选择一个 +func (m *Member) PickPeerURL() string { + if len(m.PeerURLs) == 0 { + panic("peer url 应该>0") + } + return m.PeerURLs[rand.Intn(len(m.PeerURLs))] +} + +// Clone 返回member deepcopy +func (m *Member) Clone() *Member { + if m == nil { + return nil + } + mm := &Member{ + ID: m.ID, + RaftAttributes: RaftAttributes{ + IsLearner: m.IsLearner, + }, + Attributes: Attributes{ + Name: m.Name, + }, + } + if m.PeerURLs != nil { + mm.PeerURLs = make([]string, len(m.PeerURLs)) + copy(mm.PeerURLs, m.PeerURLs) + } + if m.ClientURLs != nil { + mm.ClientURLs = make([]string, len(m.ClientURLs)) + copy(mm.ClientURLs, m.ClientURLs) + } + return mm +} + +func (m *Member) IsStarted() bool { + return len(m.Name) != 0 +} + +type MembersByID []*Member + +func (ms MembersByID) Len() int { 
return len(ms) } +func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID } +func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } + +type MembersByPeerURLs []*Member + +func (ms MembersByPeerURLs) Len() int { return len(ms) } +func (ms MembersByPeerURLs) Less(i, j int) bool { + return ms[i].PeerURLs[0] < ms[j].PeerURLs[0] +} +func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } diff --git a/etcd/etcdserver/api/membership/over_store.go b/etcd/etcdserver/api/membership/over_store.go new file mode 100644 index 00000000000..8ef22e866ad --- /dev/null +++ b/etcd/etcdserver/api/membership/over_store.go @@ -0,0 +1,370 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package membership + +import ( + "bytes" + "encoding/json" + "fmt" + "path" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +const ( + attributesSuffix = "attributes" + raftAttributesSuffix = "raftAttributes" + storePrefix = "/0" // 在store中存储成员信息的前缀 + +) + +var ( + StoreMembersPrefix = path.Join(storePrefix, "members") // /0/members + storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members") // /0/removed_members + errMemberAlreadyExist = fmt.Errorf("member already exists") + errMemberNotFound = fmt.Errorf("member not found") +) + +// v2store 更新成员属性 +func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) { + b, err := json.Marshal(m.Attributes) + if err != nil { + lg.Panic("序列化 属性失败", zap.Error(err)) + } + p := path.Join(MemberStoreKey(m.ID), attributesSuffix) + if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic("更新属性失败", zap.String("path", p), zap.Error(err)) + } +} + +// v2store 保存集群版本 +func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) { + if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic( + "保存集群版本到store失败", + zap.String("path", StoreClusterVersionKey()), + zap.Error(err), + ) + } +} + +// 创建blot.db存储桶 +func mustCreateBackendBuckets(be backend.Backend) { + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafeCreateBucket(buckets.Members) + tx.UnsafeCreateBucket(buckets.MembersRemoved) + tx.UnsafeCreateBucket(buckets.Cluster) +} + +// MemberAttributesStorePath v2store 成员属性路径 +func MemberAttributesStorePath(id types.ID) string { + return path.Join(MemberStoreKey(id), attributesSuffix) +} + +func 
mustParseMemberIDFromBytes(lg *zap.Logger, key []byte) types.ID { + id, err := types.IDFromString(string(key)) + if err != nil { + lg.Panic("从key解析成员ID失败", zap.Error(err)) + } + return id +} + +// OK +func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) { + err := unsafeSaveMemberToStore(lg, s, m) + if err != nil { + lg.Panic( + "保存member到store失败", + zap.String("member-id", m.ID.String()), + zap.Error(err), + ) + } +} + +// node---->v2store [memory] +func unsafeSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) error { + b, err := json.Marshal(m.RaftAttributes) // 是raft集群中的对等体列表. 表示该成员是否是raft Learner. + if err != nil { + lg.Panic("序列化失败raftAttributes", zap.Error(err)) + } + _ = computeMemberId // id 由这个函数生成,需要 peerURLs clusterName 创建时间,创建时间一般为nil + p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) // /0/members/123/raftAttributes + _, err = s.Create(p, false, string(b), // ✅ + false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) + return err +} + +func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) { + // s 内存里面的一个树形node结构 + b, err := json.Marshal(m.RaftAttributes) // 是raft集群中的对等体列表. 表示该成员是否是raft Learner. 
+ if err != nil { + lg.Panic("序列化raft相关属性失败", zap.Error(err)) + } + p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) // 123213/raftAttributes + if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + lg.Panic("更新raftAttributes失败", zap.String("path", p), zap.Error(err)) + } +} + +// MustParseMemberIDFromKey ok +func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID { + id, err := types.IDFromString(path.Base(key)) // /0/members/8e9e05c52164694d + if err != nil { + lg.Panic("从key解析member ID 失败", zap.Error(err)) + } + return id +} + +// 将member保存到Backend bolt.db +func unsafeSaveMemberToBackend(lg *zap.Logger, be backend.Backend, m *Member) error { + mkey := backendMemberKey(m.ID) // 16进制字符串 + mvalue, err := json.Marshal(m) + if err != nil { + lg.Panic("序列化失败", zap.Error(err)) + } + + tx := be.BatchTx() // 写事务 + tx.Lock() + defer tx.Unlock() + if unsafeMemberExists(tx, mkey) { // ✅ + return errMemberAlreadyExist + } + tx.UnsafePut(buckets.Members, mkey, mvalue) + return nil +} + +// MemberStoreKey 15 -----> /0/members/e +func MemberStoreKey(id types.ID) string { + return path.Join(StoreMembersPrefix, id.String()) // /0/members/e +} + +// RemovedMemberStoreKey 15 -----> /0/removed_members/e +func RemovedMemberStoreKey(id types.ID) string { + return path.Join(storeRemovedMembersPrefix, id.String()) +} + +// 移除节点,并添加到removed_member +func unsafeDeleteMemberFromStore(s v2store.Store, id types.ID) error { + if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil { + return err + } + if _, err := s.Create(RemovedMemberStoreKey(id), // ✅ + false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { + return err + } + return nil +} + +// 首先遍历bolt.db members下的所有k,v +func unsafeMemberExists(tx backend.ReadTx, mkey []byte) bool { + var found bool + tx.UnsafeForEach(buckets.Members, func(k, v []byte) error { + if bytes.Equal(k, mkey) { + found = true + } + return nil + }) + 
return found +} + +// 从bolt.db删除节点信息 +func unsafeDeleteMemberFromBackend(be backend.Backend, id types.ID) error { + mkey := backendMemberKey(id) + + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafePut(buckets.MembersRemoved, mkey, []byte("removed")) // 更新 + if !unsafeMemberExists(tx, mkey) { + return errMemberNotFound + } + tx.UnsafeDelete(buckets.Members, mkey) + return nil +} + +// 在bolt.db存储的key +func backendMemberKey(id types.ID) []byte { + return []byte(id.String()) +} + +// nodeToMember 从node构建一个member +func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) { + m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)} + attrs := make(map[string][]byte) + raftAttrKey := path.Join(n.Key, raftAttributesSuffix) // /0/members/8e9e05c52164694d/raftAttributes + attrKey := path.Join(n.Key, attributesSuffix) // /0/members/8e9e05c52164694d/raftAttributes/attributes + // &v2store.NodeExtern{Key: "/0/members/8e9e05c52164694d", ExternNodes: []*v2store.NodeExtern{ + // {Key: "/0/members/8e9e05c52164694d/raftAttributes/attributes", Value: stringp(`{"name":"node1","clientURLs":null}`)}, + // {Key: "/0/members/8e9e05c52164694d/raftAttributes", Value: stringp(`{"peerURLs":null}`)}, + // }} + for _, nn := range n.ExternNodes { + if nn.Key != raftAttrKey && nn.Key != attrKey { + return nil, fmt.Errorf("未知的 key %q", nn.Key) + } + attrs[nn.Key] = []byte(*nn.Value) + } + if data := attrs[raftAttrKey]; data != nil { + if err := json.Unmarshal(data, &m.RaftAttributes); err != nil { + return nil, fmt.Errorf("反序列化 raftAttributes 失败: %v", err) + } + } else { + return nil, fmt.Errorf("raftAttributes key不存在") + } + if data := attrs[attrKey]; data != nil { + if err := json.Unmarshal(data, &m.Attributes); err != nil { + return m, fmt.Errorf("反序列化 attributes 失败: %v", err) + } + } + return m, nil +} + +// TrimClusterFromBackend 从bolt.db移除cluster 桶 +func TrimClusterFromBackend(be backend.Backend) error { + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + 
tx.UnsafeDeleteBucket(buckets.Cluster) + return nil +} + +// 读取bolt.db中的member桶 +func readMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool, error) { + members := make(map[types.ID]*Member) + removed := make(map[types.ID]bool) + + tx := be.ReadTx() + tx.RLock() + defer tx.RUnlock() + err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error { + memberId := mustParseMemberIDFromBytes(lg, k) + m := &Member{ID: memberId} + if err := json.Unmarshal(v, &m); err != nil { + return err + } + members[memberId] = m + return nil + }) + if err != nil { + return nil, nil, fmt.Errorf("不能读取bolt.db中的member桶: %w", err) + } + + err = tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error { + memberId := mustParseMemberIDFromBytes(lg, k) + removed[memberId] = true + return nil + }) + if err != nil { + return nil, nil, fmt.Errorf("不能读取bolt.db中的 members_removed 桶: %w", err) + } + return members, removed, nil +} + +func membersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) { + return mustReadMembersFromBackend(lg, be) +} + +// 从bolt.db读取成员信息 +func mustReadMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) { + members, removed, err := readMembersFromBackend(lg, be) + if err != nil { + lg.Panic("不能从bolt.db读取成员信息", zap.Error(err)) + } + return members, removed +} + +// TrimMembershipFromBackend 从bolt.db删除成员信息 +func TrimMembershipFromBackend(lg *zap.Logger, be backend.Backend) error { + lg.Info("开始删除成员信息...") + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error { + tx.UnsafeDelete(buckets.Members, k) + lg.Debug("删除成员信息", zap.Stringer("member", mustParseMemberIDFromBytes(lg, k))) + return nil + }) + if err != nil { + return err + } + return tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error { + tx.UnsafeDelete(buckets.MembersRemoved, k) + lg.Debug("删除 已移除的成员信息", 
zap.Stringer("member", mustParseMemberIDFromBytes(lg, k))) + return nil + }) +} + +// TrimMembershipFromV2Store 从v2store删除所有节点信息 +func TrimMembershipFromV2Store(lg *zap.Logger, s v2store.Store) error { + members, removed := membersFromStore(lg, s) + + for mID := range members { + _, err := s.Delete(MemberStoreKey(mID), true, true) + if err != nil { + return err + } + } + for mID := range removed { + _, err := s.Delete(RemovedMemberStoreKey(mID), true, true) + if err != nil { + return err + } + } + + return nil +} + +// 保存集群版本到bolt.db +func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) { + ckey := backendClusterVersionKey() + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafePut(buckets.Cluster, ckey, []byte(ver.String())) +} + +// bolt.db 集群版本key +func backendClusterVersionKey() []byte { + return []byte("clusterVersion") +} + +func backendDowngradeKey() []byte { + return []byte("downgrade") +} + +// 保存降级信息到blot.db +func mustSaveDowngradeToBackend(lg *zap.Logger, be backend.Backend, downgrade *DowngradeInfo) { + dkey := backendDowngradeKey() // downgrade + dvalue, err := json.Marshal(downgrade) + if err != nil { + lg.Panic("序列化降级信息失败", zap.Error(err)) + } + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafePut(buckets.Cluster, dkey, dvalue) +} + +// StoreClusterVersionKey v2store中集群版本路径 +func StoreClusterVersionKey() string { // /0/version + return path.Join(storePrefix, "version") +} diff --git a/etcd/etcdserver/api/membership/over_storev2.go b/etcd/etcdserver/api/membership/over_storev2.go new file mode 100644 index 00000000000..2e92ba7956b --- /dev/null +++ b/etcd/etcdserver/api/membership/over_storev2.go @@ -0,0 +1,34 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package membership + +import ( + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" +) + +// IsMetaStoreOnly 验证给定的`store`是否只包含元信息(成员,版本);可以从后端(storev3)恢复,而不是用户数据. +func IsMetaStoreOnly(store v2store.Store) (bool, error) { + event, err := store.Get("/", true, false) + if err != nil { + return false, err + } + for _, n := range event.NodeExtern.ExternNodes { + if n.Key != storePrefix && n.ExternNodes.Len() > 0 { + return false, nil + } + } + + return true, nil +} diff --git a/etcd/etcdserver/api/over_cluster.go b/etcd/etcdserver/api/over_cluster.go new file mode 100644 index 00000000000..491fa8e41a5 --- /dev/null +++ b/etcd/etcdserver/api/over_cluster.go @@ -0,0 +1,30 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package api + +import ( + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + + "github.com/coreos/go-semver/semver" +) + +type Cluster interface { + ID() types.ID // 集群ID + ClientURLs() []string // 返回该集群正在监听客户端请求的所有URL的集合. + Members() []*membership.Member // 集群成员,排序之后的 + Member(id types.ID) *membership.Member + Version() *semver.Version +} diff --git a/server/etcdserver/api/rafthttp/msg_codec.go b/etcd/etcdserver/api/rafthttp/msg_codec.go similarity index 86% rename from server/etcdserver/api/rafthttp/msg_codec.go rename to etcd/etcdserver/api/rafthttp/msg_codec.go index 5444c01f8fd..98b5ea3ad5b 100644 --- a/server/etcdserver/api/rafthttp/msg_codec.go +++ b/etcd/etcdserver/api/rafthttp/msg_codec.go @@ -19,12 +19,10 @@ import ( "errors" "io" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/raft/v3/raftpb" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" ) -// messageEncoder is a encoder that can encode all kinds of messages. -// It MUST be used with a paired messageDecoder. type messageEncoder struct { w io.Writer } @@ -37,7 +35,6 @@ func (enc *messageEncoder) encode(m *raftpb.Message) error { return err } -// messageDecoder is a decoder that can decode all kinds of messages. 
type messageDecoder struct { r io.Reader } diff --git a/server/etcdserver/api/rafthttp/msgappv2_codec.go b/etcd/etcdserver/api/rafthttp/msgappv2_codec.go similarity index 80% rename from server/etcdserver/api/rafthttp/msgappv2_codec.go rename to etcd/etcdserver/api/rafthttp/msgappv2_codec.go index 59425aeea69..b6c57878a41 100644 --- a/server/etcdserver/api/rafthttp/msgappv2_codec.go +++ b/etcd/etcdserver/api/rafthttp/msgappv2_codec.go @@ -15,52 +15,26 @@ package rafthttp import ( + "bytes" "encoding/binary" "fmt" "io" + "io/ioutil" "time" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/pbutil" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3/raftpb" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" ) const ( msgTypeLinkHeartbeat uint8 = 0 msgTypeAppEntries uint8 = 1 msgTypeApp uint8 = 2 - - msgAppV2BufSize = 1024 * 1024 + msgAppV2BufSize = 1024 * 1024 ) -// msgappv2 stream sends three types of message: linkHeartbeatMessage, -// AppEntries and MsgApp. AppEntries is the MsgApp that is sent in -// replicate state in raft, whose index and term are fully predictable. -// -// Data format of linkHeartbeatMessage: -// | offset | bytes | description | -// +--------+-------+-------------+ -// | 0 | 1 | \x00 | -// -// Data format of AppEntries: -// | offset | bytes | description | -// +--------+-------+-------------+ -// | 0 | 1 | \x01 | -// | 1 | 8 | length of entries | -// | 9 | 8 | length of first entry | -// | 17 | n1 | first entry | -// ... 
-// | x | 8 | length of k-th entry data | -// | x+8 | nk | k-th entry data | -// | x+8+nk | 8 | commit index | -// -// Data format of MsgApp: -// | offset | bytes | description | -// +--------+-------+-------------+ -// | 0 | 1 | \x02 | -// | 1 | 8 | length of encoded message | -// | 9 | n | encoded message | type msgAppV2Encoder struct { w io.Writer fs *stats.FollowerStats @@ -101,13 +75,14 @@ func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error { return err } for i := 0; i < len(m.Entries); i++ { - // write length of entry binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size())) if _, err := enc.w.Write(enc.uint64buf); err != nil { return err } if n := m.Entries[i].Size(); n < msgAppV2BufSize { - if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil { + temp, err := m.Entries[i].Marshal() + enc.buf = append(enc.buf, temp...) + if err != nil { return err } if _, err := enc.w.Write(enc.buf[:n]); err != nil { @@ -176,6 +151,8 @@ func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) { m raftpb.Message typ uint8 ) + xxx, _ := ioutil.ReadAll(dec.r) + dec.r = bytes.NewReader(xxx) if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil { return m, err } diff --git a/etcd/etcdserver/api/rafthttp/over_coder.go b/etcd/etcdserver/api/rafthttp/over_coder.go new file mode 100644 index 00000000000..3c3b517486c --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/over_coder.go @@ -0,0 +1,25 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import "github.com/ls-2018/etcd_cn/raft/raftpb" + +type encoder interface { + encode(m *raftpb.Message) error +} + +type decoder interface { + decode() (raftpb.Message, error) +} diff --git a/etcd/etcdserver/api/rafthttp/over_http.go b/etcd/etcdserver/api/rafthttp/over_http.go new file mode 100644 index 00000000000..ef2a2527d5e --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/over_http.go @@ -0,0 +1,121 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "errors" + "net/http" + "path" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "go.uber.org/zap" +) + +const ( + connReadLimitByte = 64 * 1024 // 链接最大的读取数据量 + snapshotLimitByte = 1 * 1024 * 1024 * 1024 * 1024 // 快照大小上限 +) + +var ( + RaftPrefix = "/raft" + ProbingPrefix = path.Join(RaftPrefix, "probing") + RaftStreamPrefix = path.Join(RaftPrefix, "stream") + RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot") + errIncompatibleVersion = errors.New("不兼容的版本") + errClusterIDMismatch = errors.New("cluster ID不匹配") +) + +type peerGetter interface { + Get(id types.ID) Peer +} + +type writerToResponse interface { + WriteTo(w http.ResponseWriter) +} + +// checkClusterCompatibilityFromHeader 检查集群版本的兼容性 +//.它检查本地成员的版本是否与报头中的版本兼容,以及本地成员的集群ID是否与报头中的ID一致. 
+func checkClusterCompatibilityFromHeader(lg *zap.Logger, localID types.ID, header http.Header, cid types.ID) error { + remoteName := header.Get("X-Server-From") + remoteServer := serverVersion(header) + remoteVs := "" + if remoteServer != nil { + remoteVs = remoteServer.String() + } + + remoteMinClusterVer := minClusterVersion(header) + remoteMinClusterVs := "" + if remoteMinClusterVer != nil { + remoteMinClusterVs = remoteMinClusterVer.String() + } + + localServer, localMinCluster, err := checkVersionCompatibility(remoteName, remoteServer, remoteMinClusterVer) + + localVs := "" + if localServer != nil { + localVs = localServer.String() + } + localMinClusterVs := "" + if localMinCluster != nil { + localMinClusterVs = localMinCluster.String() + } + + if err != nil { + lg.Warn( + "检查版本兼容性失败", + zap.String("local-member-id", localID.String()), + zap.String("local-member-cluster-id", cid.String()), + zap.String("local-member-etcd-version", localVs), + zap.String("local-member-etcd-minimum-cluster-version", localMinClusterVs), + zap.String("remote-peer-etcd-name", remoteName), + zap.String("remote-peer-etcd-version", remoteVs), + zap.String("remote-peer-etcd-minimum-cluster-version", remoteMinClusterVs), + zap.Error(err), + ) + return errIncompatibleVersion + } + if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() { + lg.Warn( + "集群ID不匹配", + zap.String("local-member-id", localID.String()), + zap.String("local-member-cluster-id", cid.String()), + zap.String("local-member-etcd-version", localVs), + zap.String("local-member-etcd-minimum-cluster-version", localMinClusterVs), + zap.String("remote-peer-etcd-name", remoteName), + zap.String("remote-peer-etcd-version", remoteVs), + zap.String("remote-peer-etcd-minimum-cluster-version", remoteMinClusterVs), + zap.String("remote-peer-cluster-id", gcid), + ) + return errClusterIDMismatch + } + return nil +} + +type closeNotifier struct { + done chan struct{} +} + +func newCloseNotifier() *closeNotifier { + return 
&closeNotifier{ + done: make(chan struct{}), + } +} + +func (n *closeNotifier) Close() error { + close(n.done) + return nil +} + +func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done } diff --git a/etcd/etcdserver/api/rafthttp/over_raft_pipeline_api.go b/etcd/etcdserver/api/rafthttp/over_raft_pipeline_api.go new file mode 100644 index 00000000000..fcd92554caf --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/over_raft_pipeline_api.go @@ -0,0 +1,85 @@ +package rafthttp + +import ( + "context" + "io/ioutil" + "net/http" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + pioutil "github.com/ls-2018/etcd_cn/pkg/ioutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" + "go.uber.org/zap" +) + +// newPipelineHandler Pipeline 类型通道用于处理数据量大的消息例如 Snapshot .这种类型的 +// 消息需要与心跳等消息分开处理否则会阻塞心跳包的传输进而影响集群的稳定性.使用Pipeline 类型通道进行通信时点到点之间不维护HTTP 长链接 +// 它只通过短链接传输数据用完即关闭. +func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler { + h := &pipelineHandler{ + lg: t.Logger, + localID: t.ID, + tr: t, + r: r, + cid: cid, + } + if h.lg == nil { + h.lg = zap.NewNop() + } + return h +} + +type pipelineHandler struct { + lg *zap.Logger + localID types.ID + tr Transporter + r Raft + cid types.ID +} + +func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + w.Header().Set("Allow", "POST") + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) + + if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + + addRemoteFromRequest(h.tr, r) + + limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte) // 限制返回的数据大小 64K + b, err := ioutil.ReadAll(limitedr) + if err != nil { + h.lg.Warn("读取raft消息失败", zap.String("local-member-id", h.localID.String()), zap.Error(err)) + http.Error(w, "读取raft消息失败", http.StatusBadRequest) + return 
+ } + + var m raftpb.Message + if err := m.Unmarshal(b); err != nil { + h.lg.Warn("发序列化raft消息失败", zap.String("local-member-id", h.localID.String()), zap.Error(err)) + http.Error(w, "发序列化raft消息失败", http.StatusBadRequest) + return + } + + if err := h.r.Process(context.TODO(), m); err != nil { + switch v := err.(type) { + case writerToResponse: + v.WriteTo(w) + default: + h.lg.Warn("处理raft消息错误", zap.String("local-member-id", h.localID.String()), zap.Error(err)) + http.Error(w, "处理raft消息错误", http.StatusInternalServerError) + w.(http.Flusher).Flush() + // 断开http流的连接 + panic(err) + } + return + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/etcd/etcdserver/api/rafthttp/over_raft_snapshot_api.go b/etcd/etcdserver/api/rafthttp/over_raft_snapshot_api.go new file mode 100644 index 00000000000..f03823120a4 --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/over_raft_snapshot_api.go @@ -0,0 +1,136 @@ +package rafthttp + +import ( + "context" + "fmt" + "net/http" + "time" + + humanize "github.com/dustin/go-humanize" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/raft/raftpb" + "go.uber.org/zap" +) + +type snapshotHandler struct { + lg *zap.Logger + tr Transporter + r Raft + snapshotter *snap.Snapshotter + + localID types.ID + cid types.ID +} + +func newSnapshotHandler(t *Transport, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler { + h := &snapshotHandler{ + lg: t.Logger, + tr: t, + r: r, + snapshotter: snapshotter, + localID: t.ID, + cid: cid, + } + if h.lg == nil { + h.lg = zap.NewNop() + } + return h +} + +// ServeHTTP serves HTTP request to receive and process snapshot message. +// 如果请求发送者在没有关闭基础TCP连接的情况下死亡.处理程序将继续等待请求主体,直到TCP keepalive发现连接在几分钟后被破坏. +// 这是可接受的,因为通过其他 TCP 连接发送的快照信息仍然可以被接收和处理.接收和处理. +// 2. 这种情况应该很少发生,所以不做进一步优化. 
+func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + if r.Method != "POST" { + w.Header().Set("Allow", "POST") + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) + + if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + + addRemoteFromRequest(h.tr, r) + + dec := &messageDecoder{r: r.Body} + // 快照可能超过512MB. + m, err := dec.decodeLimit(snapshotLimitByte) // 8字节[消息长度]+消息+snap + from := types.ID(m.From).String() + if err != nil { + msg := fmt.Sprintf("解码raft消息失败 (%v)", err) + h.lg.Warn("解码raft消息失败", zap.String("local-member-id", h.localID.String()), zap.String("remote-snapshot-sender-id", from), zap.Error(err)) + http.Error(w, msg, http.StatusBadRequest) + return + } + + msgSize := m.Size() + + if m.Type != raftpb.MsgSnap { + h.lg.Warn( + "不期待的消息类型", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.String("message-type", m.Type.String()), + ) + http.Error(w, "不期待的消息类型", http.StatusBadRequest) + return + } + + h.lg.Info( + "开始接受快照", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), + zap.Int("incoming-snapshot-message-size-bytes", msgSize), + zap.String("incoming-snapshot-message-size", humanize.Bytes(uint64(msgSize))), + ) + + n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index) + if err != nil { + msg := fmt.Sprintf("保存快照失败 (%v)", err) + h.lg.Warn( + "保存快照失败", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), + zap.Error(err), + ) + http.Error(w, msg, http.StatusInternalServerError) + return + } + + 
downloadTook := time.Since(start) + h.lg.Info( + "接受并保存数据库快照", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), + zap.Int64("incoming-snapshot-size-bytes", n), + zap.String("incoming-snapshot-size", humanize.Bytes(uint64(n))), + zap.String("download-took", downloadTook.String()), + ) + + if err := h.r.Process(context.TODO(), m); err != nil { + switch v := err.(type) { + case writerToResponse: + v.WriteTo(w) + default: + msg := fmt.Sprintf("处理消息失败 (%v)", err) + h.lg.Warn("处理消息失败", zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Error(err), + ) + http.Error(w, msg, http.StatusInternalServerError) + } + return + } + w.WriteHeader(http.StatusNoContent) +} diff --git a/etcd/etcdserver/api/rafthttp/peer.go b/etcd/etcdserver/api/rafthttp/peer.go new file mode 100644 index 00000000000..d5d696b5983 --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/peer.go @@ -0,0 +1,370 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rafthttp + +import ( + "context" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "go.uber.org/zap" + "golang.org/x/time/rate" +) + +const ( + // ConnReadTimeout and ConnWriteTimeout are the i/o timeout set on each connection rafthttp pkg creates. + // A 5 seconds timeout is good enough for recycling bad connections. Or we have to wait for + // tcp keepalive failing to detect a bad connection, which is at minutes level. + // For long term streaming connections, rafthttp pkg sends application level linkHeartbeatMessage + // to keep the connection alive. + // For short term pipeline connections, the connection必须是killed to avoid it being + // put back to http pkg connection pool. + DefaultConnReadTimeout = 5 * time.Second + DefaultConnWriteTimeout = 5 * time.Second + + recvBufSize = 4096 + // maxPendingProposals holds the proposals during one leader election process. + // Generally one leader election takes at most 1 sec. It should have + // 0-2 election conflicts, and each one takes 0.5 sec. + // We assume the number of concurrent proposers is smaller than 4096. + // One client blocks on its proposal for at least 1 sec, so 4096 is enough + // to hold all proposals. + maxPendingProposals = 4096 + + streamAppV2 = "streamMsgAppV2" + streamMsg = "streamMsg" + pipelineMsg = "pipeline" + sendSnap = "sendMsgSnap" +) + +var ( + ConnReadTimeout = DefaultConnReadTimeout // 在每个rafthttp连接上设置的读取超时 5s + ConnWriteTimeout = DefaultConnWriteTimeout +) + +type Peer interface { + // send sends the message to the remote peer. The function is non-blocking + // and has no promise that the message will be received by the remote. + // When it fails to send message out, it will report the status to underlying + // raft. 
+ send(m raftpb.Message) + + // sendSnap sends the merged snapshot message to the remote peer. Its behavior + // is similar to send. + sendSnap(m snap.Message) + + // update updates the urls of remote peer. + update(urls types.URLs) + + // attachOutgoingConn attaches the outgoing connection to the peer for + // stream usage. After the call, the ownership of the outgoing + // connection hands over to the peer. The peer will close the connection + // when it is no longer used. + attachOutgoingConn(conn *outgoingConn) + // activeSince returns the time that the connection with the + // peer becomes active. + activeSince() time.Time + // stop performs any necessary finalization and terminates the peer + // elegantly. + stop() +} + +// peer is the representative of a remote raft node. Local raft node sends +// messages to the remote through peer. +// Each peer has two underlying mechanisms to send out a message: stream and +// pipeline. +// A stream is a receiver initialized long-polling connection, which +// is always open to transfer messages. Besides general stream, peer also has +// a optimized stream for sending msgApp since msgApp accounts for large part +// of all messages. Only raft leader uses the optimized stream to send msgApp +// to the remote follower node. +// A pipeline is a series of http clients that send http requests to the remote. +// It is only used when the stream has not been established. +type peer struct { + lg *zap.Logger + + localID types.ID + // id of the remote raft peer node + id types.ID + + r Raft + + status *peerStatus + + picker *urlPicker + + msgAppV2Writer *streamWriter + writer *streamWriter + pipeline *pipeline + snapSender *snapshotSender // snapshot sender to send v3 snapshot messages + msgAppV2Reader *streamReader + msgAppReader *streamReader + + recvc chan raftpb.Message + propc chan raftpb.Message + + mu sync.Mutex + paused bool + + cancel context.CancelFunc // cancel pending works in go routine created by peer. 
+ stopc chan struct{} +} + +func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer { + if t.Logger != nil { + t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String())) + } + defer func() { + if t.Logger != nil { + t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String())) + } + }() + + status := newPeerStatus(t.Logger, t.ID, peerID) + picker := newURLPicker(urls) + errorc := t.ErrorC + r := t.Raft + pipeline := &pipeline{ + peerID: peerID, + tr: t, + picker: picker, + status: status, + followerStats: fs, + raft: r, + errorc: errorc, + } + pipeline.start() + + p := &peer{ + lg: t.Logger, + localID: t.ID, + id: peerID, + r: r, + status: status, + picker: picker, + msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r), + writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r), + pipeline: pipeline, + snapSender: newSnapshotSender(t, picker, peerID, status), + recvc: make(chan raftpb.Message, recvBufSize), + propc: make(chan raftpb.Message, maxPendingProposals), + stopc: make(chan struct{}), + } + + ctx, cancel := context.WithCancel(context.Background()) + p.cancel = cancel + go func() { + for { + select { + case mm := <-p.recvc: + if err := r.Process(ctx, mm); err != nil { + if t.Logger != nil { + t.Logger.Warn("failed to process Raft message", zap.Error(err)) + } + } + case <-p.stopc: + return + } + } + }() + + // r.Process might block for processing proposal when there is no leader. + // Thus propc必须是put into a separate routine with recvc to avoid blocking + // processing other raft messages. 
+ go func() { + for { + select { + case mm := <-p.propc: + if err := r.Process(ctx, mm); err != nil { + if t.Logger != nil { + t.Logger.Warn("failed to process Raft message", zap.Error(err)) + } + } + case <-p.stopc: + return + } + } + }() + + p.msgAppV2Reader = &streamReader{ + lg: t.Logger, + peerID: peerID, + typ: streamTypeMsgAppV2, + tr: t, + picker: picker, + status: status, + recvc: p.recvc, + propc: p.propc, + rl: rate.NewLimiter(t.DialRetryFrequency, 1), + } + p.msgAppReader = &streamReader{ + lg: t.Logger, + peerID: peerID, + typ: streamTypeMessage, + tr: t, + picker: picker, + status: status, + recvc: p.recvc, + propc: p.propc, + rl: rate.NewLimiter(t.DialRetryFrequency, 1), + } + + p.msgAppV2Reader.start() + p.msgAppReader.start() + + return p +} + +func (p *peer) send(m raftpb.Message) { + p.mu.Lock() + paused := p.paused + p.mu.Unlock() + + if paused { + return + } + // 如果消息类型是snapshot则返回pipeline,如果是MsgApp则返回msgAppV2Writer,否则返回wirter + // wirtec创建是在 + writec, name := p.pick(m) + select { + /* 将消息写入channel中 + * 接收端的channel位于stream.go streamWriter.run msgc + */ + case writec <- m: + default: + p.r.ReportUnreachable(m.To) + if isMsgSnap(m) { + p.r.ReportSnapshot(m.To, raft.SnapshotFailure) + } + if p.status.isActive() { + if p.lg != nil { + p.lg.Warn( + "dropped internal Raft message since sending buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", p.localID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", p.id.String()), + zap.String("remote-peer-name", name), + zap.Bool("remote-peer-active", p.status.isActive()), + ) + } + } else { + if p.lg != nil { + p.lg.Warn( + "dropped internal Raft message since sending buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", p.localID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", p.id.String()), + 
zap.String("remote-peer-name", name), + zap.Bool("remote-peer-active", p.status.isActive()), + ) + } + } + } +} + +func (p *peer) sendSnap(m snap.Message) { + go p.snapSender.send(m) +} + +func (p *peer) update(urls types.URLs) { + p.picker.update(urls) +} + +func (p *peer) attachOutgoingConn(conn *outgoingConn) { + var ok bool + switch conn.t { + case streamTypeMsgAppV2: + ok = p.msgAppV2Writer.attach(conn) + case streamTypeMessage: + ok = p.writer.attach(conn) + default: + if p.lg != nil { + p.lg.Panic("未知的stream类型", zap.String("type", conn.t.String())) + } + } + if !ok { + conn.Close() + } +} + +func (p *peer) activeSince() time.Time { return p.status.activeSince() } + +// Pause pauses the peer. The peer will simply drops all incoming +// messages without returning an error. +func (p *peer) Pause() { + p.mu.Lock() + defer p.mu.Unlock() + p.paused = true + p.msgAppReader.pause() + p.msgAppV2Reader.pause() +} + +// Resume resumes a paused peer. +func (p *peer) Resume() { + p.mu.Lock() + defer p.mu.Unlock() + p.paused = false + p.msgAppReader.resume() + p.msgAppV2Reader.resume() +} + +func (p *peer) stop() { + if p.lg != nil { + p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String())) + } + + defer func() { + if p.lg != nil { + p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String())) + } + }() + + close(p.stopc) + p.cancel() + p.msgAppV2Writer.stop() + p.writer.stop() + p.pipeline.stop() + p.snapSender.stop() + p.msgAppV2Reader.stop() + p.msgAppReader.stop() +} + +// 根据消息的类型选择合适的消息通道, +func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) { + var ok bool + // Considering MsgSnap may have a big size, e.g., 1G, and will block + // stream for a long time, only use one of the N pipelines to send MsgSnap. 
+ if isMsgSnap(m) { + return p.pipeline.msgc, pipelineMsg + } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) { + return writec, streamAppV2 + } else if writec, ok = p.writer.writec(); ok { + return writec, streamMsg + } + return p.pipeline.msgc, pipelineMsg +} + +func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp } + +func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap } diff --git a/server/etcdserver/api/rafthttp/peer_status.go b/etcd/etcdserver/api/rafthttp/peer_status.go similarity index 87% rename from server/etcdserver/api/rafthttp/peer_status.go rename to etcd/etcdserver/api/rafthttp/peer_status.go index cad19b2fbce..108f87eb877 100644 --- a/server/etcdserver/api/rafthttp/peer_status.go +++ b/etcd/etcdserver/api/rafthttp/peer_status.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "go.etcd.io/etcd/client/pkg/v3/types" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" "go.uber.org/zap" ) @@ -30,7 +30,7 @@ type failureType struct { action string } -type peerStatus struct { +type peerStatus struct { // 节点状态 lg *zap.Logger local types.ID id types.ID @@ -54,7 +54,6 @@ func (s *peerStatus) activate() { s.active = true s.since = time.Now() - activePeers.WithLabelValues(s.local.String(), s.id.String()).Inc() } } @@ -67,8 +66,6 @@ func (s *peerStatus) deactivate(failure failureType, reason string) { s.active = false s.since = time.Time{} - activePeers.WithLabelValues(s.local.String(), s.id.String()).Dec() - disconnectedPeers.WithLabelValues(s.local.String(), s.id.String()).Inc() return } diff --git a/server/etcdserver/api/rafthttp/pipeline.go b/etcd/etcdserver/api/rafthttp/pipeline.go similarity index 87% rename from server/etcdserver/api/rafthttp/pipeline.go rename to etcd/etcdserver/api/rafthttp/pipeline.go index b8ff3dfcadb..040887e5f46 100644 --- a/server/etcdserver/api/rafthttp/pipeline.go +++ b/etcd/etcdserver/api/rafthttp/pipeline.go @@ -18,16 +18,17 @@ import ( "bytes" "context" "errors" - "io" + 
"io/ioutil" "runtime" "sync" "time" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/pbutil" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" "go.uber.org/zap" ) @@ -62,15 +63,15 @@ type pipeline struct { func (p *pipeline) start() { p.stopc = make(chan struct{}) - p.msgc = make(chan raftpb.Message, pipelineBufSize) - p.wg.Add(connPerPipeline) + p.msgc = make(chan raftpb.Message, pipelineBufSize) // 64 + p.wg.Add(connPerPipeline) // 4 for i := 0; i < connPerPipeline; i++ { go p.handle() } if p.tr != nil && p.tr.Logger != nil { p.tr.Logger.Info( - "started HTTP pipelining with remote peer", + "与远程对等端启动HTTP管道", zap.String("local-member-id", p.tr.ID.String()), zap.String("remote-peer-id", p.peerID.String()), ) @@ -83,7 +84,7 @@ func (p *pipeline) stop() { if p.tr != nil && p.tr.Logger != nil { p.tr.Logger.Info( - "stopped HTTP pipelining with remote peer", + "停止与远程对等端HTTP管道", zap.String("local-member-id", p.tr.ID.String()), zap.String("remote-peer-id", p.peerID.String()), ) @@ -110,7 +111,6 @@ func (p *pipeline) handle() { if isMsgSnap(m) { p.raft.ReportSnapshot(m.To, raft.SnapshotFailure) } - sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() continue } @@ -121,7 +121,6 @@ func (p *pipeline) handle() { if isMsgSnap(m) { p.raft.ReportSnapshot(m.To, raft.SnapshotFinish) } - sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size())) case <-p.stopc: return } @@ -154,7 +153,7 @@ func (p *pipeline) post(data []byte) (err error) { return err } defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) + b, err := ioutil.ReadAll(resp.Body) if err != nil { p.picker.unreachable(u) return err diff --git 
a/server/etcdserver/api/rafthttp/probing_status.go b/etcd/etcdserver/api/rafthttp/probing_status.go similarity index 78% rename from server/etcdserver/api/rafthttp/probing_status.go rename to etcd/etcdserver/api/rafthttp/probing_status.go index 672a579ce62..a35d5c0aa3b 100644 --- a/server/etcdserver/api/rafthttp/probing_status.go +++ b/etcd/etcdserver/api/rafthttp/probing_status.go @@ -31,32 +31,13 @@ const ( ) var ( - // proberInterval must be shorter than read timeout. + // proberInterval必须是shorter than read timeout. // Or the connection will time-out. proberInterval = ConnReadTimeout - time.Second statusMonitoringInterval = 30 * time.Second statusErrorInterval = 5 * time.Second ) -func addPeerToProber(lg *zap.Logger, p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) { - hus := make([]string, len(us)) - for i := range us { - hus[i] = us[i] + ProbingPrefix - } - - p.AddHTTP(id, proberInterval, hus) - - s, err := p.Status(id) - if err != nil { - if lg != nil { - lg.Warn("failed to add peer into prober", zap.String("remote-peer-id", id), zap.Error(err)) - } - return - } - - go monitorProbingStatus(lg, s, id, roundTripperName, rttSecProm) -} - func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) { // set the first interval short to log error early. 
interval := statusErrorInterval @@ -89,7 +70,6 @@ func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string, roundTrip ) } } - rttSecProm.WithLabelValues(id).Observe(s.SRTT().Seconds()) case <-s.StopNotify(): return diff --git a/etcd/etcdserver/api/rafthttp/raft_stream_api.go b/etcd/etcdserver/api/rafthttp/raft_stream_api.go new file mode 100644 index 00000000000..dca157f474e --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/raft_stream_api.go @@ -0,0 +1,140 @@ +package rafthttp + +import ( + "net/http" + "path" + "strings" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + + "go.uber.org/zap" +) + +type streamHandler struct { + lg *zap.Logger + tr *Transport + peerGetter peerGetter + r Raft + id types.ID + cid types.ID +} + +func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler { + h := &streamHandler{ + lg: t.Logger, + tr: t, + peerGetter: pg, + r: r, + id: id, + cid: cid, + } + if h.lg == nil { + h.lg = zap.NewNop() + } + return h +} + +// 添加远端节点的一个通信地址 +func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + w.Header().Set("Allow", "GET") + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("X-Server-Version", version.Version) + w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) + + if err := checkClusterCompatibilityFromHeader(h.lg, h.tr.ID, r.Header, h.cid); err != nil { + http.Error(w, err.Error(), http.StatusPreconditionFailed) // 状态 前提条件未通过 + return + } + + var t streamType + switch path.Dir(r.URL.Path) { + case streamTypeMsgAppV2.endpoint(h.lg): // /raft/stream/msgappv2 + t = streamTypeMsgAppV2 + case streamTypeMessage.endpoint(h.lg): // /raft/stream/message + t = streamTypeMessage + default: + h.lg.Debug("忽略意外的流请求路径", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("path", 
r.URL.Path), + ) + http.Error(w, "无效的路径", http.StatusNotFound) + return + } + + fromStr := path.Base(r.URL.Path) + from, err := types.IDFromString(fromStr) + if err != nil { + h.lg.Warn( + "无法将路径解析为ID", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("path", fromStr), + zap.Error(err), + ) + http.Error(w, "invalid from", http.StatusNotFound) + return + } + if h.r.IsIDRemoved(uint64(from)) { + h.lg.Warn( + "拒绝流,该节点已被移除", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("remote-peer-id-from", from.String()), + ) + http.Error(w, "该节点已被移除", http.StatusGone) + return + } + p := h.peerGetter.Get(from) + if p == nil { + // 这可能发生在以下情况: + // 1.用户启动的远端节点属于不同的集群,且集群ID相同. + // 2. 本地etcd落后于集群,无法识别在当前进度之后加入的成员. + if urls := r.Header.Get("X-PeerURLs"); urls != "" { + h.tr.AddRemote(from, strings.Split(urls, ",")) + } + h.lg.Warn( + "在集群中没有找到远端节点", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("remote-peer-id-from", from.String()), + zap.String("cluster-id", h.cid.String()), + ) + http.Error(w, "发送方没有发现该节点", http.StatusNotFound) + return + } + + wto := h.id.String() + if gto := r.Header.Get("X-Raft-To"); gto != wto { + h.lg.Warn( + "忽略流请求; ID 不匹配", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("remote-peer-id-header", gto), + zap.String("remote-peer-id-from", from.String()), + zap.String("cluster-id", h.cid.String()), + ) + http.Error(w, "to field mismatch", http.StatusPreconditionFailed) + return + } + /* 这个地方需要注意一下,此处并没有包把应答报文发出去,但是具体处理逻辑需要参考net/http中Flush */ + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + + c := newCloseNotifier() + conn := &outgoingConn{ + t: t, // 连接类型 + Writer: w, + Flusher: w.(http.Flusher), + Closer: c, + localID: h.tr.ID, + peerID: 
from, + } + p.attachOutgoingConn(conn) // 会发streamWriter run中connc操作 用于 + <-c.closeNotify() // 等待close channel,若一直没数据可读则阻塞 +} diff --git a/server/etcdserver/api/rafthttp/remote.go b/etcd/etcdserver/api/rafthttp/remote.go similarity index 91% rename from server/etcdserver/api/rafthttp/remote.go rename to etcd/etcdserver/api/rafthttp/remote.go index f40acbb9802..9b9afada521 100644 --- a/server/etcdserver/api/rafthttp/remote.go +++ b/etcd/etcdserver/api/rafthttp/remote.go @@ -15,8 +15,8 @@ package rafthttp import ( - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/raft/v3/raftpb" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/raft/raftpb" "go.uber.org/zap" ) @@ -30,8 +30,8 @@ type remote struct { } func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote { - picker := newURLPicker(urls) - status := newPeerStatus(tr.Logger, tr.ID, id) + picker := newURLPicker(urls) // ok + status := newPeerStatus(tr.Logger, tr.ID, id) // ok pipeline := &pipeline{ peerID: id, tr: tr, @@ -78,7 +78,6 @@ func (g *remote) send(m raftpb.Message) { ) } } - sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() } } diff --git a/server/etcdserver/api/rafthttp/snapshot_sender.go b/etcd/etcdserver/api/rafthttp/snapshot_sender.go similarity index 86% rename from server/etcdserver/api/rafthttp/snapshot_sender.go rename to etcd/etcdserver/api/rafthttp/snapshot_sender.go index 9b98474fe00..65d29504fc2 100644 --- a/server/etcdserver/api/rafthttp/snapshot_sender.go +++ b/etcd/etcdserver/api/rafthttp/snapshot_sender.go @@ -18,23 +18,23 @@ import ( "bytes" "context" "io" + "io/ioutil" "net/http" "time" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/httputil" - pioutil "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/raft/v3" - "github.com/dustin/go-humanize" + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + 
"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/pkg/httputil" + pioutil "github.com/ls-2018/etcd_cn/pkg/ioutil" + "go.uber.org/zap" ) -var ( - // timeout for reading snapshot response body - snapResponseReadTimeout = 5 * time.Second -) +// timeout for reading snapshot response body +var snapResponseReadTimeout = 5 * time.Second type snapshotSender struct { from, to types.ID @@ -66,8 +66,6 @@ func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *pe func (s *snapshotSender) stop() { close(s.stopc) } func (s *snapshotSender) send(merged snap.Message) { - start := time.Now() - m := merged.Message to := types.ID(m.To).String() @@ -89,11 +87,6 @@ func (s *snapshotSender) send(merged snap.Message) { ) } - snapshotSendInflights.WithLabelValues(to).Inc() - defer func() { - snapshotSendInflights.WithLabelValues(to).Dec() - }() - err := s.post(req) defer merged.CloseWithError(err) if err != nil { @@ -121,8 +114,6 @@ func (s *snapshotSender) send(merged snap.Message) { // machine knows about it, it would pause a while and retry sending // new snapshot message. s.r.ReportSnapshot(m.To, raft.SnapshotFailure) - sentFailures.WithLabelValues(to).Inc() - snapshotSendFailures.WithLabelValues(to).Inc() return } s.status.activate() @@ -137,10 +128,6 @@ func (s *snapshotSender) send(merged snap.Message) { zap.String("size", snapshotSize), ) } - - sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize)) - snapshotSend.WithLabelValues(to).Inc() - snapshotSendSeconds.WithLabelValues(to).Observe(time.Since(start).Seconds()) } // post posts the given request. @@ -168,7 +155,7 @@ func (s *snapshotSender) post(req *http.Request) (err error) { // prevents from reading the body forever when the other side dies right after // successfully receives the request body. 
time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) }) - body, err := io.ReadAll(resp.Body) + body, err := ioutil.ReadAll(resp.Body) result <- responseAndError{resp, body, err} }() diff --git a/server/etcdserver/api/rafthttp/stream.go b/etcd/etcdserver/api/rafthttp/stream.go similarity index 81% rename from server/etcdserver/api/rafthttp/stream.go rename to etcd/etcdserver/api/rafthttp/stream.go index c8a1f1fb5ea..3dcdfd85ba4 100644 --- a/server/etcdserver/api/rafthttp/stream.go +++ b/etcd/etcdserver/api/rafthttp/stream.go @@ -18,18 +18,19 @@ import ( "context" "fmt" "io" + "io/ioutil" "net/http" "path" "strings" "sync" "time" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/httputil" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3/raftpb" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + "github.com/ls-2018/etcd_cn/pkg/httputil" + "github.com/ls-2018/etcd_cn/raft/raftpb" "github.com/coreos/go-semver/semver" "go.uber.org/zap" @@ -58,7 +59,6 @@ var ( "3.3.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.4.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.5.0": {streamTypeMsgAppV2, streamTypeMessage}, - "3.6.0": {streamTypeMsgAppV2, streamTypeMessage}, } ) @@ -66,13 +66,13 @@ type streamType string func (t streamType) endpoint(lg *zap.Logger) string { switch t { - case streamTypeMsgAppV2: + case streamTypeMsgAppV2: // /raft/stream/msgappv2 return path.Join(RaftStreamPrefix, "msgapp") - case streamTypeMessage: + case streamTypeMessage: // /raft/stream/message return path.Join(RaftStreamPrefix, "message") default: if lg != nil { - lg.Panic("unhandled stream type", zap.String("stream-type", t.String())) + lg.Panic("无法处理的路由", 
zap.String("stream-type", t.String())) } return "" } @@ -89,19 +89,17 @@ func (t streamType) String() string { } } -var ( - // linkHeartbeatMessage is a special message used as heartbeat message in - // link layer. It never conflicts with messages from raft because raft - // doesn't send out messages without From and To fields. - linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat} -) +// linkHeartbeatMessage is a special message used as heartbeat message in +// link layer. It never conflicts with messages from raft because raft +// doesn't send out messages without From and To fields. +var linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat} func isLinkHeartbeatMessage(m *raftpb.Message) bool { return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0 } type outgoingConn struct { - t streamType + t streamType // 连接类型 io.Writer http.Flusher io.Closer @@ -154,12 +152,12 @@ func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, f func (cw *streamWriter) run() { var ( - msgc chan raftpb.Message - heartbeatc <-chan time.Time - t streamType - enc encoder - flusher http.Flusher - batched int + msgc chan raftpb.Message // 指向当前streamWriter. 
msgc字段 + heartbeatc <-chan time.Time // 定时器会定时向该通道发送信号, 触发心跳消息的发送,该心跳消息与后面介绍的Raft的心跳消息有所不同,该心跳消息的主要目的是为了防止连接长时间不用断升的 + t streamType // 用来记录消息的版本信息 + enc encoder // 编码器,负责将消息序列化并写入连接的缓冲区 + flusher http.Flusher // 负责刷新底层连接,将数据真正发送出去 + batched int // 当前未Flush的消息个数 ) tickc := time.NewTicker(ConnReadTimeout / 3) defer tickc.Stop() @@ -176,19 +174,18 @@ func (cw *streamWriter) run() { for { select { case <-heartbeatc: + // 不是raft心跳消息,是为了防止链接超时 err := enc.encode(&linkHeartbeatMessage) unflushed += linkHeartbeatMessage.Size() if err == nil { flusher.Flush() batched = 0 - sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed)) unflushed = 0 continue } cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error()) - sentFailures.WithLabelValues(cw.peerID.String()).Inc() cw.close() if cw.lg != nil { cw.lg.Warn( @@ -201,24 +198,23 @@ func (cw *streamWriter) run() { heartbeatc, msgc = nil, nil case m := <-msgc: - err := enc.encode(&m) + err := enc.encode(&m) // 格式化消息,如选举消息 if err == nil { unflushed += m.Size() - - if len(msgc) == 0 || batched > streamBufSize/2 { - flusher.Flush() - sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed)) + // msgc通道中的消息全部发送完成或是未Flush的消息较多,则触发Flush,否则只是递增batched变量 + if len(msgc) == 0 || batched > streamBufSize/2 { // batched批处理 streamBufSize全局变量 4096 + flusher.Flush() // 刷新缓冲区,发送到对端.Flush代码为net/http模块 unflushed = 0 batched = 0 } else { batched++ } - + // 发送完成就返回上层 并没有结束会话 continue } cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error()) - cw.close() + cw.close() // 表示本次收发消息结束 即http会话结束 if cw.lg != nil { cw.lg.Warn( "lost TCP streaming connection with remote peer", @@ -229,9 +225,8 @@ func (cw *streamWriter) run() { } heartbeatc, msgc = nil, nil cw.r.ReportUnreachable(m.To) - sentFailures.WithLabelValues(cw.peerID.String()).Inc() - case conn := <-cw.connc: + case conn := <-cw.connc: // 从channel读取conn对象,表示会话已经建立 cw.mu.Lock() closed := cw.closeUnlocked() t = conn.t @@ 
-253,17 +248,17 @@ func (cw *streamWriter) run() { zap.String("stream-type", t.String()), ) } - flusher = conn.Flusher + flusher = conn.Flusher // 记录底层连接对应的Flusher unflushed = 0 - cw.status.activate() - cw.closer = conn.Closer - cw.working = true + cw.status.activate() // peerStatus.activeit直为true + cw.closer = conn.Closer // 记录底层连接对应的Flusher + cw.working = true // 标识当前streamWriter正在运行 cw.mu.Unlock() if closed { if cw.lg != nil { cw.lg.Warn( - "closed TCP streaming connection with remote peer", + "关闭与远端节点的TCP链接", zap.String("stream-writer-type", t.String()), zap.String("local-member-id", cw.localID.String()), zap.String("remote-peer-id", cw.peerID.String()), @@ -272,19 +267,19 @@ func (cw *streamWriter) run() { } if cw.lg != nil { cw.lg.Info( - "established TCP streaming connection with remote peer", + "与远端节点建立了TCP链接", zap.String("stream-writer-type", t.String()), zap.String("local-member-id", cw.localID.String()), zap.String("remote-peer-id", cw.peerID.String()), ) } - heartbeatc, msgc = tickc.C, cw.msgc + heartbeatc, msgc = tickc.C, cw.msgc // 保存心跳和message的通道 case <-cw.stopc: if cw.close() { if cw.lg != nil { cw.lg.Warn( - "closed TCP streaming connection with remote peer", + "关闭与远端节点的TCP链接", zap.String("stream-writer-type", t.String()), zap.String("remote-peer-id", cw.peerID.String()), ) @@ -292,7 +287,7 @@ func (cw *streamWriter) run() { } if cw.lg != nil { cw.lg.Info( - "stopped TCP streaming connection with remote peer", + "停止与远端节点的TCP链接", zap.String("stream-writer-type", t.String()), zap.String("remote-peer-id", cw.peerID.String()), ) @@ -389,11 +384,10 @@ func (cr *streamReader) start() { } func (cr *streamReader) run() { - t := cr.typ + t := cr.typ // msgappv2 if cr.lg != nil { - cr.lg.Info( - "started stream reader with remote peer", + cr.lg.Info("开始与远程节点进行流式阅读", zap.String("stream-reader-type", t.String()), zap.String("local-member-id", cr.tr.ID.String()), zap.String("remote-peer-id", cr.peerID.String()), @@ -409,27 +403,16 @@ func (cr *streamReader) 
run() { } else { cr.status.activate() if cr.lg != nil { - cr.lg.Info( - "established TCP streaming connection with remote peer", - zap.String("stream-reader-type", cr.typ.String()), - zap.String("local-member-id", cr.tr.ID.String()), - zap.String("remote-peer-id", cr.peerID.String()), - ) + cr.lg.Info("已建立的TCP流媒体连接与远程节点", zap.String("stream-reader-type", cr.typ.String()), zap.String("local-member-id", cr.tr.ID.String()), zap.String("remote-peer-id", cr.peerID.String())) } err = cr.decodeLoop(rc, t) if cr.lg != nil { - cr.lg.Warn( - "lost TCP streaming connection with remote peer", - zap.String("stream-reader-type", cr.typ.String()), - zap.String("local-member-id", cr.tr.ID.String()), - zap.String("remote-peer-id", cr.peerID.String()), - zap.Error(err), - ) + cr.lg.Warn("丢失TCP流媒体连接与远程节点", zap.String("stream-reader-type", cr.typ.String()), zap.String("local-member-id", cr.tr.ID.String()), zap.String("remote-peer-id", cr.peerID.String()), zap.Error(err)) } switch { - // all data is read out + // 读取了所有数据 case err == io.EOF: - // connection is closed by the remote + // 远端节点关闭了链接 case transport.IsClosedConnError(err): default: cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) @@ -490,7 +473,7 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { // gofail: labelRaftDropHeartbeat: for { - m, err := dec.decode() + m, err := dec.decode() // 阻塞等待消息 if err != nil { cr.mu.Lock() cr.close() @@ -500,7 +483,6 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { // gofail-go: var raftDropHeartbeat struct{} // continue labelRaftDropHeartbeat - receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size())) cr.mu.Lock() paused := cr.paused @@ -523,7 +505,7 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { } select { - case recvc <- m: + case recvc <- m: // 将消息写到channel中 channel另外一段是rafthttp/peer.go startPeer default: if cr.status.isActive() { if cr.lg != nil 
{ @@ -548,7 +530,6 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { ) } } - recvFailures.WithLabelValues(types.ID(m.From).String()).Inc() } } } @@ -628,7 +609,7 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID) case http.StatusPreconditionFailed: - b, err := io.ReadAll(resp.Body) + b, err := ioutil.ReadAll(resp.Body) if err != nil { cr.picker.unreachable(u) return nil, err @@ -640,7 +621,7 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { case errIncompatibleVersion.Error(): if cr.lg != nil { cr.lg.Warn( - "request sent was ignored by remote peer due to server version incompatibility", + "request sent was ignored by remote peer due to etcd version incompatibility", zap.String("local-member-id", cr.tr.ID.String()), zap.String("remote-peer-id", cr.peerID.String()), zap.Error(errIncompatibleVersion), diff --git a/etcd/etcdserver/api/rafthttp/transport_api.go b/etcd/etcdserver/api/rafthttp/transport_api.go new file mode 100644 index 00000000000..f8e2811f0a0 --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/transport_api.go @@ -0,0 +1,421 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rafthttp + +import ( + "context" + "net/http" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "github.com/xiang90/probing" + "go.uber.org/zap" + "golang.org/x/time/rate" +) + +type Raft interface { + Process(ctx context.Context, m raftpb.Message) error + IsIDRemoved(id uint64) bool + ReportUnreachable(id uint64) + ReportSnapshot(id uint64, status raft.SnapshotStatus) +} + +type Transporter interface { + // Start starts the given Transporter. + // Start必须是called before calling other functions in the interface. + Start() error + Handler() http.Handler + // Send sends out the given messages to the remote peers. + // Each message has a To field, which is an id that maps + // to an existing peer in the transport. + // If the id cannot be found in the transport, the message + // will be ignored. + Send(m []raftpb.Message) + // SendSnapshot sends out the given snapshot message to a remote peer. + // The behavior of SendSnapshot is similar to Send. + SendSnapshot(m snap.Message) + AddRemote(id types.ID, urls []string) + AddPeer(id types.ID, urls []string) // 链接远端的节点 + RemovePeer(id types.ID) // 移除远端节点的链接 + // RemoveAllPeers removes all the existing peers in the transport. + RemoveAllPeers() + // UpdatePeer updates the peer urls of the peer with the given id. + // It is the caller's responsibility to ensure the urls are all valid, + // or it panics. + UpdatePeer(id types.ID, urls []string) + ActiveSince(id types.ID) time.Time // 返回与给定id的对等体的连接开始活动的时间 + // ActivePeers returns the number of active peers. + ActivePeers() int + // Stop closes the connections and stops the transporter. + Stop() +} + +// Transport implements Transporter interface. 
It provides the functionality +// to send raft messages to peers, and receive raft messages from peers. +// User should call Handler method to get a handler to serve requests +// received from peerURLs. +// User needs to call Start before calling other functions, and call +// Stop when the Transport is no longer used. +type Transport struct { + Logger *zap.Logger + DialTimeout time.Duration // maximum duration before timing out dial of the request + // DialRetryFrequency defines the frequency of streamReader dial retrial attempts; + // a distinct rate limiter is created per every peer (default value: 10 events/sec) + DialRetryFrequency rate.Limit + TLSInfo transport.TLSInfo // TLS information used when creating connection + ID types.ID // 本节点ID + URLs types.URLs // local peer URLs + ClusterID types.ID // 集群标识符 + Raft Raft // raft状态机,Transport向其转发收到的信息并报告状态. + Snapshotter *snap.Snapshotter + ServerStats *stats.ServerStats // used to record general transportation statistics + // used to record transportation statistics with followers when + // performing as leader in raft protocol + LeaderStats *stats.LeaderStats + // ErrorC is used to report detected critical errors, e.g., + // the member has been permanently removed from the cluster + // When an error is received from ErrorC, user should stop raft state + // machine and thus stop the Transport. + ErrorC chan error + streamRt http.RoundTripper //( http.RoundTripper类型): Stream消息通道中使用的http. RoundTripper实例. + pipelineRt http.RoundTripper // ( http.RoundTripper 类型):Pipeline 消息通道中使用的http.RoundTripper实例 + mu sync.RWMutex // protect the remote and peer map + peers map[types.ID]Peer + remotes map[types.ID]*remote // 类型): remote 中只封装了pipeline 实例,remote主要负责发送快照数据,帮助新加入的节点快速追赶上其他节点的数据. 
+ pipelineProber probing.Prober + streamProber probing.Prober +} + +func (t *Transport) Start() error { + var err error + t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout) + if err != nil { + return err + } + t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout) + if err != nil { + return err + } + t.remotes = make(map[types.ID]*remote) + t.peers = make(map[types.ID]Peer) + t.pipelineProber = probing.NewProber(t.pipelineRt) + t.streamProber = probing.NewProber(t.streamRt) + + // If client didn't provide dial retry frequency, use the default + // (100ms backoff between attempts to create a new stream), + // so it doesn't bring too much overhead when retry. + if t.DialRetryFrequency == 0 { + t.DialRetryFrequency = rate.Every(100 * time.Millisecond) + } + return nil +} + +// Handler ✅ +func (t *Transport) Handler() http.Handler { + //_ = etcdserver.EtcdServer{} // loop import + pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID) + streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID) + snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID) + mux := http.NewServeMux() + mux.Handle(RaftPrefix, pipelineHandler) // /raft + mux.Handle(RaftStreamPrefix+"/", streamHandler) // /raft/stream/ + mux.Handle(RaftSnapshotPrefix, snapHandler) // /raft/snapshot // ✅ + mux.Handle(ProbingPrefix, probing.NewHandler()) // /raft/probing // ✅ + return mux +} + +func (t *Transport) Get(id types.ID) Peer { + t.mu.RLock() + defer t.mu.RUnlock() + return t.peers[id] +} + +// Send ok +func (t *Transport) Send(msgs []raftpb.Message) { + for _, m := range msgs { + if m.To == 0 { + // 忽略故意丢弃的消息 + continue + } + to := types.ID(m.To) + + t.mu.RLock() + p, pok := t.peers[to] + g, rok := t.remotes[to] + t.mu.RUnlock() + + if pok { + if m.Type == raftpb.MsgApp { + t.ServerStats.SendAppendReq(m.Size()) + } + p.send(m) + continue + } + if rok { + g.send(m) + continue + } + if t.Logger != nil { + t.Logger.Debug( + "忽略消息发送请求;未知远程对等目标", + 
zap.String("type", m.Type.String()), + zap.String("unknown-target-peer-id", to.String()), + ) + } + } +} + +func (t *Transport) Stop() { + t.mu.Lock() + defer t.mu.Unlock() + for _, r := range t.remotes { + r.stop() + } + for _, p := range t.peers { + p.stop() + } + t.pipelineProber.RemoveAll() + t.streamProber.RemoveAll() + if tr, ok := t.streamRt.(*http.Transport); ok { + tr.CloseIdleConnections() + } + if tr, ok := t.pipelineRt.(*http.Transport); ok { + tr.CloseIdleConnections() + } + t.peers = nil + t.remotes = nil +} + +// CutPeer drops messages to the specified peer. +func (t *Transport) CutPeer(id types.ID) { + t.mu.RLock() + p, pok := t.peers[id] + g, gok := t.remotes[id] + t.mu.RUnlock() + + if pok { + p.(Pausable).Pause() + } + if gok { + g.Pause() + } +} + +// MendPeer recovers the message dropping behavior of the given peer. +func (t *Transport) MendPeer(id types.ID) { + t.mu.RLock() + p, pok := t.peers[id] + g, gok := t.remotes[id] + t.mu.RUnlock() + + if pok { + p.(Pausable).Resume() + } + if gok { + g.Resume() + } +} + +// AddRemote 添加远程节点 +func (t *Transport) AddRemote(id types.ID, us []string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.remotes == nil { + // TODO there's no clean way to shutdown the golang http etcd + // (see: https://github.com/golang/go/issues/4674) before + // stopping the transport; ignore any new connections. 
+ return + } + if _, ok := t.peers[id]; ok { + return // 存在 + } + if _, ok := t.remotes[id]; ok { + return // 存在 + } + urls, err := types.NewURLs(us) + if err != nil { + if t.Logger != nil { + t.Logger.Panic("失败 NewURLs", zap.Strings("urls", us), zap.Error(err)) + } + } + t.remotes[id] = startRemote(t, urls, id) + + if t.Logger != nil { + t.Logger.Info("添加一个远端节点的通信地址", + zap.String("local-member-id", t.ID.String()), + zap.String("remote-peer-id", id.String()), + zap.Strings("remote-peer-urls", us), + ) + } +} + +// AddPeer 添加伙伴节点 +func (t *Transport) AddPeer(id types.ID, us []string) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.peers == nil { + panic("transport stopped") + } + if _, ok := t.peers[id]; ok { + return + } + urls, err := types.NewURLs(us) + if err != nil { + if t.Logger != nil { + t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) + } + } + fs := t.LeaderStats.Follower(id.String()) + t.peers[id] = startPeer(t, urls, id, fs) + + if t.Logger != nil { + t.Logger.Info( + "added remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("remote-peer-id", id.String()), + zap.Strings("remote-peer-urls", us), + ) + } +} + +func (t *Transport) RemovePeer(id types.ID) { + t.mu.Lock() + defer t.mu.Unlock() + t.removePeer(id) +} + +func (t *Transport) RemoveAllPeers() { + t.mu.Lock() + defer t.mu.Unlock() + for id := range t.peers { + t.removePeer(id) + } +} + +// the caller of this function must have the peers mutex. 
+func (t *Transport) removePeer(id types.ID) { + if peer, ok := t.peers[id]; ok { + peer.stop() + } else { + if t.Logger != nil { + t.Logger.Panic("unexpected removal of unknown remote peer", zap.String("remote-peer-id", id.String())) + } + } + delete(t.peers, id) + delete(t.LeaderStats.Followers, id.String()) + t.pipelineProber.Remove(id.String()) + t.streamProber.Remove(id.String()) + + if t.Logger != nil { + t.Logger.Info( + "removed remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("removed-remote-peer-id", id.String()), + ) + } +} + +func (t *Transport) UpdatePeer(id types.ID, us []string) { + t.mu.Lock() + defer t.mu.Unlock() + // TODO: return error or just panic? + if _, ok := t.peers[id]; !ok { + return + } + urls, err := types.NewURLs(us) + if err != nil { + if t.Logger != nil { + t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) + } + } + t.peers[id].update(urls) + + t.pipelineProber.Remove(id.String()) + t.streamProber.Remove(id.String()) + + if t.Logger != nil { + t.Logger.Info( + "updated remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("updated-remote-peer-id", id.String()), + zap.Strings("updated-remote-peer-urls", us), + ) + } +} + +func (t *Transport) ActiveSince(id types.ID) time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + if p, ok := t.peers[id]; ok { + return p.activeSince() + } + return time.Time{} +} + +func (t *Transport) SendSnapshot(m snap.Message) { + t.mu.Lock() + defer t.mu.Unlock() + p := t.peers[types.ID(m.To)] + if p == nil { + m.CloseWithError(errMemberNotFound) + return + } + p.sendSnap(m) +} + +// Pausable is a testing interface for pausing transport traffic. 
+type Pausable interface { + Pause() + Resume() +} + +func (t *Transport) Pause() { + t.mu.RLock() + defer t.mu.RUnlock() + for _, p := range t.peers { + p.(Pausable).Pause() + } +} + +func (t *Transport) Resume() { + t.mu.RLock() + defer t.mu.RUnlock() + for _, p := range t.peers { + p.(Pausable).Resume() + } +} + +// ActivePeers returns a channel that closes when an initial +// peer connection has been established. Use this to wait until the +// first peer connection becomes active. +func (t *Transport) ActivePeers() (cnt int) { + t.mu.RLock() + defer t.mu.RUnlock() + for _, p := range t.peers { + if !p.activeSince().IsZero() { + cnt++ + } + } + return cnt +} diff --git a/server/etcdserver/api/rafthttp/urlpick.go b/etcd/etcdserver/api/rafthttp/urlpick.go similarity index 96% rename from server/etcdserver/api/rafthttp/urlpick.go rename to etcd/etcdserver/api/rafthttp/urlpick.go index fc6054a78ab..659aa3d675f 100644 --- a/server/etcdserver/api/rafthttp/urlpick.go +++ b/etcd/etcdserver/api/rafthttp/urlpick.go @@ -18,7 +18,7 @@ import ( "net/url" "sync" - "go.etcd.io/etcd/client/pkg/v3/types" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" ) type urlPicker struct { diff --git a/etcd/etcdserver/api/rafthttp/util.go b/etcd/etcdserver/api/rafthttp/util.go new file mode 100644 index 00000000000..36454886a4d --- /dev/null +++ b/etcd/etcdserver/api/rafthttp/util.go @@ -0,0 +1,193 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package rafthttp + +import ( + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +var ( + errMemberRemoved = fmt.Errorf("成员已经从集群中移除") + errMemberNotFound = fmt.Errorf("成员没有找到") +) + +// NewListener returns a listener for raft message transfer between peers. +// It uses timeout listener to identify broken streams promptly. +func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) { + return transport.NewListenerWithOpts(u.Host, u.Scheme, transport.WithTLSInfo(tlsinfo), transport.WithTimeout(ConnReadTimeout, ConnWriteTimeout)) +} + +// NewRoundTripper 返回一个roundTripper,用于向远程peer的rafthttp监听器发送请求. +func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { + // 它使用超时传输,与远程超时listeners 配对.它没有设置读/写超时,因为请求中的信息在读出响应之前可能需要很长的时间来写出来. + return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0) +} + +// newStreamRoundTripper returns a roundTripper used to send stream requests +// to rafthttp listener of remote peers. +// Read/write timeout is set for stream roundTripper to promptly +// find out broken status, which minimizes the number of messages +// sent on broken connection. +func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { + return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) +} + +// createPostRequest creates a HTTP POST request that sends raft message. 
+func createPostRequest(lg *zap.Logger, u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { + uu := u + uu.Path = path + req, err := http.NewRequest("POST", uu.String(), body) + if err != nil { + if lg != nil { + lg.Panic("unexpected new request error", zap.Error(err)) + } + } + req.Header.Set("Content-Type", ct) + req.Header.Set("X-Server-From", from.String()) + req.Header.Set("X-Server-Version", version.Version) + req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion) + req.Header.Set("X-Etcd-Cluster-ID", cid.String()) + setPeerURLsHeader(req, urls) + + return req +} + +// checkPostResponse checks the response of the HTTP POST request that sends +// raft message. +func checkPostResponse(lg *zap.Logger, resp *http.Response, body []byte, req *http.Request, to types.ID) error { + switch resp.StatusCode { + case http.StatusPreconditionFailed: + switch strings.TrimSuffix(string(body), "\n") { + case errIncompatibleVersion.Error(): + if lg != nil { + lg.Error( + "request sent was ignored by peer", + zap.String("remote-peer-id", to.String()), + ) + } + return errIncompatibleVersion + case errClusterIDMismatch.Error(): + if lg != nil { + lg.Error( + "request sent was ignored due to cluster ID mismatch", + zap.String("remote-peer-id", to.String()), + zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")), + zap.String("local-member-cluster-id", req.Header.Get("X-Etcd-Cluster-ID")), + ) + } + return errClusterIDMismatch + default: + return fmt.Errorf("unhandled error %q when precondition failed", string(body)) + } + case http.StatusForbidden: + return errMemberRemoved + case http.StatusNoContent: + return nil + default: + return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String()) + } +} + +// reportCriticalError reports the given error through sending it into +// the given error channel. 
+// If the error channel is filled up when sending error, it drops the error +// because the fact that error has happened is reported, which is +// good enough. +func reportCriticalError(err error, errc chan<- error) { + select { + case errc <- err: + default: + } +} + +// setPeerURLsHeader reports local urls for peer discovery +func setPeerURLsHeader(req *http.Request, urls types.URLs) { + if urls == nil { + // often not set in unit tests + return + } + peerURLs := make([]string, urls.Len()) + for i := range urls { + peerURLs[i] = urls[i].String() + } + req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ",")) +} + +// ----------------------------------------- OVER ---------------------------------------------------- + +// addRemoteFromRequest 根据http请求头添加一个远程对等体 +func addRemoteFromRequest(tr Transporter, r *http.Request) { + if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil { + if urls := r.Header.Get("X-PeerURLs"); urls != "" { + tr.AddRemote(from, strings.Split(urls, ",")) + } + } +} + +func serverVersion(h http.Header) *semver.Version { + verStr := h.Get("X-Server-Version") + if verStr == "" { + verStr = "2.0.0" + } + return semver.Must(semver.NewVersion(verStr)) +} + +func minClusterVersion(h http.Header) *semver.Version { + verStr := h.Get("X-Min-Cluster-Version") + if verStr == "" { + verStr = "2.0.0" + } + return semver.Must(semver.NewVersion(verStr)) +} + +// compareMajorMinorVersion 比较两个版本 +func compareMajorMinorVersion(a, b *semver.Version) int { + na := &semver.Version{Major: a.Major, Minor: a.Minor} + nb := &semver.Version{Major: b.Major, Minor: b.Minor} + switch { + case na.LessThan(*nb): + return -1 + case nb.LessThan(*na): + return 1 + default: + return 0 + } +} + +// checkVersionCompatibility 检查给定的版本是否与本地的版本兼容 +func checkVersionCompatibility(name string, server, minCluster *semver.Version) (localServer *semver.Version, localMinCluster *semver.Version, err error) { + localServer = 
semver.Must(semver.NewVersion(version.Version)) + localMinCluster = semver.Must(semver.NewVersion(version.MinClusterVersion)) + if compareMajorMinorVersion(server, localMinCluster) == -1 { + return localServer, localMinCluster, fmt.Errorf("远端版本太低: remote[%s]=%s, local=%s", name, server, localServer) + } + if compareMajorMinorVersion(minCluster, localServer) == 1 { + return localServer, localMinCluster, fmt.Errorf("本地版本太低: remote[%s]=%s, local=%s", name, server, localServer) + } + return localServer, localMinCluster, nil +} diff --git a/etcd/etcdserver/api/snap/doc.go b/etcd/etcdserver/api/snap/doc.go new file mode 100644 index 00000000000..adad91d742c --- /dev/null +++ b/etcd/etcdserver/api/snap/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package snap handles Raft nodes' states with snapshots. +// The snapshot logic is internal to etcd etcd and raft package. 
+package snap diff --git a/server/etcdserver/api/snap/message.go b/etcd/etcdserver/api/snap/message.go similarity index 88% rename from server/etcdserver/api/snap/message.go rename to etcd/etcdserver/api/snap/message.go index 2b4090c981d..0da6c10a19c 100644 --- a/server/etcdserver/api/snap/message.go +++ b/etcd/etcdserver/api/snap/message.go @@ -17,18 +17,19 @@ package snap import ( "io" - "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/raft/v3/raftpb" + "github.com/ls-2018/etcd_cn/pkg/ioutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" ) // Message is a struct that contains a raft Message and a ReadCloser. The type -// of raft message MUST be MsgSnap, which contains the raft meta-data and an +// of raft message必须是MsgSnap, which contains the raft meta-data and an // additional data []byte field that contains the snapshot of the actual state // machine. // Message contains the ReadCloser field for handling large snapshot. This avoid // copying the entire snapshot into a byte array, which consumes a lot of memory. // // User of Message should close the Message after sending it. +// Message是所有消息的抽象,包括了各种类型消息所需要的字段 type Message struct { raftpb.Message ReadCloser io.ReadCloser diff --git a/etcd/etcdserver/api/snap/over_db.go b/etcd/etcdserver/api/snap/over_db.go new file mode 100644 index 00000000000..e3a26980a93 --- /dev/null +++ b/etcd/etcdserver/api/snap/over_db.go @@ -0,0 +1,92 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package snap + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" +) + +var ErrNoDBSnapshot = errors.New("snap: 快照文件不存在") + +// SaveDBFrom 从给定的reader中保存数据库的快照.它保证 save操作是原子性的. +func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { + f, err := ioutil.TempFile(s.dir, "tmp") + if err != nil { + return 0, err + } + var n int64 + n, err = io.Copy(f, r) + if err == nil { + err = fileutil.Fsync(f) + } + f.Close() + if err != nil { + os.Remove(f.Name()) + return n, err + } + fn := s.dbFilePath(id) + if fileutil.Exist(fn) { + os.Remove(f.Name()) + return n, nil + } + err = os.Rename(f.Name(), fn) + if err != nil { + os.Remove(f.Name()) + return n, err + } + + s.lg.Info( + "保存快照到硬盘", + zap.String("path", fn), + zap.Int64("bytes", n), + zap.String("size", humanize.Bytes(uint64(n))), + ) + + return n, nil +} + +// DBFilePath 返回给定id的数据库快照的文件路径.如果该快照不存在,则返回错误. 
+func (s *Snapshotter) DBFilePath(id uint64) (string, error) { + if _, err := fileutil.ReadDir(s.dir); err != nil { + return "", err + } + fn := s.dbFilePath(id) + if fileutil.Exist(fn) { + return fn, nil + } + if s.lg != nil { + s.lg.Warn( + "查找快照失败 [SNAPSHOT-INDEX].snap.db", + zap.Uint64("snapshot-index", id), + zap.String("snapshot-file-path", fn), + zap.Error(ErrNoDBSnapshot), + ) + } + return "", ErrNoDBSnapshot +} + +func (s *Snapshotter) dbFilePath(id uint64) string { + return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) +} diff --git a/etcd/etcdserver/api/snap/snappb/over.go b/etcd/etcdserver/api/snap/snappb/over.go new file mode 100644 index 00000000000..a048b69e0ca --- /dev/null +++ b/etcd/etcdserver/api/snap/snappb/over.go @@ -0,0 +1,30 @@ +package snappb + +import ( + "encoding/json" +) + +type temp struct { + Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` + Data string `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + t := temp{ + Crc: m.Crc, + Data: string(m.Data), + } + return json.Marshal(t) +} + +func (m *Snapshot) Unmarshal(dAtA []byte) error { + t := temp{ + Crc: m.Crc, + Data: string(m.Data), + } + + err := json.Unmarshal(dAtA, m) + m.Crc = t.Crc + m.Data = []byte(t.Data) + return err +} diff --git a/etcd/etcdserver/api/snap/snappb/snap.pb.go b/etcd/etcdserver/api/snap/snappb/snap.pb.go new file mode 100644 index 00000000000..ad51a8f9c20 --- /dev/null +++ b/etcd/etcdserver/api/snap/snappb/snap.pb.go @@ -0,0 +1,73 @@ +// Code generated by protoc-gen-gogo. +// source: snap.proto + +package snappb + +import ( + "encoding/json" + fmt "fmt" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal

var (
	_ = fmt.Errorf
	_ = math.Inf
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// Snapshot pairs a payload with its CRC-32 checksum; Marshal/Unmarshal are
// overridden in over.go to use JSON instead of protobuf wire format.
type Snapshot struct {
	Crc  uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
	Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
}

func (m *Snapshot) Reset()         { *m = Snapshot{} }
func (m *Snapshot) String() string { return proto.CompactTextString(m) }
func (*Snapshot) ProtoMessage()    {}
func (*Snapshot) Descriptor() ([]byte, []int) {
	return fileDescriptor_f2e3c045ebf84d00, []int{0}
}

func init() {
	proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
}

func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) }

var fileDescriptor_f2e3c045ebf84d00 = []byte{
	// 126 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
	0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
	0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
	0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
	0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
	0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1,
	0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
	0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00,
}

// Size reports the encoded length. NOTE(review): this fork re-marshals via
// JSON just to measure length, which is O(payload) per call and ignores the
// marshal error — acceptable only because Marshal of this struct cannot fail.
func (m *Snapshot) Size() (n int) {
	marshal, _ := json.Marshal(m)
	return len(marshal)
}

// sovSnap returns the protobuf varint-encoded size of x (unused by the JSON
// override above; kept from the generated code).
func sovSnap(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}

var (
	ErrInvalidLengthSnap        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowSnap          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupSnap = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/server/etcdserver/api/snap/snappb/snap.proto b/etcd/etcdserver/api/snap/snappb/snap.proto
similarity index 100%
rename from server/etcdserver/api/snap/snappb/snap.proto
rename to etcd/etcdserver/api/snap/snappb/snap.proto
diff --git a/etcd/etcdserver/api/snap/snapshotter.go b/etcd/etcdserver/api/snap/snapshotter.go
new file mode 100644
index 00000000000..9490a261f55
--- /dev/null
+++ b/etcd/etcdserver/api/snap/snapshotter.go
@@ -0,0 +1,296 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package snap + +import ( + "errors" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap/snappb" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + pioutil "github.com/ls-2018/etcd_cn/pkg/ioutil" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "go.uber.org/zap" +) + +const snapSuffix = ".snap" + +var ( + ErrNoSnapshot = errors.New("snap: no available snapshot") + ErrEmptySnapshot = errors.New("snap: empty snapshot") + ErrCRCMismatch = errors.New("snap: crc mismatch") + crcTable = crc32.MakeTable(crc32.Castagnoli) + + // 一个可以出现在snap文件夹中的有效文件的映射. + validFiles = map[string]bool{ + "db": true, + } +) + +// Snapshotter 快照管理器 +type Snapshotter struct { + lg *zap.Logger + dir string +} + +func New(lg *zap.Logger, dir string) *Snapshotter { + if lg == nil { + lg = zap.NewNop() + } + return &Snapshotter{ + lg: lg, + dir: dir, + } +} + +func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error { + if raft.IsEmptySnap(snapshot) { + return nil + } + return s.save(&snapshot) +} + +// 保存一个快照 +func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { + fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix) + b := pbutil.MustMarshal(snapshot) + crc := crc32.Update(0, crcTable, b) + snap := snappb.Snapshot{Crc: crc, Data: b} + d, err := snap.Marshal() + if err != nil { + return err + } + + spath := filepath.Join(s.dir, fname) + err = pioutil.WriteAndSyncFile(spath, d, 0o666) + + if err != nil { + s.lg.Warn("写快照文件失败", zap.String("path", spath), zap.Error(err)) + rerr := os.Remove(spath) + if rerr != nil { + s.lg.Warn("删除损坏的snap文件失败", zap.String("path", spath), zap.Error(err)) + } + return err + } + + return nil +} + +// Load 返回最新的快照 +func (s *Snapshotter) Load() (*raftpb.Snapshot, error) { + return s.loadMatching(func(*raftpb.Snapshot) bool { return true }) 
+} + +// LoadNewestAvailable 返回最新的快照 +func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) { + return s.loadMatching(func(snapshot *raftpb.Snapshot) bool { + m := snapshot.Metadata + // 倒着匹配 + // 存在的、wal记录的,寻找最新的快照 + for i := len(walSnaps) - 1; i >= 0; i-- { + if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index { + return true + } + } + return false + }) +} + +// loadMatching 返回最新的快照 +func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) { + names, err := s.snapNames() // 加载快照目录下的快照 + if err != nil { + return nil, err + } + var snap *raftpb.Snapshot + for _, name := range names { + if snap, err = loadSnap(s.lg, s.dir, name); err == nil && matchFn(snap) { + return snap, nil + } + } + return nil, ErrNoSnapshot +} + +// 判断该文件能不能读取 +func loadSnap(lg *zap.Logger, dir, name string) (*raftpb.Snapshot, error) { + fpath := filepath.Join(dir, name) + snap, err := Read(lg, fpath) + if err != nil { + brokenPath := fpath + ".broken" + if lg != nil { + lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err)) + } + if rerr := os.Rename(fpath, brokenPath); rerr != nil { + if lg != nil { + lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr)) + } + } else { + if lg != nil { + lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath)) + } + } + } + return snap, err +} + +// Read reads the snapshot named by snapname and returns the snapshot. 
// Read reads, CRC-verifies, and decodes the snapshot file at snapname.
// Failure modes: missing/unreadable file, empty file, undecodable wrapper,
// empty payload, CRC mismatch, or undecodable raftpb.Snapshot — each logged
// and returned as an error.
func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) {
	b, err := ioutil.ReadFile(snapname)
	if err != nil {
		if lg != nil {
			lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err))
		}
		return nil, err
	}

	if len(b) == 0 {
		if lg != nil {
			lg.Warn("failed to read empty snapshot file", zap.String("path", snapname))
		}
		return nil, ErrEmptySnapshot
	}

	// Outer wrapper: snappb.Snapshot carrying payload + CRC.
	var serializedSnap snappb.Snapshot
	if err = serializedSnap.Unmarshal(b); err != nil {
		if lg != nil {
			lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err))
		}
		return nil, err
	}

	if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
		if lg != nil {
			lg.Warn("failed to read empty snapshot data", zap.String("path", snapname))
		}
		return nil, ErrEmptySnapshot
	}

	// Verify payload integrity before decoding it.
	crc := crc32.Update(0, crcTable, serializedSnap.Data)
	if crc != serializedSnap.Crc {
		if lg != nil {
			lg.Warn("snap file is corrupt",
				zap.String("path", snapname),
				zap.Uint32("prev-crc", serializedSnap.Crc),
				zap.Uint32("new-crc", crc),
			)
		}
		return nil, ErrCRCMismatch
	}

	// Inner payload: the raft snapshot itself.
	var snap raftpb.Snapshot
	if err = snap.Unmarshal(serializedSnap.Data); err != nil {
		if lg != nil {
			lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err))
		}
		return nil, err
	}
	return &snap, nil
}

// snapNames returns snapshot file names in logical-time order (newest to
// oldest). If no snapshot is available, ErrNoSnapshot is returned.
// snapNames lists *.snap file names in the snap dir, newest first (the
// term-index naming scheme makes lexical order equal logical order).
func (s *Snapshotter) snapNames() ([]string, error) {
	dir, err := os.Open(s.dir) // e.g. ./raftexample/db/raftexample-1-snap
	if err != nil {
		return nil, err
	}
	defer dir.Close()
	names, err := dir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	filenames, err := s.cleanupSnapdir(names) // drop (and delete) temp snapshots
	if err != nil {
		return nil, err
	}
	snaps := checkSuffix(s.lg, filenames)
	if len(snaps) == 0 {
		return nil, ErrNoSnapshot
	}
	// Reverse-sort so the newest snapshot comes first.
	sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
	return snaps, nil
}

// checkSuffix keeps only names ending in ".snap", warning about anything
// else that is not a known-valid resident of the snap directory.
func checkSuffix(lg *zap.Logger, names []string) []string {
	snaps := []string{}
	for i := range names {
		if strings.HasSuffix(names[i], snapSuffix) { // ".snap"
			snaps = append(snaps, names[i])
		} else {
			// validFiles maps file names allowed to live in the snap dir.
			if _, ok := validFiles[names[i]]; !ok {
				if lg != nil {
					lg.Warn("发现了未期待的文件在快照目录下; 跳过", zap.String("path", names[i]))
				}
			}
		}
	}
	return snaps
}

// cleanupSnapdir deletes orphaned "db.tmp*" defragmentation leftovers and
// returns the remaining file names.
func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) {
	names = make([]string, 0, len(filenames))
	for _, filename := range filenames {
		if strings.HasPrefix(filename, "db.tmp") {
			s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename))
			// Tolerate the file disappearing between listing and removal.
			if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
				return names, fmt.Errorf("failed to remove orphaned .snap.db file %s: %v", filename, rmErr)
			}
		} else {
			names = append(names, filename)
		}
	}
	return names, nil
}

// ReleaseSnapDBs deletes *.snap.db files whose index is older than the given
// snapshot's index; they can no longer be needed for recovery.
func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error {
	dir, err := os.Open(s.dir)
	if err != nil {
		return err
	}
	defer dir.Close()
	filenames, err := dir.Readdirnames(-1)
	if err != nil {
		return err
	}
	for _, filename := range filenames {
		if strings.HasSuffix(filename, ".snap.db") {
			// File name is the hex-encoded snapshot index (see dbFilePath).
			hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db")
			index, err := strconv.ParseUint(hexIndex, 16, 64)
			if err != nil {
				s.lg.Error("failed to parse index from filename", zap.String("path", filename), zap.String("error", err.Error()))
				continue
			}
			if index < snap.Metadata.Index {
				s.lg.Info("found orphaned .snap.db file; deleting", zap.String("path", filename))
				if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
					s.lg.Error("failed to remove orphaned .snap.db file", zap.String("path", filename), zap.String("error", rmErr.Error()))
				}
			}
		}
	}
	return nil
}
diff --git a/etcd/etcdserver/api/v2auth/auth.go b/etcd/etcdserver/api/v2auth/auth.go
new file mode 100644
index 00000000000..f2b7cdca56a
--- /dev/null
+++ b/etcd/etcdserver/api/v2auth/auth.go
@@ -0,0 +1,670 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package v2auth implements etcd authentication.
package v2auth

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"path"
	"reflect"
	"sort"
	"strings"
	"time"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
	"github.com/ls-2018/etcd_cn/offical/etcdserverpb"

	"go.uber.org/zap"
	"golang.org/x/crypto/bcrypt"
)

const (
	// StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
	StorePermsPrefix = "/2"

	// RootRoleName is the name of the ROOT role, with privileges to manage the cluster.
	RootRoleName = "root"

	// GuestRoleName is the name of the role that defines the privileges of an unauthenticated user.
	GuestRoleName = "guest"
)

// rootRole has unrestricted read/write access to the whole keyspace.
var rootRole = Role{
	Role: RootRoleName,
	Permissions: Permissions{
		KV: RWPermission{
			Read:  []string{"/*"},
			Write: []string{"/*"},
		},
	},
}

// guestRole is the default for unauthenticated requests; NOTE it also grants
// full read/write access until an administrator tightens it.
var guestRole = Role{
	Role: GuestRoleName,
	Permissions: Permissions{
		KV: RWPermission{
			Read:  []string{"/*"},
			Write: []string{"/*"},
		},
	},
}

// doer abstracts the etcd server's v2 request execution path.
type doer interface {
	Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error)
}

// Store is the full user/role management API backed by the v2 keyspace.
type Store interface {
	AllUsers() ([]string, error)
	GetUser(name string) (User, error)
	CreateOrUpdateUser(user User) (out User, created bool, err error)
	CreateUser(user User) (User, error)
	DeleteUser(name string) error
	UpdateUser(user User) (User, error)
	AllRoles() ([]string, error)
	GetRole(name string) (Role, error)
	CreateRole(role Role) error
	DeleteRole(name string) error
	UpdateRole(role Role) (Role, error)
	AuthEnabled() bool
	EnableAuth() error
	DisableAuth() error
	PasswordStore
}

// PasswordStore abstracts password hashing/verification so tests can stub it.
type PasswordStore interface {
	CheckPassword(user User, password string) bool
	HashPassword(password string) (string, error)
}

// store implements Store on top of a doer, with a per-request timeout.
type store struct {
	lg          *zap.Logger
	server      doer
	timeout     time.Duration
	ensuredOnce bool // true once the /2 auth directories have been created

	PasswordStore
}

// User is the JSON representation of a user; Grant/Revoke carry pending role
// changes on update requests and are never persisted.
type User struct {
	User     string   `json:"user"`
	Password string   `json:"password,omitempty"`
	Roles    []string `json:"roles"`
	Grant    []string `json:"grant,omitempty"`
	Revoke   []string `json:"revoke,omitempty"`
}

// Role is the JSON representation of a role; Grant/Revoke carry pending
// permission changes on update requests and are never persisted.
type Role struct {
	Role        string       `json:"role"`
	Permissions Permissions  `json:"permissions"`
	Grant       *Permissions `json:"grant,omitempty"`
	Revoke      *Permissions `json:"revoke,omitempty"`
}

type Permissions struct {
	KV RWPermission `json:"kv"`
}

// IsEmpty reports whether p grants nothing (nil or no read/write patterns).
func (p *Permissions) IsEmpty() bool {
	return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0)
}

// RWPermission holds key patterns (exact or trailing-'*' prefix) readable
// and writable by a role.
type RWPermission struct {
	Read  []string `json:"read"`
	Write []string `json:"write"`
}

// Error is an auth error carrying an HTTP status code.
type Error struct {
	Status int
	Errmsg string
}

func (ae Error) Error() string   { return ae.Errmsg }
func (ae Error) HTTPStatus() int { return ae.Status }

// authErr builds an Error with the given HTTP status and "auth: "-prefixed message.
func authErr(hs int, s string, v ...interface{}) Error {
	return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)}
}

// NewStore returns a Store executing requests through server with the given
// timeout; a nil logger is replaced with a no-op.
func NewStore(lg *zap.Logger, server doer, timeout time.Duration) Store {
	if lg == nil {
		lg = zap.NewNop()
	}
	s := &store{
		lg:            lg,
		server:        server,
		timeout:       timeout,
		PasswordStore: passwordStore{},
	}
	return s
}

// passwordStore implements PasswordStore using bcrypt to hash user passwords
type passwordStore struct{}

func (passwordStore) CheckPassword(user User, password string) bool {
	err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
	return err == nil
}

func (passwordStore) HashPassword(password string) (string, error) {
	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	return string(hash), err
}

// AllUsers lists user names sorted ascending; a missing /users/ directory
// yields an empty list rather than an error.
func (s *store) AllUsers() ([]string, error) {
	resp, err := s.requestResource("/users/", false)
	if err != nil {
		if e, ok := err.(*v2error.Error); ok {
			if e.ErrorCode == v2error.EcodeKeyNotFound {
				return []string{}, nil
			}
		}
		return nil, err
	}
	var nodes []string
	for _, n := range resp.Event.NodeExtern.ExternNodes {
		_, user := path.Split(n.Key)
		nodes = append(nodes, user)
	}
	sort.Strings(nodes)
	return nodes, nil
}

func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }

// CreateOrUpdateUser should be only used for creating the new user or when you are not
// sure if it is a create or update.
(When only password is passed in, we are not sure +// if it is a update or create) +func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) { + _, err = s.getUser(user.User, true) + if err == nil { + out, err = s.UpdateUser(user) + return out, false, err + } + u, err := s.CreateUser(user) + return u, true, err +} + +func (s *store) CreateUser(user User) (User, error) { + // Attach root role to root user. + if user.User == "root" { + user = attachRootRole(user) + } + u, err := s.createUserInternal(user) + if err == nil { + s.lg.Info("created a user", zap.String("user-name", user.User)) + } + return u, err +} + +func (s *store) createUserInternal(user User) (User, error) { + if user.Password == "" { + return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User) + } + hash, err := s.HashPassword(user.Password) + if err != nil { + return user, err + } + user.Password = hash + + _, err = s.createResource("/users/"+user.User, user) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeNodeExist { + return user, authErr(http.StatusConflict, "User %s already exists.", user.User) + } + } + } + return user, err +} + +func (s *store) DeleteUser(name string) error { + if s.AuthEnabled() && name == "root" { + return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.") + } + err := s.deleteResource("/users/" + name) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { + return authErr(http.StatusNotFound, "User %s does not exist", name) + } + } + return err + } + s.lg.Info("deleted a user", zap.String("user-name", name)) + return nil +} + +func (s *store) UpdateUser(user User) (User, error) { + old, err := s.getUser(user.User, true) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { + return user, authErr(http.StatusNotFound, "User %s doesn't 
exist.", user.User) + } + } + return old, err + } + + newUser, err := old.merge(s.lg, user, s.PasswordStore) + if err != nil { + return old, err + } + if reflect.DeepEqual(old, newUser) { + return old, authErr(http.StatusBadRequest, "User not updated. Use grant/revoke/password to update the user.") + } + _, err = s.updateResource("/users/"+user.User, newUser) + if err == nil { + s.lg.Info("updated a user", zap.String("user-name", user.User)) + } + return newUser, err +} + +func (s *store) AllRoles() ([]string, error) { + nodes := []string{RootRoleName} + resp, err := s.requestResource("/roles/", false) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { + return nodes, nil + } + } + return nil, err + } + for _, n := range resp.Event.NodeExtern.ExternNodes { + _, role := path.Split(n.Key) + nodes = append(nodes, role) + } + sort.Strings(nodes) + return nodes, nil +} + +func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) } + +func (s *store) CreateRole(role Role) error { + if role.Role == RootRoleName { + return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role) + } + _, err := s.createResource("/roles/"+role.Role, role) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeNodeExist { + return authErr(http.StatusConflict, "Role %s already exists.", role.Role) + } + } + } + if err == nil { + s.lg.Info("created a new role", zap.String("role-name", role.Role)) + } + return err +} + +func (s *store) DeleteRole(name string) error { + if name == RootRoleName { + return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name) + } + err := s.deleteResource("/roles/" + name) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { + return authErr(http.StatusNotFound, "Role %s doesn't exist.", name) + } + } + } + if err == nil { + s.lg.Info("delete a new 
role", zap.String("role-name", name)) + } + return err +} + +func (s *store) UpdateRole(role Role) (Role, error) { + if role.Role == RootRoleName { + return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role) + } + old, err := s.getRole(role.Role, true) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { + return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role) + } + } + return old, err + } + newRole, err := old.merge(s.lg, role) + if err != nil { + return old, err + } + if reflect.DeepEqual(old, newRole) { + return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.") + } + _, err = s.updateResource("/roles/"+role.Role, newRole) + if err == nil { + s.lg.Info("updated a new role", zap.String("role-name", role.Role)) + } + return newRole, err +} + +func (s *store) AuthEnabled() bool { + return s.detectAuth() +} + +func (s *store) EnableAuth() error { + if s.AuthEnabled() { + return authErr(http.StatusConflict, "already enabled") + } + + if _, err := s.getUser("root", true); err != nil { + return authErr(http.StatusConflict, "No root user available, please create one") + } + if _, err := s.getRole(GuestRoleName, true); err != nil { + s.lg.Info( + "no guest role access found; creating default", + zap.String("role-name", GuestRoleName), + ) + if err := s.CreateRole(guestRole); err != nil { + s.lg.Warn( + "failed to create a guest role; aborting auth enable", + zap.String("role-name", GuestRoleName), + zap.Error(err), + ) + return err + } + } + + if err := s.enableAuth(); err != nil { + s.lg.Warn("failed to enable auth", zap.Error(err)) + return err + } + + s.lg.Info("enabled auth") + return nil +} + +func (s *store) DisableAuth() error { + if !s.AuthEnabled() { + return authErr(http.StatusConflict, "already disabled") + } + + err := s.disableAuth() + if err == nil { + s.lg.Info("disabled auth") + } else { + 
		s.lg.Warn("failed to disable auth", zap.Error(err))
	}
	return err
}

// merge applies the properties of the passed-in User to the User on which it
// is called and returns a new User with these modifications applied. Think of
// all Users as immutable sets of data. Merge allows you to perform the set
// operations (desired grants and revokes) atomically
func (ou User) merge(lg *zap.Logger, nu User, s PasswordStore) (User, error) {
	var out User
	if ou.User != nu.User {
		return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
	}
	out.User = ou.User
	// A non-empty new password replaces the old hash; empty keeps the old one.
	if nu.Password != "" {
		hash, err := s.HashPassword(nu.Password)
		if err != nil {
			return ou, err
		}
		out.Password = hash
	} else {
		out.Password = ou.Password
	}
	currentRoles := types.NewUnsafeSet(ou.Roles...)
	// Granting an already-held role is a conflict, not a no-op.
	for _, g := range nu.Grant {
		if currentRoles.Contains(g) {
			lg.Warn(
				"attempted to grant a duplicate role for a user",
				zap.String("user-name", nu.User),
				zap.String("role-name", g),
			)
			return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
		}
		currentRoles.Add(g)
	}
	// Revoking a role the user does not hold is likewise a conflict.
	for _, r := range nu.Revoke {
		if !currentRoles.Contains(r) {
			lg.Warn(
				"attempted to revoke a ungranted role for a user",
				zap.String("user-name", nu.User),
				zap.String("role-name", r),
			)
			return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
		}
		currentRoles.Remove(r)
	}
	out.Roles = currentRoles.Values()
	sort.Strings(out.Roles)
	return out, nil
}

// merge for a role works the same as User above -- atomic Role application to
// each of the substructures.
// merge applies n's pending Grant then Revoke to r, returning the new Role.
func (r Role) merge(lg *zap.Logger, n Role) (Role, error) {
	var out Role
	var err error
	if r.Role != n.Role {
		return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
	}
	out.Role = r.Role
	out.Permissions, err = r.Permissions.Grant(n.Grant)
	if err != nil {
		return out, err
	}
	out.Permissions, err = out.Permissions.Revoke(lg, n.Revoke)
	return out, err
}

// HasKeyAccess reports whether the role may read (write=false) or write
// (write=true) the exact key; the root role may do anything.
func (r Role) HasKeyAccess(key string, write bool) bool {
	if r.Role == RootRoleName {
		return true
	}
	return r.Permissions.KV.HasAccess(key, write)
}

// HasRecursiveAccess is like HasKeyAccess but requires a prefix ('*')
// permission covering the key.
func (r Role) HasRecursiveAccess(key string, write bool) bool {
	if r.Role == RootRoleName {
		return true
	}
	return r.Permissions.KV.HasRecursiveAccess(key, write)
}

// Grant adds a set of permissions to the permission object on which it is called,
// returning a new permission object.
func (p Permissions) Grant(n *Permissions) (Permissions, error) {
	var out Permissions
	var err error
	if n == nil {
		return p, nil
	}
	out.KV, err = p.KV.Grant(n.KV)
	return out, err
}

// Revoke removes a set of permissions to the permission object on which it is called,
// returning a new permission object.
func (p Permissions) Revoke(lg *zap.Logger, n *Permissions) (Permissions, error) {
	var out Permissions
	var err error
	if n == nil {
		return p, nil
	}
	out.KV, err = p.KV.Revoke(lg, n.KV)
	return out, err
}

// Grant adds a set of permissions to the permission object on which it is called,
// returning a new permission object. Granting an already-present pattern is a
// conflict error.
func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
	var out RWPermission
	currentRead := types.NewUnsafeSet(rw.Read...)
	for _, r := range n.Read {
		if currentRead.Contains(r) {
			return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
		}
		currentRead.Add(r)
	}
	currentWrite := types.NewUnsafeSet(rw.Write...)
+ for _, w := range n.Write { + if currentWrite.Contains(w) { + return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w) + } + currentWrite.Add(w) + } + out.Read = currentRead.Values() + out.Write = currentWrite.Values() + sort.Strings(out.Read) + sort.Strings(out.Write) + return out, nil +} + +// Revoke removes a set of permissions to the permission object on which it is called, +// returning a new permission object. +func (rw RWPermission) Revoke(lg *zap.Logger, n RWPermission) (RWPermission, error) { + var out RWPermission + currentRead := types.NewUnsafeSet(rw.Read...) + for _, r := range n.Read { + if !currentRead.Contains(r) { + lg.Info( + "revoking ungranted read permission", + zap.String("read-permission", r), + ) + continue + } + currentRead.Remove(r) + } + currentWrite := types.NewUnsafeSet(rw.Write...) + for _, w := range n.Write { + if !currentWrite.Contains(w) { + lg.Info( + "revoking ungranted write permission", + zap.String("write-permission", w), + ) + continue + } + currentWrite.Remove(w) + } + out.Read = currentRead.Values() + out.Write = currentWrite.Values() + sort.Strings(out.Read) + sort.Strings(out.Write) + return out, nil +} + +func (rw RWPermission) HasAccess(key string, write bool) bool { + var list []string + if write { + list = rw.Write + } else { + list = rw.Read + } + for _, pat := range list { + match, err := simpleMatch(pat, key) + if err == nil && match { + return true + } + } + return false +} + +func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool { + list := rw.Read + if write { + list = rw.Write + } + for _, pat := range list { + match, err := prefixMatch(pat, key) + if err == nil && match { + return true + } + } + return false +} + +func simpleMatch(pattern string, key string) (match bool, err error) { + if pattern[len(pattern)-1] == '*' { + return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil + } + return key == pattern, nil +} + +func prefixMatch(pattern string, key 
string) (match bool, err error) { + if pattern[len(pattern)-1] != '*' { + return false, nil + } + return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil +} + +func attachRootRole(u User) User { + inRoles := false + for _, r := range u.Roles { + if r == RootRoleName { + inRoles = true + break + } + } + if !inRoles { + u.Roles = append(u.Roles, RootRoleName) + } + return u +} + +func (s *store) getUser(name string, quorum bool) (User, error) { + resp, err := s.requestResource("/users/"+name, quorum) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { + return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name) + } + } + return User{}, err + } + var u User + err = json.Unmarshal([]byte(*resp.Event.NodeExtern.Value), &u) + if err != nil { + return u, err + } + // Attach root role to root user. + if u.User == "root" { + u = attachRootRole(u) + } + return u, nil +} + +func (s *store) getRole(name string, quorum bool) (Role, error) { + if name == RootRoleName { + return rootRole, nil + } + resp, err := s.requestResource("/roles/"+name, quorum) + if err != nil { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { + return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name) + } + } + return Role{}, err + } + var r Role + err = json.Unmarshal([]byte(*resp.Event.NodeExtern.Value), &r) + return r, err +} diff --git a/etcd/etcdserver/api/v2auth/auth_requests.go b/etcd/etcdserver/api/v2auth/auth_requests.go new file mode 100644 index 00000000000..a49d9dc968e --- /dev/null +++ b/etcd/etcdserver/api/v2auth/auth_requests.go @@ -0,0 +1,180 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v2auth

import (
	"context"
	"encoding/json"
	"path"

	"github.com/ls-2018/etcd_cn/etcd/etcdserver"
	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
	"github.com/ls-2018/etcd_cn/offical/etcdserverpb"

	"go.uber.org/zap"
)

// ensureAuthDirectories lazily creates the /2, /2/users/ and /2/roles/
// directories plus the /2/enabled flag (initialized to "false"); it runs at
// most once per store (guarded by ensuredOnce) and treats "already exists"
// as success.
func (s *store) ensureAuthDirectories() error {
	if s.ensuredOnce {
		return nil
	}
	for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
		ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
		// PrevExist=false makes the PUT a create-only operation.
		pe := false
		rr := etcdserverpb.Request{
			Method:    "PUT",
			Path:      res,
			Dir:       true,
			PrevExist: &pe,
		}
		_, err := s.server.Do(ctx, rr)
		cancel()
		if err != nil {
			if e, ok := err.(*v2error.Error); ok {
				if e.ErrorCode == v2error.EcodeNodeExist {
					continue
				}
			}
			s.lg.Warn(
				"failed to create auth directories",
				zap.Error(err),
			)
			return err
		}
	}
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	pe := false
	rr := etcdserverpb.Request{
		Method:    "PUT",
		Path:      StorePermsPrefix + "/enabled",
		Val:       "false",
		PrevExist: &pe,
	}
	_, err := s.server.Do(ctx, rr)
	if err != nil {
		if e, ok := err.(*v2error.Error); ok {
			if e.ErrorCode == v2error.EcodeNodeExist {
				s.ensuredOnce = true
				return nil
			}
		}
		return err
	}
	s.ensuredOnce = true
	return nil
}

// enableAuth persists the enabled flag as true.
func (s *store) enableAuth() error {
	_, err := s.updateResource("/enabled", true)
	return err
}

// disableAuth persists the enabled flag as false.
func (s *store) disableAuth() error {
	_, err := s.updateResource("/enabled", false)
	return err
}

// detectAuth reads the stored enabled flag; any failure (missing key,
// request error, bad JSON) is treated as "auth disabled".
func (s *store) detectAuth() bool {
	if s.server == nil {
		return false
	}
	value, err := s.requestResource("/enabled", false)
	if err != nil {
		if e, ok := err.(*v2error.Error); ok {
			if e.ErrorCode == v2error.EcodeKeyNotFound {
				return false
			}
		}
		s.lg.Warn(
			"failed to detect auth settings",
			zap.Error(err),
		)
		return false
	}

	var u bool
	err = json.Unmarshal([]byte(*value.Event.NodeExtern.Value), &u)
	if err != nil {
		s.lg.Warn(
			"internal bookkeeping value for enabled isn't valid JSON",
			zap.Error(err),
		)
		return false
	}
	return u
}

// requestResource reads res under the auth prefix; quorum selects a
// linearizable QGET instead of a plain GET.
func (s *store) requestResource(res string, quorum bool) (etcdserver.Response, error) {
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	p := path.Join(StorePermsPrefix, res)
	method := "GET"
	if quorum {
		method = "QGET"
	}
	rr := etcdserverpb.Request{
		Method: method,
		Path:   p,
		Dir:    false, // TODO: always false?
	}
	return s.server.Do(ctx, rr)
}

func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) {
	return s.setResource(res, value, true)
}

func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) {
	return s.setResource(res, value, false)
}

// setResource JSON-encodes value and PUTs it under the auth prefix;
// prevexist=true demands the key already exist (update), false that it not
// exist (create).
func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) {
	err := s.ensureAuthDirectories()
	if err != nil {
		return etcdserver.Response{}, err
	}
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	data, err := json.Marshal(value)
	if err != nil {
		return etcdserver.Response{}, err
	}
	p := path.Join(StorePermsPrefix, res)
	rr := etcdserverpb.Request{
		Method:    "PUT",
		Path:      p,
		Val:       string(data),
		PrevExist: &prevexist,
	}
	return s.server.Do(ctx, rr)
}

// deleteResource deletes res under the auth prefix; the key must exist.
func (s *store) deleteResource(res string) error {
	err := s.ensureAuthDirectories()
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	pex := true
	p := path.Join(StorePermsPrefix, res)
	_, err = s.server.Do(ctx, etcdserverpb.Request{
		Method:    "DELETE",
		Path:      p,
		PrevExist: &pex,
	})
	return err
}
diff --git a/etcd/etcdserver/api/v2auth/auth_test.go b/etcd/etcdserver/api/v2auth/auth_test.go
new file mode 100644
index 00000000000..5bad22d12a1
--- /dev/null
+++ b/etcd/etcdserver/api/v2auth/auth_test.go
@@ -0,0 +1,677 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package v2auth + +import ( + "context" + "reflect" + "testing" + "time" + + "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" + + "go.uber.org/zap" +) + +type fakeDoer struct{} + +func (fakeDoer) Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error) { + return etcdserver.Response{}, nil +} + +func TestCheckPassword(t *testing.T) { + st := NewStore(zap.NewExample(), fakeDoer{}, 5*time.Second) + u := User{Password: "$2a$10$I3iddh1D..EIOXXQtsra4u8AjOtgEa2ERxVvYGfXFBJDo1omXwP.q"} + matched := st.CheckPassword(u, "foo") + if matched { + t.Fatalf("expected false, got %v", matched) + } +} + +const testTimeout = time.Millisecond + +func TestMergeUser(t *testing.T) { + tbl := []struct { + input User + merge User + expect User + iserr bool + }{ + { + User{User: "foo"}, + User{User: "bar"}, + User{}, + true, + }, + { + User{User: "foo"}, + User{User: "foo"}, + User{User: "foo", Roles: []string{}}, + false, + }, + { + User{User: "foo"}, + User{User: "foo", Grant: []string{"role1"}}, + User{User: "foo", Roles: []string{"role1"}}, + false, + }, + { + User{User: "foo", Roles: []string{"role1"}}, + User{User: "foo", Grant: []string{"role1"}}, + User{}, + true, + }, + { + User{User: "foo", Roles: []string{"role1"}}, + User{User: "foo", Revoke: []string{"role2"}}, + User{}, + true, + }, + { + User{User: "foo", Roles: []string{"role1"}}, + User{User: "foo", Grant: []string{"role2"}}, + User{User: "foo", Roles: []string{"role1", "role2"}}, + false, + }, + { // empty password will not overwrite the previous password + User{User: "foo", Password: "foo", Roles: []string{}}, + User{User: "foo", Password: ""}, + User{User: "foo", Password: "foo", Roles: []string{}}, + false, + }, + } + + for i, tt := range tbl { + out, err := tt.input.merge(zap.NewExample(), tt.merge, passwordStore{}) + if err != nil && !tt.iserr { + t.Fatalf("Got 
unexpected error on item %d", i) + } + if !tt.iserr { + if !reflect.DeepEqual(out, tt.expect) { + t.Errorf("Unequal merge expectation on item %d: got: %#v, expect: %#v", i, out, tt.expect) + } + } + } +} + +func TestMergeRole(t *testing.T) { + tbl := []struct { + input Role + merge Role + expect Role + iserr bool + }{ + { + Role{Role: "foo"}, + Role{Role: "bar"}, + Role{}, + true, + }, + { + Role{Role: "foo"}, + Role{Role: "foo", Grant: &Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}}, + Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}}, + false, + }, + { + Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}}, + Role{Role: "foo", Revoke: &Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}}, + Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{}, Write: []string{}}}}, + false, + }, + { + Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/bardir"}}}}, + Role{Role: "foo", Revoke: &Permissions{KV: RWPermission{Read: []string{"/foodir"}}}}, + Role{}, + true, + }, + } + for i, tt := range tbl { + out, err := tt.input.merge(zap.NewExample(), tt.merge) + if err != nil && !tt.iserr { + t.Fatalf("Got unexpected error on item %d", i) + } + if !tt.iserr { + if !reflect.DeepEqual(out, tt.expect) { + t.Errorf("Unequal merge expectation on item %d: got: %#v, expect: %#v", i, out, tt.expect) + } + } + } +} + +type testDoer struct { + get []etcdserver.Response + put []etcdserver.Response + getindex int + putindex int + explicitlyEnabled bool +} + +func (td *testDoer) Do(_ context.Context, req etcdserverpb.Request) (etcdserver.Response, error) { + if td.explicitlyEnabled && (req.Path == StorePermsPrefix+"/enabled") { + t := "true" + return etcdserver.Response{ + Event: &v2store.Event{ + Action: v2store.Get, + Node: 
&v2store.NodeExtern{ + Key: StorePermsPrefix + "/users/cat", + Value: &t, + }, + }, + }, nil + } + if (req.Method == "GET" || req.Method == "QGET") && td.get != nil { + res := td.get[td.getindex] + if res.Event == nil { + td.getindex++ + return etcdserver.Response{}, &v2error.Error{ + ErrorCode: v2error.EcodeKeyNotFound, + } + } + td.getindex++ + return res, nil + } + if req.Method == "PUT" && td.put != nil { + res := td.put[td.putindex] + if res.Event == nil { + td.putindex++ + return etcdserver.Response{}, &v2error.Error{ + ErrorCode: v2error.EcodeNodeExist, + } + } + td.putindex++ + return res, nil + } + return etcdserver.Response{}, nil +} + +func TestAllUsers(t *testing.T) { + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Nodes: v2store.NodeExterns([]*v2store.NodeExtern{ + { + Key: StorePermsPrefix + "/users/cat", + }, + { + Key: StorePermsPrefix + "/users/dog", + }, + }), + }, + }, + }, + }, + } + expected := []string{"cat", "dog"} + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false} + users, err := s.AllUsers() + if err != nil { + t.Error("Unexpected error", err) + } + if !reflect.DeepEqual(users, expected) { + t.Error("AllUsers doesn't match given store. Got", users, "expected", expected) + } +} + +func TestGetAndDeleteUser(t *testing.T) { + data := `{"user": "cat", "roles" : ["animal"]}` + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/users/cat", + Value: &data, + }, + }, + }, + }, + explicitlyEnabled: true, + } + expected := User{User: "cat", Roles: []string{"animal"}} + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false} + out, err := s.GetUser("cat") + if err != nil { + t.Error("Unexpected error", err) + } + if !reflect.DeepEqual(out, expected) { + t.Error("GetUser doesn't match given store. 
Got", out, "expected", expected) + } + err = s.DeleteUser("cat") + if err != nil { + t.Error("Unexpected error", err) + } +} + +func TestAllRoles(t *testing.T) { + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Nodes: v2store.NodeExterns([]*v2store.NodeExtern{ + { + Key: StorePermsPrefix + "/roles/animal", + }, + { + Key: StorePermsPrefix + "/roles/human", + }, + }), + }, + }, + }, + }, + explicitlyEnabled: true, + } + expected := []string{"animal", "human", "root"} + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false} + out, err := s.AllRoles() + if err != nil { + t.Error("Unexpected error", err) + } + if !reflect.DeepEqual(out, expected) { + t.Error("AllRoles doesn't match given store. Got", out, "expected", expected) + } +} + +func TestGetAndDeleteRole(t *testing.T) { + data := `{"role": "animal"}` + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/roles/animal", + Value: &data, + }, + }, + }, + }, + explicitlyEnabled: true, + } + expected := Role{Role: "animal"} + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false} + out, err := s.GetRole("animal") + if err != nil { + t.Error("Unexpected error", err) + } + if !reflect.DeepEqual(out, expected) { + t.Error("GetRole doesn't match given store. 
Got", out, "expected", expected) + } + err = s.DeleteRole("animal") + if err != nil { + t.Error("Unexpected error", err) + } +} + +func TestEnsure(t *testing.T) { + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Set, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix, + Dir: true, + }, + }, + }, + { + Event: &v2store.Event{ + Action: v2store.Set, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/users/", + Dir: true, + }, + }, + }, + { + Event: &v2store.Event{ + Action: v2store.Set, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/roles/", + Dir: true, + }, + }, + }, + }, + } + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false} + err := s.ensureAuthDirectories() + if err != nil { + t.Error("Unexpected error", err) + } +} + +type fastPasswordStore struct{} + +func (fastPasswordStore) CheckPassword(user User, password string) bool { + return user.Password == password +} + +func (fastPasswordStore) HashPassword(password string) (string, error) { return password, nil } + +func TestCreateAndUpdateUser(t *testing.T) { + olduser := `{"user": "cat", "roles" : ["animal"]}` + newuser := `{"user": "cat", "roles" : ["animal", "pet"]}` + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: nil, + }, + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/users/cat", + Value: &olduser, + }, + }, + }, + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/users/cat", + Value: &olduser, + }, + }, + }, + }, + put: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Update, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/users/cat", + Value: &olduser, + }, + }, + }, + { + Event: &v2store.Event{ + Action: v2store.Update, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/users/cat", + Value: &newuser, + }, + }, + }, + }, + explicitlyEnabled: 
true, + } + user := User{User: "cat", Password: "meow", Roles: []string{"animal"}} + update := User{User: "cat", Grant: []string{"pet"}} + expected := User{User: "cat", Roles: []string{"animal", "pet"}} + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true, PasswordStore: fastPasswordStore{}} + out, created, err := s.CreateOrUpdateUser(user) + if !created { + t.Error("Should have created user, instead updated?") + } + if err != nil { + t.Error("Unexpected error", err) + } + out.Password = "meow" + if !reflect.DeepEqual(out, user) { + t.Error("UpdateUser doesn't match given update. Got", out, "expected", expected) + } + out, created, err = s.CreateOrUpdateUser(update) + if created { + t.Error("Should have updated user, instead created?") + } + if err != nil { + t.Error("Unexpected error", err) + } + if !reflect.DeepEqual(out, expected) { + t.Error("UpdateUser doesn't match given update. Got", out, "expected", expected) + } +} + +func TestUpdateRole(t *testing.T) { + oldrole := `{"role": "animal", "permissions" : {"kv": {"read": ["/animal"], "write": []}}}` + newrole := `{"role": "animal", "permissions" : {"kv": {"read": ["/animal"], "write": ["/animal"]}}}` + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/roles/animal", + Value: &oldrole, + }, + }, + }, + }, + put: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Update, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/roles/animal", + Value: &newrole, + }, + }, + }, + }, + explicitlyEnabled: true, + } + update := Role{Role: "animal", Grant: &Permissions{KV: RWPermission{Read: []string{}, Write: []string{"/animal"}}}} + expected := Role{Role: "animal", Permissions: Permissions{KV: RWPermission{Read: []string{"/animal"}, Write: []string{"/animal"}}}} + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true} + out, err 
:= s.UpdateRole(update) + if err != nil { + t.Error("Unexpected error", err) + } + if !reflect.DeepEqual(out, expected) { + t.Error("UpdateRole doesn't match given update. Got", out, "expected", expected) + } +} + +func TestCreateRole(t *testing.T) { + role := `{"role": "animal", "permissions" : {"kv": {"read": ["/animal"], "write": []}}}` + d := &testDoer{ + put: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Create, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/roles/animal", + Value: &role, + }, + }, + }, + { + Event: nil, + }, + }, + explicitlyEnabled: true, + } + r := Role{Role: "animal", Permissions: Permissions{KV: RWPermission{Read: []string{"/animal"}, Write: []string{}}}} + + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true} + err := s.CreateRole(Role{Role: "root"}) + if err == nil { + t.Error("Should error creating root role") + } + err = s.CreateRole(r) + if err != nil { + t.Error("Unexpected error", err) + } + err = s.CreateRole(r) + if err == nil { + t.Error("Creating duplicate role, should error") + } +} + +func TestEnableAuth(t *testing.T) { + rootUser := `{"user": "root", "password": ""}` + guestRole := `{"role": "guest", "permissions" : {"kv": {"read": ["*"], "write": ["*"]}}}` + trueval := "true" + falseval := "false" + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/enabled", + Value: &falseval, + }, + }, + }, + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/user/root", + Value: &rootUser, + }, + }, + }, + { + Event: nil, + }, + }, + put: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Create, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/roles/guest", + Value: &guestRole, + }, + }, + }, + { + Event: &v2store.Event{ + Action: v2store.Update, + Node: &v2store.NodeExtern{ + Key: 
StorePermsPrefix + "/enabled", + Value: &trueval, + }, + }, + }, + }, + explicitlyEnabled: false, + } + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true} + err := s.EnableAuth() + if err != nil { + t.Error("Unexpected error", err) + } +} + +func TestDisableAuth(t *testing.T) { + trueval := "true" + falseval := "false" + d := &testDoer{ + get: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/enabled", + Value: &falseval, + }, + }, + }, + { + Event: &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/enabled", + Value: &trueval, + }, + }, + }, + }, + put: []etcdserver.Response{ + { + Event: &v2store.Event{ + Action: v2store.Update, + Node: &v2store.NodeExtern{ + Key: StorePermsPrefix + "/enabled", + Value: &falseval, + }, + }, + }, + }, + explicitlyEnabled: false, + } + s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true} + err := s.DisableAuth() + if err == nil { + t.Error("Expected error; already disabled") + } + + err = s.DisableAuth() + if err != nil { + t.Error("Unexpected error", err) + } +} + +func TestSimpleMatch(t *testing.T) { + role := Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/foodir/*", "/fookey"}, Write: []string{"/bardir/*", "/barkey"}}}} + if !role.HasKeyAccess("/foodir/foo/bar", false) { + t.Fatal("role lacks expected access") + } + if !role.HasKeyAccess("/fookey", false) { + t.Fatal("role lacks expected access") + } + if !role.HasRecursiveAccess("/foodir/*", false) { + t.Fatal("role lacks expected access") + } + if !role.HasRecursiveAccess("/foodir/foo*", false) { + t.Fatal("role lacks expected access") + } + if !role.HasRecursiveAccess("/bardir/*", true) { + t.Fatal("role lacks expected access") + } + if !role.HasKeyAccess("/bardir/bar/foo", true) { + t.Fatal("role lacks expected access") + } + if 
!role.HasKeyAccess("/barkey", true) { + t.Fatal("role lacks expected access") + } + + if role.HasKeyAccess("/bardir/bar/foo", false) { + t.Fatal("role has unexpected access") + } + if role.HasKeyAccess("/barkey", false) { + t.Fatal("role has unexpected access") + } + if role.HasKeyAccess("/foodir/foo/bar", true) { + t.Fatal("role has unexpected access") + } + if role.HasKeyAccess("/fookey", true) { + t.Fatal("role has unexpected access") + } +} diff --git a/etcd/etcdserver/api/v2discovery/discovery.go b/etcd/etcdserver/api/v2discovery/discovery.go new file mode 100644 index 00000000000..7820e4ed3ea --- /dev/null +++ b/etcd/etcdserver/api/v2discovery/discovery.go @@ -0,0 +1,411 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v2discovery provides an implementation of the cluster discovery that +// is used by etcd with v2 clientv2. 
+package v2discovery + +import ( + "context" + "errors" + "fmt" + "math" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" + + clientv2 "github.com/ls-2018/etcd_cn/client_sdk/v2" + + "github.com/jonboulle/clockwork" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "go.uber.org/zap" +) + +var ( + ErrInvalidURL = errors.New("discovery: invalid URL") + ErrBadSizeKey = errors.New("discovery: size key is bad") + ErrSizeNotFound = errors.New("discovery: size key not found") + ErrTokenNotFound = errors.New("discovery: token not found") + ErrDuplicateID = errors.New("discovery: found duplicate id") + ErrDuplicateName = errors.New("discovery: found duplicate name") + ErrFullCluster = errors.New("discovery: cluster is full") + ErrTooManyRetries = errors.New("discovery: too many retries") + ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint") +) + +var ( + // Number of retries discovery will attempt before giving up and erroring out. + nRetries = uint(math.MaxUint32) + maxExpoentialRetries = uint(8) +) + +// JoinCluster 将连接到给定网址的发现服务,并将给定id和配置所代表的etcd注册到集群上. +func JoinCluster(lg *zap.Logger, durl, dproxyurl string, id types.ID, config string) (string, error) { + d, err := newDiscovery(lg, durl, dproxyurl, id) + if err != nil { + return "", err + } + return d.joinCluster(config) +} + +// GetCluster will connect to the discovery service at the given url and +// retrieve a string describing the cluster +func GetCluster(lg *zap.Logger, durl, dproxyurl string) (string, error) { + d, err := newDiscovery(lg, durl, dproxyurl, 0) + if err != nil { + return "", err + } + return d.getCluster() +} + +type discovery struct { + lg *zap.Logger + cluster string + id types.ID + c clientv2.KeysAPI + retries uint + url *url.URL + + clock clockwork.Clock +} + +// newProxyFunc builds a proxy function from the given string, which should +// represent a URL that can be used as a proxy. 
It performs basic +// sanitization of the URL and returns any error encountered. +func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, error), error) { + if lg == nil { + lg = zap.NewNop() + } + if proxy == "" { + return nil, nil + } + // Do a small amount of URL sanitization to help the user + // Derived from net/http.ProxyFromEnvironment + proxyURL, err := url.Parse(proxy) + if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we ignore the + // error and complain about the original one + var err2 error + proxyURL, err2 = url.Parse("http://" + proxy) + if err2 == nil { + err = nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + + lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String())) + return http.ProxyURL(proxyURL), nil +} + +func newDiscovery(lg *zap.Logger, durl, dproxyurl string, id types.ID) (*discovery, error) { + if lg == nil { + lg = zap.NewNop() + } + u, err := url.Parse(durl) + if err != nil { + return nil, err + } + token := u.Path + u.Path = "" + pf, err := newProxyFunc(lg, dproxyurl) + if err != nil { + return nil, err + } + + // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early + tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second) + if err != nil { + return nil, err + } + tr.Proxy = pf + cfg := clientv2.Config{ + Transport: tr, + Endpoints: []string{u.String()}, + } + c, err := clientv2.New(cfg) + if err != nil { + return nil, err + } + dc := clientv2.NewKeysAPIWithPrefix(c, "") + return &discovery{ + lg: lg, + cluster: token, + c: dc, + id: id, + url: u, + clock: clockwork.NewRealClock(), + }, nil +} + +func (d *discovery) joinCluster(config string) (string, error) { + // fast path: if the cluster is full, return the error + // do not need to register to the cluster in this 
case. + if _, _, _, err := d.checkCluster(); err != nil { + return "", err + } + + if err := d.createSelf(config); err != nil { + // Fails, even on a timeout, if createSelf times out. + // TODO(barakmich): Retrying the same node might want to succeed here + // (ie, createSelf should be idempotent for discovery). + return "", err + } + + nodes, size, index, err := d.checkCluster() + if err != nil { + return "", err + } + + all, err := d.waitNodes(nodes, size, index) + if err != nil { + return "", err + } + + return nodesToCluster(all, size) +} + +func (d *discovery) getCluster() (string, error) { + nodes, size, index, err := d.checkCluster() + if err != nil { + if err == ErrFullCluster { + return nodesToCluster(nodes, size) + } + return "", err + } + + all, err := d.waitNodes(nodes, size, index) + if err != nil { + return "", err + } + return nodesToCluster(all, size) +} + +func (d *discovery) createSelf(contents string) error { + ctx, cancel := context.WithTimeout(context.Background(), clientv2.DefaultRequestTimeout) + resp, err := d.c.Create(ctx, d.selfKey(), contents) + cancel() + if err != nil { + if eerr, ok := err.(clientv2.Error); ok && eerr.Code == clientv2.ErrorCodeNodeExist { + return ErrDuplicateID + } + return err + } + + // ensure self appears on the etcd we connected to + w := d.c.Watcher(d.selfKey(), &clientv2.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1}) + _, err = w.Next(context.Background()) + return err +} + +func (d *discovery) checkCluster() ([]*clientv2.Node, uint64, uint64, error) { + configKey := path.Join("/", d.cluster, "_config") + ctx, cancel := context.WithTimeout(context.Background(), clientv2.DefaultRequestTimeout) + // find cluster size + resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil) + cancel() + if err != nil { + if eerr, ok := err.(*clientv2.Error); ok && eerr.Code == clientv2.ErrorCodeKeyNotFound { + return nil, 0, 0, ErrSizeNotFound + } + if err == clientv2.ErrInvalidJSON { + return nil, 0, 0, 
ErrBadDiscoveryEndpoint + } + if ce, ok := err.(*clientv2.ClusterError); ok { + d.lg.Warn( + "failed to get from discovery etcd", + zap.String("discovery-url", d.url.String()), + zap.String("path", path.Join(configKey, "size")), + zap.Error(err), + zap.String("err-detail", ce.Detail()), + ) + return d.checkClusterRetry() + } + return nil, 0, 0, err + } + size, err := strconv.ParseUint(resp.Node.Value, 10, 0) + if err != nil { + return nil, 0, 0, ErrBadSizeKey + } + + ctx, cancel = context.WithTimeout(context.Background(), clientv2.DefaultRequestTimeout) + resp, err = d.c.Get(ctx, d.cluster, nil) + cancel() + if err != nil { + if ce, ok := err.(*clientv2.ClusterError); ok { + d.lg.Warn( + "failed to get from discovery etcd", + zap.String("discovery-url", d.url.String()), + zap.String("path", d.cluster), + zap.Error(err), + zap.String("err-detail", ce.Detail()), + ) + return d.checkClusterRetry() + } + return nil, 0, 0, err + } + var nodes []*clientv2.Node + // append non-config keys to nodes + for _, n := range resp.Node.Nodes { + if path.Base(n.Key) != path.Base(configKey) { + nodes = append(nodes, n) + } + } + + snodes := sortableNodes{nodes} + sort.Sort(snodes) + + // find self position + for i := range nodes { + if path.Base(nodes[i].Key) == path.Base(d.selfKey()) { + break + } + if uint64(i) >= size-1 { + return nodes[:size], size, resp.Index, ErrFullCluster + } + } + return nodes, size, resp.Index, nil +} + +func (d *discovery) logAndBackoffForRetry(step string) { + d.retries++ + // logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward. 
+ retries := d.retries + if retries > maxExpoentialRetries { + retries = maxExpoentialRetries + } + retryTimeInSecond := time.Duration(0x1< size { + nodes = nodes[:size] + } + // watch from the next index + w := d.c.Watcher(d.cluster, &clientv2.WatcherOptions{AfterIndex: index, Recursive: true}) + all := make([]*clientv2.Node, len(nodes)) + copy(all, nodes) + for _, n := range all { + if path.Base(n.Key) == path.Base(d.selfKey()) { + d.lg.Info( + "found self from discovery etcd", + zap.String("discovery-url", d.url.String()), + zap.String("self", path.Base(d.selfKey())), + ) + } else { + d.lg.Info( + "found peer from discovery etcd", + zap.String("discovery-url", d.url.String()), + zap.String("peer", path.Base(n.Key)), + ) + } + } + + // wait for others + for uint64(len(all)) < size { + d.lg.Info( + "found peers from discovery etcd; waiting for more", + zap.String("discovery-url", d.url.String()), + zap.Int("found-peers", len(all)), + zap.Int("needed-peers", int(size-uint64(len(all)))), + ) + resp, err := w.Next(context.Background()) + if err != nil { + if ce, ok := err.(*clientv2.ClusterError); ok { + d.lg.Warn( + "error while waiting for peers", + zap.String("discovery-url", d.url.String()), + zap.Error(err), + zap.String("err-detail", ce.Detail()), + ) + return d.waitNodesRetry() + } + return nil, err + } + d.lg.Info( + "found peer from discovery etcd", + zap.String("discovery-url", d.url.String()), + zap.String("peer", path.Base(resp.Node.Key)), + ) + all = append(all, resp.Node) + } + d.lg.Info( + "found all needed peers from discovery etcd", + zap.String("discovery-url", d.url.String()), + zap.Int("found-peers", len(all)), + ) + return all, nil +} + +func (d *discovery) selfKey() string { + return path.Join("/", d.cluster, d.id.String()) +} + +func nodesToCluster(ns []*clientv2.Node, size uint64) (string, error) { + s := make([]string, len(ns)) + for i, n := range ns { + s[i] = n.Value + } + us := strings.Join(s, ",") + m, err := types.NewURLsMap(us) + if err 
!= nil { + return us, ErrInvalidURL + } + if uint64(m.Len()) != size { + return us, ErrDuplicateName + } + return us, nil +} + +type sortableNodes struct{ Nodes []*clientv2.Node } + +func (ns sortableNodes) Len() int { return len(ns.Nodes) } +func (ns sortableNodes) Less(i, j int) bool { + return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex +} +func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] } diff --git a/etcd/etcdserver/api/v2error/error.go b/etcd/etcdserver/api/v2error/error.go new file mode 100644 index 00000000000..17fc3e2688e --- /dev/null +++ b/etcd/etcdserver/api/v2error/error.go @@ -0,0 +1,165 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v2error describes errors in etcd project. When any change happens, +// https://github.com/etcd-io/website/blob/main/content/docs/v2/errorcode.md +// needs to be updated correspondingly. +// To be deprecated in favor of v3 APIs. 
+package v2error + +import ( + "encoding/json" + "fmt" + "net/http" +) + +var errors = map[int]string{ + // command related errors + EcodeKeyNotFound: "key没有找到", + EcodeTestFailed: "Compare failed", // test and set + EcodeNotFile: "Not a file", + ecodeNoMorePeer: "Reached the max number of peers in the cluster", + EcodeNotDir: "Not a directory", + EcodeNodeExist: "Key already exists", // create + ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd", + EcodeRootROnly: "Root is read only", + EcodeDirNotEmpty: "Directory not empty", + ecodeExistingPeerAddr: "Peer address has existed", + EcodeUnauthorized: "The request requires user authentication", + + // Post form related errors + ecodeValueRequired: "Value is Required in POST form", + EcodePrevValueRequired: "PrevValue is Required in POST form", + EcodeTTLNaN: "The given TTL in POST form is not a number", + EcodeIndexNaN: "The given index in POST form is not a number", + ecodeValueOrTTLRequired: "Value or TTL is required in POST form", + ecodeTimeoutNaN: "The given timeout in POST form is not a number", + ecodeNameRequired: "Name is required in POST form", + ecodeIndexOrValueRequired: "Index or value is required", + ecodeIndexValueMutex: "Index and value cannot both be specified", + EcodeInvalidField: "Invalid field", + EcodeInvalidForm: "Invalid POST form", + EcodeRefreshValue: "Value provided on refresh", + EcodeRefreshTTLRequired: "A TTL必须是provided on refresh", + + // raft related errors + EcodeRaftInternal: "Raft Internal Error", + EcodeLeaderElect: "During Leader Election", + + // etcd related errors + EcodeWatcherCleared: "watcher is cleared due to etcd recovery", + EcodeEventIndexCleared: "The event in requested index is outdated and cleared", + ecodeStandbyInternal: "Standby Internal Error", + ecodeInvalidActiveSize: "Invalid active size", + ecodeInvalidRemoveDelay: "Standby remove delay", + + // client related errors + ecodeClientInternal: "Client Internal Error", +} + +var errorStatus = 
map[int]int{ + EcodeKeyNotFound: http.StatusNotFound, + EcodeNotFile: http.StatusForbidden, + EcodeDirNotEmpty: http.StatusForbidden, + EcodeUnauthorized: http.StatusUnauthorized, + EcodeTestFailed: http.StatusPreconditionFailed, + EcodeNodeExist: http.StatusPreconditionFailed, + EcodeRaftInternal: http.StatusInternalServerError, + EcodeLeaderElect: http.StatusInternalServerError, +} + +const ( + EcodeKeyNotFound = 100 + EcodeTestFailed = 101 + EcodeNotFile = 102 + ecodeNoMorePeer = 103 + EcodeNotDir = 104 + EcodeNodeExist = 105 + ecodeKeyIsPreserved = 106 + EcodeRootROnly = 107 + EcodeDirNotEmpty = 108 + ecodeExistingPeerAddr = 109 + EcodeUnauthorized = 110 + + ecodeValueRequired = 200 + EcodePrevValueRequired = 201 + EcodeTTLNaN = 202 + EcodeIndexNaN = 203 + ecodeValueOrTTLRequired = 204 + ecodeTimeoutNaN = 205 + ecodeNameRequired = 206 + ecodeIndexOrValueRequired = 207 + ecodeIndexValueMutex = 208 + EcodeInvalidField = 209 + EcodeInvalidForm = 210 + EcodeRefreshValue = 211 + EcodeRefreshTTLRequired = 212 + + EcodeRaftInternal = 300 + EcodeLeaderElect = 301 + + EcodeWatcherCleared = 400 + EcodeEventIndexCleared = 401 + ecodeStandbyInternal = 402 + ecodeInvalidActiveSize = 403 + ecodeInvalidRemoveDelay = 404 + + ecodeClientInternal = 500 +) + +type Error struct { + ErrorCode int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause,omitempty"` + Index uint64 `json:"index"` +} + +func NewRequestError(errorCode int, cause string) *Error { + return NewError(errorCode, cause, 0) +} + +func NewError(errorCode int, cause string, index uint64) *Error { + return &Error{ + ErrorCode: errorCode, + Message: errors[errorCode], + Cause: cause, + Index: index, + } +} + +// Error is for the error interface +func (e Error) Error() string { + return e.Message + " (" + e.Cause + ")" +} + +func (e Error) toJsonString() string { + b, _ := json.Marshal(e) + return string(b) +} + +func (e Error) StatusCode() int { + status, ok := errorStatus[e.ErrorCode] + if 
!ok { + status = http.StatusBadRequest + } + return status +} + +func (e Error) WriteTo(w http.ResponseWriter) error { + w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(e.StatusCode()) + _, err := w.Write([]byte(e.toJsonString() + "\n")) + return err +} diff --git a/etcd/etcdserver/api/v2http/capability.go b/etcd/etcdserver/api/v2http/capability.go new file mode 100644 index 00000000000..606c6180337 --- /dev/null +++ b/etcd/etcdserver/api/v2http/capability.go @@ -0,0 +1,41 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2http + +import ( + "fmt" + "net/http" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes" +) + +func authCapabilityHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !api.IsCapabilityEnabled(api.AuthCapability) { + notCapable(w, r, api.AuthCapability) + return + } + fn(w, r) + } +} + +func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) { + herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c)) + if err := herr.WriteTo(w); err != nil { + // TODO: the following plog was removed, add the logging back if possible + // plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr) + } +} diff --git a/etcd/etcdserver/api/v2http/client.go b/etcd/etcdserver/api/v2http/client.go new file mode 100644 index 00000000000..cab41129446 --- /dev/null +++ b/etcd/etcdserver/api/v2http/client.go @@ -0,0 +1,757 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2http + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + + "github.com/jonboulle/clockwork" + "go.uber.org/zap" +) + +const ( + authPrefix = "/v2/auth" + keysPrefix = "/v2/keys" + machinesPrefix = "/v2/machines" + membersPrefix = "/v2/members" + statsPrefix = "/v2/stats" +) + +// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. 
+func NewClientHandler(lg *zap.Logger, server etcdserver.ServerPeer, timeout time.Duration) http.Handler { + if lg == nil { + lg = zap.NewNop() + } + mux := http.NewServeMux() + etcdhttp.HandleBasic(lg, mux, server) + etcdhttp.HandleMetricsHealth(lg, mux, server) + handleV2(lg, mux, server, timeout) + return requestLogger(lg, mux) +} + +func handleV2(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) { + sec := v2auth.NewStore(lg, server, timeout) + kh := &keysHandler{ + lg: lg, + sec: sec, + server: server, + cluster: server.Cluster(), + timeout: timeout, + clientCertAuthEnabled: server.ClientCertAuthEnabled(), + } + + sh := &statsHandler{ + lg: lg, + stats: server, + } + + mh := &membersHandler{ + lg: lg, + sec: sec, + server: server, + cluster: server.Cluster(), + timeout: timeout, + clock: clockwork.NewRealClock(), + clientCertAuthEnabled: server.ClientCertAuthEnabled(), + } + + mah := &machinesHandler{cluster: server.Cluster()} + + sech := &authHandler{ + lg: lg, + sec: sec, + cluster: server.Cluster(), + clientCertAuthEnabled: server.ClientCertAuthEnabled(), + } + mux.HandleFunc("/", http.NotFound) + mux.Handle(keysPrefix, kh) + mux.Handle(keysPrefix+"/", kh) + mux.HandleFunc(statsPrefix+"/store", sh.serveStore) + mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) + mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) + mux.Handle(membersPrefix, mh) + mux.Handle(membersPrefix+"/", mh) + mux.Handle(machinesPrefix, mah) + handleAuth(mux, sech) +} + +type keysHandler struct { + lg *zap.Logger + sec v2auth.Store + server etcdserver.ServerV2 + cluster api.Cluster + timeout time.Duration + clientCertAuthEnabled bool +} + +func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") { + return + } + + w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) + + ctx, cancel := context.WithTimeout(context.Background(), h.timeout) + defer cancel() 
+ clock := clockwork.NewRealClock() + startTime := clock.Now() + rr, noValueOnSuccess, err := parseKeyRequest(r, clock) + if err != nil { + writeKeyError(h.lg, w, err) + return + } + // The path must be valid at this point (we've parsed the request successfully). + if !hasKeyPrefixAccess(h.lg, h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) { + writeKeyNoAuth(w) + return + } + if !rr.Wait { + reportRequestReceived(rr) + } + resp, err := h.server.Do(ctx, rr) + if err != nil { + err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix) + writeKeyError(h.lg, w, err) + reportRequestFailed(rr, err) + return + } + switch { + case resp.Event != nil: + if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil { + // Should never be reached + h.lg.Warn("failed to write key event", zap.Error(err)) + } + reportRequestCompleted(rr, startTime) + case resp.Watcher != nil: + ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout) + defer cancel() + handleKeyWatch(ctx, h.lg, w, resp, rr.Stream) + default: + writeKeyError(h.lg, w, errors.New("received response with no Event/Watcher")) + } +} + +type machinesHandler struct { + cluster api.Cluster +} + +func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET", "HEAD") { + return + } + endpoints := h.cluster.ClientURLs() + w.Write([]byte(strings.Join(endpoints, ", "))) +} + +type membersHandler struct { + lg *zap.Logger + sec v2auth.Store + server etcdserver.ServerV2 + cluster api.Cluster + timeout time.Duration + clock clockwork.Clock + clientCertAuthEnabled bool +} + +func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") { + return + } + if !hasWriteRootAccess(h.lg, h.sec, r, h.clientCertAuthEnabled) { + writeNoAuth(h.lg, w, r) + return + } + w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) + + ctx, cancel := 
context.WithTimeout(context.Background(), h.timeout) + defer cancel() + + switch r.Method { + case "GET": + switch trimPrefix(r.URL.Path, membersPrefix) { + case "": + mc := newMemberCollection(h.cluster.Members()) + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(mc); err != nil { + h.lg.Warn("failed to encode members response", zap.Error(err)) + } + case "leader": + id := h.server.Leader() + if id == 0 { + writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election")) + return + } + m := newMember(h.cluster.Member(id)) + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(m); err != nil { + h.lg.Warn("failed to encode members response", zap.Error(err)) + } + default: + writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found")) + } + + case "POST": + req := httptypes.MemberCreateRequest{} + if ok := unmarshalRequest(h.lg, r, &req, w); !ok { + return + } + now := h.clock.Now() + m := membership.NewMember("", req.PeerURLs, "", &now) + _, err := h.server.AddMember(ctx, *m) + switch { + case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: + writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) + return + case err != nil: + h.lg.Warn( + "failed to add a member", + zap.String("member-id", m.ID.String()), + zap.Error(err), + ) + writeError(h.lg, w, r, err) + return + } + res := newMember(m) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + if err := json.NewEncoder(w).Encode(res); err != nil { + h.lg.Warn("failed to encode members response", zap.Error(err)) + } + + case "DELETE": + id, ok := getID(h.lg, r.URL.Path, w) + if !ok { + return + } + _, err := h.server.RemoveMember(ctx, uint64(id)) + switch { + case err == membership.ErrIDRemoved: + writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", 
id))) + case err == membership.ErrIDNotFound: + writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) + case err != nil: + h.lg.Warn( + "failed to remove a member", + zap.String("member-id", id.String()), + zap.Error(err), + ) + writeError(h.lg, w, r, err) + default: + w.WriteHeader(http.StatusNoContent) + } + + case "PUT": + id, ok := getID(h.lg, r.URL.Path, w) + if !ok { + return + } + req := httptypes.MemberUpdateRequest{} + if ok := unmarshalRequest(h.lg, r, &req, w); !ok { + return + } + m := membership.Member{ + ID: id, + RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, + } + _, err := h.server.UpdateMember(ctx, m) + switch { + case err == membership.ErrPeerURLexists: + writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) + case err == membership.ErrIDNotFound: + writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) + case err != nil: + h.lg.Warn( + "failed to update a member", + zap.String("member-id", m.ID.String()), + zap.Error(err), + ) + writeError(h.lg, w, r, err) + default: + w.WriteHeader(http.StatusNoContent) + } + } +} + +type statsHandler struct { + lg *zap.Logger + stats stats.Stats +} + +func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(h.stats.StoreStats()) +} + +func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(h.stats.SelfStats()) +} + +func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + stats := h.stats.LeaderStats() + if stats == nil { + etcdhttp.WriteError(h.lg, w, r, httptypes.NewHTTPError(http.StatusForbidden, "not 
current leader")) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(stats) +} + +// parseKeyRequest converts a received http.Request on keysPrefix to +// a server Request, performing validation of supplied fields as appropriate. +// If any validation fails, an empty Request and non-nil error is returned. +func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) { + var noValueOnSuccess bool + emptyReq := etcdserverpb.Request{} + + err := r.ParseForm() + if err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidForm, + err.Error(), + ) + } + + if !strings.HasPrefix(r.URL.Path, keysPrefix) { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidForm, + "incorrect key prefix", + ) + } + p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):]) + + var pIdx, wIdx uint64 + if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeIndexNaN, + `invalid value for "prevIndex"`, + ) + } + if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeIndexNaN, + `invalid value for "waitIndex"`, + ) + } + + var rec, sort, wait, dir, quorum, stream bool + if rec, err = getBool(r.Form, "recursive"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `invalid value for "recursive"`, + ) + } + if sort, err = getBool(r.Form, "sorted"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `invalid value for "sorted"`, + ) + } + if wait, err = getBool(r.Form, "wait"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `invalid value for "wait"`, + ) + } + // TODO(jonboulle): define what parameters dir is/isn't compatible with? 
+ if dir, err = getBool(r.Form, "dir"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `invalid value for "dir"`, + ) + } + if quorum, err = getBool(r.Form, "quorum"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `invalid value for "quorum"`, + ) + } + if stream, err = getBool(r.Form, "stream"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `invalid value for "stream"`, + ) + } + + if wait && r.Method != "GET" { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `"wait" can only be used with GET requests`, + ) + } + + pV := r.FormValue("prevValue") + if _, ok := r.Form["prevValue"]; ok && pV == "" { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodePrevValueRequired, + `"prevValue" cannot be empty`, + ) + } + + if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + `invalid value for "noValueOnSuccess"`, + ) + } + + // TTL is nullable, so leave it null if not specified + // or an empty string + var ttl *uint64 + if len(r.FormValue("ttl")) > 0 { + i, err := getUint64(r.Form, "ttl") + if err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeTTLNaN, + `invalid value for "ttl"`, + ) + } + ttl = &i + } + + // prevExist is nullable, so leave it null if not specified + var pe *bool + if _, ok := r.Form["prevExist"]; ok { + bv, err := getBool(r.Form, "prevExist") + if err != nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, + "invalid value for prevExist", + ) + } + pe = &bv + } + + // refresh is nullable, so leave it null if not specified + var refresh *bool + if _, ok := r.Form["refresh"]; ok { + bv, err := getBool(r.Form, "refresh") + if err != nil { + return emptyReq, false, v2error.NewRequestError( + 
v2error.EcodeInvalidField, + "invalid value for refresh", + ) + } + refresh = &bv + if refresh != nil && *refresh { + val := r.FormValue("value") + if _, ok := r.Form["value"]; ok && val != "" { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeRefreshValue, + `A value was provided on a refresh`, + ) + } + if ttl == nil { + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeRefreshTTLRequired, + `No TTL value set`, + ) + } + } + } + + rr := etcdserverpb.Request{ + Method: r.Method, + Path: p, + Val: r.FormValue("value"), + Dir: dir, + PrevValue: pV, + PrevIndex: pIdx, + PrevExist: pe, + Wait: wait, + Since: wIdx, + Recursive: rec, + Sorted: sort, + Quorum: quorum, + Stream: stream, + } + + if pe != nil { + rr.PrevExist = pe + } + + if refresh != nil { + rr.Refresh = refresh + } + + // Null TTL is equivalent to unset Expiration + if ttl != nil { + expr := time.Duration(*ttl) * time.Second + rr.Expiration = clock.Now().Add(expr).UnixNano() + } + + return rr, noValueOnSuccess, nil +} + +// writeKeyEvent trims the prefix of key path in a single Event under +// StoreKeysPrefix, serializes it and writes the resulting JSON to the given +// ResponseWriter, along with the appropriate headers. 
+func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error { + ev := resp.Event + if ev == nil { + return errors.New("cannot write empty Event") + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex)) + w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index)) + w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term)) + + if ev.IsCreated() { + w.WriteHeader(http.StatusCreated) + } + + ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) + if noValueOnSuccess && + (ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap || + ev.Action == v2store.Create || ev.Action == v2store.Update) { + ev.NodeExtern = nil + ev.PrevNode = nil + } + return json.NewEncoder(w).Encode(ev) +} + +func writeKeyNoAuth(w http.ResponseWriter) { + e := v2error.NewError(v2error.EcodeUnauthorized, "Insufficient credentials", 0) + e.WriteTo(w) +} + +// writeKeyError logs and writes the given Error to the ResponseWriter. +// If Error is not an etcdErr, the error will be converted to an etcd error. 
+func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) { + if err == nil { + return + } + switch e := err.(type) { + case *v2error.Error: + e.WriteTo(w) + default: + switch err { + case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost: + if lg != nil { + lg.Warn( + "v2 response error", + zap.String("internal-server-error", err.Error()), + ) + } + default: + if lg != nil { + lg.Warn( + "unexpected v2 response error", + zap.String("internal-server-error", err.Error()), + ) + } + } + ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0) + ee.WriteTo(w) + } +} + +func handleKeyWatch(ctx context.Context, lg *zap.Logger, w http.ResponseWriter, resp etcdserver.Response, stream bool) { + wa := resp.Watcher + defer wa.Remove() + ech := wa.EventChan() + var nch <-chan bool + if x, ok := w.(http.CloseNotifier); ok { + nch = x.CloseNotify() + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex())) + w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index)) + w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term)) + w.WriteHeader(http.StatusOK) + + // Ensure headers are flushed early, in case of long polling + w.(http.Flusher).Flush() + + for { + select { + case <-nch: + // Client closed connection. Nothing to do. + return + case <-ctx.Done(): + // Timed out. net/http will close the connection for us, so nothing to do. + return + case ev, ok := <-ech: + if !ok { + // If the channel is closed this may be an indication of + // that notifications are much more than we are able to + // send to the client in time. Then we simply end streaming. 
+ return + } + ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) + if err := json.NewEncoder(w).Encode(ev); err != nil { + // Should never be reached + lg.Warn("failed to encode event", zap.Error(err)) + return + } + if !stream { + return + } + w.(http.Flusher).Flush() + } + } +} + +func trimEventPrefix(ev *v2store.Event, prefix string) *v2store.Event { + if ev == nil { + return nil + } + // Since the *Event may reference one in the store + // history, we must copy it before modifying + e := ev.Clone() + trimNodeExternPrefix(e.NodeExtern, prefix) + trimNodeExternPrefix(e.PrevNode, prefix) + return e +} + +func trimNodeExternPrefix(n *v2store.NodeExtern, prefix string) { + if n == nil { + return + } + n.Key = strings.TrimPrefix(n.Key, prefix) + for _, nn := range n.ExternNodes { + trimNodeExternPrefix(nn, prefix) + } +} + +func trimErrorPrefix(err error, prefix string) error { + if e, ok := err.(*v2error.Error); ok { + e.Cause = strings.TrimPrefix(e.Cause, prefix) + } + return err +} + +func unmarshalRequest(lg *zap.Logger, r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool { + ctype := r.Header.Get("Content-Type") + semicolonPosition := strings.Index(ctype, ";") + if semicolonPosition != -1 { + ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition])) + } + if ctype != "application/json" { + writeError(lg, w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype))) + return false + } + b, err := ioutil.ReadAll(r.Body) + if err != nil { + writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) + return false + } + if err := req.UnmarshalJSON(b); err != nil { + writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) + return false + } + return true +} + +func getID(lg *zap.Logger, p string, w http.ResponseWriter) (types.ID, bool) { + idStr := trimPrefix(p, membersPrefix) + if idStr == "" { + http.Error(w, 
"Method Not Allowed", http.StatusMethodNotAllowed) + return 0, false + } + id, err := types.IDFromString(idStr) + if err != nil { + writeError(lg, w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr))) + return 0, false + } + return id, true +} + +// getUint64 extracts a uint64 by the given key from a Form. If the key does +// not exist in the form, 0 is returned. If the key exists but the value is +// badly formed, an error is returned. If multiple values are present only the +// first is considered. +func getUint64(form url.Values, key string) (i uint64, err error) { + if vals, ok := form[key]; ok { + i, err = strconv.ParseUint(vals[0], 10, 64) + } + return +} + +// getBool extracts a bool by the given key from a Form. If the key does not +// exist in the form, false is returned. If the key exists but the value is +// badly formed, an error is returned. If multiple values are present only the +// first is considered. +func getBool(form url.Values, key string) (b bool, err error) { + if vals, ok := form[key]; ok { + b, err = strconv.ParseBool(vals[0]) + } + return +} + +// trimPrefix removes a given prefix and any slash following the prefix +// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == "" +func trimPrefix(p, prefix string) (s string) { + s = strings.TrimPrefix(p, prefix) + s = strings.TrimPrefix(s, "/") + return +} + +func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection { + c := httptypes.MemberCollection(make([]httptypes.Member, len(ms))) + + for i, m := range ms { + c[i] = newMember(m) + } + + return &c +} + +func newMember(m *membership.Member) httptypes.Member { + tm := httptypes.Member{ + ID: m.ID.String(), + Name: m.Name, + PeerURLs: make([]string, len(m.PeerURLs)), + ClientURLs: make([]string, len(m.ClientURLs)), + } + + copy(tm.PeerURLs, m.PeerURLs) + copy(tm.ClientURLs, m.ClientURLs) + + return tm +} diff --git a/etcd/etcdserver/api/v2http/client_auth.go 
b/etcd/etcdserver/api/v2http/client_auth.go new file mode 100644 index 00000000000..15968a5a0b4 --- /dev/null +++ b/etcd/etcdserver/api/v2http/client_auth.go @@ -0,0 +1,604 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2http + +import ( + "encoding/json" + "net/http" + "path" + "strings" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes" + + "go.uber.org/zap" +) + +type authHandler struct { + lg *zap.Logger + sec v2auth.Store + cluster api.Cluster + clientCertAuthEnabled bool +} + +func hasWriteRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { + if r.Method == "GET" || r.Method == "HEAD" { + return true + } + return hasRootAccess(lg, sec, r, clientCertAuthEnabled) +} + +func userFromBasicAuth(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { + username, password, ok := r.BasicAuth() + if !ok { + lg.Warn("malformed basic auth encoding") + return nil + } + user, err := sec.GetUser(username) + if err != nil { + return nil + } + + ok = sec.CheckPassword(user, password) + if !ok { + lg.Warn("incorrect password", zap.String("user-name", username)) + return nil + } + return &user +} + +func userFromClientCertificate(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { + if r.TLS == nil { + return nil + } + + for 
_, chains := range r.TLS.VerifiedChains { + for _, chain := range chains { + lg.Debug("found common name", zap.String("common-name", chain.Subject.CommonName)) + user, err := sec.GetUser(chain.Subject.CommonName) + if err == nil { + lg.Debug( + "authenticated a user via common name", + zap.String("user-name", user.User), + zap.String("common-name", chain.Subject.CommonName), + ) + return &user + } + } + } + return nil +} + +func hasRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { + if sec == nil { + // No store means no auth available, eg, tests. + return true + } + if !sec.AuthEnabled() { + return true + } + + var rootUser *v2auth.User + if r.Header.Get("Authorization") == "" && clientCertAuthEnabled { + rootUser = userFromClientCertificate(lg, sec, r) + if rootUser == nil { + return false + } + } else { + rootUser = userFromBasicAuth(lg, sec, r) + if rootUser == nil { + return false + } + } + + for _, role := range rootUser.Roles { + if role == v2auth.RootRoleName { + return true + } + } + + lg.Warn( + "a user does not have root role for resource", + zap.String("root-user", rootUser.User), + zap.String("root-role-name", v2auth.RootRoleName), + zap.String("resource-path", r.URL.Path), + ) + return false +} + +func hasKeyPrefixAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool { + if sec == nil { + // No store means no auth available, eg, tests. 
+ return true + } + if !sec.AuthEnabled() { + return true + } + + var user *v2auth.User + if r.Header.Get("Authorization") == "" { + if clientCertAuthEnabled { + user = userFromClientCertificate(lg, sec, r) + } + if user == nil { + return hasGuestAccess(lg, sec, r, key) + } + } else { + user = userFromBasicAuth(lg, sec, r) + if user == nil { + return false + } + } + + writeAccess := r.Method != "GET" && r.Method != "HEAD" + for _, roleName := range user.Roles { + role, err := sec.GetRole(roleName) + if err != nil { + continue + } + if recursive { + if role.HasRecursiveAccess(key, writeAccess) { + return true + } + } else if role.HasKeyAccess(key, writeAccess) { + return true + } + } + + lg.Warn( + "invalid access for user on key", + zap.String("user-name", user.User), + zap.String("key", key), + ) + return false +} + +func hasGuestAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string) bool { + writeAccess := r.Method != "GET" && r.Method != "HEAD" + role, err := sec.GetRole(v2auth.GuestRoleName) + if err != nil { + return false + } + if role.HasKeyAccess(key, writeAccess) { + return true + } + + lg.Warn( + "invalid access for a guest role on key", + zap.String("role-name", v2auth.GuestRoleName), + zap.String("key", key), + ) + return false +} + +func writeNoAuth(lg *zap.Logger, w http.ResponseWriter, r *http.Request) { + herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials") + if err := herr.WriteTo(w); err != nil { + lg.Debug( + "failed to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.Error(err), + ) + } +} + +func handleAuth(mux *http.ServeMux, sh *authHandler) { + mux.HandleFunc(authPrefix+"/roles", authCapabilityHandler(sh.baseRoles)) + mux.HandleFunc(authPrefix+"/roles/", authCapabilityHandler(sh.handleRoles)) + mux.HandleFunc(authPrefix+"/users", authCapabilityHandler(sh.baseUsers)) + mux.HandleFunc(authPrefix+"/users/", authCapabilityHandler(sh.handleUsers)) + 
mux.HandleFunc(authPrefix+"/enable", authCapabilityHandler(sh.enableDisable)) +} + +func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) + return + } + + w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) + w.Header().Set("Content-Type", "application/json") + + roles, err := sh.sec.AllRoles() + if err != nil { + writeError(sh.lg, w, r, err) + return + } + if roles == nil { + roles = make([]string, 0) + } + + err = r.ParseForm() + if err != nil { + writeError(sh.lg, w, r, err) + return + } + + var rolesCollections struct { + Roles []v2auth.Role `json:"roles"` + } + for _, roleName := range roles { + var role v2auth.Role + role, err = sh.sec.GetRole(roleName) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + rolesCollections.Roles = append(rolesCollections.Roles, role) + } + err = json.NewEncoder(w).Encode(rolesCollections) + + if err != nil { + sh.lg.Warn( + "failed to encode base roles", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + writeError(sh.lg, w, r, err) + return + } +} + +func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) { + subpath := path.Clean(r.URL.Path[len(authPrefix):]) + // Split "/roles/rolename/command". 
+ // First item is an empty string, second is "roles" + pieces := strings.Split(subpath, "/") + if len(pieces) == 2 { + sh.baseRoles(w, r) + return + } + if len(pieces) != 3 { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) + return + } + sh.forRole(w, r, pieces[2]) +} + +func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) { + if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { + return + } + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) + return + } + w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) + w.Header().Set("Content-Type", "application/json") + + switch r.Method { + case "GET": + data, err := sh.sec.GetRole(role) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + err = json.NewEncoder(w).Encode(data) + if err != nil { + sh.lg.Warn( + "failed to encode a role", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + return + } + return + + case "PUT": + var in v2auth.Role + err := json.NewDecoder(r.Body).Decode(&in) + if err != nil { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) + return + } + if in.Role != role { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL")) + return + } + + var out v2auth.Role + + // create + if in.Grant.IsEmpty() && in.Revoke.IsEmpty() { + err = sh.sec.CreateRole(in) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + w.WriteHeader(http.StatusCreated) + out = in + } else { + if !in.Permissions.IsEmpty() { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke")) + return + } + out, err = sh.sec.UpdateRole(in) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + w.WriteHeader(http.StatusOK) + } + + err = json.NewEncoder(w).Encode(out) + if err != nil { + 
sh.lg.Warn( + "failed to encode a role", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + return + } + return + + case "DELETE": + err := sh.sec.DeleteRole(role) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + } +} + +type userWithRoles struct { + User string `json:"user"` + Roles []v2auth.Role `json:"roles,omitempty"` +} + +type usersCollections struct { + Users []userWithRoles `json:"users"` +} + +func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) + return + } + w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) + w.Header().Set("Content-Type", "application/json") + + users, err := sh.sec.AllUsers() + if err != nil { + writeError(sh.lg, w, r, err) + return + } + if users == nil { + users = make([]string, 0) + } + + err = r.ParseForm() + if err != nil { + writeError(sh.lg, w, r, err) + return + } + + ucs := usersCollections{} + for _, userName := range users { + var user v2auth.User + user, err = sh.sec.GetUser(userName) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + + uwr := userWithRoles{User: user.User} + for _, roleName := range user.Roles { + var role v2auth.Role + role, err = sh.sec.GetRole(roleName) + if err != nil { + continue + } + uwr.Roles = append(uwr.Roles, role) + } + + ucs.Users = append(ucs.Users, uwr) + } + err = json.NewEncoder(w).Encode(ucs) + + if err != nil { + sh.lg.Warn( + "failed to encode users", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + writeError(sh.lg, w, r, err) + return + } +} + +func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) { + subpath := path.Clean(r.URL.Path[len(authPrefix):]) + // Split "/users/username". 
+ // First item is an empty string, second is "users" + pieces := strings.Split(subpath, "/") + if len(pieces) == 2 { + sh.baseUsers(w, r) + return + } + if len(pieces) != 3 { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) + return + } + sh.forUser(w, r, pieces[2]) +} + +func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) { + if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { + return + } + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) + return + } + w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) + w.Header().Set("Content-Type", "application/json") + + switch r.Method { + case "GET": + u, err := sh.sec.GetUser(user) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + + err = r.ParseForm() + if err != nil { + writeError(sh.lg, w, r, err) + return + } + + uwr := userWithRoles{User: u.User} + for _, roleName := range u.Roles { + var role v2auth.Role + role, err = sh.sec.GetRole(roleName) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + uwr.Roles = append(uwr.Roles, role) + } + err = json.NewEncoder(w).Encode(uwr) + + if err != nil { + sh.lg.Warn( + "failed to encode roles", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + return + } + return + + case "PUT": + var u v2auth.User + err := json.NewDecoder(r.Body).Decode(&u) + if err != nil { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) + return + } + if u.User != user { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL")) + return + } + + var ( + out v2auth.User + created bool + ) + + if len(u.Grant) == 0 && len(u.Revoke) == 0 { + // create or update + if len(u.Roles) != 0 { + out, err = sh.sec.CreateUser(u) + } else { + // if user passes in both password and roles, we are unsure about his/her + // intention. 
+ out, created, err = sh.sec.CreateOrUpdateUser(u) + } + + if err != nil { + writeError(sh.lg, w, r, err) + return + } + } else { + // update case + if len(u.Roles) != 0 { + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke")) + return + } + out, err = sh.sec.UpdateUser(u) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + } + + if created { + w.WriteHeader(http.StatusCreated) + } else { + w.WriteHeader(http.StatusOK) + } + + out.Password = "" + + err = json.NewEncoder(w).Encode(out) + if err != nil { + sh.lg.Warn( + "failed to encode a user", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + return + } + return + + case "DELETE": + err := sh.sec.DeleteUser(user) + if err != nil { + writeError(sh.lg, w, r, err) + return + } + } +} + +type enabled struct { + Enabled bool `json:"enabled"` +} + +func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { + return + } + if !hasWriteRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) + return + } + w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) + w.Header().Set("Content-Type", "application/json") + isEnabled := sh.sec.AuthEnabled() + switch r.Method { + case "GET": + jsonDict := enabled{isEnabled} + err := json.NewEncoder(w).Encode(jsonDict) + if err != nil { + sh.lg.Warn( + "failed to encode a auth state", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } + + case "PUT": + err := sh.sec.EnableAuth() + if err != nil { + writeError(sh.lg, w, r, err) + return + } + + case "DELETE": + err := sh.sec.DisableAuth() + if err != nil { + writeError(sh.lg, w, r, err) + return + } + } +} diff --git a/etcd/etcdserver/api/v2http/http.go b/etcd/etcdserver/api/v2http/http.go new file mode 100644 index 00000000000..e1480afbff7 --- /dev/null +++ b/etcd/etcdserver/api/v2http/http.go @@ -0,0 +1,82 @@ +// Copyright 
2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2http + +import ( + "math" + "net/http" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes" + + "go.uber.org/zap" +) + +const ( + // time to wait for a Watch request + defaultWatchTimeout = time.Duration(math.MaxInt64) +) + +func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { + if err == nil { + return + } + if e, ok := err.(v2auth.Error); ok { + herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) + if et := herr.WriteTo(w); et != nil { + if lg != nil { + lg.Debug( + "failed to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("v2auth-error", e.Error()), + zap.Error(et), + ) + } + } + return + } + etcdhttp.WriteError(lg, w, r, err) +} + +// allowMethod verifies that the given method is one of the allowed methods, +// and if not, it writes an error to w. A boolean is returned indicating +// whether or not the method is allowed. 
+func allowMethod(w http.ResponseWriter, m string, ms ...string) bool { + for _, meth := range ms { + if m == meth { + return true + } + } + w.Header().Set("Allow", strings.Join(ms, ",")) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return false +} + +func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if lg != nil { + lg.Debug( + "handling HTTP request", + zap.String("method", r.Method), + zap.String("request-uri", r.RequestURI), + zap.String("remote-addr", r.RemoteAddr), + ) + } + handler.ServeHTTP(w, r) + }) +} diff --git a/server/etcdserver/api/etcdhttp/types/errors.go b/etcd/etcdserver/api/v2http/httptypes/errors.go similarity index 100% rename from server/etcdserver/api/etcdhttp/types/errors.go rename to etcd/etcdserver/api/v2http/httptypes/errors.go diff --git a/server/etcdserver/api/etcdhttp/types/errors_test.go b/etcd/etcdserver/api/v2http/httptypes/errors_test.go similarity index 100% rename from server/etcdserver/api/etcdhttp/types/errors_test.go rename to etcd/etcdserver/api/v2http/httptypes/errors_test.go diff --git a/etcd/etcdserver/api/v2http/httptypes/member.go b/etcd/etcdserver/api/v2http/httptypes/member.go new file mode 100644 index 00000000000..30c6bb743fd --- /dev/null +++ b/etcd/etcdserver/api/v2http/httptypes/member.go @@ -0,0 +1,69 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package httptypes defines how etcd's HTTP API entities are serialized to and +// deserialized from JSON. +package httptypes + +import ( + "encoding/json" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" +) + +type Member struct { + ID string `json:"id"` + Name string `json:"name"` + PeerURLs []string `json:"peerURLs"` + ClientURLs []string `json:"clientURLs"` +} + +type MemberCreateRequest struct { + PeerURLs types.URLs +} + +type MemberUpdateRequest struct { + MemberCreateRequest +} + +func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{} + + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + urls, err := types.NewURLs(s.PeerURLs) + if err != nil { + return err + } + + m.PeerURLs = urls + return nil +} + +type MemberCollection []Member + +func (c *MemberCollection) MarshalJSON() ([]byte, error) { + d := struct { + Members []Member `json:"members"` + }{ + Members: []Member(*c), + } + + return json.Marshal(d) +} diff --git a/etcd/etcdserver/api/v2http/httptypes/member_test.go b/etcd/etcdserver/api/v2http/httptypes/member_test.go new file mode 100644 index 00000000000..3704256a0ad --- /dev/null +++ b/etcd/etcdserver/api/v2http/httptypes/member_test.go @@ -0,0 +1,135 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package httptypes + +import ( + "encoding/json" + "net/url" + "reflect" + "testing" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" +) + +func TestMemberUnmarshal(t *testing.T) { + tests := []struct { + body []byte + wantMember Member + wantError bool + }{ + // no URLs, just check ID & Name + { + body: []byte(`{"id": "c", "name": "dungarees"}`), + wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil}, + }, + + // both client and peer URLs + { + body: []byte(`{"peerURLs": ["http://127.0.0.1:2379"], "clientURLs": ["http://127.0.0.1:2379"]}`), + wantMember: Member{ + PeerURLs: []string{ + "http://127.0.0.1:2379", + }, + ClientURLs: []string{ + "http://127.0.0.1:2379", + }, + }, + }, + + // multiple peer URLs + { + body: []byte(`{"peerURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), + wantMember: Member{ + PeerURLs: []string{ + "http://127.0.0.1:2379", + "https://example.com", + }, + ClientURLs: nil, + }, + }, + + // multiple client URLs + { + body: []byte(`{"clientURLs": ["http://127.0.0.1:2379", "https://example.com"]}`), + wantMember: Member{ + PeerURLs: nil, + ClientURLs: []string{ + "http://127.0.0.1:2379", + "https://example.com", + }, + }, + }, + + // invalid JSON + { + body: []byte(`{"peerU`), + wantError: true, + }, + } + + for i, tt := range tests { + got := Member{} + err := json.Unmarshal(tt.body, &got) + if tt.wantError != (err != nil) { + t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err) + continue + } + + if !reflect.DeepEqual(tt.wantMember, got) { + t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got) + } + } +} + +func TestMemberCreateRequestUnmarshal(t *testing.T) { + body := []byte(`{"peerURLs": ["http://127.0.0.1:8081", "https://127.0.0.1:8080"]}`) + want := MemberCreateRequest{ + PeerURLs: types.URLs([]url.URL{ + {Scheme: "http", Host: "127.0.0.1:8081"}, + {Scheme: "https", Host: "127.0.0.1:8080"}, + }), + } + + var req MemberCreateRequest + if err := 
json.Unmarshal(body, &req); err != nil { + t.Fatalf("Unmarshal returned unexpected err=%v", err) + } + + if !reflect.DeepEqual(want, req) { + t.Fatalf("Failed to unmarshal MemberCreateRequest: want=%#v, got=%#v", want, req) + } +} + +func TestMemberCreateRequestUnmarshalFail(t *testing.T) { + tests := [][]byte{ + // invalid JSON + []byte(``), + []byte(`{`), + + // spot-check validation done in types.NewURLs + []byte(`{"peerURLs": "foo"}`), + []byte(`{"peerURLs": ["."]}`), + []byte(`{"peerURLs": []}`), + []byte(`{"peerURLs": ["http://127.0.0.1:2379/foo"]}`), + []byte(`{"peerURLs": ["http://127.0.0.1"]}`), + } + + for i, tt := range tests { + var req MemberCreateRequest + if err := json.Unmarshal(tt, &req); err == nil { + t.Errorf("#%d: expected err, got nil", i) + } + } +} diff --git a/etcd/etcdserver/api/v2http/metrics.go b/etcd/etcdserver/api/v2http/metrics.go new file mode 100644 index 00000000000..527b9db6bd9 --- /dev/null +++ b/etcd/etcdserver/api/v2http/metrics.go @@ -0,0 +1,98 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2http + +import ( + "net/http" + "strconv" + "time" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + incomingEvents = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "http", + Name: "received_total", + Help: "Counter of requests received into the system (successfully parsed and authd).", + }, []string{"method"}) + + failedEvents = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "http", + Name: "failed_total", + Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).", + }, []string{"method", "code"}) + + successfulEventsHandlingSec = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "http", + Name: "successful_duration_seconds", + Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).", + + // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2 + // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec + Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13), + }, []string{"method"}) +) + +func init() { + prometheus.MustRegister(incomingEvents) + prometheus.MustRegister(failedEvents) + prometheus.MustRegister(successfulEventsHandlingSec) +} + +func reportRequestReceived(request etcdserverpb.Request) { + incomingEvents.WithLabelValues(methodFromRequest(request)).Inc() +} + +func reportRequestCompleted(request etcdserverpb.Request, startTime time.Time) { + method := methodFromRequest(request) + successfulEventsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds()) +} + +func reportRequestFailed(request etcdserverpb.Request, err error) { + method := methodFromRequest(request) + 
failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc() +} + +func methodFromRequest(request etcdserverpb.Request) string { + if request.Method == "GET" && request.Quorum { + return "QGET" + } + return request.Method +} + +func codeFromError(err error) int { + if err == nil { + return http.StatusInternalServerError + } + switch e := err.(type) { + case *v2error.Error: + return e.StatusCode() + case *httptypes.HTTPError: + return e.Code + default: + return http.StatusInternalServerError + } +} diff --git a/etcd/etcdserver/api/v2http/testdata/ca.pem b/etcd/etcdserver/api/v2http/testdata/ca.pem new file mode 100644 index 00000000000..60cbee3bb4b --- /dev/null +++ b/etcd/etcdserver/api/v2http/testdata/ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDEjCCAfqgAwIBAgIIYpX+8HgWGfkwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE +AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA1MDBaFw0yMDExMjIwMzA1MDBaMBUx +EzARBgNVBAMTCmV0Y2QgdGVzdHMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDa9PkwEwiBD8mB+VIKz5r5gRHnNF4Icj6T6R/RsdatecQe6vU0EU4FXtKZ +drWnCGlATyrQooqHpb+rDc7CUt3mXrIxrNkcGTMaesF7P0GWxVkyOGSjJMxGBv3e +bAZknBe4eLMi68L1aT/uYmxcp/B3L2mfdFtc1Gd6mYJpNm1PgilRyIrO0mY5ysIX +4WHCa3yudAv8HrFbQcw7l7OyKA6uSWg6h07lE3d5jw5YOly+hz0iaRtzhb4tJrYD +Lm1tehb0nnoLuW6yYblRSoyBVDT50MFVlyvW40Po5WkOXw/wnsnyxWRR4yqU23wq +quQU0HXJEBLFnT+KbLOQ0EAE35vXAgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSbUCGB95ochDrbEZlzGGYuA7xu +xjAfBgNVHSMEGDAWgBSbUCGB95ochDrbEZlzGGYuA7xuxjANBgkqhkiG9w0BAQsF +AAOCAQEAardO/SGCu7Snz3YRBUinzpZEUFTFend+FJtBkxBXCao1RvTXg8PBMkza +LUsaR4mLsGoXLIbNCoIinvVG0QULYCZe11N3l1L0G2g5uhEM4MfJ2rwrMD0o17i+ +nwNRRE3tfKAlWhYQg+4ye36kQVxASPniHjdQgjKYUFTNXdyG6DzuAclaVte9iVw6 +cWl61fB2CZya3+uMtih8t/Kgl2KbMO2PvNByfnDjKmW+v58qHbXyoJZqnpvDn14+ +p2Ox+AvvxYiEiUIvFdWy101QB7NJMCtdwq6oG6OvIOgXzLgitTFSq4kfWDfupQjW +iFoQ+vWmYhK5ld0nBaiz+JmHuemK7A== +-----END CERTIFICATE----- diff --git a/etcd/etcdserver/api/v2http/testdata/otheruser.pem 
b/etcd/etcdserver/api/v2http/testdata/otheruser.pem new file mode 100644 index 00000000000..d0c74eb9f8d --- /dev/null +++ b/etcd/etcdserver/api/v2http/testdata/otheruser.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDOTCCAiGgAwIBAgIINYpsso1f3SswDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE +AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA4MDBaFw0xNjExMjMwMzA4MDBaMBQx +EjAQBgNVBAMTCW90aGVydXNlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAPOAUa5GblwIjHTEnox2c/Am9jV1TMvzBuVXxnp2UnNHMNwstAooFrEs/Z+d +ft5AOsooP6zVuM3eBQa4i9huJbVNDfPU2H94yA89jYfJYUgo7C838V6NjGsCCptQ +WzkKPNlDbT9xA/7XpIUJ2WltuYDRrjWq8pXQONqTjcg5n4l0JO8xdHJHRUkFQ76F +1npXeLndgGaP11lqzpYlglEGi5URhzAT1xxQ0hLSe8WNmiCxxkq++C8Gx4sPg9mX +M94aoJDzZSnoaqDxckbP/7Q0ZKe/fVdCFkd5+jqT4Mt7hwmz9jTCHcVnAz4EKI+t +rbWgbCfMK6013GotXz7InStVe+MCAwEAAaOBjTCBijAOBgNVHQ8BAf8EBAMCBaAw +HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD +VR0OBBYEFFwMmf+pnaejmri6y1T+lfU+MBq/MB8GA1UdIwQYMBaAFJtQIYH3mhyE +OtsRmXMYZi4DvG7GMAsGA1UdEQQEMAKCADANBgkqhkiG9w0BAQsFAAOCAQEACOn6 +mec29MTMGPt/EPOmSyhvTKSwH+5YWjCbyUFeoB8puxrJlIphK4mvT+sXp2wzno89 +FVCliO/rJurdErKvyOjlK1QrVGPYIt7Wz9ssAfvlwCyBM8PqgEG8dJN9aAkf2h4r +Ye+hBh1y6Nnataf7lxe9mqAOvD/7wVIgzjCnMD1q5QSY2Mln3HwVQXtbZFbY363Z +X9Fk3PUpjJSX9jbEz9kIlT8AJAdxl6GB8Z9B8PrA8qf4Bhk15ICRHxb67EhDrGWV +8q7ArU2XBqs/+GWpUIMoGKNZv+K+/SksZK1KnzaUvApUCJzt+ac+p8HOgMdvDRgr +GfVVJqcZgyEmeczy0A== +-----END CERTIFICATE----- diff --git a/etcd/etcdserver/api/v2http/testdata/user.pem b/etcd/etcdserver/api/v2http/testdata/user.pem new file mode 100644 index 00000000000..0fc2108651b --- /dev/null +++ b/etcd/etcdserver/api/v2http/testdata/user.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNDCCAhygAwIBAgIIcQ0DAfgevocwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE +AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA4MDBaFw0xNjExMjMwMzA4MDBaMA8x +DTALBgNVBAMTBHVzZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0 ++3Lm1SmUJJLufaFTYz+e5qyQEshNRyeAhXIeZ1aw+yBjslXGZQ3/uGOwnOnGqUeA +Nidc9ty4NsK6RVppHlezUrBnpl4hws8vHWFKZpU2R6kKL8EYLmg+iVqEBj7XqfAp 
+8bJqqZI3KOqLXpRH55mA69KP7VEK9ngTVR/tERSrUPT8jcjwbvhSOqD8Qk07BUDR +6RpDr94Mnaf+fMGG36Sh7iUl+i4Oh6FFar+7+b0+5Bhs2/6uVsK4A1Z3jqqfSQH8 +q8Wf5h9Ka4aqGSw4ia5G3Uw7Jsl2aDgpJ7uwJo1k8SclbMYnYdhZuo+U+esY/Fai +YdbjG+AroZ+y9TB8bMlHAgMBAAGjgY0wgYowDgYDVR0PAQH/BAQDAgWgMB0GA1Ud +JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBRuTt0lJIVKYaz76aSxl/MQOLRwfDAfBgNVHSMEGDAWgBSbUCGB95ochDrbEZlz +GGYuA7xuxjALBgNVHREEBDACggAwDQYJKoZIhvcNAQELBQADggEBABLRWZm+Lgjs +c5qDXbgOJW2pR630syY8ixR9c6HvzPVJim8mFioMX+xrlbOC6BmOUlOb9j83bTKn +aOg/0xlpxNbd8QYzgRxZmHZLULPdiNeeRvIzsrzrH88+inrmZhRXRVcHjdO6CG6t +hCdDdRiNU6GkF7dPna0xNcEOKe2wUfzd1ZtKOqzi1w+fKjSeMplZomeWgP4WRvkh +JJ/0ujlMMckgyTxRh8EEaJ35OnpXX7EdipoWhOMmiUnlPqye2icC8Y+CMdZsrod6 +nkoEQnXDCLf/Iv0qj7B9iKbxn7t3QDVxY4UILUReDuD8yrGULlGOl//aY/T3pkZ6 +R5trduZhI3o= +-----END CERTIFICATE----- diff --git a/server/etcdserver/api/v2stats/leader.go b/etcd/etcdserver/api/v2stats/leader.go similarity index 100% rename from server/etcdserver/api/v2stats/leader.go rename to etcd/etcdserver/api/v2stats/leader.go diff --git a/etcd/etcdserver/api/v2stats/queue.go b/etcd/etcdserver/api/v2stats/queue.go new file mode 100644 index 00000000000..60b7342fbf1 --- /dev/null +++ b/etcd/etcdserver/api/v2stats/queue.go @@ -0,0 +1,108 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2stats + +import ( + "sync" + "time" +) + +const ( + queueCapacity = 200 +) + +// RequestStats represent the stats for a request. +// It encapsulates the sending time and the size of the request. +type RequestStats struct { + SendingTime time.Time + Size int +} + +type statsQueue struct { + items [queueCapacity]*RequestStats + size int + front int + back int + totalReqSize int + rwl sync.RWMutex +} + +func (q *statsQueue) Len() int { + return q.size +} + +func (q *statsQueue) ReqSize() int { + return q.totalReqSize +} + +// FrontAndBack gets the front and back elements in the queue +// We must grab front and back together with the protection of the lock +func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) { + q.rwl.RLock() + defer q.rwl.RUnlock() + if q.size != 0 { + return q.items[q.front], q.items[q.back] + } + return nil, nil +} + +func (q *statsQueue) Insert(p *RequestStats) { + q.rwl.Lock() + defer q.rwl.Unlock() + + q.back = (q.back + 1) % queueCapacity // 200 + + if q.size == queueCapacity { // dequeue + q.totalReqSize -= q.items[q.front].Size + q.front = (q.back + 1) % queueCapacity + } else { + q.size++ + } + + q.items[q.back] = p + q.totalReqSize += q.items[q.back].Size +} + +// Rate function returns the package rate and byte rate +func (q *statsQueue) Rate() (float64, float64) { + front, back := q.frontAndBack() + + if front == nil || back == nil { + return 0, 0 + } + + if time.Since(back.SendingTime) > time.Second { + q.Clear() + return 0, 0 + } + + sampleDuration := back.SendingTime.Sub(front.SendingTime) + + pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second) + + br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second) + + return pr, br +} + +// Clear function clear up the statsQueue +func (q *statsQueue) Clear() { + q.rwl.Lock() + defer q.rwl.Unlock() + q.back = -1 + q.front = 0 + q.size = 0 + q.totalReqSize = 0 +} diff --git a/etcd/etcdserver/api/v2stats/server.go 
b/etcd/etcdserver/api/v2stats/server.go new file mode 100644 index 00000000000..8ef6b075a70 --- /dev/null +++ b/etcd/etcdserver/api/v2stats/server.go @@ -0,0 +1,134 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2stats + +import ( + "encoding/json" + "log" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/raft" +) + +// ServerStats 封装了关于EtcdServer及其与集群其他成员通信的各种统计信息 +type ServerStats struct { + serverStats + sync.Mutex +} + +func NewServerStats(name, id string) *ServerStats { + ss := &ServerStats{ + serverStats: serverStats{ + Name: name, + ID: id, + }, + } + now := time.Now() + ss.StartTime = now + ss.LeaderInfo.StartTime = now + ss.sendRateQueue = &statsQueue{back: -1} + ss.recvRateQueue = &statsQueue{back: -1} + return ss +} + +type serverStats struct { + Name string `json:"name"` // 该节点的name . + ID string `json:"id"` // 每个节点的唯一标识符. + State raft.StateType `json:"state"` // 该节点在Raft 协议里的角色, Leader 或Follower . + StartTime time.Time `json:"startTime"` // 该etcd server 的启动时间. + LeaderInfo struct { // + Name string `json:"leader"` // + Uptime string `json:"uptime"` // 集群当前Leader 的在任时长. + StartTime time.Time `json:"startTime"` // leader首次通信的时间 + } `json:"leaderInfo"` // + sendRateQueue *statsQueue // 发送消息的队列 + SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"` // 该节点已发送的append 请求数. + SendingPkgRate float64 `json:"sendPkgRate,omitempty"` // 该节点每秒发送的请求数( 只有Follower 才有, 并且单 节点集群没有这项数据) . 
+ SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"` // 该节点每秒发送的字节(只有Follower 才有,且单节点集群没有这项数据) . + recvRateQueue *statsQueue // 处理接受消息的队列 + RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"` // 该节点己处理的append 请求数. + RecvingPkgRate float64 `json:"recvPkgRate,omitempty"` // 该节点每秒收到的请求数(只有Follower 才有) . + RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"` // 该节点每秒收到的字节(只有Follower 才有) . +} + +func (ss *ServerStats) JSON() []byte { + ss.Lock() + stats := ss.serverStats + stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() + stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() + stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() + ss.Unlock() + b, err := json.Marshal(stats) + // TODO(jonboulle): appropriate error handling? + if err != nil { + log.Printf("stats: error marshalling etcd stats: %v", err) + } + return b +} + +// RecvAppendReq 在收到来自leader的AppendRequest后,更新ServerStats. +func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { + ss.Lock() + defer ss.Unlock() + + now := time.Now() + + ss.State = raft.StateFollower + if leader != ss.LeaderInfo.Name { + ss.LeaderInfo.Name = leader + ss.LeaderInfo.StartTime = now + } + + ss.recvRateQueue.Insert( + &RequestStats{ + SendingTime: now, + Size: reqSize, + }, + ) + ss.RecvAppendRequestCnt++ +} + +// SendAppendReq updates the ServerStats in response to an AppendRequest +// being sent by this etcd +func (ss *ServerStats) SendAppendReq(reqSize int) { + ss.Lock() + defer ss.Unlock() + + ss.becomeLeader() + + ss.sendRateQueue.Insert( + &RequestStats{ + SendingTime: time.Now(), + Size: reqSize, + }, + ) + + ss.SendAppendRequestCnt++ +} + +func (ss *ServerStats) BecomeLeader() { + ss.Lock() + defer ss.Unlock() + ss.becomeLeader() +} + +func (ss *ServerStats) becomeLeader() { + if ss.State != raft.StateLeader { + ss.State = raft.StateLeader + ss.LeaderInfo.Name = ss.ID + ss.LeaderInfo.StartTime = time.Now() + } 
+} diff --git a/etcd/etcdserver/api/v2stats/stats.go b/etcd/etcdserver/api/v2stats/stats.go new file mode 100644 index 00000000000..20d7dd0e115 --- /dev/null +++ b/etcd/etcdserver/api/v2stats/stats.go @@ -0,0 +1,26 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v2stats defines a standard interface for etcd cluster statistics. +package v2stats + +type Stats interface { + // SelfStats returns the struct representing statistics of this etcd + SelfStats() []byte + // LeaderStats returns the statistics of all followers in the cluster + // if this etcd is leader. Otherwise, nil is returned. + LeaderStats() []byte + // StoreStats returns statistics of the store backing this EtcdServer + StoreStats() []byte +} diff --git a/etcd/etcdserver/api/v2store/event.go b/etcd/etcdserver/api/v2store/event.go new file mode 100644 index 00000000000..34f9a2df70a --- /dev/null +++ b/etcd/etcdserver/api/v2store/event.go @@ -0,0 +1,72 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v2store + +const ( + Get = "get" + Create = "create" + Set = "set" + Update = "update" + Delete = "delete" + CompareAndSwap = "compareAndSwap" + CompareAndDelete = "compareAndDelete" + Expire = "expire" +) + +type Event struct { + Action string `json:"action"` + NodeExtern *NodeExtern `json:"node,omitempty"` + PrevNode *NodeExtern `json:"prevNode,omitempty"` + EtcdIndex uint64 `json:"-"` + Refresh bool `json:"refresh,omitempty"` +} + +// 节点变更事件、包括节点的创建、删除... +func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event { + n := &NodeExtern{ + Key: key, + ModifiedIndex: modifiedIndex, + CreatedIndex: createdIndex, + } + + return &Event{ + Action: action, + NodeExtern: n, + } +} + +func (e *Event) IsCreated() bool { + if e.Action == Create { + return true + } + return e.Action == Set && e.PrevNode == nil +} + +func (e *Event) Index() uint64 { + return e.NodeExtern.ModifiedIndex +} + +func (e *Event) Clone() *Event { + return &Event{ + Action: e.Action, + EtcdIndex: e.EtcdIndex, + NodeExtern: e.NodeExtern.Clone(), + PrevNode: e.PrevNode.Clone(), + } +} + +func (e *Event) SetRefresh() { + e.Refresh = true +} diff --git a/server/etcdserver/api/v2store/event_history.go b/etcd/etcdserver/api/v2store/event_history.go similarity index 94% rename from server/etcdserver/api/v2store/event_history.go rename to etcd/etcdserver/api/v2store/event_history.go index c9bcdca0513..f43638b630a 100644 --- a/server/etcdserver/api/v2store/event_history.go +++ b/etcd/etcdserver/api/v2store/event_history.go @@ -20,9 +20,10 @@ import ( "strings" "sync" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" ) +// EventHistory 历史事件 type EventHistory struct { Queue eventQueue StartIndex uint64 @@ -79,7 +80,7 @@ func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, e := 
eh.Queue.Events[i] if !e.Refresh { - ok := e.Node.Key == key + ok := e.NodeExtern.Key == key if recursive { // add tailing slash @@ -88,7 +89,7 @@ func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, nkey = nkey + "/" } - ok = ok || strings.HasPrefix(e.Node.Key, nkey) + ok = ok || strings.HasPrefix(e.NodeExtern.Key, nkey) } if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir { @@ -125,5 +126,4 @@ func (eh *EventHistory) clone() *EventHistory { Queue: clonedQueue, LastIndex: eh.LastIndex, } - } diff --git a/server/etcdserver/api/v2store/event_queue.go b/etcd/etcdserver/api/v2store/event_queue.go similarity index 95% rename from server/etcdserver/api/v2store/event_queue.go rename to etcd/etcdserver/api/v2store/event_queue.go index 7ea03de8c9a..aa2a645d6ff 100644 --- a/server/etcdserver/api/v2store/event_queue.go +++ b/etcd/etcdserver/api/v2store/event_queue.go @@ -26,7 +26,7 @@ func (eq *eventQueue) insert(e *Event) { eq.Events[eq.Back] = e eq.Back = (eq.Back + 1) % eq.Capacity - if eq.Size == eq.Capacity { //dequeue + if eq.Size == eq.Capacity { // dequeue eq.Front = (eq.Front + 1) % eq.Capacity } else { eq.Size++ diff --git a/etcd/etcdserver/api/v2store/node.go b/etcd/etcdserver/api/v2store/node.go new file mode 100644 index 00000000000..67d44b638d6 --- /dev/null +++ b/etcd/etcdserver/api/v2store/node.go @@ -0,0 +1,359 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+	"path"
+	"sort"
+	"time"
+
+	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+
+	"github.com/jonboulle/clockwork"
+)
+
+// Meanings of the result values returned by the Compare function.
+const (
+	CompareMatch         = iota // both value and index match
+	CompareIndexNotMatch        // index does not match
+	CompareValueNotMatch        // value does not match
+	CompareNotMatch             // neither value nor index matches
+)
+
+var Permanent time.Time // zero time.Time, used to mark a node as permanent (no expiry)
+
+// node is the basic element in the store system.
+// A key-value pair will have a string value
+// A directory will have a children map
+type node struct {
+	Path          string
+	CreatedIndex  uint64
+	ModifiedIndex uint64
+	Parent        *node `json:"-"` // must NOT be encoded — avoids a cyclic reference in JSON.
+	ExpireTime    time.Time
+	Value         string           // value of a key-value pair (files only)
+	Children      map[string]*node // child nodes of a directory
+	store         *store           // reference back to the store this node belongs to.
+}
+
+// newKV creates a Key-Value pair
+func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+	return &node{
+		Path:          nodePath,
+		CreatedIndex:  createdIndex,
+		ModifiedIndex: createdIndex,
+		Parent:        parent,
+		store:         store,
+		ExpireTime:    expireTime,
+		Value:         value,
+	}
+}
+
+// Write function set the value of the node to the given value.
+// If the receiver node is a directory, a "Not A File" error will be returned.
+func (n *node) Write(value string, index uint64) *v2error.Error { + if n.IsDir() { + return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex) + } + + n.Value = value + n.ModifiedIndex = index + + return nil +} + +// Remove 清理node包含的子数据 +func (n *node) Remove(dir, recursive bool, callback func(path string)) *v2error.Error { + if !n.IsDir() { // key-value pair + _, name := path.Split(n.Path) + + // find its parent and remove the node from the map + if n.Parent != nil && n.Parent.Children[name] == n { + delete(n.Parent.Children, name) + } + + if callback != nil { + callback(n.Path) + } + + if !n.IsPermanent() { + n.store.ttlKeyHeap.remove(n) + } + + return nil + } + + if !dir { + // cannot delete a directory without dir set to true + return v2error.NewError(v2error.EcodeNotFile, n.Path, n.store.CurrentIndex) + } + + if len(n.Children) != 0 && !recursive { + // cannot delete a directory if it is not empty and the operation + // is not recursive + return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex) + } + + for _, child := range n.Children { // delete all children + child.Remove(true, true, callback) + } + + // delete self + _, name := path.Split(n.Path) + if n.Parent != nil && n.Parent.Children[name] == n { + delete(n.Parent.Children, name) + + if callback != nil { + callback(n.Path) + } + + if !n.IsPermanent() { + n.store.ttlKeyHeap.remove(n) + } + } + + return nil +} + +func (n *node) UpdateTTL(expireTime time.Time) { + if !n.IsPermanent() { + if expireTime.IsZero() { + // from ttl to permanent + n.ExpireTime = expireTime + // remove from ttl heap + n.store.ttlKeyHeap.remove(n) + return + } + + // update ttl + n.ExpireTime = expireTime + // update ttl heap + n.store.ttlKeyHeap.update(n) + return + } + + if expireTime.IsZero() { + return + } + + // from permanent to ttl + n.ExpireTime = expireTime + // push into ttl heap + n.store.ttlKeyHeap.push(n) +} + +// Compare function compares node index and value with provided ones. 
+// second result value explains result and equals to one of Compare.. constants +func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) { + indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex + valueMatch := prevValue == "" || n.Value == prevValue + ok = valueMatch && indexMatch + switch { + case valueMatch && indexMatch: + which = CompareMatch + case indexMatch && !valueMatch: + which = CompareValueNotMatch + case valueMatch && !indexMatch: + which = CompareIndexNotMatch + default: + which = CompareNotMatch + } + return ok, which +} + +// Clone function clone the node recursively and return the new node. +// If the node is a directory, it will clone all the content under this directory. +// If the node is a key-value pair, it will clone the pair. +func (n *node) Clone() *node { + if !n.IsDir() { + newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime) + newkv.ModifiedIndex = n.ModifiedIndex + return newkv + } + + clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime) + clone.ModifiedIndex = n.ModifiedIndex + + for key, child := range n.Children { + clone.Children[key] = child.Clone() + } + + return clone +} + +// recoverAndclean function help to do recovery. +// Two things need to be done: 1. recovery structure; 2. delete expired nodes +// +// If the node is a directory, it will help recover children's parent pointer and recursively +// call this function on its children. +// We check the expire last since we need to recover the whole structure first and add all the +// notifications into the event history. 
+func (n *node) recoverAndclean() { + if n.IsDir() { + for _, child := range n.Children { + child.Parent = n + child.store = n.store + child.recoverAndclean() + } + } + + if !n.ExpireTime.IsZero() { + n.store.ttlKeyHeap.push(n) + } +} + +// -------------------------------------- OVER ----------------------------------------------- + +// List 返回当前节点下的所有节点 +func (n *node) List() ([]*node, *v2error.Error) { + if !n.IsDir() { + return nil, v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex) + } + + nodes := make([]*node, len(n.Children)) + + i := 0 + for _, node := range n.Children { + nodes[i] = node + i++ + } + + return nodes, nil +} + +// GetChild 返回目录节点 +func (n *node) GetChild(name string) (*node, *v2error.Error) { + if !n.IsDir() { + return nil, v2error.NewError(v2error.EcodeNotDir, n.Path, n.store.CurrentIndex) + } + + child, ok := n.Children[name] + + if ok { + return child, nil + } + + return nil, nil +} + +// Add 添加一个子节点 +func (n *node) Add(child *node) *v2error.Error { + if !n.IsDir() { // /0/members/8e9e05c52164694d + return v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex) + } + + _, name := path.Split(child.Path) + + if _, ok := n.Children[name]; ok { + return v2error.NewError(v2error.EcodeNodeExist, "", n.store.CurrentIndex) + } + + n.Children[name] = child + + return nil +} + +// Repr 递归的 封信过期时间、剩余时间,跳过隐藏节点 +func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern { + if n.IsDir() { + node := &NodeExtern{ + Key: n.Path, + Dir: true, + ModifiedIndex: n.ModifiedIndex, + CreatedIndex: n.CreatedIndex, + } + node.Expiration, node.TTL = n.expirationAndTTL(clock) // 过期时间和 剩余时间 TTL + if !recursive { + return node + } + children, _ := n.List() + node.ExternNodes = make(NodeExterns, len(children)) + i := 0 + for _, child := range children { + if child.IsHidden() { // 跳过隐藏节点 + continue + } + node.ExternNodes[i] = child.Repr(recursive, sorted, clock) + i++ + } + node.ExternNodes = node.ExternNodes[:i] + if sorted { + 
sort.Sort(node.ExternNodes) + } + return node + } + + // since n.Value could be changed later, so we need to copy the value out + value := n.Value + node := &NodeExtern{ + Key: n.Path, + Value: &value, + ModifiedIndex: n.ModifiedIndex, + CreatedIndex: n.CreatedIndex, + } + node.Expiration, node.TTL = n.expirationAndTTL(clock) // 过期时间和 剩余时间 TTL + return node +} + +// 过期时间和 剩余时间 TTL +func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) { + if !n.IsPermanent() { + ttlN := n.ExpireTime.Sub(clock.Now()) // 还有多长时间过期 + ttl := ttlN / time.Second + if (ttlN % time.Second) > 0 { + ttl++ // 整除+1 + } + t := n.ExpireTime.UTC() + return &t, int64(ttl) + } + return nil, 0 +} + +// newDir 创建一个目录 +func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node { + return &node{ + Path: nodePath, + CreatedIndex: createdIndex, + ModifiedIndex: createdIndex, + Parent: parent, + ExpireTime: expireTime, + Children: make(map[string]*node), + store: store, + } +} + +// IsHidden 判断节点名字是不是以 _ 开头 /0/members/_hidden +func (n *node) IsHidden() bool { + _, name := path.Split(n.Path) + return name[0] == '_' +} + +// IsPermanent 函数检查该节点是否为永久节点. +func (n *node) IsPermanent() bool { + // 我们使用一个未初始化的time.Time来表示该节点是一个永久的节点. 未初始化的time.Time应该等于0. + return n.ExpireTime.IsZero() +} + +func (n *node) IsDir() bool { + return n.Children != nil +} + +func (n *node) Read() (string, *v2error.Error) { + if n.IsDir() { + return "", v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex) + } + + return n.Value, nil +} diff --git a/etcd/etcdserver/api/v2store/over_node_extern.go b/etcd/etcdserver/api/v2store/over_node_extern.go new file mode 100644 index 00000000000..4f045282060 --- /dev/null +++ b/etcd/etcdserver/api/v2store/over_node_extern.go @@ -0,0 +1,112 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+	"sort"
+	"time"
+
+	"github.com/jonboulle/clockwork"
+)
+
+var _ node
+
+// NodeExtern is the external representation of the internal node, with additional fields; TTL is the time to live in seconds.
+type NodeExtern struct {
+	Key           string      `json:"key,omitempty"` // e.g. /0/members/8e9e05c52164694d/raftAttributes
+	Value         *string     `json:"value,omitempty"`
+	Dir           bool        `json:"dir,omitempty"`
+	Expiration    *time.Time  `json:"expiration,omitempty"`
+	TTL           int64       `json:"ttl,omitempty"`
+	ExternNodes   NodeExterns `json:"nodes,omitempty"`
+	ModifiedIndex uint64      `json:"modifiedIndex,omitempty"`
+	CreatedIndex  uint64      `json:"createdIndex,omitempty"`
+}
+
+// &v2store.NodeExtern{Key: "/1234", ExternNodes: []*v2store.NodeExtern{
+//	{Key: "/1234/attributes", Value: stringp(`{"name":"node1","clientURLs":null}`)},
+//	{Key: "/1234/raftAttributes", Value: stringp(`{"peerURLs":null}`)},
+// }}
+
+// loadInternalNode fills eNode with the data of the internal node n (e.g. n: /0/members).
+func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) {
+	if n.IsDir() {
+		eNode.Dir = true
+		children, _ := n.List()
+		eNode.ExternNodes = make(NodeExterns, len(children))
+		// do not reuse the index from the children slice directly — hidden nodes must be skipped.
+ i := 0 + for _, child := range children { + if child.IsHidden() { + continue + } + eNode.ExternNodes[i] = child.Repr(recursive, sorted, clock) + i++ + } + // 消除隐藏节点 + eNode.ExternNodes = eNode.ExternNodes[:i] + if sorted { + sort.Sort(eNode.ExternNodes) + } + } else { + value, _ := n.Read() + eNode.Value = &value + } + eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock) // 过期时间和 剩余时间 TTL +} + +func (eNode *NodeExtern) Clone() *NodeExtern { + if eNode == nil { + return nil + } + nn := &NodeExtern{ + Key: eNode.Key, + Dir: eNode.Dir, + TTL: eNode.TTL, + ModifiedIndex: eNode.ModifiedIndex, + CreatedIndex: eNode.CreatedIndex, + } + if eNode.Value != nil { + s := *eNode.Value + nn.Value = &s + } + if eNode.Expiration != nil { + t := *eNode.Expiration + nn.Expiration = &t + } + if eNode.ExternNodes != nil { + nn.ExternNodes = make(NodeExterns, len(eNode.ExternNodes)) + for i, n := range eNode.ExternNodes { + nn.ExternNodes[i] = n.Clone() + } + } + return nn +} + +type NodeExterns []*NodeExtern + +// interfaces for sorting + +func (ns NodeExterns) Len() int { + return len(ns) +} + +func (ns NodeExterns) Less(i, j int) bool { + return ns[i].Key < ns[j].Key +} + +func (ns NodeExterns) Swap(i, j int) { + ns[i], ns[j] = ns[j], ns[i] +} diff --git a/etcd/etcdserver/api/v2store/over_watcher.go b/etcd/etcdserver/api/v2store/over_watcher.go new file mode 100644 index 00000000000..1e51c908e77 --- /dev/null +++ b/etcd/etcdserver/api/v2store/over_watcher.go @@ -0,0 +1,79 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+type Watcher interface {
+	EventChan() chan *Event
+	StartIndex() uint64 // EtcdIndex at the time the watch was created
+	Remove()
+}
+
+type watcher struct {
+	eventChan  chan *Event // after registration, events are delivered to the watcher through this chan
+	stream     bool        // streaming watch vs. one-shot watch
+	recursive  bool        // whether the watch is recursive
+	sinceIndex uint64      // only events at or after this index are delivered
+	startIndex uint64
+	hub        *watcherHub
+	removed    bool   // whether the watcher has been removed
+	remove     func() // callback invoked on removal
+}
+
+func (w *watcher) EventChan() chan *Event {
+	return w.eventChan
+}
+
+func (w *watcher) StartIndex() uint64 {
+	return w.startIndex
+}
+
+// notify notifies the watcher; it returns true if the watcher is interested in the given path.
+func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
+	// originalPath corresponds to case 1 below; deleted corresponds to case 3.
+
+	// A watcher is interested in a path in three cases, all subject to one condition: the event occurred at or after the watcher's sinceIndex.
+	// 1. The event's path is the path the watcher is watching. E.g. if the watcher watches "/foo" and the event occurs at "/foo", the watcher must be interested.
+	// 2. The watcher is recursive and is interested in events under its watched path. E.g. if watcher A watches "/foo" recursively, it is interested in events at "/foo/bar".
+	// 3. When we delete a directory, we must force-notify all watchers watching files under it. E.g. a watcher watches "/foo/bar" and we delete "/foo"; even though "/foo" is not the watched path, that watcher should be notified.
+	if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
+		// We cannot block here if eventChan is at capacity, otherwise etcd hangs. eventChan fills up when notifications arrive faster than we can send; if that happens, we close the channel.
+		select {
+		case w.eventChan <- e:
+		default:
+			w.remove() // remove the watcher and close the chan
+		}
+		return true
+	}
+	return false
+}
+
+// Remove removes the watcher.
+func (w *watcher) Remove() {
+	w.hub.mutex.Lock()
+	defer w.hub.mutex.Unlock()
+
+	close(w.eventChan)
+	if w.remove != nil {
+		w.remove()
+	}
+}
+
+// nopWatcher is a watcher that receives nothing, always blocking.
+type nopWatcher struct{} + +func NewNopWatcher() Watcher { return &nopWatcher{} } +func (w *nopWatcher) EventChan() chan *Event { return nil } +func (w *nopWatcher) StartIndex() uint64 { return 0 } +func (w *nopWatcher) Remove() {} diff --git a/etcd/etcdserver/api/v2store/stats.go b/etcd/etcdserver/api/v2store/stats.go new file mode 100644 index 00000000000..10e5392a23c --- /dev/null +++ b/etcd/etcdserver/api/v2store/stats.go @@ -0,0 +1,124 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2store + +import ( + "encoding/json" + "sync/atomic" +) + +const ( + SetSuccess = iota + SetFail + DeleteSuccess + DeleteFail + CreateSuccess + CreateFail + UpdateSuccess + UpdateFail + CompareAndSwapSuccess + CompareAndSwapFail + GetSuccess + GetFail + ExpireCount + CompareAndDeleteSuccess + CompareAndDeleteFail +) + +// 请求状态记录 +type Stats struct { + GetSuccess uint64 `json:"getsSuccess"` // 获取请求的数量 + GetFail uint64 `json:"getsFail"` + SetSuccess uint64 `json:"setsSuccess"` // set 请求数 + SetFail uint64 `json:"setsFail"` + DeleteSuccess uint64 `json:"deleteSuccess"` // delete 请求数 + DeleteFail uint64 `json:"deleteFail"` + UpdateSuccess uint64 `json:"updateSuccess"` // update请求数 + UpdateFail uint64 `json:"updateFail"` + CreateSuccess uint64 `json:"createSuccess"` // create请求数 + CreateFail uint64 `json:"createFail"` + CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"` // testAndSet 请求数 + CompareAndSwapFail uint64 `json:"compareAndSwapFail"` + CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"` // compareAndDelete请求数 + CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"` + ExpireCount uint64 `json:"expireCount"` + Watchers uint64 `json:"watchers"` +} + +func newStats() *Stats { + s := new(Stats) + return s +} + +func (s *Stats) clone() *Stats { + return &Stats{ + GetSuccess: atomic.LoadUint64(&s.GetSuccess), + GetFail: atomic.LoadUint64(&s.GetFail), + SetSuccess: atomic.LoadUint64(&s.SetSuccess), + SetFail: atomic.LoadUint64(&s.SetFail), + DeleteSuccess: atomic.LoadUint64(&s.DeleteSuccess), + DeleteFail: atomic.LoadUint64(&s.DeleteFail), + UpdateSuccess: atomic.LoadUint64(&s.UpdateSuccess), + UpdateFail: atomic.LoadUint64(&s.UpdateFail), + CreateSuccess: atomic.LoadUint64(&s.CreateSuccess), + CreateFail: atomic.LoadUint64(&s.CreateFail), + CompareAndSwapSuccess: atomic.LoadUint64(&s.CompareAndSwapSuccess), + CompareAndSwapFail: atomic.LoadUint64(&s.CompareAndSwapFail), + CompareAndDeleteSuccess: 
atomic.LoadUint64(&s.CompareAndDeleteSuccess), + CompareAndDeleteFail: atomic.LoadUint64(&s.CompareAndDeleteFail), + ExpireCount: atomic.LoadUint64(&s.ExpireCount), + Watchers: atomic.LoadUint64(&s.Watchers), + } +} + +func (s *Stats) toJson() []byte { + b, _ := json.Marshal(s) + return b +} + +func (s *Stats) Inc(field int) { + switch field { + case SetSuccess: + atomic.AddUint64(&s.SetSuccess, 1) + case SetFail: + atomic.AddUint64(&s.SetFail, 1) + case CreateSuccess: + atomic.AddUint64(&s.CreateSuccess, 1) + case CreateFail: + atomic.AddUint64(&s.CreateFail, 1) + case DeleteSuccess: + atomic.AddUint64(&s.DeleteSuccess, 1) + case DeleteFail: + atomic.AddUint64(&s.DeleteFail, 1) + case GetSuccess: + atomic.AddUint64(&s.GetSuccess, 1) + case GetFail: + atomic.AddUint64(&s.GetFail, 1) + case UpdateSuccess: + atomic.AddUint64(&s.UpdateSuccess, 1) + case UpdateFail: + atomic.AddUint64(&s.UpdateFail, 1) + case CompareAndSwapSuccess: + atomic.AddUint64(&s.CompareAndSwapSuccess, 1) + case CompareAndSwapFail: + atomic.AddUint64(&s.CompareAndSwapFail, 1) + case CompareAndDeleteSuccess: + atomic.AddUint64(&s.CompareAndDeleteSuccess, 1) + case CompareAndDeleteFail: + atomic.AddUint64(&s.CompareAndDeleteFail, 1) + case ExpireCount: + atomic.AddUint64(&s.ExpireCount, 1) + } +} diff --git a/etcd/etcdserver/api/v2store/store.go b/etcd/etcdserver/api/v2store/store.go new file mode 100644 index 00000000000..dc3300fc0b5 --- /dev/null +++ b/etcd/etcdserver/api/v2store/store.go @@ -0,0 +1,741 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v2store + +import ( + "encoding/json" + "fmt" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/jonboulle/clockwork" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" +) + +// The 当store第一次被初始化时,要设置的默认版本. +const defaultVersion = 2 + +var minExpireTime time.Time + +func init() { + minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z") +} + +// Store Etcd是存储有如下特点: +// 1、采用kv型数据存储,一般情况下比关系型数据库快. +// 2、支持动态存储(内存)以及静态存储(磁盘). +// 3、分布式存储,可集成为多节点集群. +// 4、存储方式,采用类似目录结构. +// 1)只有叶子节点才能真正存储数据,相当于文件. +// 2)叶子节点的父节点一定是目录,目录不能存储数据. +type Store interface { + Version() int + Index() uint64 + Get(nodePath string, recursive, sorted bool) (*Event, error) + Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) + Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) + Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) + CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, value string, expireOpts TTLOptionSet) (*Event, error) + Delete(nodePath string, dir, recursive bool) (*Event, error) + CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) + Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error) + Save() ([]byte, error) + Recovery(state []byte) error + Clone() Store + SaveNoCopy() ([]byte, error) + JsonStats() []byte + DeleteExpiredKeys(cutoff time.Time) + HasTTLKeys() bool +} + +type TTLOptionSet struct { + ExpireTime time.Time // key的有效期 + Refresh bool +} + +type store struct { + Root *node // 根节点 + WatcherHub *watcherHub // 关于node的所有key的watcher + CurrentIndex uint64 // 对应存储内容的index + Stats *Stats + CurrentVersion int // 最新数据的版本 + ttlKeyHeap *ttlKeyHeap // 用于数据恢复的(需手动操作) 过期时间的最小堆 + worldLock 
sync.RWMutex // 停止当前存储的world锁 + clock clockwork.Clock // + readonlySet types.Set // 只读操作 +} + +// New 创建一个存储空间,给定的命名空间将被创建为初始目录. +func New(namespaces ...string) Store { + s := newStore(namespaces...) + s.clock = clockwork.NewRealClock() + return s +} + +// OK /0 /1 +func newStore(namespaces ...string) *store { + s := new(store) + s.CurrentVersion = defaultVersion // 2 + s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent) // 0 永久性 //创建其在etcd中对应的目录,第一个目录是以(/) + for _, namespace := range namespaces { + s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent)) + } + s.Stats = newStats() + s.WatcherHub = newWatchHub(1000) + s.ttlKeyHeap = newTtlKeyHeap() + s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...) + return s +} + +// Set creates or replace the node at nodePath. +func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) { + var err *v2error.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(SetSuccess) + return + } + + s.Stats.Inc(SetFail) + }() + + // Get prevNode value + n, getErr := s.internalGet(nodePath) + if getErr != nil && getErr.ErrorCode != v2error.EcodeKeyNotFound { + err = getErr + return nil, err + } + + if expireOpts.Refresh { + if getErr != nil { + err = getErr + return nil, err + } + value = n.Value + } + + // Set new value + e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set) + if err != nil { + return nil, err + } + e.EtcdIndex = s.CurrentIndex + + // Put prevNode into event + if getErr == nil { + prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) + prev.NodeExtern.loadInternalNode(n, false, false, s.clock) + e.PrevNode = prev.NodeExtern + } + + if !expireOpts.Refresh { + s.WatcherHub.notify(e) + } else { + e.SetRefresh() + s.WatcherHub.add(e) + } + + return e, nil +} + +// returns user-readable cause of failed comparison +func getCompareFailCause(n *node, which int, 
prevValue string, prevIndex uint64) string { + switch which { + case CompareIndexNotMatch: + return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex) + case CompareValueNotMatch: + return fmt.Sprintf("[%v != %v]", prevValue, n.Value) + default: + return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex) + } +} + +func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, + value string, expireOpts TTLOptionSet) (*Event, error, +) { + var err *v2error.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(CompareAndSwapSuccess) + return + } + + s.Stats.Inc(CompareAndSwapFail) + }() + + nodePath = path.Clean(path.Join("/", nodePath)) + // we do not allow the user to change "/" + if s.readonlySet.Contains(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) + } + + n, err := s.internalGet(nodePath) + if err != nil { + return nil, err + } + if n.IsDir() { // can only compare and swap file + err = v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex) + return nil, err + } + + // If both of the prevValue and prevIndex are given, we will test both of them. + // Command will be executed, only if both of the tests are successful. 
+ if ok, which := n.Compare(prevValue, prevIndex); !ok { + cause := getCompareFailCause(n, which, prevValue, prevIndex) + err = v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex) + return nil, err + } + + if expireOpts.Refresh { + value = n.Value + } + + // update etcd index + s.CurrentIndex++ + + e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex) + e.EtcdIndex = s.CurrentIndex + e.PrevNode = n.Repr(false, false, s.clock) + eNode := e.NodeExtern + + // if test succeed, write the value + if err := n.Write(value, s.CurrentIndex); err != nil { + return nil, err + } + n.UpdateTTL(expireOpts.ExpireTime) + + // copy the value for safety + valueCopy := value + eNode.Value = &valueCopy + eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) // 过期时间和 剩余时间 TTL + + if !expireOpts.Refresh { + s.WatcherHub.notify(e) + } else { + e.SetRefresh() + s.WatcherHub.add(e) + } + + return e, nil +} + +// Delete 删除节点,并删除该节点包含的 /0/members/8e9e05c52164694d true,true +func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) { + var err *v2error.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(DeleteSuccess) + return + } + + s.Stats.Inc(DeleteFail) + }() + + nodePath = path.Clean(path.Join("/", nodePath)) + if s.readonlySet.Contains(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) + } + + // 递归意味着dir + if recursive { + dir = true + } + + n, err := s.internalGet(nodePath) + if err != nil { // 如果该节点不存在,则返回错误 + return nil, err + } + + nextIndex := s.CurrentIndex + 1 + e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex) + e.EtcdIndex = nextIndex + e.PrevNode = n.Repr(false, false, s.clock) + eNode := e.NodeExtern + + if n.IsDir() { + eNode.Dir = true + } + callback := func(path string) { + s.WatcherHub.notifyWatchers(e, path, true) + } + + err = n.Remove(dir, recursive, callback) + if err != nil { + return nil, err + } + 
s.CurrentIndex++ + s.WatcherHub.notify(e) // 通知上层 + + return e, nil +} + +func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) { + var err *v2error.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(CompareAndDeleteSuccess) + return + } + + s.Stats.Inc(CompareAndDeleteFail) + }() + + nodePath = path.Clean(path.Join("/", nodePath)) + + n, err := s.internalGet(nodePath) + if err != nil { // if the node does not exist, return error + return nil, err + } + if n.IsDir() { // can only compare and delete file + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex) + } + + // If both of the prevValue and prevIndex are given, we will test both of them. + // Command will be executed, only if both of the tests are successful. + if ok, which := n.Compare(prevValue, prevIndex); !ok { + cause := getCompareFailCause(n, which, prevValue, prevIndex) + return nil, v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex) + } + + // update etcd index + s.CurrentIndex++ + + e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex) + e.EtcdIndex = s.CurrentIndex + e.PrevNode = n.Repr(false, false, s.clock) + + callback := func(path string) { // notify function + // notify the watchers with deleted set true + s.WatcherHub.notifyWatchers(e, path, true) + } + + err = n.Remove(false, false, callback) + if err != nil { + return nil, err + } + + s.WatcherHub.notify(e) + + return e, nil +} + +func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) { + s.worldLock.RLock() + defer s.worldLock.RUnlock() + + key = path.Clean(path.Join("/", key)) + if sinceIndex == 0 { + sinceIndex = s.CurrentIndex + 1 + } + // WatcherHub does not know about the current index, so we need to pass it in + w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex) + if err != nil { + return nil, err + } + + 
return w, nil +} + +// walk 遍历所有nodePath并在每个目录上应用walkFunc +func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *v2error.Error)) (*node, *v2error.Error) { + components := strings.Split(nodePath, "/") + + curr := s.Root + var err *v2error.Error + + for i := 1; i < len(components); i++ { + if len(components[i]) == 0 { // 忽略空字符 + return curr, nil + } + + curr, err = walkFunc(curr, components[i]) + if err != nil { + return nil, err + } + } + + return curr, nil +} + +// Update updates the value/ttl of the node. +// If the node is a file, the value and the ttl can be updated. +// If the node is a directory, only the ttl can be updated. +func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) { + var err *v2error.Error + + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(UpdateSuccess) + return + } + + s.Stats.Inc(UpdateFail) + }() + + nodePath = path.Clean(path.Join("/", nodePath)) + // we do not allow the user to change "/" + if s.readonlySet.Contains(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) + } + + currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 + + n, err := s.internalGet(nodePath) + if err != nil { // if the node does not exist, return error + return nil, err + } + if n.IsDir() && len(newValue) != 0 { + // if the node is a directory, we cannot update value to non-empty + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex) + } + + if expireOpts.Refresh { + newValue = n.Value + } + + e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex) + e.EtcdIndex = nextIndex + e.PrevNode = n.Repr(false, false, s.clock) + eNode := e.NodeExtern + + if err := n.Write(newValue, nextIndex); err != nil { + return nil, fmt.Errorf("nodePath %v : %v", nodePath, err) + } + + if n.IsDir() { + eNode.Dir = true + } else { + // copy the value for safety + newValueCopy := newValue + 
eNode.Value = &newValueCopy + } + + // update ttl + n.UpdateTTL(expireOpts.ExpireTime) + + eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) // 过期时间和 剩余时间 TTL + + if !expireOpts.Refresh { + s.WatcherHub.notify(e) + } else { + e.SetRefresh() + s.WatcherHub.add(e) + } + + s.CurrentIndex = nextIndex + + return e, nil +} + +// DeleteExpiredKeys will delete all expired keys +func (s *store) DeleteExpiredKeys(cutoff time.Time) { + s.worldLock.Lock() + defer s.worldLock.Unlock() + + for { + node := s.ttlKeyHeap.top() + if node == nil || node.ExpireTime.After(cutoff) { + break + } + + s.CurrentIndex++ + e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex) + e.EtcdIndex = s.CurrentIndex + e.PrevNode = node.Repr(false, false, s.clock) + if node.IsDir() { + e.NodeExtern.Dir = true + } + + callback := func(path string) { // notify function + // notify the watchers with deleted set true + s.WatcherHub.notifyWatchers(e, path, true) + } + + s.ttlKeyHeap.pop() + node.Remove(true, true, callback) + + s.Stats.Inc(ExpireCount) + + s.WatcherHub.notify(e) + } +} + +// Save saves the static state of the store system. +// It will not be able to save the state of watchers. +// It will not save the parent field of the node. Or there will +// be cyclic dependencies issue for the json package. 
+func (s *store) Save() ([]byte, error) { + b, err := json.Marshal(s.Clone()) + if err != nil { + return nil, err + } + + return b, nil +} + +func (s *store) SaveNoCopy() ([]byte, error) { + b, err := json.Marshal(s) + if err != nil { + return nil, err + } + + return b, nil +} + +func (s *store) Clone() Store { + s.worldLock.RLock() + + clonedStore := newStore() + clonedStore.CurrentIndex = s.CurrentIndex + clonedStore.Root = s.Root.Clone() + clonedStore.WatcherHub = s.WatcherHub.clone() + clonedStore.Stats = s.Stats.clone() + clonedStore.CurrentVersion = s.CurrentVersion + + s.worldLock.RUnlock() + return clonedStore +} + +// Recovery recovers the store system from a static state +// It needs to recover the parent field of the nodes. +// It needs to delete the expired nodes since the saved time and also +// needs to create monitoring goroutines. +func (s *store) Recovery(state []byte) error { + s.worldLock.Lock() + defer s.worldLock.Unlock() + err := json.Unmarshal(state, s) + if err != nil { + return err + } + + s.ttlKeyHeap = newTtlKeyHeap() + + s.Root.recoverAndclean() + return nil +} + +func (s *store) JsonStats() []byte { + s.Stats.Watchers = uint64(s.WatcherHub.count) + return s.Stats.toJson() +} + +func (s *store) HasTTLKeys() bool { + s.worldLock.RLock() + defer s.worldLock.RUnlock() + return s.ttlKeyHeap.Len() != 0 +} + +// ------------------------------------------ OVER -------------------------------------------------------- + +// checkDir 检查目录存不存在,不存在创建 +func (s *store) checkDir(parent *node, dirName string) (*node, *v2error.Error) { + node, ok := parent.Children[dirName] + if ok { + if node.IsDir() { + return node, nil + } + return nil, v2error.NewError(v2error.EcodeNotDir, node.Path, s.CurrentIndex) + } + n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent) + parent.Children[dirName] = n + return n, nil +} + +// /0/members/8e9e05c52164694d/raftAttributes创建节点 +func (s *store) internalCreate(nodePath string, dir bool, 
value string, unique, replace bool, expireTime time.Time, action string) (*Event, *v2error.Error) { + currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 + + if unique { // 在节点路径下附加唯一的项目 + nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10)) + } + + nodePath = path.Clean(path.Join("/", nodePath)) + + // 我们不允许用户修改"/". + if s.readonlySet.Contains(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", currIndex) + } + + if expireTime.Before(minExpireTime) { + expireTime = Permanent + } + + dirName, nodeName := path.Split(nodePath) + d, err := s.walk(dirName, s.checkDir) // 检查节点目录,以及创建 + if err != nil { + s.Stats.Inc(SetFail) + err.Index = currIndex + return nil, err + } + // create, /0/members/8e9e05c52164694d/raftAttributes, 1, 1 + e := newEvent(action, nodePath, nextIndex, nextIndex) + eNode := e.NodeExtern + + n, _ := d.GetChild(nodeName) + + if n != nil { + if replace { + if n.IsDir() { + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex) + } + e.PrevNode = n.Repr(false, false, s.clock) + + if err := n.Remove(false, false, nil); err != nil { + return nil, err + } + } else { + return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, currIndex) + } + } + + if !dir { + valueCopy := value + eNode.Value = &valueCopy + // 生成新的树节点node,作为叶子节点 + n = newKV(s, nodePath, value, nextIndex, d, expireTime) + } else { + eNode.Dir = true + n = newDir(s, nodePath, nextIndex, d, expireTime) + } + + if err := d.Add(n); err != nil { // 添加父节点中,即挂到map中 + return nil, err + } + + if !n.IsPermanent() { // 存在有效期 + s.ttlKeyHeap.push(n) + eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) // 过期时间和 剩余时间 TTL + } + + s.CurrentIndex = nextIndex + + return e, nil +} + +// Version 检索存储的当前版本. <= CurrentIndex +func (s *store) Version() int { + return s.CurrentVersion +} + +// Index 检索存储的当前索引. 
+func (s *store) Index() uint64 { + s.worldLock.RLock() + defer s.worldLock.RUnlock() + return s.CurrentIndex +} + +// Get 返回一个get事件.如果递归为真,它将返回节点路径下的所有内容.如果sorted为真,它将按键对内容进行排序. +func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) { + // /0/members + var err *v2error.Error + + s.worldLock.RLock() + defer s.worldLock.RUnlock() + + defer func() { + if err == nil { + s.Stats.Inc(GetSuccess) + return + } + + s.Stats.Inc(GetFail) + }() + + n, err := s.internalGet(nodePath) // 没有 /0/members 这个node + if err != nil { + return nil, err + } + + e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) + e.EtcdIndex = s.CurrentIndex // 给事件分配索引 + e.NodeExtern.loadInternalNode(n, recursive, sorted, s.clock) // 加载node,主要是获取node中数据 + + return e, nil +} + +// Create 在nodePath创建节点.创建将有助于创建没有ttl的中间目录.如果该节点已经存在,创建将失败. 如果路径上的任何节点是一个文件,创建将失败. +func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) { + var err *v2error.Error + s.worldLock.Lock() + defer s.worldLock.Unlock() + + defer func() { + if err == nil { + s.Stats.Inc(CreateSuccess) + return + } + + s.Stats.Inc(CreateFail) + }() + + // 创建一个内存节点, 有ttl放入ttlKeyHeap, 返回一个创建事件 + e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create) + if err != nil { + return nil, err + } + + e.EtcdIndex = s.CurrentIndex + s.WatcherHub.notify(e) // ✅ + + return e, nil +} + +// InternalGet 获取给定nodePath的节点. 
+func (s *store) internalGet(nodePath string) (*node, *v2error.Error) { + nodePath = path.Clean(path.Join("/", nodePath)) // /0/members + + walkFunc := func(parent *node, name string) (*node, *v2error.Error) { + if !parent.IsDir() { + err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex) + return nil, err + } + + child, ok := parent.Children[name] + if ok { + return child, nil + } + + return nil, v2error.NewError(v2error.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex) + } + + n, err := s.walk(nodePath, walkFunc) + if err != nil { + return nil, err + } + return n, nil +} diff --git a/server/etcdserver/api/v2store/ttl_key_heap.go b/etcd/etcdserver/api/v2store/ttl_key_heap.go similarity index 96% rename from server/etcdserver/api/v2store/ttl_key_heap.go rename to etcd/etcdserver/api/v2store/ttl_key_heap.go index 477d2b9f3aa..f5fb5013a52 100644 --- a/server/etcdserver/api/v2store/ttl_key_heap.go +++ b/etcd/etcdserver/api/v2store/ttl_key_heap.go @@ -16,7 +16,7 @@ package v2store import "container/heap" -// An TTLKeyHeap is a min-heap of TTLKeys order by expiration time +// TTLKeyHeap 过期时间的最小堆 type ttlKeyHeap struct { array []*node keyMap map[*node]int @@ -77,6 +77,7 @@ func (h *ttlKeyHeap) pop() *node { return n } +// 存入一个有过期时间的node func (h *ttlKeyHeap) push(x interface{}) { heap.Push(h, x) } diff --git a/etcd/etcdserver/api/v2store/watcher_hub.go b/etcd/etcdserver/api/v2store/watcher_hub.go new file mode 100644 index 00000000000..21aac559852 --- /dev/null +++ b/etcd/etcdserver/api/v2store/watcher_hub.go @@ -0,0 +1,171 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2store + +import ( + "container/list" + "path" + "strings" + "sync" + "sync/atomic" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" +) + +// A watcherHub 一个watcherHub包含所有订阅的watcher,watcher是一个以watched路径为key,以watcher为值的map, +// EventHistory为watcherHub保存旧的事件. +// 它被用来帮助watcher获得一个连续的事件历史.观察者可能会错过在第一个观察命令结束和第二个命令开始之间发生的事件. +type watcherHub struct { + count int64 // 当前的监听者数量 + mutex sync.Mutex // + watchers map[string]*list.List // 所有在xxx目录下的监听者 + EventHistory *EventHistory // 历史事件 +} + +// newWatchHub 创建一个watcherHub.容量决定了我们将在eventHistory中保留多少个事件. +func newWatchHub(capacity int) *watcherHub { + return &watcherHub{ + watchers: make(map[string]*list.List), + EventHistory: newEventHistory(capacity), + } +} + +// Watch 返回一个Watcher. 
+func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) { + event, err := wh.EventHistory.scan(key, recursive, index) + if err != nil { + err.Index = storeIndex + return nil, err + } + + w := &watcher{ + eventChan: make(chan *Event, 100), // use a buffered channel + recursive: recursive, + stream: stream, + sinceIndex: index, + startIndex: storeIndex, + hub: wh, + } + + wh.mutex.Lock() + defer wh.mutex.Unlock() + // If the event exists in the known history, append the EtcdIndex and return immediately + if event != nil { + ne := event.Clone() + ne.EtcdIndex = storeIndex + w.eventChan <- ne + return w, nil + } + + l, ok := wh.watchers[key] + + var elem *list.Element + + if ok { // add the new watcher to the back of the list + elem = l.PushBack(w) + } else { // create a new list and add the new watcher + l = list.New() + elem = l.PushBack(w) + wh.watchers[key] = l + } + + w.remove = func() { + if w.removed { // avoid removing it twice + return + } + w.removed = true + l.Remove(elem) + atomic.AddInt64(&wh.count, -1) + if l.Len() == 0 { + delete(wh.watchers, key) + } + } + + atomic.AddInt64(&wh.count, 1) + return w, nil +} + +func (wh *watcherHub) add(e *Event) { + wh.EventHistory.addEvent(e) +} + +// notify 接收一个事件,通知watcher +func (wh *watcherHub) notify(e *Event) { + e = wh.EventHistory.addEvent(e) + segments := strings.Split(e.NodeExtern.Key, "/") // /0/members/8e9e05c52164694d/raftAttributes + currPath := "/" + // if the path is "/foo/bar", --> "/","/foo", "/foo/bar" + for _, segment := range segments { + currPath = path.Join(currPath, segment) + // 通知对当前路径变化 感兴趣的观察者 + // e.NodeExtern.Key /0/members/8e9e05c52164694d/raftAttributes + // nodePath : / + // nodePath : /0 + // nodePath : /0/members + // nodePath : /0/members/8e9e05c52164694d + // nodePath : /0/members/8e9e05c52164694d/raftAttributes + wh.notifyWatchers(e, currPath, false) + } +} + +// ok +func (wh *watcherHub) notifyWatchers(e *Event, nodePath 
string, deleted bool) { + wh.mutex.Lock() + defer wh.mutex.Unlock() + + l, ok := wh.watchers[nodePath] + if ok { + curr := l.Front() + // e.NodeExtern.Key /0/members/8e9e05c52164694d/raftAttributes + // nodePath : /0/members/8e9e05c52164694d/raftAttributes + for curr != nil { + next := curr.Next() + w, _ := curr.Value.(*watcher) + originalPath := e.NodeExtern.Key == nodePath + // 是不是起源,或者该目录不是隐藏节点 + if (originalPath || !isHidden(nodePath, e.NodeExtern.Key)) && w.notify(e, originalPath, deleted) { + if !w.stream { + // 如果不是流观察者---> 删除,-1 + w.removed = true + l.Remove(curr) + atomic.AddInt64(&wh.count, -1) + } + } + curr = next + } + + if l.Len() == 0 { + // 通知之后,就删除 + delete(wh.watchers, nodePath) + } + } +} + +func (wh *watcherHub) clone() *watcherHub { + clonedHistory := wh.EventHistory.clone() + + return &watcherHub{ + EventHistory: clonedHistory, + } +} + +// isHidden 检查关键路径是否被认为是隐藏的观察路径,即最后一个元素是隐藏的,或者它在一个隐藏的目录中. +func isHidden(watchPath, keyPath string) bool { + if len(watchPath) > len(keyPath) { + return false + } + afterPath := path.Clean("/" + keyPath[len(watchPath):]) // 去后边的路径 + return strings.Contains(afterPath, "/_") +} diff --git a/etcd/etcdserver/api/v2v3/cluster.go b/etcd/etcdserver/api/v2v3/cluster.go new file mode 100644 index 00000000000..acb3d0fedf0 --- /dev/null +++ b/etcd/etcdserver/api/v2v3/cluster.go @@ -0,0 +1,31 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v2v3 + +import ( + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + + "github.com/coreos/go-semver/semver" +) + +func (s *v2v3Server) ID() types.ID { + // TODO: use an actual member ID + return types.ID(0xe7cd2f00d) +} +func (s *v2v3Server) ClientURLs() []string { panic("STUB") } +func (s *v2v3Server) Members() []*membership.Member { panic("STUB") } +func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") } +func (s *v2v3Server) Version() *semver.Version { panic("STUB") } diff --git a/etcd/etcdserver/api/v2v3/doc.go b/etcd/etcdserver/api/v2v3/doc.go new file mode 100644 index 00000000000..2ff372f1876 --- /dev/null +++ b/etcd/etcdserver/api/v2v3/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client. +package v2v3 diff --git a/etcd/etcdserver/api/v2v3/server.go b/etcd/etcdserver/api/v2v3/server.go new file mode 100644 index 00000000000..48a2ad8a9be --- /dev/null +++ b/etcd/etcdserver/api/v2v3/server.go @@ -0,0 +1,131 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2v3 + +import ( + "context" + "net/http" + "time" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +type fakeStats struct{} + +func (s *fakeStats) SelfStats() []byte { return nil } +func (s *fakeStats) LeaderStats() []byte { return nil } +func (s *fakeStats) StoreStats() []byte { return nil } + +type v2v3Server struct { + lg *zap.Logger + c *clientv3.Client + store *v2v3Store + fakeStats +} + +func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer { + return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)} +} + +func (s *v2v3Server) ClientCertAuthEnabled() bool { return false } + +func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") } +func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") } + +func (s *v2v3Server) Leader() types.ID { + ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) + defer cancel() + resp, err := s.c.Status(ctx, s.c.Endpoints()[0]) + if err != nil { + return 0 + } + return types.ID(resp.Leader) +} + +func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + // adding member as learner is not supported by V2 Server. 
+ resp, err := s.c.MemberAdd(ctx, memb.PeerURLs) + if err != nil { + return nil, err + } + return v3MembersToMembership(resp.Members), nil +} + +func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + resp, err := s.c.MemberRemove(ctx, id) + if err != nil { + return nil, err + } + return v3MembersToMembership(resp.Members), nil +} + +func (s *v2v3Server) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + resp, err := s.c.MemberPromote(ctx, id) + if err != nil { + return nil, err + } + return v3MembersToMembership(resp.Members), nil +} + +func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) { + resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs) + if err != nil { + return nil, err + } + return v3MembersToMembership(resp.Members), nil +} + +func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member { + membs := make([]*membership.Member, len(v3membs)) + for i, m := range v3membs { + membs[i] = &membership.Member{ + ID: types.ID(m.ID), + RaftAttributes: membership.RaftAttributes{ + PeerURLs: m.PeerURLs, + IsLearner: m.IsLearner, + }, + Attributes: membership.Attributes{ + Name: m.Name, + ClientURLs: m.ClientURLs, + }, + } + } + return membs +} + +func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() } +func (s *v2v3Server) Cluster() api.Cluster { return s } +func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil } +func (s *v2v3Server) LeaderChangedNotify() <-chan struct{} { return nil } + +func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) { + applier := etcdserver.NewApplierV2(s.lg, s.store, nil) + reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier) + req := (*etcdserver.RequestV2)(&r) + resp, err := req.Handle(ctx, reqHandler) + if resp.Err != nil { + return resp, resp.Err + } + return resp, err +} diff --git 
a/etcd/etcdserver/api/v2v3/store.go b/etcd/etcdserver/api/v2v3/store.go new file mode 100644 index 00000000000..bff3162a532 --- /dev/null +++ b/etcd/etcdserver/api/v2v3/store.go @@ -0,0 +1,621 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2v3 + +import ( + "context" + "fmt" + "path" + "sort" + "strings" + "time" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" +) + +// store 使用v3客户端实现V2的存储接口. +type v2v3Store struct { + c *clientv3.Client + // pfx 是应该存储钥匙的v3前缀. 
+ pfx string + ctx context.Context +} + +const maxPathDepth = 63 + +var errUnsupported = fmt.Errorf("TTLs are unsupported") + +func NewStore(c *clientv3.Client, pfx string) v2store.Store { return newStore(c, pfx) } + +func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} } + +func (s *v2v3Store) Index() uint64 { panic("STUB") } + +func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*v2store.Event, error) { + key := s.mkPath(nodePath) + resp, err := s.c.Txn(s.ctx).Then( + clientv3.OpGet(key+"/"), + clientv3.OpGet(key), + ).Commit() + if err != nil { + return nil, err + } + + if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) { + nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision) + if err != nil { + return nil, err + } + cidx, midx := uint64(0), uint64(0) + if len(kvs) > 0 { + cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision) + } + return &v2store.Event{ + Action: v2store.Get, + NodeExtern: &v2store.NodeExtern{ + Key: nodePath, + Dir: true, + ExternNodes: nodes, + CreatedIndex: cidx, + ModifiedIndex: midx, + }, + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil + } + + kvs := resp.Responses[1].GetResponseRange().Kvs + if len(kvs) == 0 { + return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) + } + + return &v2store.Event{ + Action: v2store.Get, + NodeExtern: s.mkV2Node(kvs[0]), + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*v2store.NodeExtern, error) { + rootNodes, err := s.getDirDepth(nodePath, 1, rev) + if err != nil || !recursive { + if sorted { + sort.Sort(v2store.NodeExterns(rootNodes)) + } + return rootNodes, err + } + nextNodes := rootNodes + nodes := make(map[string]*v2store.NodeExtern) + // Breadth walk the subdirectories + for i := 2; len(nextNodes) > 0; i++ { + for _, n := range 
nextNodes { + nodes[n.Key] = n + if parent := nodes[path.Dir(n.Key)]; parent != nil { + parent.ExternNodes = append(parent.ExternNodes, n) + } + } + if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil { + return nil, err + } + } + + if sorted { + sort.Sort(v2store.NodeExterns(rootNodes)) + } + return rootNodes, nil +} + +func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*v2store.NodeExtern, error) { + pd := s.mkPathDepth(nodePath, depth) + resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev)) + if err != nil { + return nil, err + } + + nodes := make([]*v2store.NodeExtern, len(resp.Kvs)) + for i, kv := range resp.Kvs { + nodes[i] = s.mkV2Node(kv) + } + return nodes, nil +} + +func (s *v2v3Store) Set(nodePath string, dir bool, value string, expireOpts v2store.TTLOptionSet, +) (*v2store.Event, error) { + if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { + return nil, errUnsupported + } + + if isRoot(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) + } + + ecode := 0 + applyf := func(stm concurrency.STM) error { + // build path if any directories in path do not exist + dirs := []string{} + for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { + pp := s.mkPath(p) + if stm.Rev(pp) > 0 { + ecode = v2error.EcodeNotDir + return nil + } + if stm.Rev(pp+"/") == 0 { + dirs = append(dirs, pp+"/") + } + } + for _, d := range dirs { + stm.Put(d, "") + } + + key := s.mkPath(nodePath) + if dir { + if stm.Rev(key) != 0 { + // exists as non-dir + ecode = v2error.EcodeNotDir + return nil + } + key = key + "/" + } else if stm.Rev(key+"/") != 0 { + ecode = v2error.EcodeNotFile + return nil + } + stm.Put(key, value, clientv3.WithPrevKV()) + stm.Put(s.mkActionKey(), v2store.Set) + return nil + } + + resp, err := s.newSTM(applyf) + if err != nil { + return nil, err + } + if ecode != 0 { + return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) + } + + createRev := 
resp.Header.Revision + var pn *v2store.NodeExtern + if pkv := prevKeyFromPuts(resp); pkv != nil { + pn = s.mkV2Node(pkv) + createRev = pkv.CreateRevision + } + + vp := &value + if dir { + vp = nil + } + return &v2store.Event{ + Action: v2store.Set, + NodeExtern: &v2store.NodeExtern{ + Key: nodePath, + Value: vp, + Dir: dir, + ModifiedIndex: mkV2Rev(resp.Header.Revision), + CreatedIndex: mkV2Rev(createRev), + }, + PrevNode: pn, + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +// Update 更新节点属性, 例如将islearner变成false +func (s *v2v3Store) Update(nodePath, newValue string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { + if isRoot(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) + } + + if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { + return nil, errUnsupported + } + + key := s.mkPath(nodePath) + ecode := 0 + applyf := func(stm concurrency.STM) error { + if rev := stm.Rev(key + "/"); rev != 0 { + ecode = v2error.EcodeNotFile + return nil + } + if rev := stm.Rev(key); rev == 0 { + ecode = v2error.EcodeKeyNotFound + return nil + } + stm.Put(key, newValue, clientv3.WithPrevKV()) + stm.Put(s.mkActionKey(), v2store.Update) + return nil + } + + resp, err := s.newSTM(applyf) + if err != nil { + return nil, err + } + if ecode != 0 { + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) + } + + pkv := prevKeyFromPuts(resp) + return &v2store.Event{ + Action: v2store.Update, + NodeExtern: &v2store.NodeExtern{ + Key: nodePath, + Value: &newValue, + ModifiedIndex: mkV2Rev(resp.Header.Revision), + CreatedIndex: mkV2Rev(pkv.CreateRevision), + }, + PrevNode: s.mkV2Node(pkv), + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func (s *v2v3Store) Create(nodePath string, dir bool, value string, unique bool, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { + if isRoot(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) + } + if 
expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { + return nil, errUnsupported + } + ecode := 0 + applyf := func(stm concurrency.STM) error { + ecode = 0 + key := s.mkPath(nodePath) + if unique { + // append unique item under the node path + for { + key = nodePath + "/" + fmt.Sprintf("%020s", time.Now()) + key = path.Clean(path.Join("/", key)) + key = s.mkPath(key) + if stm.Rev(key) == 0 { + break + } + } + } + if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 { + ecode = v2error.EcodeNodeExist + return nil + } + // build path if any directories in path do not exist + dirs := []string{} + for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { + pp := s.mkPath(p) + if stm.Rev(pp) > 0 { + ecode = v2error.EcodeNotDir + return nil + } + if stm.Rev(pp+"/") == 0 { + dirs = append(dirs, pp+"/") + } + } + for _, d := range dirs { + stm.Put(d, "") + } + + if dir { + // directories marked with extra slash in key name + key += "/" + } + stm.Put(key, value) + stm.Put(s.mkActionKey(), v2store.Create) + return nil + } + + resp, err := s.newSTM(applyf) + if err != nil { + return nil, err + } + if ecode != 0 { + return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) + } + + var v *string + if !dir { + v = &value + } + + return &v2store.Event{ + Action: v2store.Create, + NodeExtern: &v2store.NodeExtern{ + Key: nodePath, + Value: v, + Dir: dir, + ModifiedIndex: mkV2Rev(resp.Header.Revision), + CreatedIndex: mkV2Rev(resp.Header.Revision), + }, + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func (s *v2v3Store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, value string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { + if isRoot(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) + } + if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { + return nil, errUnsupported + } + + key := s.mkPath(nodePath) + resp, err := s.c.Txn(s.ctx).If( + s.mkCompare(nodePath, prevValue, prevIndex)..., 
+ ).Then( + clientv3.OpPut(key, value, clientv3.WithPrevKV()), + clientv3.OpPut(s.mkActionKey(), v2store.CompareAndSwap), + ).Else( + clientv3.OpGet(key), + clientv3.OpGet(key+"/"), + ).Commit() + if err != nil { + return nil, err + } + if !resp.Succeeded { + return nil, compareFail(nodePath, prevValue, prevIndex, resp) + } + + pkv := resp.Responses[0].GetResponsePut().PrevKv + return &v2store.Event{ + Action: v2store.CompareAndSwap, + NodeExtern: &v2store.NodeExtern{ + Key: nodePath, + Value: &value, + CreatedIndex: mkV2Rev(pkv.CreateRevision), + ModifiedIndex: mkV2Rev(resp.Header.Revision), + }, + PrevNode: s.mkV2Node(pkv), + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*v2store.Event, error) { + if isRoot(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) + } + if !dir && !recursive { + return s.deleteNode(nodePath) + } + if !recursive { + return s.deleteEmptyDir(nodePath) + } + + dels := make([]clientv3.Op, maxPathDepth+1) + dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()) + for i := 1; i < maxPathDepth; i++ { + dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix()) + } + dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), v2store.Delete) + + resp, err := s.c.Txn(s.ctx).If( + clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0), + clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0), + ).Then( + dels..., + ).Commit() + if err != nil { + return nil, err + } + if !resp.Succeeded { + return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) + } + dresp := resp.Responses[0].GetResponseDeleteRange() + return &v2store.Event{ + Action: v2store.Delete, + PrevNode: s.mkV2Node(dresp.PrevKvs[0]), + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func (s *v2v3Store) deleteEmptyDir(nodePath string) (*v2store.Event, error) { + 
resp, err := s.c.Txn(s.ctx).If( + clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(), + ).Then( + clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()), + clientv3.OpPut(s.mkActionKey(), v2store.Delete), + ).Commit() + if err != nil { + return nil, err + } + if !resp.Succeeded { + return nil, v2error.NewError(v2error.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision)) + } + dresp := resp.Responses[0].GetResponseDeleteRange() + if len(dresp.PrevKvs) == 0 { + return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) + } + return &v2store.Event{ + Action: v2store.Delete, + PrevNode: s.mkV2Node(dresp.PrevKvs[0]), + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func (s *v2v3Store) deleteNode(nodePath string) (*v2store.Event, error) { + resp, err := s.c.Txn(s.ctx).If( + clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0), + ).Then( + clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()), + clientv3.OpPut(s.mkActionKey(), v2store.Delete), + ).Commit() + if err != nil { + return nil, err + } + if !resp.Succeeded { + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) + } + pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs + if len(pkvs) == 0 { + return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) + } + pkv := pkvs[0] + return &v2store.Event{ + Action: v2store.Delete, + NodeExtern: &v2store.NodeExtern{ + Key: nodePath, + CreatedIndex: mkV2Rev(pkv.CreateRevision), + ModifiedIndex: mkV2Rev(resp.Header.Revision), + }, + PrevNode: s.mkV2Node(pkv), + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*v2store.Event, error) { + if isRoot(nodePath) { + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) + } + + key := s.mkPath(nodePath) + resp, err := s.c.Txn(s.ctx).If( 
+ s.mkCompare(nodePath, prevValue, prevIndex)..., + ).Then( + clientv3.OpDelete(key, clientv3.WithPrevKV()), + clientv3.OpPut(s.mkActionKey(), v2store.CompareAndDelete), + ).Else( + clientv3.OpGet(key), + clientv3.OpGet(key+"/"), + ).Commit() + if err != nil { + return nil, err + } + if !resp.Succeeded { + return nil, compareFail(nodePath, prevValue, prevIndex, resp) + } + + // len(pkvs) > 1 since txn only succeeds when key exists + pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0] + return &v2store.Event{ + Action: v2store.CompareAndDelete, + NodeExtern: &v2store.NodeExtern{ + Key: nodePath, + CreatedIndex: mkV2Rev(pkv.CreateRevision), + ModifiedIndex: mkV2Rev(resp.Header.Revision), + }, + PrevNode: s.mkV2Node(pkv), + EtcdIndex: mkV2Rev(resp.Header.Revision), + }, nil +} + +func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error { + if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 { + return v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) + } + kvs := resp.Responses[0].GetResponseRange().Kvs + if len(kvs) == 0 { + return v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) + } + kv := kvs[0] + indexMatch := prevIndex == 0 || kv.ModRevision == int64(prevIndex) + valueMatch := prevValue == "" || string(kv.Value) == prevValue + var cause string + switch { + case indexMatch && !valueMatch: + cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value)) + case valueMatch && !indexMatch: + cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision) + default: + cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision) + } + return v2error.NewError(v2error.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision)) +} + +func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp { + key := s.mkPath(nodePath) + cmps := 
[]clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)} + if prevIndex != 0 { + cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex))) + } + if prevValue != "" { + cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue)) + } + return cmps +} + +func (s *v2v3Store) JsonStats() []byte { panic("STUB") } +func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") } + +func (s *v2v3Store) Version() int { return 2 } + +// TODO: move this out of the Store interface? + +func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") } +func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") } +func (s *v2v3Store) Clone() v2store.Store { panic("STUB") } +func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") } +func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") } + +func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) } + +func (s *v2v3Store) mkNodePath(p string) string { + return path.Clean(p[len(s.pfx)+len("/k/000/"):]) +} + +// mkPathDepth makes a path to a key that encodes its directory depth +// for fast directory listing. If a depth is provided, it is added +// to the computed depth. 
+func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string { + normalForm := path.Clean(path.Join("/", nodePath)) + n := strings.Count(normalForm, "/") + depth + return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm) +} + +func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" } + +func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" } + +func mkV2Rev(v3Rev int64) uint64 { + if v3Rev == 0 { + return 0 + } + return uint64(v3Rev - 1) +} + +func mkV3Rev(v2Rev uint64) int64 { + if v2Rev == 0 { + return 0 + } + return int64(v2Rev + 1) +} + +// mkV2Node creates a V2 NodeExtern from a V3 KeyValue +func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern { + if kv == nil { + return nil + } + n := &v2store.NodeExtern{ + Key: s.mkNodePath(string(kv.Key)), + Dir: kv.Key[len(kv.Key)-1] == '/', + CreatedIndex: mkV2Rev(kv.CreateRevision), + ModifiedIndex: mkV2Rev(kv.ModRevision), + } + if !n.Dir { + v := string(kv.Value) + n.Value = &v + } + return n +} + +// prevKeyFromPuts gets the prev key that is being put; ignores +// the put action response. +func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue { + for _, r := range resp.Responses { + pkv := r.GetResponsePut().PrevKv + if pkv != nil && pkv.CreateRevision > 0 { + return pkv + } + } + return nil +} + +func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) { + return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable)) +} diff --git a/etcd/etcdserver/api/v2v3/watcher.go b/etcd/etcdserver/api/v2v3/watcher.go new file mode 100644 index 00000000000..f4f25890f06 --- /dev/null +++ b/etcd/etcdserver/api/v2v3/watcher.go @@ -0,0 +1,143 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v2v3 + +import ( + "context" + "strings" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" +) + +func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (v2store.Watcher, error) { + ctx, cancel := context.WithCancel(s.ctx) + wch := s.c.Watch( + ctx, + // TODO: very pricey; use a single store-wide watch in future + s.pfx, + clientv3.WithPrefix(), + clientv3.WithRev(int64(sinceIndex)), + clientv3.WithCreatedNotify(), + clientv3.WithPrevKV()) + resp, ok := <-wch + if err := resp.Err(); err != nil || !ok { + cancel() + return nil, v2error.NewError(v2error.EcodeRaftInternal, prefix, 0) + } + + evc, donec := make(chan *v2store.Event), make(chan struct{}) + go func() { + defer func() { + close(evc) + close(donec) + }() + for resp := range wch { + for _, ev := range s.mkV2Events(resp) { + k := ev.NodeExtern.Key + if recursive { + if !strings.HasPrefix(k, prefix) { + continue + } + // accept events on hidden keys given in prefix + k = strings.Replace(k, prefix, "/", 1) + // ignore hidden keys deeper than prefix + if strings.Contains(k, "/_") { + continue + } + } + if !recursive && k != prefix { + continue + } + select { + case evc <- ev: + case <-ctx.Done(): + return + } + if !stream { + return + } + } + } + }() + + return &v2v3Watcher{ + startRev: resp.Header.Revision, + evc: evc, + donec: donec, + cancel: cancel, + }, nil +} + +func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs 
[]*v2store.Event) { + ak := s.mkActionKey() + for _, rev := range mkRevs(wr) { + var act, key *clientv3.Event + for _, ev := range rev { + if string(ev.Kv.Key) == ak { + act = ev + } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) { + // use longest key to ignore intermediate new + // directories from Create. + key = ev + } else if key == nil { + key = ev + } + } + if act != nil && act.Kv != nil && key != nil { + v2ev := &v2store.Event{ + Action: string(act.Kv.Value), + NodeExtern: s.mkV2Node(key.Kv), + PrevNode: s.mkV2Node(key.PrevKv), + EtcdIndex: mkV2Rev(wr.Header.Revision), + } + evs = append(evs, v2ev) + } + } + return evs +} + +func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) { + var curRev []*clientv3.Event + for _, ev := range wr.Events { + if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision { + revs = append(revs, curRev) + curRev = nil + } + curRev = append(curRev, ev) + } + if curRev != nil { + revs = append(revs, curRev) + } + return revs +} + +type v2v3Watcher struct { + startRev int64 + evc chan *v2store.Event + donec chan struct{} + cancel context.CancelFunc +} + +func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) } + +func (w *v2v3Watcher) Remove() { + w.cancel() + <-w.donec +} + +func (w *v2v3Watcher) EventChan() chan *v2store.Event { return w.evc } diff --git a/etcd/etcdserver/api/v3alarm/over_alarms.go b/etcd/etcdserver/api/v3alarm/over_alarms.go new file mode 100644 index 00000000000..4e24214dc76 --- /dev/null +++ b/etcd/etcdserver/api/v3alarm/over_alarms.go @@ -0,0 +1,164 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3alarm manages health status alarms in etcd. +package v3alarm + +import ( + "sync" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" +) + +type BackendGetter interface { + Backend() backend.Backend +} + +type alarmSet map[types.ID]*pb.AlarmMember + +type AlarmStore struct { + lg *zap.Logger + mu sync.Mutex + types map[pb.AlarmType]alarmSet + // { + // "AlarmType_NONE": { + // }, + // "AlarmType_NOSPACE": { + // "1": { + // "MemberID": "1", + // "AlarmType": "AlarmType_NOSPACE" + // } + // }, + // "AlarmType_CORRUPT": {} + //} + bg BackendGetter +} + +func NewAlarmStore(lg *zap.Logger, bg BackendGetter) (*AlarmStore, error) { + if lg == nil { + lg = zap.NewNop() + } + ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), bg: bg} + err := ret.restore() + return ret, err +} + +// Activate 记录、入库警报 +func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember { + a.mu.Lock() + defer a.mu.Unlock() + + newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at} + if m := a.addToMap(newAlarm); m != newAlarm { + return m + } + + v, err := newAlarm.Marshal() + if err != nil { + a.lg.Panic("序列化报警成员失败", zap.Error(err)) + } + + b := a.bg.Backend() + b.BatchTx().Lock() + b.BatchTx().UnsafePut(buckets.Alarm, v, nil) + b.BatchTx().Unlock() + + return newAlarm +} + +func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember { + 
a.mu.Lock() + defer a.mu.Unlock() + + t := a.types[at] + if t == nil { + t = make(alarmSet) + a.types[at] = t + } + m := t[id] + if m == nil { + return nil + } + + delete(t, id) + + v, err := m.Marshal() + if err != nil { + a.lg.Panic("反序列化报警成员失败", zap.Error(err)) + } + + b := a.bg.Backend() + b.BatchTx().Lock() + b.BatchTx().UnsafeDelete(buckets.Alarm, v) + b.BatchTx().Unlock() + + return m +} + +// Get 获取指定类型的警报,NONE 是所有 +func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) { + a.mu.Lock() + defer a.mu.Unlock() + if at == pb.AlarmType_NONE { + for _, t := range a.types { + for _, m := range t { + ret = append(ret, m) + } + } + return ret + } + for _, m := range a.types[at] { + ret = append(ret, m) + } + return ret +} + +func (a *AlarmStore) restore() error { + b := a.bg.Backend() + tx := b.BatchTx() + + tx.Lock() + tx.UnsafeCreateBucket(buckets.Alarm) + err := tx.UnsafeForEach(buckets.Alarm, func(k, v []byte) error { + var m pb.AlarmMember + if err := m.Unmarshal(k); err != nil { + return err + } + a.addToMap(&m) + return nil + }) + tx.Unlock() + + b.ForceCommit() + return err +} + +func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember { + t := a.types[newAlarm.Alarm] + if t == nil { + t = make(alarmSet) + a.types[newAlarm.Alarm] = t + } + m := t[types.ID(newAlarm.MemberID)] + if m != nil { + return m + } + t[types.ID(newAlarm.MemberID)] = newAlarm + return newAlarm +} diff --git a/etcd/etcdserver/api/v3client/doc.go b/etcd/etcdserver/api/v3client/doc.go new file mode 100644 index 00000000000..0c532019904 --- /dev/null +++ b/etcd/etcdserver/api/v3client/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3client provides clientv3 interfaces from an etcdserver. +// +// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New: +// +// import ( +// "github.com/ls-2018/etcd_cn/client_sdk/v3" +// "context" +// +// "github.com/ls-2018/etcd_cn/etcd/embed" +// "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3client" +// ) +// +// ... +// +// // create an embedded EtcdServer from the default configuration +// cfg := embed.NewConfig() +// cfg.Dir = "default.etcd" +// e, err := embed.StartEtcd(cfg) +// if err != nil { +// // handle error! +// } +// +// // wrap the EtcdServer with v3client +// cli := v3client.New(e.Server) +// +// // use like an ordinary clientv3 +// resp, err := cli.Put(context.TODO(), "some-key", "it works!") +// if err != nil { +// // handle error! 
+// } +// +package v3client diff --git a/server/etcdserver/api/v3client/v3client.go b/etcd/etcdserver/api/v3client/v3client.go similarity index 89% rename from server/etcdserver/api/v3client/v3client.go rename to etcd/etcdserver/api/v3client/v3client.go index c44479ffad2..a67e0c6fbd5 100644 --- a/server/etcdserver/api/v3client/v3client.go +++ b/etcd/etcdserver/api/v3client/v3client.go @@ -18,15 +18,16 @@ import ( "context" "time" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc" + "github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy/adapter" ) // New creates a clientv3 client that wraps an in-process EtcdServer. Instead // of making gRPC calls through sockets, the client makes direct function calls -// to the etcd server through its api/v3rpc function interfaces. +// to the etcd etcd through its api/v3rpc function interfaces. 
func New(s *etcdserver.EtcdServer) *clientv3.Client { c := clientv3.NewCtxClient(context.Background(), clientv3.WithZapLogger(s.Logger())) diff --git a/server/etcdserver/api/v3compactor/compactor.go b/etcd/etcdserver/api/v3compactor/compactor.go similarity index 97% rename from server/etcdserver/api/v3compactor/compactor.go rename to etcd/etcdserver/api/v3compactor/compactor.go index e352670c12b..f7f46871395 100644 --- a/server/etcdserver/api/v3compactor/compactor.go +++ b/etcd/etcdserver/api/v3compactor/compactor.go @@ -19,7 +19,7 @@ import ( "fmt" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "github.com/jonboulle/clockwork" "go.uber.org/zap" diff --git a/server/etcdserver/api/v3compactor/doc.go b/etcd/etcdserver/api/v3compactor/doc.go similarity index 100% rename from server/etcdserver/api/v3compactor/doc.go rename to etcd/etcdserver/api/v3compactor/doc.go diff --git a/server/etcdserver/api/v3compactor/periodic.go b/etcd/etcdserver/api/v3compactor/periodic.go similarity index 88% rename from server/etcdserver/api/v3compactor/periodic.go rename to etcd/etcdserver/api/v3compactor/periodic.go index 853c1a9e7f3..e87797ac197 100644 --- a/server/etcdserver/api/v3compactor/periodic.go +++ b/etcd/etcdserver/api/v3compactor/periodic.go @@ -19,8 +19,8 @@ import ( "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/storage/mvcc" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "github.com/jonboulle/clockwork" "go.uber.org/zap" @@ -54,9 +54,8 @@ func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevG period: h, rg: rg, c: c, + revs: make([]int64, 0), } - // revs won't be longer than the retentions. - pc.revs = make([]int64, 0, pc.getRetentions()) pc.ctx, pc.cancel = context.WithCancel(context.Background()) return pc } @@ -67,7 +66,7 @@ Compaction period 1-hour: 2. 
record revisions for every 1/10 of 1-hour (6-minute) 3. keep recording revisions with no compaction for first 1-hour 4. do compact with revs[0] - - success? continue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 1-hour (6-minute) Compaction period 24-hour: @@ -75,7 +74,7 @@ Compaction period 24-hour: 2. record revisions for every 1/10 of 1-hour (6-minute) 3. keep recording revisions with no compaction for first 24-hour 4. do compact with revs[0] - - success? continue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 1-hour (6-minute) Compaction period 59-min: @@ -83,7 +82,7 @@ Compaction period 59-min: 2. record revisions for every 1/10 of 59-min (5.9-min) 3. keep recording revisions with no compaction for first 59-min 4. do compact with revs[0] - - success? continue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 59-min (5.9-min) Compaction period 5-sec: @@ -91,7 +90,7 @@ Compaction period 5-sec: 2. record revisions for every 1/10 of 5-sec (0.5-sec) 3. keep recording revisions with no compaction for first 5-sec 4. do compact with revs[0] - - success? continue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure?
update revs, and retry after 1/10 of 5-sec (0.5-sec) */ @@ -102,7 +101,6 @@ func (pc *Periodic) Run() { retentions := pc.getRetentions() go func() { - lastRevision := int64(0) lastSuccess := pc.clock.Now() baseInterval := pc.period for { @@ -115,15 +113,15 @@ func (pc *Periodic) Run() { case <-pc.ctx.Done(): return case <-pc.clock.After(retryInterval): - pc.mu.RLock() + pc.mu.Lock() p := pc.paused - pc.mu.RUnlock() + pc.mu.Unlock() if p { continue } } - rev := pc.revs[0] - if pc.clock.Now().Sub(lastSuccess) < baseInterval || rev == lastRevision { + + if pc.clock.Now().Sub(lastSuccess) < baseInterval { continue } @@ -131,6 +129,7 @@ func (pc *Periodic) Run() { if baseInterval == pc.period { baseInterval = compactInterval } + rev := pc.revs[0] pc.lg.Info( "starting auto periodic compaction", @@ -146,7 +145,6 @@ func (pc *Periodic) Run() { zap.Duration("compact-period", pc.period), zap.Duration("took", pc.clock.Now().Sub(startTime)), ) - lastRevision = rev lastSuccess = pc.clock.Now() } else { pc.lg.Warn( diff --git a/etcd/etcdserver/api/v3compactor/revision.go b/etcd/etcdserver/api/v3compactor/revision.go new file mode 100644 index 00000000000..9a30feb77e5 --- /dev/null +++ b/etcd/etcdserver/api/v3compactor/revision.go @@ -0,0 +1,130 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3compactor + +import ( + "context" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/etcd/mvcc" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "github.com/jonboulle/clockwork" + "go.uber.org/zap" +) + +// Revision compacts the log by purging revisions older than +// the configured revision number. Compaction happens every 5 minutes. +type Revision struct { + lg *zap.Logger + + clock clockwork.Clock + retention int64 + + rg RevGetter + c Compactable + + ctx context.Context + cancel context.CancelFunc + + mu sync.Mutex + paused bool +} + +// newRevision creates a new instance of Revisional compactor that purges +// the log older than retention revisions from the current revision. +func newRevision(lg *zap.Logger, clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision { + rc := &Revision{ + lg: lg, + clock: clock, + retention: retention, + rg: rg, + c: c, + } + rc.ctx, rc.cancel = context.WithCancel(context.Background()) + return rc +} + +const revInterval = 5 * time.Minute + +// Run runs revision-based compactor.
+func (rc *Revision) Run() { + prev := int64(0) + go func() { + for { + select { + case <-rc.ctx.Done(): + return + case <-rc.clock.After(revInterval): + rc.mu.Lock() + p := rc.paused + rc.mu.Unlock() + if p { + continue + } + } + + rev := rc.rg.Rev() - rc.retention + if rev <= 0 || rev == prev { + continue + } + + now := time.Now() + rc.lg.Info( + "starting auto revision compaction", + zap.Int64("revision", rev), + zap.Int64("revision-compaction-retention", rc.retention), + ) + _, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev}) + if err == nil || err == mvcc.ErrCompacted { + prev = rev + rc.lg.Info( + "completed auto revision compaction", + zap.Int64("revision", rev), + zap.Int64("revision-compaction-retention", rc.retention), + zap.Duration("took", time.Since(now)), + ) + } else { + rc.lg.Warn( + "failed auto revision compaction", + zap.Int64("revision", rev), + zap.Int64("revision-compaction-retention", rc.retention), + zap.Duration("retry-interval", revInterval), + zap.Error(err), + ) + } + } + }() +} + +// Stop stops revision-based compactor. +func (rc *Revision) Stop() { + rc.cancel() +} + +// Pause pauses revision-based compactor. +func (rc *Revision) Pause() { + rc.mu.Lock() + rc.paused = true + rc.mu.Unlock() +} + +// Resume resumes revision-based compactor. 
+func (rc *Revision) Resume() { + rc.mu.Lock() + rc.paused = false + rc.mu.Unlock() +} diff --git a/server/etcdserver/api/v3election/doc.go b/etcd/etcdserver/api/v3election/doc.go similarity index 100% rename from server/etcdserver/api/v3election/doc.go rename to etcd/etcdserver/api/v3election/doc.go diff --git a/etcd/etcdserver/api/v3election/election.go b/etcd/etcdserver/api/v3election/election.go new file mode 100644 index 00000000000..f21a8efc213 --- /dev/null +++ b/etcd/etcdserver/api/v3election/election.go @@ -0,0 +1,137 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3election + +import ( + "context" + "errors" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + epb "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb" +) + +// ErrMissingLeaderKey is returned when election API request +// is missing the "leader" field. 
+var ErrMissingLeaderKey = errors.New(`"leader" field必须是provided`) + +type electionServer struct { + c *clientv3.Client +} + +// NewElectionServer 选举server +func NewElectionServer(c *clientv3.Client) epb.ElectionServer { + return &electionServer{c} +} + +func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) { + s, err := es.session(ctx, req.Lease) + if err != nil { + return nil, err + } + // 创建用于选举的Session,有效时间可以根据实际情况设置 + e := concurrency.NewElection(s, string(req.Name)) + if err = e.Campaign(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.CampaignResponse{ + Header: e.Header(), + Leader: &epb.LeaderKey{ + Name: req.Name, + Key: []byte(e.Key()), + Rev: e.Rev(), + Lease: int64(s.Lease()), + }, + }, nil +} + +func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) { + if req.Leader == nil { + return nil, ErrMissingLeaderKey + } + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Proclaim(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.ProclaimResponse{Header: e.Header()}, nil +} + +func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error { + s, err := es.session(stream.Context(), -1) + if err != nil { + return err + } + e := concurrency.NewElection(s, string(req.Name)) + ch := e.Observe(stream.Context()) + for stream.Context().Err() == nil { + select { + case <-stream.Context().Done(): + case resp, ok := <-ch: + if !ok { + return nil + } + lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]} + if err := stream.Send(lresp); err != nil { + return err + } + } + } + return stream.Context().Err() +} + +func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, 
error) { + s, err := es.session(ctx, -1) + if err != nil { + return nil, err + } + l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx) + if lerr != nil { + return nil, lerr + } + return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil +} + +func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) { + if req.Leader == nil { + return nil, ErrMissingLeaderKey + } + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Resign(ctx); err != nil { + return nil, err + } + return &epb.ResignResponse{Header: e.Header()}, nil +} + +func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) { + s, err := concurrency.NewSession( + es.c, + concurrency.WithLease(clientv3.LeaseID(lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + return s, nil +} diff --git a/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go similarity index 92% rename from server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go rename to etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go index 432fb9c4477..28d383e42bd 100644 --- a/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go +++ b/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -1,5 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: server/etcdserver/api/v3election/v3electionpb/v3election.proto +// Code generated by protoc-gen-grpc-gateway. +// source: etcd/etcdserver/api/v3election/v3electionpb/v3election.proto /* Package v3electionpb is a reverse proxy. 
@@ -10,10 +10,11 @@ package gw import ( "context" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" "io" "net/http" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" @@ -21,18 +22,19 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) // Suppress "imported and not used" errors var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join + +var ( + _ io.Reader + _ status.Status + _ = runtime.String + _ = utilities.NewDoubleArray + _ = descriptor.ForMessage +) func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq v3electionpb.CampaignRequest @@ -48,7 +50,6 @@ func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshale msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -65,7 +66,6 @@ func local_request_Election_Campaign_0(ctx context.Context, marshaler runtime.Ma msg, err := server.Campaign(ctx, &protoReq) return msg, metadata, err - } func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -82,7 +82,6 @@ func 
request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshale msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -99,7 +98,6 @@ func local_request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Ma msg, err := server.Proclaim(ctx, &protoReq) return msg, metadata, err - } func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -116,7 +114,6 @@ func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -133,7 +130,6 @@ func local_request_Election_Leader_0(ctx context.Context, marshaler runtime.Mars msg, err := server.Leader(ctx, &protoReq) return msg, metadata, err - } func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) { @@ -158,7 +154,6 @@ func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler } metadata.HeaderMD = header return stream, metadata, nil - } func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -175,7 +170,6 @@ func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -192,20 +186,15 @@ func local_request_Election_Resign_0(ctx context.Context, marshaler runtime.Mars msg, err := server.Resign(ctx, &protoReq) return msg, metadata, err - } // v3electionpb.RegisterElectionHandlerServer registers the http handlers for service Election to "mux". // UnaryRPC :call v3electionpb.ElectionServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterElectionHandlerFromEndpoint instead. 
func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3electionpb.ElectionServer) error { - mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -213,7 +202,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s return } resp, md, err := local_request_Election_Campaign_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -221,14 +209,11 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s } forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -236,7 +221,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s return } resp, md, err := local_request_Election_Proclaim_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -244,14 +228,11 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s } forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -259,7 +240,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s return } resp, md, err := local_request_Election_Leader_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -267,7 +247,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s } forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -280,8 +259,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -289,7 +266,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s return } resp, md, err := local_request_Election_Resign_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -297,7 +273,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s } forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -340,7 +315,6 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "ElectionClient" to call the correct interceptors. 
func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error { - mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -358,7 +332,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c } forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -378,7 +351,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c } forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -398,7 +370,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c } forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -418,7 +389,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c } forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -438,7 +408,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c } forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) return nil diff --git a/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go new file mode 100644 index 00000000000..3947ffe4224 --- /dev/null +++ b/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -0,0 +1,748 @@ +package v3electionpb + +// source: v3election.proto + +import ( + context "context" + "encoding/json" + fmt "fmt" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + mvccpb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + etcdserverpb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type CampaignRequest struct { + // name is the election's identifier for the campaign. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // lease is the ID of the lease attached to leadership of the election. If the + // lease expires or is revoked before resigning leadership, then the + // leadership is transferred to the next campaigner, if any. + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` + // value is the initial proclaimed value set when the campaigner wins the + // election. 
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *CampaignRequest) Reset() { *m = CampaignRequest{} } +func (m *CampaignRequest) String() string { return proto.CompactTextString(m) } +func (*CampaignRequest) ProtoMessage() {} +func (*CampaignRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{0} +} + +func (m *CampaignRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *CampaignRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *CampaignRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type CampaignResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // leader describes the resources used for holding leadereship of the election. + Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CampaignResponse) Reset() { *m = CampaignResponse{} } +func (m *CampaignResponse) String() string { return proto.CompactTextString(m) } +func (*CampaignResponse) ProtoMessage() {} +func (*CampaignResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{1} +} + +func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *CampaignResponse) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +type LeaderKey struct { + // name is the election identifier that correponds to the leadership key. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // key is an opaque key representing the ownership of the election. If the key + // is deleted, then leadership is lost. 
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // rev is the creation revision of the key. It can be used to test for ownership + // of an election during transactions by testing the key's creation revision + // matches rev. + Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"` + // lease is the lease ID of the election leader. + Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LeaderKey) Reset() { *m = LeaderKey{} } +func (m *LeaderKey) String() string { return proto.CompactTextString(m) } +func (*LeaderKey) ProtoMessage() {} +func (*LeaderKey) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{2} +} + +func (m *LeaderKey) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LeaderKey) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *LeaderKey) GetRev() int64 { + if m != nil { + return m.Rev + } + return 0 +} + +func (m *LeaderKey) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LeaderRequest struct { + // name is the election identifier for the leadership information. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } +func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } +func (*LeaderRequest) ProtoMessage() {} +func (*LeaderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{3} +} + +func (m *LeaderRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +type LeaderResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // kv is the key-value pair representing the latest leader update. 
+ Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } +func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } +func (*LeaderResponse) ProtoMessage() {} +func (*LeaderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{4} +} + +func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaderResponse) GetKv() *mvccpb.KeyValue { + if m != nil { + return m.Kv + } + return nil +} + +type ResignRequest struct { + // leader is the leadership to relinquish by resignation. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResignRequest) Reset() { *m = ResignRequest{} } +func (m *ResignRequest) String() string { return proto.CompactTextString(m) } +func (*ResignRequest) ProtoMessage() {} +func (*ResignRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{5} +} + +func (m *ResignRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +type ResignResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResignResponse) Reset() { *m = ResignResponse{} } +func (m *ResignResponse) String() string { return proto.CompactTextString(m) } +func (*ResignResponse) ProtoMessage() {} +func (*ResignResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{6} +} + +func (m *ResignResponse) GetHeader() 
*etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type ProclaimRequest struct { + // leader is the leadership hold on the election. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` + // value is an update meant to overwrite the leader's current value. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } +func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } +func (*ProclaimRequest) ProtoMessage() {} +func (*ProclaimRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{7} +} + +func (m *ProclaimRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +func (m *ProclaimRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type ProclaimResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } +func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) } +func (*ProclaimResponse) ProtoMessage() {} +func (*ProclaimResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c9b1f26cc432a035, []int{8} +} + +func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest") + proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse") + proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey") + proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest") + proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse") + 
proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest") + proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse") + proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest") + proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse") +} + +func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) } + +var fileDescriptor_c9b1f26cc432a035 = []byte{ + // 531 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0x59, 0x27, 0x84, 0x32, 0xa4, 0xad, 0x65, 0x82, 0x08, 0x21, 0xb8, 0xd1, 0x72, 0xa9, + 0x72, 0xb0, 0x51, 0xc3, 0x29, 0x27, 0x04, 0x02, 0x55, 0x2a, 0x12, 0xe0, 0x03, 0x82, 0xe3, 0xda, + 0x1d, 0xb9, 0x91, 0x1d, 0xaf, 0xb1, 0x5d, 0x4b, 0xb9, 0xf2, 0x0a, 0x1c, 0xe0, 0x91, 0x38, 0x22, + 0xf1, 0x02, 0x28, 0xf0, 0x20, 0x68, 0x77, 0xed, 0xfa, 0x8f, 0x12, 0x84, 0x9a, 0xdb, 0x78, 0xe7, + 0xdb, 0xf9, 0xcd, 0x37, 0x3b, 0x09, 0xe8, 0xf9, 0x0c, 0x43, 0xf4, 0xb2, 0x05, 0x8f, 0xac, 0x38, + 0xe1, 0x19, 0x37, 0xfa, 0xd5, 0x49, 0xec, 0x8e, 0x06, 0x3e, 0xf7, 0xb9, 0x4c, 0xd8, 0x22, 0x52, + 0x9a, 0xd1, 0x11, 0x66, 0xde, 0xb9, 0xcd, 0xe2, 0x85, 0x2d, 0x82, 0x14, 0x93, 0x1c, 0x93, 0xd8, + 0xb5, 0x93, 0xd8, 0x2b, 0x04, 0xc3, 0x2b, 0xc1, 0x32, 0xf7, 0xbc, 0xd8, 0xb5, 0x83, 0xbc, 0xc8, + 0x8c, 0x7d, 0xce, 0xfd, 0x10, 0x65, 0x8e, 0x45, 0x11, 0xcf, 0x98, 0x20, 0xa5, 0x2a, 0x4b, 0xdf, + 0xc1, 0xe1, 0x0b, 0xb6, 0x8c, 0xd9, 0xc2, 0x8f, 0x1c, 0xfc, 0x74, 0x89, 0x69, 0x66, 0x18, 0xd0, + 0x8d, 0xd8, 0x12, 0x87, 0x64, 0x42, 0x8e, 0xfb, 0x8e, 0x8c, 0x8d, 0x01, 0xdc, 0x0c, 0x91, 0xa5, + 0x38, 0xd4, 0x26, 0xe4, 0xb8, 0xe3, 0xa8, 0x0f, 0x71, 0x9a, 0xb3, 0xf0, 0x12, 0x87, 0x1d, 0x29, + 0x55, 0x1f, 0x74, 0x05, 0x7a, 0x55, 0x32, 0x8d, 0x79, 0x94, 0xa2, 0xf1, 0x14, 0x7a, 0x17, 0xc8, + 0xce, 0x31, 0x91, 0x55, 0xef, 0x9c, 0x8c, 0xad, 0xba, 0x0f, 0xab, 0xd4, 0x9d, 0x4a, 0x8d, 0x53, + 0x68, 0x0d, 0x1b, 0x7a, 0xa1, 
0xba, 0xa5, 0xc9, 0x5b, 0xf7, 0xad, 0xfa, 0xa8, 0xac, 0xd7, 0x32, + 0x77, 0x86, 0x2b, 0xa7, 0x90, 0xd1, 0x8f, 0x70, 0xfb, 0xea, 0x70, 0xa3, 0x0f, 0x1d, 0x3a, 0x01, + 0xae, 0x64, 0xb9, 0xbe, 0x23, 0x42, 0x71, 0x92, 0x60, 0x2e, 0x1d, 0x74, 0x1c, 0x11, 0x56, 0x5e, + 0xbb, 0x35, 0xaf, 0xf4, 0x31, 0xec, 0xab, 0xd2, 0xff, 0x18, 0x13, 0xbd, 0x80, 0x83, 0x52, 0xb4, + 0x93, 0xf1, 0x09, 0x68, 0x41, 0x5e, 0x98, 0xd6, 0x2d, 0xf5, 0xa2, 0xd6, 0x19, 0xae, 0xde, 0x8b, + 0x01, 0x3b, 0x5a, 0x90, 0xd3, 0x67, 0xb0, 0xef, 0x60, 0x5a, 0x7b, 0xb5, 0x6a, 0x56, 0xe4, 0xff, + 0x66, 0xf5, 0x0a, 0x0e, 0xca, 0x0a, 0xbb, 0xf4, 0x4a, 0x3f, 0xc0, 0xe1, 0xdb, 0x84, 0x7b, 0x21, + 0x5b, 0x2c, 0xaf, 0xdb, 0x4b, 0xb5, 0x48, 0x5a, 0x7d, 0x91, 0x4e, 0x41, 0xaf, 0x2a, 0xef, 0xd2, + 0xe3, 0xc9, 0xd7, 0x2e, 0xec, 0xbd, 0x2c, 0x1a, 0x30, 0x02, 0xd8, 0x2b, 0xf7, 0xd3, 0x78, 0xd4, + 0xec, 0xac, 0xf5, 0x53, 0x18, 0x99, 0xdb, 0xd2, 0x8a, 0x42, 0x27, 0x9f, 0x7f, 0xfe, 0xf9, 0xa2, + 0x8d, 0xe8, 0x3d, 0x3b, 0x9f, 0xd9, 0xa5, 0xd0, 0xf6, 0x0a, 0xd9, 0x9c, 0x4c, 0x05, 0xac, 0xf4, + 0xd0, 0x86, 0xb5, 0xa6, 0xd6, 0x86, 0xb5, 0xad, 0x6f, 0x81, 0xc5, 0x85, 0x4c, 0xc0, 0x3c, 0xe8, + 0xa9, 0xd9, 0x1a, 0x0f, 0x37, 0x4d, 0xbc, 0x04, 0x8d, 0x37, 0x27, 0x0b, 0x8c, 0x29, 0x31, 0x43, + 0x7a, 0xb7, 0x81, 0x51, 0x0f, 0x25, 0x20, 0x3e, 0xdc, 0x7a, 0xe3, 0xca, 0x81, 0xef, 0x42, 0x39, + 0x92, 0x94, 0x07, 0x74, 0xd0, 0xa0, 0x70, 0x55, 0x78, 0x4e, 0xa6, 0x4f, 0x88, 0x70, 0xa3, 0x16, + 0xb4, 0xcd, 0x69, 0x2c, 0x7e, 0x9b, 0xd3, 0xdc, 0xe9, 0x2d, 0x6e, 0x12, 0x29, 0x9a, 0x93, 0xe9, + 0x73, 0xfd, 0xfb, 0xda, 0x24, 0x3f, 0xd6, 0x26, 0xf9, 0xb5, 0x36, 0xc9, 0xb7, 0xdf, 0xe6, 0x0d, + 0xb7, 0x27, 0xff, 0x18, 0x67, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xe6, 0x7c, 0x66, 0xa9, + 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ElectionClient is the client API for Election service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ElectionClient interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. 
+ Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) +} + +type electionClient struct { + cc *grpc.ClientConn +} + +func NewElectionClient(cc *grpc.ClientConn) ElectionClient { + return &electionClient{cc} +} + +func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) { + out := new(CampaignResponse) + err := c.cc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) { + out := new(ProclaimResponse) + err := c.cc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) { + out := new(LeaderResponse) + err := c.cc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) { + stream, err := c.cc.NewStream(ctx, &_Election_serviceDesc.Streams[0], "/v3electionpb.Election/Observe", opts...) 
+ if err != nil { + return nil, err + } + x := &electionObserveClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Election_ObserveClient interface { + Recv() (*LeaderResponse, error) + grpc.ClientStream +} + +type electionObserveClient struct { + grpc.ClientStream +} + +func (x *electionObserveClient) Recv() (*LeaderResponse, error) { + m := new(LeaderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) { + out := new(ResignResponse) + err := c.cc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ElectionServer is the etcd API for Election service. +type ElectionServer interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(context.Context, *LeaderRequest) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(*LeaderRequest, Election_ObserveServer) error + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. 
+ Resign(context.Context, *ResignRequest) (*ResignResponse, error) +} + +// UnimplementedElectionServer can be embedded to have forward compatible implementations. +type UnimplementedElectionServer struct{} + +func (*UnimplementedElectionServer) Campaign(ctx context.Context, req *CampaignRequest) (*CampaignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Campaign not implemented") +} + +func (*UnimplementedElectionServer) Proclaim(ctx context.Context, req *ProclaimRequest) (*ProclaimResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Proclaim not implemented") +} + +func (*UnimplementedElectionServer) Leader(ctx context.Context, req *LeaderRequest) (*LeaderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Leader not implemented") +} + +func (*UnimplementedElectionServer) Observe(req *LeaderRequest, srv Election_ObserveServer) error { + return status.Errorf(codes.Unimplemented, "method Observe not implemented") +} + +func (*UnimplementedElectionServer) Resign(ctx context.Context, req *ResignRequest) (*ResignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Resign not implemented") +} + +func RegisterElectionServer(s *grpc.Server, srv ElectionServer) { + s.RegisterService(&_Election_serviceDesc, srv) +} + +func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CampaignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Campaign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Campaign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Proclaim_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProclaimRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Proclaim(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Proclaim", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Leader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Leader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Leader(ctx, req.(*LeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(LeaderRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ElectionServer).Observe(m, &electionObserveServer{stream}) +} + +type Election_ObserveServer interface { + Send(*LeaderResponse) error + grpc.ServerStream +} + +type electionObserveServer struct { + grpc.ServerStream +} + +func (x *electionObserveServer) Send(m *LeaderResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ElectionServer).Resign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Resign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Election_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3electionpb.Election", + HandlerType: (*ElectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Campaign", + Handler: _Election_Campaign_Handler, + }, + { + MethodName: "Proclaim", + Handler: _Election_Proclaim_Handler, + }, + { + MethodName: "Leader", + Handler: _Election_Leader_Handler, + }, + { + MethodName: "Resign", + Handler: _Election_Resign_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Observe", + Handler: _Election_Observe_Handler, + ServerStreams: true, + }, + }, + Metadata: "v3election.proto", +} + +func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *CampaignResponse) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *LeaderKey) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *ResignRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *ResignResponse) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *CampaignRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *CampaignResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + 
+func (m *LeaderKey) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *LeaderRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *LeaderResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *ResignRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *ResignResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *ProclaimRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *ProclaimResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func sovV3Election(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} + +func (m *CampaignRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *CampaignResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *LeaderKey) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *LeaderRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *LeaderResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *ResignRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *ResignResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +var ( + ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupV3Election = fmt.Errorf("proto: unexpected end of group") +) diff --git a/server/etcdserver/api/v3election/v3electionpb/v3election.proto 
b/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto similarity index 100% rename from server/etcdserver/api/v3election/v3electionpb/v3election.proto rename to etcd/etcdserver/api/v3election/v3electionpb/v3election.proto diff --git a/server/etcdserver/api/v3lock/doc.go b/etcd/etcdserver/api/v3lock/doc.go similarity index 100% rename from server/etcdserver/api/v3lock/doc.go rename to etcd/etcdserver/api/v3lock/doc.go diff --git a/etcd/etcdserver/api/v3lock/lock.go b/etcd/etcdserver/api/v3lock/lock.go new file mode 100644 index 00000000000..5f63b8962b2 --- /dev/null +++ b/etcd/etcdserver/api/v3lock/lock.go @@ -0,0 +1,57 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3lock + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockServer struct { + c *clientv3.Client +} + +func NewLockServer(c *clientv3.Client) v3lockpb.LockServer { + return &lockServer{c} +} + +func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + s, err := concurrency.NewSession( + ls.c, + concurrency.WithLease(clientv3.LeaseID(req.Lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + m := concurrency.NewMutex(s, string(req.Name)) + if err = m.Lock(ctx); err != nil { + return nil, err + } + return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil +} + +func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + resp, err := ls.c.Delete(ctx, string(req.Key)) + if err != nil { + return nil, err + } + return &v3lockpb.UnlockResponse{Header: resp.Header}, nil +} diff --git a/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go similarity index 90% rename from server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go rename to etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go index 27be5acc558..26594f21d4a 100644 --- a/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go +++ b/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -1,5 +1,5 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: server/etcdserver/api/v3lock/v3lockpb/v3lock.proto +// Code generated by protoc-gen-grpc-gateway. +// source: etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto /* Package v3lockpb is a reverse proxy. 
@@ -10,10 +10,11 @@ package gw import ( "context" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" "io" "net/http" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb" + "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" @@ -21,18 +22,19 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) // Suppress "imported and not used" errors var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join + +var ( + _ io.Reader + _ status.Status + _ = runtime.String + _ = utilities.NewDoubleArray + _ = descriptor.ForMessage +) func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq v3lockpb.LockRequest @@ -48,7 +50,6 @@ func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, clien msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -65,7 +66,6 @@ func local_request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := server.Lock(ctx, &protoReq) return msg, metadata, err - } func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -82,7 +82,6 @@ func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, cli msg, 
err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -99,20 +98,15 @@ func local_request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshale msg, err := server.Unlock(ctx, &protoReq) return msg, metadata, err - } // v3lockpb.RegisterLockHandlerServer registers the http handlers for service Lock to "mux". // UnaryRPC :call v3lockpb.LockServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLockHandlerFromEndpoint instead. func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3lockpb.LockServer) error { - mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -120,7 +114,6 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Lock_Lock_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -128,14 +121,11 @@ func RegisterLockHandlerServer(ctx context.Context, mux 
*runtime.ServeMux, serve } forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -143,7 +133,6 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Lock_Unlock_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -151,7 +140,6 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -194,7 +182,6 @@ func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "LockClient" to call the correct interceptors. 
func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error { - mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -212,7 +199,6 @@ func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -232,7 +218,6 @@ func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil diff --git a/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go new file mode 100644 index 00000000000..c6edbc60a31 --- /dev/null +++ b/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -0,0 +1,365 @@ +// Code generated by protoc-gen-gogo. +// source: v3lock.proto + +package v3lockpb + +import ( + context "context" + "encoding/json" + fmt "fmt" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + etcdserverpb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LockRequest struct { + // name is the identifier for the distributed shared lock to be acquired. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // lease is the ID of the lease that will be attached to ownership of the + // lock. If the lease expires or is revoked and currently holds the lock, + // the lock is automatically released. Calls to Lock with the same lease will + // be treated as a single acquisition; locking twice with the same lease is a + // no-op. + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LockRequest) Reset() { *m = LockRequest{} } +func (m *LockRequest) String() string { return proto.CompactTextString(m) } +func (*LockRequest) ProtoMessage() {} +func (*LockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_52389b3e2f253201, []int{0} +} + +func (m *LockRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LockRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // key is a key that will exist on etcd for the duration that the Lock caller + // owns the lock. Users should not modify this key or the lock may exhibit + // undefined behavior. 
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *LockResponse) Reset() { *m = LockResponse{} } +func (m *LockResponse) String() string { return proto.CompactTextString(m) } +func (*LockResponse) ProtoMessage() {} +func (*LockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_52389b3e2f253201, []int{1} +} + +func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LockResponse) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockRequest struct { + // key is the lock ownership key granted by Lock. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } +func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } +func (*UnlockRequest) ProtoMessage() {} +func (*UnlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_52389b3e2f253201, []int{2} +} + +func (m *UnlockRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } +func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } +func (*UnlockResponse) ProtoMessage() {} +func (*UnlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_52389b3e2f253201, []int{3} +} + +func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest") + proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse") + proto.RegisterType((*UnlockRequest)(nil), 
"v3lockpb.UnlockRequest") + proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse") +} + +func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) } + +var fileDescriptor_52389b3e2f253201 = []byte{ + // 330 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, + 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, + 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x3e, 0xb5, 0x24, 0x39, + 0x45, 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2, + 0x2f, 0x2a, 0x48, 0x86, 0x2a, 0x90, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x2b, 0x49, 0xcc, + 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, 0xc8, 0x2a, 0x99, 0x73, 0x71, 0xfb, + 0xe4, 0x27, 0x67, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25, + 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x39, + 0xa9, 0x89, 0xc5, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x10, 0x8e, 0x52, 0x18, 0x17, + 0x0f, 0x44, 0x63, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x09, 0x17, 0x5b, 0x46, 0x6a, 0x62, + 0x4a, 0x6a, 0x11, 0x58, 0x2f, 0xb7, 0x91, 0x8c, 0x1e, 0xb2, 0x7b, 0xf4, 0x60, 0xea, 0x3c, 0xc0, + 0x6a, 0x82, 0xa0, 0x6a, 0x85, 0x04, 0xb8, 0x98, 0xb3, 0x53, 0x2b, 0xc1, 0x26, 0xf3, 0x04, 0x81, + 0x98, 0x4a, 0x8a, 0x5c, 0xbc, 0xa1, 0x79, 0x39, 0x48, 0x4e, 0x82, 0x2a, 0x61, 0x44, 0x28, 0x71, + 0xe3, 0xe2, 0x83, 0x29, 0xa1, 0xc4, 0x72, 0xa3, 0x0d, 0x8c, 0x5c, 0x2c, 0x20, 0x3f, 0x08, 0xf9, + 0x43, 0x69, 0x51, 0x3d, 0x58, 0x60, 0xeb, 0x21, 0x05, 0x8a, 0x94, 0x18, 0xba, 0x30, 0xc4, 0x34, + 0x25, 0x89, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x09, 0x29, 0xf1, 0xea, 0x97, 0x19, 0xeb, 0x83, 0x14, + 0x80, 0x09, 0x2b, 0x46, 0x2d, 0xa1, 0x70, 0x2e, 0x36, 0x88, 0x0b, 0x85, 0xc4, 0x11, 0x7a, 0x51, + 0xbc, 0x25, 0x25, 0x81, 0x29, 0x01, 0x35, 
0x56, 0x0a, 0x6c, 0xac, 0x88, 0x12, 0x3f, 0xdc, 0xd8, + 0xd2, 0x3c, 0xa8, 0xc1, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, + 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, 0x78, 0x34, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x4a, 0x4d, 0xca, 0xbb, 0x36, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LockClient is the client API for Lock service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LockClient interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associate with the owner expires. + Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) +} + +type lockClient struct { + cc *grpc.ClientConn +} + +func NewLockClient(cc *grpc.ClientConn) LockClient { + return &lockClient{cc} +} + +func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { + out := new(LockResponse) + err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { + out := new(UnlockResponse) + err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LockServer is the etcd API for Lock service. +type LockServer interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associate with the owner expires. + Lock(context.Context, *LockRequest) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) +} + +// UnimplementedLockServer can be embedded to have forward compatible implementations. 
+type UnimplementedLockServer struct{} + +func (*UnimplementedLockServer) Lock(ctx context.Context, req *LockRequest) (*LockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Lock not implemented") +} + +func (*UnimplementedLockServer) Unlock(ctx context.Context, req *UnlockRequest) (*UnlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Unlock not implemented") +} + +func RegisterLockServer(s *grpc.Server, srv LockServer) { + s.RegisterService(&_Lock_serviceDesc, srv) +} + +func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Lock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Lock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Lock(ctx, req.(*LockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Unlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Unlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lock_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3lockpb.Lock", + HandlerType: (*LockServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Lock", + Handler: _Lock_Lock_Handler, + }, + { + MethodName: "Unlock", + Handler: _Lock_Unlock_Handler, + }, + }, + Streams: 
[]grpc.StreamDesc{}, + Metadata: "v3lock.proto", +} + +func (m *LockRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *LockResponse) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *LockRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *LockResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *UnlockRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *UnlockResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func sovV3Lock(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} + +func (m *LockRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *LockResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *UnlockRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *UnlockResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +var ( + ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupV3Lock = fmt.Errorf("proto: unexpected end of group") +) diff --git a/server/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto similarity index 100% rename from server/etcdserver/api/v3lock/v3lockpb/v3lock.proto rename to etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto diff --git a/etcd/etcdserver/api/v3rpc/auth.go b/etcd/etcdserver/api/v3rpc/auth.go new file mode 100644 index 00000000000..17ce3262c21 --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/auth.go @@ -0,0 +1,166 @@ +// Copyright 
2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "context" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +type AuthServer struct { + authenticator etcdserver.Authenticator +} + +func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer { + return &AuthServer{authenticator: s} +} + +func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { + resp, err := as.authenticator.AuthEnable(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { + resp, err := as.authenticator.AuthDisable(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) { + resp, err := as.authenticator.AuthStatus(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { + resp, err := as.authenticator.Authenticate(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, 
error) { + resp, err := as.authenticator.RoleAdd(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + resp, err := as.authenticator.RoleDelete(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := as.authenticator.RoleGet(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + resp, err := as.authenticator.RoleList(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err := as.authenticator.RoleRevokePermission(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := as.authenticator.RoleGrantPermission(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := as.authenticator.UserAdd(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := as.authenticator.UserDelete(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserGet(ctx context.Context, r 
*pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := as.authenticator.UserGet(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + resp, err := as.authenticator.UserList(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := as.authenticator.UserGrantRole(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := as.authenticator.UserRevokeRole(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} + +func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + resp, err := as.authenticator.UserChangePassword(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + return resp, nil +} diff --git a/server/etcdserver/api/v3rpc/codec.go b/etcd/etcdserver/api/v3rpc/codec.go similarity index 93% rename from server/etcdserver/api/v3rpc/codec.go rename to etcd/etcdserver/api/v3rpc/codec.go index d599ff63cc3..42cef4462c1 100644 --- a/server/etcdserver/api/v3rpc/codec.go +++ b/etcd/etcdserver/api/v3rpc/codec.go @@ -20,12 +20,10 @@ type codec struct{} func (c *codec) Marshal(v interface{}) ([]byte, error) { b, err := proto.Marshal(v.(proto.Message)) - sentBytes.Add(float64(len(b))) return b, err } func (c *codec) Unmarshal(data []byte, v interface{}) error { - receivedBytes.Add(float64(len(data))) return proto.Unmarshal(data, v.(proto.Message)) } diff --git a/server/etcdserver/api/v3rpc/header.go b/etcd/etcdserver/api/v3rpc/header.go similarity 
index 80% rename from server/etcdserver/api/v3rpc/header.go rename to etcd/etcdserver/api/v3rpc/header.go index a8f1f92cf99..4bb62f6850c 100644 --- a/server/etcdserver/api/v3rpc/header.go +++ b/etcd/etcdserver/api/v3rpc/header.go @@ -15,28 +15,27 @@ package v3rpc import ( - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/apply" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" ) type header struct { clusterID int64 - memberID int64 - sg apply.RaftStatusGetter + memberID int64 // 本节点的ID + sg etcdserver.RaftStatusGetter rev func() int64 } func newHeader(s *etcdserver.EtcdServer) header { return header{ clusterID: int64(s.Cluster().ID()), - memberID: int64(s.MemberId()), + memberID: int64(s.ID()), sg: s, rev: func() int64 { return s.KV().Rev() }, } } -// fill populates pb.ResponseHeader using etcdserver information +// fill 填充pb.使用etcdserver信息的ResponseHeader func (h *header) fill(rh *pb.ResponseHeader) { if rh == nil { panic("unexpected nil resp.Header") diff --git a/etcd/etcdserver/api/v3rpc/interceptor.go b/etcd/etcdserver/api/v3rpc/interceptor.go new file mode 100644 index 00000000000..0fc4ebecae8 --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/interceptor.go @@ -0,0 +1,200 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import ( + "context" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const ( + maxNoLeaderCnt = 3 + warnUnaryRequestLatency = 300 * time.Millisecond + snapshotMethod = "/etcdserverpb.Maintenance/Snapshot" +) + +type streamsMap struct { + mu sync.Mutex + streams map[grpc.ServerStream]struct{} +} + +func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if !api.IsCapabilityEnabled(api.V3rpcCapability) { // 是否启用了v3rpc + return nil, rpctypes.ErrGRPCNotCapable + } + // 集群包含自己,自己是learner, + if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) { + return nil, rpctypes.ErrGPRCNotSupportedForLearner + } + + md, ok := metadata.FromIncomingContext(ctx) + if ok { + // data, _ := json.Marshal(md) + // s.Logger().Info("-", zap.String("metadata", string(data))) + // hasleader + if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { + if s.Leader() == types.ID(raft.None) { + return nil, rpctypes.ErrGRPCNoLeader + } + } + } + + return handler(ctx, req) + } +} + +func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor { + smap := monitorLeader(s) + + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if !api.IsCapabilityEnabled(api.V3rpcCapability) { + return rpctypes.ErrGRPCNotCapable + } + + if s.IsMemberExist(s.ID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // 除了快照,学习者不支持流RPC + return rpctypes.ErrGPRCNotSupportedForLearner + } + + md, ok := 
metadata.FromIncomingContext(ss.Context()) + if ok { + ks := md[rpctypes.MetadataRequireLeaderKey] // hasleader + if len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { + if s.Leader() == types.ID(raft.None) { + return rpctypes.ErrGRPCNoLeader + } + + ctx := newCancellableContext(ss.Context()) + ss = serverStreamWithCtx{ctx: ctx, ServerStream: ss} + + smap.mu.Lock() + smap.streams[ss] = struct{}{} + smap.mu.Unlock() + + defer func() { + smap.mu.Lock() + delete(smap.streams, ss) + smap.mu.Unlock() + // TODO: investigate whether the reason for cancellation here is useful to know + ctx.Cancel(nil) + }() + } + } + + return handler(srv, ss) + } +} + +// cancellableContext wraps a context with new cancellable context that allows a +// specific cancellation error to be preserved and later retrieved using the +// Context.Err() function. This is so downstream context users can disambiguate +// the reason for the cancellation which could be from the client (for example) +// or from this interceptor code. +type cancellableContext struct { + context.Context + + lock sync.RWMutex + cancel context.CancelFunc + cancelReason error +} + +func newCancellableContext(parent context.Context) *cancellableContext { + ctx, cancel := context.WithCancel(parent) + return &cancellableContext{ + Context: ctx, + cancel: cancel, + } +} + +// Cancel stores the cancellation reason and then delegates to context.WithCancel +// against the parent context. +func (c *cancellableContext) Cancel(reason error) { + c.lock.Lock() + c.cancelReason = reason + c.lock.Unlock() + c.cancel() +} + +// Err will return the preserved cancel reason error if present, and will +// otherwise return the underlying error from the parent context. 
+func (c *cancellableContext) Err() error { + c.lock.RLock() + defer c.lock.RUnlock() + if c.cancelReason != nil { + return c.cancelReason + } + return c.Context.Err() +} + +type serverStreamWithCtx struct { + grpc.ServerStream + + // ctx is used so that we can preserve a reason for cancellation. + ctx *cancellableContext +} + +func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx } + +func monitorLeader(s *etcdserver.EtcdServer) *streamsMap { + smap := &streamsMap{ + streams: make(map[grpc.ServerStream]struct{}), + } + + s.GoAttach(func() { + election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond + noLeaderCnt := 0 + + for { + select { + case <-s.StoppingNotify(): + return + case <-time.After(election): + if s.Leader() == types.ID(raft.None) { + noLeaderCnt++ + } else { + noLeaderCnt = 0 + } + + // We are more conservative on canceling existing streams. Reconnecting streams + // cost much more than just rejecting new requests. So we wait until the member + // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams. + if noLeaderCnt >= maxNoLeaderCnt { + smap.mu.Lock() + for ss := range smap.streams { + if ssWithCtx, ok := ss.(serverStreamWithCtx); ok { + ssWithCtx.ctx.Cancel(rpctypes.ErrGRPCNoLeader) + <-ss.Context().Done() + } + } + smap.streams = make(map[grpc.ServerStream]struct{}) + smap.mu.Unlock() + } + } + } + }) + + return smap +} diff --git a/etcd/etcdserver/api/v3rpc/key.go b/etcd/etcdserver/api/v3rpc/key.go new file mode 100644 index 00000000000..301d038129f --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/key.go @@ -0,0 +1,287 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3rpc implements etcd v3 RPC system based on gRPC. +package v3rpc + +import ( + "context" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/adt" +) + +type kvServer struct { + hdr header + kv etcdserver.RaftKV + // maxTxnOps is the max operations per txn. + // e.g suppose maxTxnOps = 128. + // Txn.Success can have at most 128 operations, + // and Txn.Failure can have at most 128 operations. + maxTxnOps uint +} + +func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer { + return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps} +} + +func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + if err := checkPutRequest(r); err != nil { + return nil, err + } + + resp, err := s.kv.Put(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + s.hdr.fill(resp.Header) + return resp, nil +} + +// DeleteRange 从键值存储中删除给定的范围 +// 删除请求增加键值存储的revision ,并在事件历史中为每个被删除的key生成一个删除事件 +func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + if err := checkDeleteRequest(r); err != nil { + return nil, err + } + + resp, err := s.kv.DeleteRange(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + s.hdr.fill(resp.Header) + return resp, nil +} + +// Txn 在单个事务中处理多个请求一个事务中请求增加键值存储的z evisio n ,并为每个完成的请求生成一个带有相同 +// revision 的事件不允许在一个txn 中多次修改同一个key. 
+func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil { + return nil, err + } + // check for forbidden put/del overlaps after checking request to avoid quadratic blowup + if _, _, err := checkIntervals(r.Success); err != nil { + return nil, err + } + if _, _, err := checkIntervals(r.Failure); err != nil { + return nil, err + } + + resp, err := s.kv.Txn(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + s.hdr.fill(resp.Header) + return resp, nil +} + +// Compact 压缩在etcd键值存储中的事件历史 +// 键值存储应该定期压缩,否则事件历史会无限制地持续增长,消耗系统的大量磁盘空间 +func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { + resp, err := s.kv.Compact(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + s.hdr.fill(resp.Header) + return resp, nil +} + +func checkPutRequest(r *pb.PutRequest) error { + if len(r.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + if r.IgnoreValue && len(r.Value) != 0 { + return rpctypes.ErrGRPCValueProvided + } + if r.IgnoreLease && r.Lease != 0 { + return rpctypes.ErrGRPCLeaseProvided + } + return nil +} + +func checkDeleteRequest(r *pb.DeleteRangeRequest) error { + if len(r.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + return nil +} + +func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error { + opc := len(r.Compare) + if opc < len(r.Success) { + opc = len(r.Success) + } + if opc < len(r.Failure) { + opc = len(r.Failure) + } + if opc > maxTxnOps { + return rpctypes.ErrGRPCTooManyOps + } + + for _, c := range r.Compare { + if len(c.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + } + for _, u := range r.Success { + if err := checkRequestOp(u, maxTxnOps-opc); err != nil { + return err + } + } + for _, u := range r.Failure { + if err := checkRequestOp(u, maxTxnOps-opc); err != nil { + return err + } + } + + return nil +} + +// checkIntervals tests whether puts and deletes overlap for a list of ops. 
If +// there is an overlap, returns an error. If no overlap, return put and delete +// sets for recursive evaluation. +func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) { + dels := adt.NewIntervalTree() + + // collect deletes from this level; build first to check lower level overlapped puts + for _, req := range reqs { + ok := req.RequestOp_RequestDeleteRange != nil + tv := req.RequestOp_RequestDeleteRange + if !ok { + continue + } + dreq := tv.RequestDeleteRange + if dreq == nil { + continue + } + var iv adt.Interval + if len(dreq.RangeEnd) != 0 { + iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd)) + } else { + iv = adt.NewStringAffinePoint(string(dreq.Key)) + } + dels.Insert(iv, struct{}{}) + } + + // collect children puts/deletes + puts := make(map[string]struct{}) + for _, req := range reqs { + ok := req.RequestOp_RequestTxn != nil + tv := req.RequestOp_RequestTxn + if !ok { + continue + } + putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success) + if err != nil { + return nil, dels, err + } + putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure) + if err != nil { + return nil, dels, err + } + for k := range putsThen { + if _, ok := puts[k]; ok { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + if dels.Intersects(adt.NewStringAffinePoint(k)) { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + puts[k] = struct{}{} + } + for k := range putsElse { + if _, ok := puts[k]; ok { + // if key is from putsThen, overlap is OK since + // either then/else are mutually exclusive + if _, isSafe := putsThen[k]; !isSafe { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + } + if dels.Intersects(adt.NewStringAffinePoint(k)) { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + puts[k] = struct{}{} + } + dels.Union(delsThen, adt.NewStringAffineInterval("\x00", "")) + dels.Union(delsElse, adt.NewStringAffineInterval("\x00", "")) + } + + // collect and check this level's puts + for 
_, req := range reqs { + ok := req.RequestOp_RequestPut != nil + tv := req.RequestOp_RequestPut + if !ok || tv.RequestPut == nil { + continue + } + k := string(tv.RequestPut.Key) + if _, ok := puts[k]; ok { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + if dels.Intersects(adt.NewStringAffinePoint(k)) { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + puts[k] = struct{}{} + } + return puts, dels, nil +} + +func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error { + if u.RequestOp_RequestRange != nil { + return checkRangeRequest(u.RequestOp_RequestRange.RequestRange) + } + + if u.RequestOp_RequestPut != nil { + return checkPutRequest(u.RequestOp_RequestPut.RequestPut) + } + if u.RequestOp_RequestDeleteRange != nil { + return checkDeleteRequest(u.RequestOp_RequestDeleteRange.RequestDeleteRange) + } + if u.RequestOp_RequestTxn != nil { + return checkTxnRequest(u.RequestOp_RequestTxn.RequestTxn, maxTxnOps) + } + + // empty op / nil entry + return rpctypes.ErrGRPCKeyNotFound +} + +// -------------------------------------------- OVER ---------------------------------------------------- + +var _ = NewQuotaKVServer + +func checkRangeRequest(r *pb.RangeRequest) error { + if len(r.Key) == 0 { + return rpctypes.ErrGRPCEmptyKey + } + return nil +} + +// Range etcdctl get a +func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if err := checkRangeRequest(r); err != nil { + return nil, err + } + + resp, err := s.kv.Range(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + + s.hdr.fill(resp.Header) + return resp, nil +} diff --git a/etcd/etcdserver/api/v3rpc/maintenance.go b/etcd/etcdserver/api/v3rpc/maintenance.go new file mode 100644 index 00000000000..2a90a6636d6 --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/maintenance.go @@ -0,0 +1,306 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "context" + "crypto/sha256" + "io" + "time" + + "github.com/dustin/go-humanize" + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" +) + +type KVGetter interface { + KV() mvcc.WatchableKV +} + +type BackendGetter interface { + Backend() backend.Backend +} + +type Alarmer interface { + Alarms() []*pb.AlarmMember + Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) +} + +type Downgrader interface { + Downgrade(ctx context.Context, dr *pb.DowngradeRequest) (*pb.DowngradeResponse, error) +} + +type LeaderTransferrer interface { + MoveLeader(ctx context.Context, lead, target uint64) error +} + +type AuthGetter interface { + AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) + AuthStore() auth.AuthStore +} + +type ClusterStatusGetter interface { + IsLearner() bool +} + +type maintenanceServer struct { + lg *zap.Logger + rg etcdserver.RaftStatusGetter // 获取raft状态 + kg KVGetter + bg BackendGetter + a Alarmer + lt LeaderTransferrer + hdr header + cs ClusterStatusGetter + d Downgrader +} + +func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { + srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, kg: s, bg: s, a: s, 
lt: s, hdr: newHeader(s), cs: s, d: s} + if srv.lg == nil { + srv.lg = zap.NewNop() + } + return &authMaintenanceServer{srv, s} +} + +// Defragment 碎片整理 +func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { + ms.lg.Info("开始 碎片整理") + err := ms.bg.Backend().Defrag() + if err != nil { + ms.lg.Warn("碎片整理是啊比", zap.Error(err)) + return nil, err + } + ms.lg.Info("结束 碎片整理") + return &pb.DefragmentResponse{}, nil +} + +// big enough size to hold >1 OS pages in the buffer +const snapshotSendBufferSize = 32 * 1024 + +// MoveLeader OK +func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + if ms.rg.ID() != ms.rg.Leader() { + return nil, rpctypes.ErrGRPCNotLeader + } + + if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil { + return nil, togRPCError(err) + } + return &pb.MoveLeaderResponse{}, nil +} + +func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { + resp, err := ms.d.Downgrade(ctx, r) + if err != nil { + return nil, togRPCError(err) + } + resp.Header = &pb.ResponseHeader{} + ms.hdr.fill(resp.Header) + return resp, nil +} + +type authMaintenanceServer struct { + *maintenanceServer + ag AuthGetter +} + +func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { + authInfo, err := ams.ag.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + + return ams.ag.AuthStore().IsAdminPermitted(authInfo) +} + +func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { + if err := ams.isAuthenticated(ctx); err != nil { + return nil, err + } + + return ams.maintenanceServer.Defragment(ctx, sr) +} + +func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { + if err := ams.isAuthenticated(ctx); err != nil { + return nil, err 
+ } + + return ams.maintenanceServer.Hash(ctx, r) +} + +func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { + if err := ams.isAuthenticated(ctx); err != nil { + return nil, err + } + return ams.maintenanceServer.HashKV(ctx, r) +} + +func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { + return ams.maintenanceServer.Status(ctx, ar) +} + +func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + return ams.maintenanceServer.MoveLeader(ctx, tr) +} + +func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { + return ams.maintenanceServer.Downgrade(ctx, r) +} + +// ------------------------------------ OVER --------------------------------------------------------------- + +// Alarm ok +func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp, err := ms.a.Alarm(ctx, ar) + if err != nil { + return nil, togRPCError(err) + } + if resp.Header == nil { + resp.Header = &pb.ResponseHeader{} + } + ms.hdr.fill(resp.Header) + return resp, nil +} + +// Status ok +func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { + hdr := &pb.ResponseHeader{} + ms.hdr.fill(hdr) + resp := &pb.StatusResponse{ + Header: hdr, + Version: version.Version, + Leader: uint64(ms.rg.Leader()), + RaftIndex: ms.rg.CommittedIndex(), + RaftAppliedIndex: ms.rg.AppliedIndex(), + RaftTerm: ms.rg.Term(), + DbSize: ms.bg.Backend().Size(), + DbSizeInUse: ms.bg.Backend().SizeInUse(), + IsLearner: ms.cs.IsLearner(), + } + if resp.Leader == raft.None { + resp.Errors = append(resp.Errors, etcdserver.ErrNoLeader.Error()) + } + for _, a := range ms.a.Alarms() { + resp.Errors = append(resp.Errors, a.String()) + } + return resp, nil +} + +// Snapshot 获取一个快照 +func 
(ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { + if err := ams.isAuthenticated(srv.Context()); err != nil { + return err + } + + return ams.maintenanceServer.Snapshot(sr, srv) +} + +// Snapshot 获取一个快照 +func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { + snap := ms.bg.Backend().Snapshot() // 快照结构体, 初始化 发送超时 + pr, pw := io.Pipe() + defer pr.Close() + + go func() { + snap.WriteTo(pw) + if err := snap.Close(); err != nil { + ms.lg.Warn("关闭快照失败", zap.Error(err)) + } + pw.Close() + }() + + // record快照恢复操作中用于完整性检查的快照数据的SHA摘要 + h := sha256.New() + + sent := int64(0) + total := snap.Size() + size := humanize.Bytes(uint64(total)) + + start := time.Now() + ms.lg.Info("往客户端发送一个快照", zap.Int64("total-bytes", total), zap.String("size", size)) + for total-sent > 0 { + // buffer只保存从流中读取的字节,响应大小是OS页面大小的倍数,从boltdb中获取 + // 例如4 * 1024 + // Send并不等待消息被客户端接收.因此,在Send操作之间不能安全地重用缓冲区 + + buf := make([]byte, snapshotSendBufferSize) + + n, err := io.ReadFull(pr, buf) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return togRPCError(err) + } + sent += int64(n) + + // 如果total是x * snapshotSendBufferSize.这种反应是有可能的.RemainingBytes = = 0 + // 分别地.这是否使etcd响应发送到客户端nil在原型和客户端停止接收从快照流之前etcd发送快照SHA? 
+ // 不,客户端仍然会收到非nil响应,直到etcd用EOF关闭流 + resp := &pb.SnapshotResponse{ + RemainingBytes: uint64(total - sent), + Blob: buf[:n], + } + if err = srv.Send(resp); err != nil { + return togRPCError(err) + } + h.Write(buf[:n]) + } + + sha := h.Sum(nil) + + ms.lg.Info("往客户端发送一个快照校验和", zap.Int64("total-bytes", total), zap.Int("checksum-size", len(sha))) + hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} + if err := srv.Send(hresp); err != nil { + return togRPCError(err) + } + + ms.lg.Info("成功发送快照到客户端", zap.Int64("total-bytes", total), + zap.String("size", size), + zap.String("took", humanize.Time(start)), + ) + return nil +} + +// Hash ok +func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { + h, rev, err := ms.kg.KV().Hash() + if err != nil { + return nil, togRPCError(err) + } + resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h} + ms.hdr.fill(resp.Header) + return resp, nil +} + +// HashKV OK +func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { + h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision) + if err != nil { + return nil, togRPCError(err) + } + + resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev} + ms.hdr.fill(resp.Header) + return resp, nil +} diff --git a/etcd/etcdserver/api/v3rpc/over_grpc.go b/etcd/etcdserver/api/v3rpc/over_grpc.go new file mode 100644 index 00000000000..e2e69b22ec8 --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/over_grpc.go @@ -0,0 +1,87 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "crypto/tls" + "math" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/credentials" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) + +const ( + grpcOverheadBytes = 512 * 1024 + maxStreams = math.MaxUint32 + maxSendBytes = math.MaxInt32 +) + +func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnaryServerInterceptor, gopts ...grpc.ServerOption) *grpc.Server { + var opts []grpc.ServerOption + opts = append(opts, grpc.CustomCodec(&codec{})) + if tls != nil { + bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls}) + opts = append(opts, grpc.Creds(bundle.TransportCredentials())) + } + // 单次通信 + chainUnaryInterceptors := []grpc.UnaryServerInterceptor{ + newUnaryInterceptor(s), // 元信息校验 + grpc_prometheus.UnaryServerInterceptor, + } + if interceptor != nil { + chainUnaryInterceptors = append(chainUnaryInterceptors, interceptor) + } + // 流式通信 + chainStreamInterceptors := []grpc.StreamServerInterceptor{ + newStreamInterceptor(s), + grpc_prometheus.StreamServerInterceptor, + } + + if s.Cfg.ExperimentalEnableDistributedTracing { // 默认false + chainUnaryInterceptors = append(chainUnaryInterceptors, 
otelgrpc.UnaryServerInterceptor(s.Cfg.ExperimentalTracerOptions...)) + chainStreamInterceptors = append(chainStreamInterceptors, otelgrpc.StreamServerInterceptor(s.Cfg.ExperimentalTracerOptions...)) + } + + opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(chainUnaryInterceptors...))) + opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(chainStreamInterceptors...))) + + opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) + opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) + opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) + + grpcServer := grpc.NewServer(append(opts, gopts...)...) + + pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) // kv存储 + pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) // 监听 + pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) // 租约 + pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) // 集群 + pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) // 认证 + pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) // 维护 + + hsrv := health.NewServer() + hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) // 设置初始状态 + healthpb.RegisterHealthServer(grpcServer, hsrv) + grpc_prometheus.Register(grpcServer) + + return grpcServer +} diff --git a/etcd/etcdserver/api/v3rpc/over_lease.go b/etcd/etcdserver/api/v3rpc/over_lease.go new file mode 100644 index 00000000000..d5666b711c2 --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/over_lease.go @@ -0,0 +1,173 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "context" + "io" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" +) + +type LeaseServer struct { + lg *zap.Logger + hdr header + le etcdserver.Lessor +} + +func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { + srv := &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)} + if srv.lg == nil { + srv.lg = zap.NewNop() + } + return srv +} + +func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { + resp, err := ls.le.LeaseTimeToLive(ctx, rr) + if err != nil && err != lease.ErrLeaseNotFound { + return nil, togRPCError(err) + } + if err == lease.ErrLeaseNotFound { + resp = &pb.LeaseTimeToLiveResponse{ + Header: &pb.ResponseHeader{}, + ID: rr.ID, + TTL: -1, + } + } + ls.hdr.fill(resp.Header) + return resp, nil +} + +// LeaseLeases 获取当前节点上的所有租约 +func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { + resp, err := ls.le.LeaseLeases(ctx, rr) + if err != nil && err != lease.ErrLeaseNotFound { + return nil, togRPCError(err) + } + if err == lease.ErrLeaseNotFound { + resp = &pb.LeaseLeasesResponse{ + Header: &pb.ResponseHeader{}, + Leases: []*pb.LeaseStatus{}, + } + } + ls.hdr.fill(resp.Header) + return resp, nil +} + +// LeaseKeepAlive OK +func (ls *LeaseServer) LeaseKeepAlive(stream 
pb.Lease_LeaseKeepAliveServer) (err error) { + errc := make(chan error, 1) + go func() { + errc <- ls.leaseKeepAlive(stream) + }() + select { + case err = <-errc: + case <-stream.Context().Done(): + err = stream.Context().Err() + if err == context.Canceled { + err = rpctypes.ErrGRPCNoLeader + } + } + return err +} + +func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { + for { + req, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + if isClientCtxErr(stream.Context().Err(), err) { + ls.lg.Debug("从gRPC流获取lease keepalive请求失败", zap.Error(err)) + } else { + ls.lg.Warn("从gRPC流获取lease keepalive请求失败", zap.Error(err)) + } + return err + } + + // 在发送更新请求之前创建报头.这可以确保修订严格小于或等于本地etcd(当本地etcd是leader时)或远端leader发生keepalive. + // 如果没有这个,租约可能在rev 3被撤销,但客户端可以看到在rev 4成功的keepalive. + resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}} + ls.hdr.fill(resp.Header) + + ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID)) + if err == lease.ErrLeaseNotFound { + err = nil + ttl = 0 + } + + if err != nil { + return togRPCError(err) + } + + resp.TTL = ttl + err = stream.Send(resp) + if err != nil { + if isClientCtxErr(stream.Context().Err(), err) { + ls.lg.Debug("往grpc Stream发送lease Keepalive响应失败", zap.Error(err)) + } else { + ls.lg.Warn("往grpc Stream发送lease Keepalive响应失败", zap.Error(err)) + } + return err + } + } +} + +type quotaLeaseServer struct { + pb.LeaseServer + qa quotaAlarmer +} + +// LeaseGrant 创建租约 +func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + if err := s.qa.check(ctx, cr); err != nil { // 检查存储空间是否还有空余,以及抛出警报 + return nil, err + } + return s.LeaseServer.LeaseGrant(ctx, cr) +} + +// LeaseGrant 创建租约 +func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + resp, err := ls.le.LeaseGrant(ctx, cr) + if err != nil { + return nil, togRPCError(err) + } + 
ls.hdr.fill(resp.Header) + return resp, nil +} + +func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { + return "aLeaseServer{ + NewLeaseServer(s), + quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()}, + } +} + +// LeaseRevoke OK +func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + resp, err := ls.le.LeaseRevoke(ctx, rr) + if err != nil { + return nil, togRPCError(err) + } + ls.hdr.fill(resp.Header) + return resp, nil +} diff --git a/etcd/etcdserver/api/v3rpc/over_member.go b/etcd/etcdserver/api/v3rpc/over_member.go new file mode 100644 index 00000000000..71e8509bf0c --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/over_member.go @@ -0,0 +1,143 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import ( + "context" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +type ClusterServerInterFace interface { + MemberList(ctx context.Context, response *pb.MemberListRequest) (*pb.MemberListResponse, error) + MemberAdd(ctx context.Context, response *pb.MemberAddRequest) (*pb.MemberAddResponse, error) + MemberRemove(ctx context.Context, response *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) + MemberUpdate(ctx context.Context, response *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) + MemberPromote(ctx context.Context, response *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) +} + +var _ ClusterServerInterFace = &ClusterServer{} + +type ClusterServer struct { + cluster api.Cluster + server *etcdserver.EtcdServer +} + +func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer { + return &ClusterServer{ + cluster: s.Cluster(), + server: s, + } +} + +// MemberList OK +func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { + if r.Linearizable { + if err := cs.server.LinearizableReadNotify(ctx); err != nil { + return nil, togRPCError(err) + } + } + membs := membersToProtoMembers(cs.cluster.Members()) + return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil +} + +// MemberAdd ok +func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { + urls, err := types.NewURLs(r.PeerURLs) + if err != nil { + return nil, rpctypes.ErrGRPCMemberBadURLs + } + + now := time.Now() + var m *membership.Member + if r.IsLearner { + m = membership.NewMemberAsLearner("", urls, "", &now) + } else { + m = 
membership.NewMember("", urls, "", &now) + } + membs, merr := cs.server.AddMember(ctx, *m) + if merr != nil { + return nil, togRPCError(merr) + } + + return &pb.MemberAddResponse{ + Header: cs.header(), + Member: &pb.Member{ + ID: uint64(m.ID), + PeerURLs: m.PeerURLs, + IsLearner: m.IsLearner, + }, + Members: membersToProtoMembers(membs), + }, nil +} + +func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { + membs, err := cs.server.RemoveMember(ctx, r.ID) + if err != nil { + return nil, togRPCError(err) + } + return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil +} + +func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { + m := membership.Member{ + ID: types.ID(r.ID), + RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, + } + membs, err := cs.server.UpdateMember(ctx, m) + if err != nil { + return nil, togRPCError(err) + } + return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil +} + +func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) { + membs, err := cs.server.PromoteMember(ctx, r.ID) + if err != nil { + return nil, togRPCError(err) + } + return &pb.MemberPromoteResponse{ + Header: cs.header(), + Members: membersToProtoMembers(membs), + }, nil +} + +func (cs *ClusterServer) header() *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(cs.cluster.ID()), + MemberId: uint64(cs.server.ID()), + RaftTerm: cs.server.Term(), + } +} + +func membersToProtoMembers(membs []*membership.Member) []*pb.Member { + protoMembs := make([]*pb.Member, len(membs)) + for i := range membs { + protoMembs[i] = &pb.Member{ + Name: membs[i].Name, + ID: uint64(membs[i].ID), + PeerURLs: membs[i].PeerURLs, + ClientURLs: membs[i].ClientURLs, + IsLearner: membs[i].IsLearner, + } 
+ } + return protoMembs +} diff --git a/etcd/etcdserver/api/v3rpc/over_quota.go b/etcd/etcdserver/api/v3rpc/over_quota.go new file mode 100644 index 00000000000..e5d07f3cfcf --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/over_quota.go @@ -0,0 +1,73 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "context" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +// 配额 + +type quotaKVServer struct { + pb.KVServer + qa quotaAlarmer +} + +type quotaAlarmer struct { + q etcdserver.Quota // 配额计算 + a Alarmer + id types.ID +} + +// check 请求是否满足配额.如果没有足够的空间.忽略请求并发出自由空间警报. 
+func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { + if qa.q.Available(r) { // 检查存储空间 + return nil + } + // 没有存储空间 + req := &pb.AlarmRequest{ + MemberID: uint64(qa.id), + Action: pb.AlarmRequest_ACTIVATE, // check + Alarm: pb.AlarmType_NOSPACE, + } + qa.a.Alarm(ctx, req) + return rpctypes.ErrGRPCNoSpace +} + +func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { + return &quotaKVServer{ + NewKVServer(s), + quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()}, + } +} + +func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + if err := s.qa.check(ctx, r); err != nil { + return nil, err + } + return s.KVServer.Put(ctx, r) +} + +func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if err := s.qa.check(ctx, r); err != nil { + return nil, err + } + return s.KVServer.Txn(ctx, r) +} diff --git a/etcd/etcdserver/api/v3rpc/over_watch.go b/etcd/etcdserver/api/v3rpc/over_watch.go new file mode 100644 index 00000000000..d7a5e636481 --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/over_watch.go @@ -0,0 +1,522 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3rpc + +import ( + "context" + "io" + "math/rand" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" +) + +const minWatchProgressInterval = 100 * time.Millisecond + +type watchServer struct { + lg *zap.Logger + clusterID int64 + memberID int64 + maxRequestBytes int + sg etcdserver.RaftStatusGetter + watchable mvcc.WatchableKV + ag AuthGetter +} + +var ( + progressReportInterval = 10 * time.Minute + progressReportIntervalMu sync.RWMutex +) + +// SetProgressReportInterval 更新进度汇报间隔 +func SetProgressReportInterval(newTimeout time.Duration) { + progressReportIntervalMu.Lock() + progressReportInterval = newTimeout + progressReportIntervalMu.Unlock() +} + +// We send ctrl response inside the read loop. We do not want +// send to block read, but we still want ctrl response we sent to +// be serialized. Thus we use a buffered chan to solve the problem. +// A small buffer should be OK for most cases, since we expect the +// ctrl requests are infrequent. +const ctrlStreamBufLen = 16 + +// serverWatchStream is an etcd etcd side stream. It receives requests +// from client side gRPC stream. It receives watch events from mvcc.WatchStream, +// and creates responses that forwarded to gRPC stream. +// It also forwards control message like watch created and canceled. +type serverWatchStream struct { + lg *zap.Logger + clusterID int64 + memberID int64 + maxRequestBytes int + sg etcdserver.RaftStatusGetter + watchable mvcc.WatchableKV + ag AuthGetter + gRPCStream pb.Watch_WatchServer // 与客户端进行连接的 Stream + watchStream mvcc.WatchStream // key 变动的消息管道 + ctrlStream chan *pb.WatchResponse // 用来发送控制响应的Chan,比如watcher创建和取消. 
+ + // mu protects progress, prevKV, fragment + mu sync.RWMutex + // tracks the watchID that stream might need to send progress to + // TODO: combine progress and prevKV into a single struct? + progress map[mvcc.WatchID]bool // 该类型的 watch,服务端会定时发送类似心跳消息 + prevKV map[mvcc.WatchID]bool // 该类型表明,对于/a/b 这样的监听范围, 如果 b 变化了, 前缀/a也需要通知 + fragment map[mvcc.WatchID]bool // 该类型表明,传输数据量大于阈值,需要拆分发送 + closec chan struct{} + wg sync.WaitGroup // 等待send loop 完成 +} + +// Watch 创建一个watcher stream +func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { + sws := serverWatchStream{ + lg: ws.lg, + clusterID: ws.clusterID, + memberID: ws.memberID, + maxRequestBytes: ws.maxRequestBytes, + sg: ws.sg, // 获取状态 + watchable: ws.watchable, + ag: ws.ag, // 认证服务 + gRPCStream: stream, // + watchStream: ws.watchable.NewWatchStream(), + ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), // 用来发送控制响应的Chan,比如watcher创建和取消. + progress: make(map[mvcc.WatchID]bool), + prevKV: make(map[mvcc.WatchID]bool), + fragment: make(map[mvcc.WatchID]bool), + closec: make(chan struct{}), + } + + sws.wg.Add(1) + go func() { + sws.sendLoop() // 回复变更事件、阻塞 + sws.wg.Done() + }() + + errc := make(chan error, 1) + // 理想情况下,recvLoop也会使用sws.wg.当stream. context (). done()被关闭时,流的recv可能会继续阻塞,因为它使用了不同的上下文,导致调用sws.close()时死锁. 
+ go func() { + if rerr := sws.recvLoop(); rerr != nil { + if isClientCtxErr(stream.Context().Err(), rerr) { + sws.lg.Debug("从gRPC流接收watch请求失败", zap.Error(rerr)) + } else { + sws.lg.Warn("从gRPC流接收watch请求失败", zap.Error(rerr)) + } + errc <- rerr + } + }() + select { + case err = <-errc: + if err == context.Canceled { + err = rpctypes.ErrGRPCWatchCanceled + } + close(sws.ctrlStream) + case <-stream.Context().Done(): + err = stream.Context().Err() + if err == context.Canceled { + err = rpctypes.ErrGRPCWatchCanceled + } + } + + sws.close() + return err +} + +func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { + authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) + if err != nil { + return false + } + if authInfo == nil { + // if auth is enabled, IsRangePermitted() can cause an error + authInfo = &auth.AuthInfo{} + } + return sws.ag.AuthStore().IsRangePermitted(authInfo, []byte(wcr.Key), []byte(wcr.RangeEnd)) == nil +} + +// 接收watch请求,可以是创建、取消、和 +func (sws *serverWatchStream) recvLoop() error { + for { + // 同一个连接,可以接收多次不同的消息 + req, err := sws.gRPCStream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + if req.WatchRequest_CreateRequest != nil { // 创建watcher ✅ + uv := &pb.WatchRequest_CreateRequest{} + uv = req.WatchRequest_CreateRequest + if uv.CreateRequest == nil { + continue + } + + creq := uv.CreateRequest + if len(creq.Key) == 0 { // a + // \x00 is the smallest key + creq.Key = string([]byte{0}) + } + if len(creq.RangeEnd) == 0 { + // force nil since watchstream.Watch distinguishes + // between nil and []byte{} for single key / >= + creq.RangeEnd = "" + } + if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { + // support >= key queries + creq.RangeEnd = string([]byte{}) + } + // 权限校验 + if !sws.isWatchPermitted(creq) { // 当前请求 权限不允许 + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: creq.WatchId, + Canceled: true, + Created: true, + CancelReason: 
rpctypes.ErrGRPCPermissionDenied.Error(), + } + + select { + case sws.ctrlStream <- wr: + continue + case <-sws.closec: + return nil + } + } + + filters := FiltersFromRequest(creq) // server端 从watch请求中 获取一些过滤调价 + + wsrev := sws.watchStream.Rev() // 获取当前kv的修订版本 + rev := creq.StartRevision // 监听从哪个修订版本之后的变更,没穿就是当前 + if rev == 0 { + rev = wsrev + 1 + } + id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), []byte(creq.Key), []byte(creq.RangeEnd), rev, filters...) + if err == nil { + sws.mu.Lock() + if creq.ProgressNotify { // 默认FALSE + sws.progress[id] = true + } + if creq.PrevKv { // 默认FALSE + sws.prevKV[id] = true + } + if creq.Fragment { // 拆分大的事件 + sws.fragment[id] = true + } + sws.mu.Unlock() + } + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(wsrev), // + WatchId: int64(id), + Created: true, + Canceled: err != nil, + } + if err != nil { + wr.CancelReason = err.Error() + } + select { + case sws.ctrlStream <- wr: // 客户端创建watch的响应 + case <-sws.closec: + return nil + } + } + if req.WatchRequest_CancelRequest != nil { // 删除watcher ✅ + uv := &pb.WatchRequest_CancelRequest{} + uv = req.WatchRequest_CancelRequest + if uv.CancelRequest != nil { + id := uv.CancelRequest.WatchId + err := sws.watchStream.Cancel(mvcc.WatchID(id)) + if err == nil { + sws.ctrlStream <- &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: id, + Canceled: true, + } + sws.mu.Lock() + delete(sws.progress, mvcc.WatchID(id)) + delete(sws.prevKV, mvcc.WatchID(id)) + delete(sws.fragment, mvcc.WatchID(id)) + sws.mu.Unlock() + } + } + } + if req.WatchRequest_ProgressRequest != nil { + uv := &pb.WatchRequest_ProgressRequest{} + uv = req.WatchRequest_ProgressRequest + if uv.ProgressRequest != nil { + sws.ctrlStream <- &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: -1, // 如果发送了密钥更新,则忽略下一次进度更新 + } + } + } + } +} + +// 往watch stream 发送消息 +func (sws *serverWatchStream) sendLoop() { + // 当前活动的watcher + ids := 
make(map[mvcc.WatchID]struct{}) + // TODO 同一个流,可能会有不同的watcher? + pending := make(map[mvcc.WatchID][]*pb.WatchResponse) + + interval := GetProgressReportInterval() // interval 10m44s + progressTicker := time.NewTicker(interval) + + defer func() { + progressTicker.Stop() + }() + + for { + select { + case wresp, ok := <-sws.watchStream.Chan(): // watchStream Channel中提取event发送 + if !ok { + return + } + evs := wresp.Events + events := make([]*mvccpb.Event, len(evs)) + sws.mu.RLock() + needPrevKV := sws.prevKV[wresp.WatchID] + sws.mu.RUnlock() + for i := range evs { + events[i] = &evs[i] + if needPrevKV && !IsCreateEvent(evs[i]) { + opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} + r, err := sws.watchable.Range(context.TODO(), []byte(evs[i].Kv.Key), nil, opt) + if err == nil && len(r.KVs) != 0 { + events[i].PrevKv = &(r.KVs[0]) + } + } + } + + canceled := wresp.CompactRevision != 0 + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(wresp.Revision), + WatchId: int64(wresp.WatchID), + Events: events, + CompactRevision: wresp.CompactRevision, + Canceled: canceled, + } + _, okID := ids[wresp.WatchID] + if !okID { // 当前id 不活跃 + // 缓冲,如果ID尚未公布 + wrs := append(pending[wresp.WatchID], wr) + pending[wresp.WatchID] = wrs + continue + } + + sws.mu.RLock() + fragmented, ok := sws.fragment[wresp.WatchID] // 是否 拆分大的数据 + sws.mu.RUnlock() + + var serr error + if !fragmented && !ok { + serr = sws.gRPCStream.Send(wr) + } else { + serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send) + } + + if serr != nil { + if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) { + sws.lg.Debug("未能向gRPC流发送watch响应", zap.Error(serr)) + } else { + sws.lg.Warn("向gRPC流发送watch响应失败", zap.Error(serr)) + } + return + } + + sws.mu.Lock() + if len(evs) > 0 && sws.progress[wresp.WatchID] { + // 如果发送了密钥更新,则忽略下一次进度更新 + sws.progress[wresp.WatchID] = false + } + sws.mu.Unlock() + + case c, ok := <-sws.ctrlStream: // 流控制信号 ✅ + // 给client回复的响应 + if !ok { + return // channel关闭了 + } + + if 
err := sws.gRPCStream.Send(c); err != nil { + if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { + sws.lg.Debug("未能向gRPC流发送watch控制响应", zap.Error(err)) + } else { + sws.lg.Warn("向gRPC流发送watch控制响应失败", zap.Error(err)) + } + return + } + + // 创建 追踪id + wid := mvcc.WatchID(c.WatchId) // 第一次创建watcher ,id 是0 + if c.Canceled { + delete(ids, wid) + continue + } + if c.Created { + ids[wid] = struct{}{} + for _, v := range pending[wid] { + if err := sws.gRPCStream.Send(v); err != nil { + if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { + sws.lg.Debug("未能向gRPC流发送待处理的watch响应", zap.Error(err)) + } else { + sws.lg.Warn("未能向gRPC流发送待处理的watch响应", zap.Error(err)) + } + return + } + } + delete(pending, wid) + } + + case <-progressTicker.C: // 定时同步状态 + sws.mu.Lock() + for id, ok := range sws.progress { + if ok { + sws.watchStream.RequestProgress(id) + } + sws.progress[id] = true + } + sws.mu.Unlock() + + case <-sws.closec: + return + } + } +} + +func sendFragments(wr *pb.WatchResponse, maxRequestBytes int, sendFunc func(*pb.WatchResponse) error) error { + // no need to fragment if total request size is smaller + // than max request limit or response contains only one event + if wr.Size() < maxRequestBytes || len(wr.Events) < 2 { + return sendFunc(wr) + } + + ow := *wr + ow.Events = make([]*mvccpb.Event, 0) + ow.Fragment = true + + var idx int + for { + cur := ow + for _, ev := range wr.Events[idx:] { + cur.Events = append(cur.Events, ev) + if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes { + cur.Events = cur.Events[:len(cur.Events)-1] + break + } + idx++ + } + if idx == len(wr.Events) { + // last response has no more fragment + cur.Fragment = false + } + if err := sendFunc(&cur); err != nil { + return err + } + if !cur.Fragment { + break + } + } + return nil +} + +// NewWatchServer 运行初 运行一次 +func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { + srv := &watchServer{ + lg: s.Cfg.Logger, + clusterID: int64(s.Cluster().ID()), + memberID: int64(s.ID()), + 
maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes), + sg: s, + watchable: s.Watchable(), + ag: s, + } + if srv.lg == nil { + srv.lg = zap.NewNop() + } + if s.Cfg.WatchProgressNotifyInterval > 0 { + if s.Cfg.WatchProgressNotifyInterval < minWatchProgressInterval { + srv.lg.Warn("将watch 进度通知时间间隔调整为最小周期", zap.Duration("min-watch-progress-notify-interval", minWatchProgressInterval)) + s.Cfg.WatchProgressNotifyInterval = minWatchProgressInterval + } + SetProgressReportInterval(s.Cfg.WatchProgressNotifyInterval) + } + return srv +} + +func GetProgressReportInterval() time.Duration { + progressReportIntervalMu.RLock() + interval := progressReportInterval + progressReportIntervalMu.RUnlock() + + // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not + // send progress notifications to watchers around the same time even when watchers + // are created around the same time (which is common when a client restarts itself). + jitter := time.Duration(rand.Int63n(int64(interval) / 10)) + + return interval + jitter +} + +func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc { + filters := make([]mvcc.FilterFunc, 0, len(creq.Filters)) + for _, ft := range creq.Filters { + switch ft { + case pb.WatchCreateRequest_NOPUT: + filters = append(filters, filterNoPut) + case pb.WatchCreateRequest_NODELETE: + filters = append(filters, filterNoDelete) + default: + } + } + return filters +} + +// 当前的修订版本 +func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(sws.clusterID), + MemberId: uint64(sws.memberID), + Revision: rev, + RaftTerm: sws.sg.Term(), + } +} + +func IsCreateEvent(e mvccpb.Event) bool { + return e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision +} + +func (sws *serverWatchStream) close() { + sws.watchStream.Close() + close(sws.closec) + sws.wg.Wait() +} + +func filterNoDelete(e mvccpb.Event) bool { + return e.Type == mvccpb.DELETE +} + 
+func filterNoPut(e mvccpb.Event) bool { + return e.Type == mvccpb.PUT +} diff --git a/etcd/etcdserver/api/v3rpc/util.go b/etcd/etcdserver/api/v3rpc/util.go new file mode 100644 index 00000000000..ed3dcf76aa9 --- /dev/null +++ b/etcd/etcdserver/api/v3rpc/util.go @@ -0,0 +1,148 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3rpc + +import ( + "context" + "strings" + + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var toGRPCErrorMap = map[error]error{ + membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, + membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, + membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, + membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, + etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, + etcdserver.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, + + mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, + mvcc.ErrFutureRev: 
rpctypes.ErrGRPCFutureRev, + etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, + etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace, + etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests, + + etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader, + etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader, + etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, + etcdserver.ErrStopped: rpctypes.ErrGRPCStopped, + etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout, + etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, + etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, + etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, + etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, + etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt, + etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, + + etcdserver.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, + etcdserver.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, + etcdserver.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, + etcdserver.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, + etcdserver.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, + + lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, + lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, + lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge, + + auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist, + auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist, + auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist, + auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty, + auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound, + auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist, + auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound, + auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty, + auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed, + 
auth.ErrPermissionNotGiven: rpctypes.ErrGRPCPermissionNotGiven, + auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied, + auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted, + auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted, + auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled, + auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken, + auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt, + auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision, + + // In sync with status.FromContextError + context.Canceled: rpctypes.ErrGRPCCanceled, + context.DeadlineExceeded: rpctypes.ErrGRPCDeadlineExceeded, +} + +func togRPCError(err error) error { + // let gRPC etcd convert to codes.Canceled, codes.DeadlineExceeded + if err == context.Canceled || err == context.DeadlineExceeded { + return err + } + grpcErr, ok := toGRPCErrorMap[err] + if !ok { + return status.Error(codes.Unknown, err.Error()) + } + return grpcErr +} + +func isClientCtxErr(ctxErr error, err error) bool { + if ctxErr != nil { + return true + } + + ev, ok := status.FromError(err) + if !ok { + return false + } + + switch ev.Code() { + case codes.Canceled, codes.DeadlineExceeded: + // client-side context cancel or deadline exceeded + // "rpc error: code = Canceled desc = context canceled" + // "rpc error: code = DeadlineExceeded desc = context deadline exceeded" + return true + case codes.Unavailable: + msg := ev.Message() + // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected") + // "rpc error: code = Unavailable desc = client disconnected" + if msg == "client disconnected" { + return true + } + // "grpc/transport.ClientTransport.CloseStream" on canceled streams + // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL") + if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") { + return true + } + } + return false +} + +// 在v3.4中learner 被允许提供可序列化的读取和端点状态服务. 
+func isRPCSupportedForLearner(req interface{}) bool { + switch r := req.(type) { + case *pb.StatusRequest: + return true + case *pb.RangeRequest: + return r.Serializable + default: + return false + } +} diff --git a/etcd/etcdserver/api_downgrade.go b/etcd/etcdserver/api_downgrade.go new file mode 100644 index 00000000000..f63032c0f4d --- /dev/null +++ b/etcd/etcdserver/api_downgrade.go @@ -0,0 +1,56 @@ +package etcdserver + +import ( + "context" + "fmt" + "net/http" + "strconv" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "go.uber.org/zap" +) + +func (s *EtcdServer) DowngradeEnabledHandler() http.Handler { + return &downgradeEnabledHandler{ + lg: s.Logger(), + cluster: s.cluster, + server: s, + } +} + +type downgradeEnabledHandler struct { + lg *zap.Logger + cluster api.Cluster + server *EtcdServer +} + +func (h *downgradeEnabledHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.Header().Set("Allow", http.MethodGet) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) + + if r.URL.Path != DowngradeEnabledPath { + http.Error(w, "bad path", http.StatusBadRequest) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), h.server.Cfg.ReqTimeout()) + defer cancel() + + // serve with linearized downgrade info + if err := h.server.linearizeReadNotify(ctx); err != nil { + http.Error(w, fmt.Sprintf("failed linearized read: %v", err), + http.StatusInternalServerError) + return + } + enabled := h.server.DowngradeInfo().Enabled + w.Header().Set("Content-Type", "text/plain") + w.Write([]byte(strconv.FormatBool(enabled))) +} + +func (s *EtcdServer) DowngradeInfo() *membership.DowngradeInfo { return s.cluster.DowngradeInfo() } diff --git a/etcd/etcdserver/api_hashkv.go b/etcd/etcdserver/api_hashkv.go new file mode 100644 index 00000000000..48bbe166806 
--- /dev/null +++ b/etcd/etcdserver/api_hashkv.go @@ -0,0 +1,120 @@ +package etcdserver + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/ls-2018/etcd_cn/etcd/mvcc" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "go.uber.org/zap" +) + +const PeerHashKVPath = "/members/hashkv" + +type hashKVHandler struct { + lg *zap.Logger + server *EtcdServer +} + +func (s *EtcdServer) HashKVHandler() http.Handler { + return &hashKVHandler{lg: s.Logger(), server: s} +} + +func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.Header().Set("Allow", http.MethodGet) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + if r.URL.Path != PeerHashKVPath { + http.Error(w, "bad path", http.StatusBadRequest) + return + } + + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, "读取body失败", http.StatusBadRequest) + return + } + + req := &pb.HashKVRequest{} + if err := json.Unmarshal(b, req); err != nil { + h.lg.Warn("反序列化请求数据失败", zap.Error(err)) + http.Error(w, "反序列化请求数据失败", http.StatusBadRequest) + return + } + hash, rev, compactRev, err := h.server.KV().HashByRev(req.Revision) + if err != nil { + h.lg.Warn( + "获取hash值失败", + zap.Int64("requested-revision", req.Revision), + zap.Error(err), + ) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: hash, CompactRevision: compactRev} + respBytes, err := json.Marshal(resp) + if err != nil { + h.lg.Warn("failed to marshal hashKV response", zap.Error(err)) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("X-Etcd-Cluster-ID", h.server.Cluster().ID().String()) + w.Header().Set("Content-Type", "application/json") + w.Write(respBytes) +} + +// getPeerHashKVHTTP 
通过对给定网址的http调用在给定的rev中获取kv存储的哈希值. +func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int64) (*pb.HashKVResponse, error) { + cc := &http.Client{Transport: s.peerRt} + hashReq := &pb.HashKVRequest{Revision: rev} // revision是哈希操作的键值存储修订版. + hashReqBytes, err := json.Marshal(hashReq) + if err != nil { + return nil, err + } + requestUrl := url + PeerHashKVPath + req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes)) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + req.Header.Set("Content-Type", "application/json") + req.Cancel = ctx.Done() + + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusBadRequest { + if strings.Contains(string(b), mvcc.ErrCompacted.Error()) { + return nil, rpctypes.ErrCompacted + } + if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) { + return nil, rpctypes.ErrFutureRev + } + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unknown error: %s", string(b)) + } + + hashResp := &pb.HashKVResponse{} + if err := json.Unmarshal(b, hashResp); err != nil { + return nil, err + } + return hashResp, nil +} diff --git a/etcd/etcdserver/api_lease.go b/etcd/etcdserver/api_lease.go new file mode 100644 index 00000000000..04247950e11 --- /dev/null +++ b/etcd/etcdserver/api_lease.go @@ -0,0 +1,15 @@ +package etcdserver + +import ( + "net/http" + + "github.com/ls-2018/etcd_cn/etcd/lease/leasehttp" +) + +func (s *EtcdServer) LeaseHandler() http.Handler { + if s.lessor == nil { + return nil + } + return leasehttp.NewHandler(s.lessor, s.ApplyWait) +} +func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } diff --git a/etcd/etcdserver/api_raft_status_getter.go b/etcd/etcdserver/api_raft_status_getter.go new file mode 100644 index 00000000000..f0989fb971a --- /dev/null 
+++ b/etcd/etcdserver/api_raft_status_getter.go @@ -0,0 +1,23 @@ +package etcdserver + +import "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + +type RaftStatusGetter interface { + ID() types.ID + Leader() types.ID + CommittedIndex() uint64 + AppliedIndex() uint64 + Term() uint64 +} + +func (s *EtcdServer) ID() types.ID { return s.id } + +func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) } + +func (s *EtcdServer) Lead() uint64 { return s.getLead() } + +func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() } + +func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() } + +func (s *EtcdServer) Term() uint64 { return s.getTerm() } diff --git a/etcd/etcdserver/apply_auth.go b/etcd/etcdserver/apply_auth.go new file mode 100644 index 00000000000..c8644dd09c1 --- /dev/null +++ b/etcd/etcdserver/apply_auth.go @@ -0,0 +1,248 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "context" + "sync" + + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" +) + +type authApplierV3 struct { + applierV3 // applierV3backend + as auth.AuthStore // 内循环时,提供认证token + lessor lease.Lessor // 租约管理者 + // mu serializes Apply so that user isn't corrupted and so that + // serialized requests don't leak data from TOCTOU errors + mu sync.Mutex + authInfo auth.AuthInfo +} + +func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 { + return &authApplierV3{applierV3: base, as: as, lessor: lessor} +} + +func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { + aa.mu.Lock() + defer aa.mu.Unlock() + if r.Header != nil { + // 当internalRaftRequest没有header时,向后兼容3.0之前的版本 + aa.authInfo.Username = r.Header.Username + aa.authInfo.Revision = r.Header.AuthRevision + } + if needAdminPermission(r) { + if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return &applyResult{err: err} + } + } + ret := aa.applierV3.Apply(r, shouldApplyV3) + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return ret +} + +func (aa *authApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(r.Key)); err != nil { + return nil, nil, err + } + + if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil { + // The specified lease is already attached with a key that cannot + // backend written by this user. It means the user cannot revoke the + // lease so attaching the lease to the newly written key should + // backend forbidden. 
+ return nil, nil, err + } + + if r.PrevKv { + err := aa.as.IsRangePermitted(&aa.authInfo, []byte(r.Key), nil) + if err != nil { + return nil, nil, err + } + } + return aa.applierV3.Put(ctx, txn, r) +} + +func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if err := aa.as.IsRangePermitted(&aa.authInfo, []byte(r.Key), []byte(r.RangeEnd)); err != nil { + return nil, err + } + return aa.applierV3.Range(ctx, txn, r) +} + +func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, []byte(r.Key), []byte(r.RangeEnd)); err != nil { + return nil, err + } + if r.PrevKv { + err := aa.as.IsRangePermitted(&aa.authInfo, []byte(r.Key), []byte(r.RangeEnd)) // {a,b true} + if err != nil { + return nil, err + } + } + + return aa.applierV3.DeleteRange(txn, r) +} + +func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { + for _, requ := range reqs { + if requ.RequestOp_RequestRange != nil { + tv := requ.RequestOp_RequestRange + if tv.RequestRange == nil { + continue + } + if err := as.IsRangePermitted(ai, []byte(tv.RequestRange.Key), []byte(tv.RequestRange.RangeEnd)); err != nil { + return err + } + + } + if requ.RequestOp_RequestPut != nil { + tv := requ.RequestOp_RequestPut + if tv.RequestPut == nil { + continue + } + + if err := as.IsPutPermitted(ai, []byte(tv.RequestPut.Key)); err != nil { + return err + } + + } + if requ.RequestOp_RequestDeleteRange != nil { + tv := requ.RequestOp_RequestDeleteRange + if tv.RequestDeleteRange == nil { + continue + } + + if tv.RequestDeleteRange.PrevKv { + err := as.IsRangePermitted(ai, []byte(tv.RequestDeleteRange.Key), []byte(tv.RequestDeleteRange.RangeEnd)) + if err != nil { + return err + } + } + + err := as.IsDeleteRangePermitted(ai, []byte(tv.RequestDeleteRange.Key), []byte(tv.RequestDeleteRange.RangeEnd)) + if err != nil 
{ + return err + } + } + } + + return nil +} + +func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { + for _, c := range rt.Compare { + if err := as.IsRangePermitted(ai, []byte(c.Key), []byte(c.RangeEnd)); err != nil { + return err + } + } + if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { + return err + } + return checkTxnReqsPermission(as, ai, rt.Failure) +} + +func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil { + return nil, nil, err + } + return aa.applierV3.Txn(ctx, rt) +} + +func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil { // 检查租约是否存在 + return nil, err + } + return aa.applierV3.LeaseRevoke(lc) +} + +// 检查租约更新的key是否有权限操作 +func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error { + lease := aa.lessor.Lookup(leaseID) + if lease != nil { + for _, key := range lease.Keys() { + if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil { + return err + } + } + } + + return nil +} + +func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + err := aa.as.IsAdminPermitted(&aa.authInfo) + if err != nil && r.Name != aa.authInfo.Username { + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return &pb.AuthUserGetResponse{}, err + } + + return aa.applierV3.UserGet(r) +} + +func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + err := aa.as.IsAdminPermitted(&aa.authInfo) + if err != nil && !aa.as.UserHasRole(aa.authInfo.Username, r.Role) { + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return &pb.AuthRoleGetResponse{}, err + } + + return aa.applierV3.RoleGet(r) +} + +func needAdminPermission(r *pb.InternalRaftRequest) bool { + switch { + case r.AuthEnable != nil: + return 
true + case r.AuthDisable != nil: + return true + case r.AuthStatus != nil: + return true + case r.AuthUserAdd != nil: + return true + case r.AuthUserDelete != nil: + return true + case r.AuthUserChangePassword != nil: + return true + case r.AuthUserGrantRole != nil: + return true + case r.AuthUserRevokeRole != nil: + return true + case r.AuthRoleAdd != nil: + return true + case r.AuthRoleGrantPermission != nil: + return true + case r.AuthRoleRevokePermission != nil: + return true + case r.AuthRoleDelete != nil: + return true + case r.AuthUserList != nil: + return true + case r.AuthRoleList != nil: + return true + default: + return false + } +} diff --git a/server/etcdserver/apply_v2.go b/etcd/etcdserver/apply_v2.go similarity index 80% rename from server/etcdserver/apply_v2.go rename to etcd/etcdserver/apply_v2.go index c9e4c3e87b0..9f019cca0fc 100644 --- a/server/etcdserver/apply_v2.go +++ b/etcd/etcdserver/apply_v2.go @@ -16,26 +16,18 @@ package etcdserver import ( "encoding/json" - "fmt" "path" "time" - "unicode/utf8" "github.com/coreos/go-semver/semver" - - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/server/v3/etcdserver/txn" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/pkg/pbutil" "go.uber.org/zap" ) -const v2Version = "v2" - -// ApplierV2 is the interface for processing V2 raft messages type ApplierV2 interface { Delete(r *RequestV2) Response Post(r *RequestV2) Response @@ -100,7 +92,7 @@ func (a *applierV2store) Put(r *RequestV2, shouldApplyV3 membership.ShouldApplyV // TODO remove v2 version set to avoid the conflict between v2 and v3 in etcd 3.6 if r.Path == membership.StoreClusterVersionKey() { 
if a.cluster != nil { - // persist to backend given v2store can be very stale + // persist to backend given v2store can backend very stale a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability, shouldApplyV3) } return Response{} @@ -121,20 +113,6 @@ func (a *applierV2store) Sync(r *RequestV2) Response { // applyV2Request interprets r as a call to v2store.X // and returns a Response interpreted from v2store.Event func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.ShouldApplyV3) (resp Response) { - stringer := panicAlternativeStringer{ - stringer: r, - alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) }, - } - defer func(start time.Time) { - if !utf8.ValidString(r.Method) { - s.lg.Info("method is not valid utf-8") - return - } - success := resp.Err == nil - txn.ApplySecObserve(v2Version, r.Method, success, time.Since(start)) - txn.WarnOfExpensiveRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, stringer, nil, nil) - }(time.Now()) - switch r.Method { case "POST": return s.applyV2.Post(r) @@ -147,8 +125,8 @@ func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.Shoul case "SYNC": return s.applyV2.Sync(r) default: - // This should never be reached, but just in case: - return Response{Err: errors.ErrUnknownMethod} + // This should never backend reached, but just in case: + return Response{Err: ErrUnknownMethod} } } diff --git a/etcd/etcdserver/apply_v3.go b/etcd/etcdserver/apply_v3.go new file mode 100644 index 00000000000..1141d196a2b --- /dev/null +++ b/etcd/etcdserver/apply_v3.go @@ -0,0 +1,1025 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "bytes" + "context" + "fmt" + "sort" + + "github.com/coreos/go-semver/semver" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + "github.com/ls-2018/etcd_cn/offical/api/v3/membershippb" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" + + "github.com/gogo/protobuf/proto" + "go.uber.org/zap" +) + +type applyResult struct { + resp proto.Message + err error + physc <-chan struct{} // disk、内存都写好数据了 + trace *traceutil.Trace +} + +// applierV3Internal 内部v3 raft 请求 +type applierV3Internal interface { + ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) + ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) + DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) +} + +type applierV3 interface { + Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult + Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) + Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) + DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) 
(*pb.DeleteRangeResponse, error) + Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) + Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) + LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) + LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) + LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) + Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) + Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) + AuthEnable() (*pb.AuthEnableResponse, error) + AuthDisable() (*pb.AuthDisableResponse, error) + AuthStatus() (*pb.AuthStatusResponse, error) + UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) + RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) +} + +type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error + +type applierV3backend struct { + s *EtcdServer + checkPut checkReqFunc + checkRange checkReqFunc +} + 
+func (s *EtcdServer) newApplierV3Backend() applierV3 { + base := &applierV3backend{s: s} + base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error { + return base.checkRequestPut(rv, req) + } + base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error { + return base.checkRequestRange(rv, req) + } + return base +} + +func (s *EtcdServer) newApplierV3Internal() applierV3Internal { + base := &applierV3backend{s: s} + return base +} + +func (s *EtcdServer) newApplierV3() applierV3 { + return newAuthApplierV3(s.AuthStore(), newQuotaApplierV3(s, s.newApplierV3Backend()), s.lessor) +} + +// Put raft 传递之后 实际上将k,v存储到应用内的逻辑 +func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { + resp = &pb.PutResponse{} + resp.Header = &pb.ResponseHeader{} + trace = traceutil.Get(ctx) + // 如果上下文中的trace为空,则创建put跟踪 + if trace.IsEmpty { + trace = traceutil.New("put", + a.s.Logger(), + traceutil.Field{Key: "key", Value: string([]byte(p.Key))}, + traceutil.Field{Key: "req_size", Value: p.Size()}, + ) + } + val, leaseID := p.Value, lease.LeaseID(p.Lease) + if txn == nil { // 写事务 + if leaseID != lease.NoLease { + if l := a.s.lessor.Lookup(leaseID); l == nil { // 查找租约 + return nil, nil, lease.ErrLeaseNotFound + } + } + // watchableStoreTxnWrite[storeTxnWrite] + txn = a.s.KV().Write(trace) + defer txn.End() + } + + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + trace.StepWithFunction(func() { + rr, err = txn.Range(context.TODO(), []byte(p.Key), nil, mvcc.RangeOptions{}) + }, "得到之前的kv对") + + if err != nil { + return nil, nil, err + } + } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, nil, ErrKeyNotFound + } + } + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv { + if rr != nil 
&& len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } + } + resp.Header.Revision = txn.Put([]byte(p.Key), []byte(val), leaseID) + trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) + return resp, trace, nil +} + +func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp := &pb.DeleteRangeResponse{} + resp.Header = &pb.ResponseHeader{} + end := mkGteRange([]byte(dr.RangeEnd)) + + if txn == nil { + txn = a.s.kv.Write(traceutil.TODO()) // 创建写事务 + defer txn.End() + } + + if dr.PrevKv { // + rr, err := txn.Range(context.TODO(), []byte(dr.Key), end, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + if rr != nil { + resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs)) + for i := range rr.KVs { + resp.PrevKvs[i] = &rr.KVs[i] + } + } + } + // storeTxnWrite + resp.Deleted, resp.Header.Revision = txn.DeleteRange([]byte(dr.Key), end) + return resp, nil +} + +func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + trace := traceutil.Get(ctx) + if trace.IsEmpty { + trace = traceutil.New("transaction", a.s.Logger()) + ctx = context.WithValue(ctx, traceutil.TraceKey, trace) + } + isWrite := !isTxnReadonly(rt) + + // When the transaction contains write operations, we use ReadTx instead of + // ConcurrentReadTx to avoid extra overhead of copying buffer. 
+ var txn mvcc.TxnWrite + if isWrite && a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer { + txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.SharedBufReadTxMode, trace)) + } else { + txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.ConcurrentReadTxMode, trace)) + } + + var txnPath []bool + trace.StepWithFunction( + func() { + txnPath = compareToPath(txn, rt) + }, + "compare", + ) + + if isWrite { + trace.AddField(traceutil.Field{Key: "read_only", Value: false}) + if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil { + txn.End() + return nil, nil, err + } + } + if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil { + txn.End() + return nil, nil, err + } + trace.Step("check requests") + txnResp, _ := newTxnResp(rt, txnPath) + + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // backend the revision of the write txn. + if isWrite { + txn.End() + txn = a.s.KV().Write(trace) + } + a.applyTxn(ctx, txn, rt, txnPath, txnResp) + rev := txn.Rev() + if len(txn.Changes()) != 0 { + rev++ + } + txn.End() + + txnResp.Header.Revision = rev + trace.AddField( + traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)}, + traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision}, + ) + return txnResp, trace, nil +} + +// newTxnResp allocates a txn response for a txn request given a path. 
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) { + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + resps := make([]*pb.ResponseOp, len(reqs)) + txnResp = &pb.TxnResponse{ + Responses: resps, + Succeeded: txnPath[0], + Header: &pb.ResponseHeader{}, + } + for i, req := range reqs { + if req.RequestOp_RequestRange != nil { + resps[i] = &pb.ResponseOp{ResponseOp_ResponseRange: &pb.ResponseOp_ResponseRange{}} + } + if req.RequestOp_RequestPut != nil { + resps[i] = &pb.ResponseOp{ResponseOp_ResponsePut: &pb.ResponseOp_ResponsePut{}} + } + if req.RequestOp_RequestDeleteRange != nil { + resps[i] = &pb.ResponseOp{ResponseOp_ResponseDeleteRange: &pb.ResponseOp_ResponseDeleteRange{}} + } + if req.RequestOp_RequestTxn != nil { + resp, txns := newTxnResp(req.RequestOp_RequestTxn.RequestTxn, txnPath[1:]) + resps[i] = &pb.ResponseOp{ResponseOp_ResponseTxn: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}} + txnPath = txnPath[1+txns:] + txnCount += txns + 1 + } + + } + return txnResp, txnCount +} + +func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool { + txnPath := make([]bool, 1) + ops := rt.Success + if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] { + ops = rt.Failure + } + for _, op := range ops { + tv := op.RequestOp_RequestTxn + if tv == nil || tv.RequestTxn == nil { + continue + } + + txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...) + } + return txnPath +} + +func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool { + for _, c := range cmps { + if !applyCompare(rv, c) { + return false + } + } + return true +} + +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. +func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + // TODO: possible optimizations + // * chunk reads for large ranges to conserve memory + // * rewrite rules for common patterns: + // ex. 
"[a, b) createrev > 0" => "limit 1 /\ kvs > 0" + // * caching + rr, err := rv.Range(context.TODO(), []byte(c.Key), mkGteRange([]byte(c.RangeEnd)), mvcc.RangeOptions{}) + if err != nil { + return false + } + if len(rr.KVs) == 0 { + if c.Target == pb.Compare_VALUE { + // Always fail if comparing a value on a key/keys that doesn't exist; + // nil == empty string in grpc; no way to represent missing value + return false + } + return compareKV(c, mvccpb.KeyValue{}) + } + for _, kv := range rr.KVs { + if !compareKV(c, kv) { + return false + } + } + return true +} + +func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { + var result int + rev := int64(0) + switch c.Target { + case pb.Compare_VALUE: + v := []byte{} + + if c.Compare_Value != nil { + v = []byte(c.Compare_Value.Value) + } + + result = bytes.Compare([]byte(ckv.Value), v) + case pb.Compare_CREATE: + if c.Compare_CreateRevision != nil { + rev = c.Compare_CreateRevision.CreateRevision + } + result = compareInt64(ckv.CreateRevision, rev) + case pb.Compare_MOD: + if c.Compare_ModRevision != nil { + rev = c.Compare_ModRevision.ModRevision + } + result = compareInt64(ckv.ModRevision, rev) + case pb.Compare_VERSION: + if c.Compare_Version != nil { + rev = c.Compare_Version.Version + } + result = compareInt64(ckv.Version, rev) + case pb.Compare_LEASE: + if c.Compare_Lease != nil { + rev = c.Compare_Lease.Lease + } + result = compareInt64(ckv.Lease, rev) + } + switch c.Result { + case pb.Compare_EQUAL: + return result == 0 + case pb.Compare_NOT_EQUAL: + return result != 0 + case pb.Compare_GREATER: + return result > 0 + case pb.Compare_LESS: + return result < 0 + } + return true +} + +func (a *applierV3backend) applyTxn(ctx context.Context, txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) { + trace := traceutil.Get(ctx) + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + + lg := a.s.Logger() + for i, req := range reqs { + + if req.RequestOp_RequestRange != nil { + 
respi := tresp.Responses[i].ResponseOp_ResponseRange + tv := req.RequestOp_RequestRange + trace.StartSubTrace( + traceutil.Field{Key: "req_type", Value: "range"}, + traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)}, + traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)}) + resp, err := a.Range(ctx, txn, tv.RequestRange) + if err != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } + respi.ResponseRange = resp + trace.StopSubTrace() + + } + if req.RequestOp_RequestPut != nil { + respi := tresp.Responses[i].ResponseOp_ResponsePut + tv := req.RequestOp_RequestPut + trace.StartSubTrace( + traceutil.Field{Key: "req_type", Value: "put"}, + traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)}, + traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()}) + resp, _, err := a.Put(ctx, txn, tv.RequestPut) + if err != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } + respi.ResponsePut = resp + trace.StopSubTrace() + } + + if req.RequestOp_RequestDeleteRange != nil { + respi := tresp.Responses[i].ResponseOp_ResponseDeleteRange + tv := req.RequestOp_RequestDeleteRange + resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) + if err != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } + respi.ResponseDeleteRange = resp + } + if req.RequestOp_RequestTxn != nil { + resp := tresp.Responses[i].ResponseOp_ResponseTxn.ResponseTxn + tv := req.RequestOp_RequestTxn + applyTxns := a.applyTxn(ctx, txn, tv.RequestTxn, txnPath[1:], resp) + txns += applyTxns + 1 + txnPath = txnPath[applyTxns+1:] + } + + } + return txns +} + +// Compaction 移除kv 历史事件 +func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { + resp := &pb.CompactionResponse{} + resp.Header = &pb.ResponseHeader{} + trace := traceutil.New("compact", + a.s.Logger(), + traceutil.Field{Key: "revision", Value: compaction.Revision}, + ) + + 
ch, err := a.s.KV().Compact(trace, compaction.Revision) + if err != nil { + return nil, ch, nil, err + } + // 获得当前版本.拿哪把key并不重要. + rr, _ := a.s.KV().Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{}) + resp.Header.Revision = rr.Rev + return resp, ch, trace, err +} + +func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) { + a.s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Ver)), api.UpdateCapability, shouldApplyV3) +} + +func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) { + a.s.cluster.UpdateAttributes( + types.ID(r.Member_ID), + membership.Attributes{ + Name: r.MemberAttributes.Name, + ClientURLs: r.MemberAttributes.ClientUrls, + }, + shouldApplyV3, + ) +} + +func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) { + d := membership.DowngradeInfo{Enabled: false} + if r.Enabled { + d = membership.DowngradeInfo{Enabled: true, TargetVersion: r.Ver} + } + a.s.cluster.SetDowngradeInfo(&d, shouldApplyV3) +} + +func (a *quotaApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + ok := a.q.Available(rt) + resp, trace, err := a.applierV3.Txn(ctx, rt) + if err == nil && !ok { + err = ErrNoSpace + } + return resp, trace, err +} + +func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) { + txnCount := 0 + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + for _, req := range reqs { + // tv, ok := req.Request.(*pb.RequestOp_RequestTxn) + tv := req.RequestOp_RequestTxn + if req.RequestOp_RequestTxn != nil && req.RequestOp_RequestTxn.RequestTxn != nil { + txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f) + if err != nil { + return 0, err + } + txnCount += txns + 1 + txnPath = txnPath[txns+1:] + continue + } + if err 
:= f(rv, req); err != nil { + return 0, err + } + } + return txnCount, nil +} + +func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error { + if reqOp.RequestOp_RequestPut == nil { + return nil + } + if reqOp.RequestOp_RequestPut.RequestPut == nil { + return nil + } + + req := reqOp.RequestOp_RequestPut.RequestPut + if req.IgnoreValue || req.IgnoreLease { + // expects previous key-value, error if not exist + rr, err := rv.Range(context.TODO(), []byte(req.Key), nil, mvcc.RangeOptions{}) + if err != nil { + return err + } + if rr == nil || len(rr.KVs) == 0 { + return ErrKeyNotFound + } + } + if lease.LeaseID(req.Lease) != lease.NoLease { + if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil { + return lease.ErrLeaseNotFound + } + } + return nil +} + +func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error { + if reqOp.RequestOp_RequestRange == nil { + return nil + } + if reqOp.RequestOp_RequestRange.RequestRange == nil { + return nil + } + + req := reqOp.RequestOp_RequestRange.RequestRange + switch { + case req.Revision == 0: + return nil + case req.Revision > rv.Rev(): + return mvcc.ErrFutureRev + case req.Revision < rv.FirstRev(): + return mvcc.ErrCompacted + } + return nil +} + +func noSideEffect(r *pb.InternalRaftRequest) bool { + return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil +} + +func removeNeedlessRangeReqs(txn *pb.TxnRequest) { + f := func(ops []*pb.RequestOp) []*pb.RequestOp { + j := 0 + for i := 0; i < len(ops); i++ { + if ops[i].RequestOp_RequestRange != nil { + continue + } + ops[j] = ops[i] + j++ + } + + return ops[:j] + } + + txn.Success = f(txn.Success) + txn.Failure = f(txn.Failure) +} + +// ---------------------------------------- OVER ------------------------------------------------------------ + +func newHeader(s *EtcdServer) *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(s.Cluster().ID()), + MemberId: 
uint64(s.ID()), + Revision: s.KV().Rev(), // 在打开txn时返回KV的修订 + RaftTerm: s.Term(), + } +} + +func compareInt64(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +// 修剪查询到的数据 +func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { + j := 0 + for i := range rr.KVs { + rr.KVs[j] = rr.KVs[i] + if !isPrunable(&rr.KVs[i]) { + j++ + } + } + rr.KVs = rr.KVs[:j] +} + +// Range 👌🏻 +func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + trace := traceutil.Get(ctx) + resp := &pb.RangeResponse{} + resp.Header = &pb.ResponseHeader{} + + if txn == nil { + txn = a.s.kv.Read(mvcc.ConcurrentReadTxMode, trace) // 并发读取,获取事务 + defer txn.End() + } + + limit := r.Limit + // 有序 + if r.SortOrder != pb.RangeRequest_NONE || r.MinModRevision != 0 || r.MaxModRevision != 0 || r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { + // 最大、最小 创建版本、修订版本 + // 获取一切;然后进行排序和截断 + limit = 0 + } + if limit > 0 { + // 获取一个额外的'more'标志 + limit = limit + 1 + } + + ro := mvcc.RangeOptions{ + Limit: limit, // 0 + Rev: r.Revision, // 0 + Count: r.CountOnly, // false + } + // 主要逻辑 + rr, err := txn.Range(ctx, []byte(r.Key), mkGteRange([]byte(r.RangeEnd)), ro) + if err != nil { + return nil, err + } + // 修剪查询到的数据 + if r.MaxModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } + pruneKVs(rr, f) + } + if r.MinModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } + pruneKVs(rr, f) + } + if r.MaxCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } + pruneKVs(rr, f) + } + if r.MinCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } + pruneKVs(rr, f) + } + + sortOrder := r.SortOrder // 默认不排序 + // 默认是请求的key + if r.SortTarget != pb.RangeRequest_KEY && sortOrder == 
pb.RangeRequest_NONE { + // 因为当前mvcc.Range实现返回按字序升序排序的结果,默认情况下,只有当target不是'KEY'时,排序才会升序. + sortOrder = pb.RangeRequest_ASCEND + } + if sortOrder != pb.RangeRequest_NONE { + var sorter sort.Interface + switch { + case r.SortTarget == pb.RangeRequest_KEY: + sorter = &kvSortByKey{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VERSION: + sorter = &kvSortByVersion{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_CREATE: + sorter = &kvSortByCreate{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_MOD: + sorter = &kvSortByMod{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VALUE: + sorter = &kvSortByValue{&kvSort{rr.KVs}} + } + switch { + case sortOrder == pb.RangeRequest_ASCEND: + sort.Sort(sorter) + case sortOrder == pb.RangeRequest_DESCEND: + sort.Sort(sort.Reverse(sorter)) + } + } + + if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { + rr.KVs = rr.KVs[:r.Limit] + resp.More = true + } + trace.Step("筛选键值对并对其排序") + resp.Header.Revision = rr.Rev + resp.Count = int64(rr.Count) + resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs)) + for i := range rr.KVs { + if r.KeysOnly { + rr.KVs[i].Value = "" + } + resp.Kvs[i] = &rr.KVs[i] + } + trace.Step("组装响应") + return resp, nil +} + +func mkGteRange(rangeEnd []byte) []byte { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + return []byte{} + } + return rangeEnd +} + +// 根据不同字段进行比较时,使用不同字段 +type kvSort struct{ kvs []mvccpb.KeyValue } + +func (s *kvSort) Swap(i, j int) { + t := s.kvs[i] + s.kvs[i] = s.kvs[j] + s.kvs[j] = t +} +func (s *kvSort) Len() int { return len(s.kvs) } + +type kvSortByKey struct{ *kvSort } + +func (s *kvSortByKey) Less(i, j int) bool { + return bytes.Compare([]byte(s.kvs[i].Key), []byte(s.kvs[j].Key)) < 0 +} + +type kvSortByVersion struct{ *kvSort } + +func (s *kvSortByVersion) Less(i, j int) bool { + return (s.kvs[i].Version - s.kvs[j].Version) < 0 +} + +type kvSortByCreate struct{ *kvSort } + +func (s *kvSortByCreate) Less(i, j int) bool { + return (s.kvs[i].CreateRevision - 
s.kvs[j].CreateRevision) < 0 +} + +type kvSortByMod struct{ *kvSort } + +func (s *kvSortByMod) Less(i, j int) bool { + return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 +} + +type kvSortByValue struct{ *kvSort } + +func (s *kvSortByValue) Less(i, j int) bool { + return bytes.Compare([]byte(s.kvs[i].Value), []byte(s.kvs[j].Value)) < 0 +} + +func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp := &pb.AlarmResponse{} + oldCount := len(a.s.alarmStore.Get(ar.Alarm)) // 获取指定类型的警报数量 + + lg := a.s.Logger() + switch ar.Action { + case pb.AlarmRequest_GET: + resp.Alarms = a.s.alarmStore.Get(ar.Alarm) + case pb.AlarmRequest_ACTIVATE: + if ar.Alarm == pb.AlarmType_NONE { + break + } + m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) // 记录、入库警报 + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 + if !activated { + break + } + lg.Warn("发生警报", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) + switch m.Alarm { + case pb.AlarmType_CORRUPT: + a.s.applyV3 = newApplierV3Corrupt(a) + case pb.AlarmType_NOSPACE: + a.s.applyV3 = newApplierV3Capped(a) + default: + lg.Panic("未实现的警报", zap.String("alarm", fmt.Sprintf("%+v", m))) + } + case pb.AlarmRequest_DEACTIVATE: + m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) + if m == nil { + break + } + resp.Alarms = append(resp.Alarms, m) + deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 + if !deactivated { + break + } + + switch m.Alarm { + case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT: + lg.Warn("警报解除", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) + a.s.applyV3 = a.s.newApplierV3() + default: + lg.Warn("未实现的警报解除类型", zap.String("alarm", fmt.Sprintf("%+v", m))) + } + default: + return nil, nil + } + return resp, nil +} + +// RoleList ok +func (a *applierV3backend) RoleList(r 
*pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + resp, err := a.s.AuthStore().RoleList(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +// RoleGet ok +func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := a.s.AuthStore().RoleGet(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +// RoleDelete ok +func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + resp, err := a.s.AuthStore().RoleDelete(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +// RoleAdd ok +func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := a.s.AuthStore().RoleAdd(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := a.s.AuthStore().RoleGrantPermission(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err := a.s.AuthStore().RoleRevokePermission(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + resp, err := a.s.AuthStore().UserAdd(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +// UserDelete ok +func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := a.s.AuthStore().UserDelete(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) 
(*pb.AuthUserChangePasswordResponse, error) { + resp, err := a.s.AuthStore().UserChangePassword(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := a.s.AuthStore().UserGrantRole(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := a.s.AuthStore().UserGet(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := a.s.AuthStore().UserRevokeRole(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + resp, err := a.s.AuthStore().UserList(r) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +// AuthEnable ok +func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { + err := a.s.AuthStore().AuthEnable() + if err != nil { + return nil, err + } + return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil +} + +// AuthDisable ok +func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { + a.s.AuthStore().AuthDisable() + return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil +} + +// AuthStatus ok +func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) { + enabled := a.s.AuthStore().IsAuthEnabled() + authRevision := a.s.AuthStore().Revision() + return &pb.AuthStatusResponse{Header: newHeader(a.s), Enabled: enabled, AuthRevision: authRevision}, nil +} + +func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { + ctx := context.WithValue(context.WithValue(a.s.ctx, 
auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken) + resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) + if resp != nil { + resp.Header = newHeader(a.s) + } + return resp, err +} + +// LeaseGrant 创建租约 +func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) + resp := &pb.LeaseGrantResponse{} + if err == nil { + resp.ID = int64(l.ID) + resp.TTL = l.TTL() + resp.Header = newHeader(a.s) + } + return resp, err +} + +// LeaseRevoke ok +func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + fmt.Println("LeaseRevoke", lc) + err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) + return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err +} + +// LeaseCheckpoint 避免 leader 变更时,导致的租约重置 +func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) { + fmt.Println("接收到checkpoint消息", lc.Checkpoints) + for _, c := range lc.Checkpoints { + err := a.s.lessor.Checkpoint(lease.LeaseID(c.ID), c.RemainingTtl) + if err != nil { + return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, err + } + } + return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, nil +} + +// LeaseGrant 检查空间\创建租约 +func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + ok := a.q.Available(lc) + resp, err := a.applierV3.LeaseGrant(lc) + if err == nil && !ok { + err = ErrNoSpace + } + return resp, err +} + +type quotaApplierV3 struct { + applierV3 // applierV3backend + q Quota +} + +// newQuotaApplierV3 wraps the given applier with backend-quota enforcement. +// NOTE(review): the original line read `return "aApplierV3{…}` — the literal +// `&quotaApplierV3` was mangled by an HTML-entity pass (`&quot` → `"`) and +// could not compile; restored here. +func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { + return &quotaApplierV3{app, NewBackendQuota(s, "v3-applier")} +} + +func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + ok := a.q.Available(p) // 判断给定的请求是否符合配额要求 + 
resp, trace, err := a.applierV3.Put(ctx, txn, p) + if err == nil && !ok { + err = ErrNoSpace + } + return resp, trace, err +} diff --git a/etcd/etcdserver/backend.go b/etcd/etcdserver/backend.go new file mode 100644 index 00000000000..6d26404c004 --- /dev/null +++ b/etcd/etcdserver/backend.go @@ -0,0 +1,109 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "os" + "time" + + "github.com/ls-2018/etcd_cn/etcd/config" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "go.uber.org/zap" +) + +func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { + bcfg := backend.DefaultBackendConfig() + bcfg.Path = cfg.BackendPath() + bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync + if cfg.BackendBatchLimit != 0 { + bcfg.BatchLimit = cfg.BackendBatchLimit + if cfg.Logger != nil { + cfg.Logger.Info("设置后端batch限制", zap.Int("batch limit", cfg.BackendBatchLimit)) + } + } + if cfg.BackendBatchInterval != 0 { + bcfg.BatchInterval = cfg.BackendBatchInterval + if cfg.Logger != nil { + cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval)) + } + } + bcfg.BackendFreelistType = cfg.BackendFreelistType + bcfg.Logger = cfg.Logger + if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != 
DefaultQuotaBytes { + // permit 10% excess over quota for disarm + bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) + } + bcfg.Mlock = cfg.ExperimentalMemoryMlock + bcfg.Hooks = hooks + return backend.New(bcfg) +} + +// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func openSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks backend.Hooks) (backend.Backend, error) { + snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) + if err != nil { + return nil, fmt.Errorf("failed to find database snapshot file (%v)", err) + } + if err := os.Rename(snapPath, cfg.BackendPath()); err != nil { + return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err) + } + return openBackend(cfg, hooks), nil +} + +// openBackend 返回一个使用当前etcd数据库的后端. +func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { + fn := cfg.BackendPath() // default.etcd/member/snap/db + + now, beOpened := time.Now(), make(chan backend.Backend) + go func() { + beOpened <- newBackend(cfg, hooks) + }() + + select { + case be := <-beOpened: + cfg.Logger.Info("打开后台数据库", zap.String("path", fn), zap.Duration("took", time.Since(now))) + return be + + case <-time.After(10 * time.Second): + cfg.Logger.Info( + "db文件被另一个进程占用,或占用时间过长", + zap.String("path", fn), + zap.Duration("took", time.Since(now)), + ) + } + + return <-beOpened +} + +// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes +// before updating the backend db after persisting raft snapshot to disk, +// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this +// case, replace the db with the snapshot db sent by the leader. 
+func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) { + consistentIndex := uint64(0) + if beExist { + consistentIndex, _ = cindex.ReadConsistentIndex(oldbe.BatchTx()) + } + if snapshot.Metadata.Index <= consistentIndex { + return oldbe, nil + } + oldbe.Close() + return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks) +} diff --git a/etcd/etcdserver/cindex/cindex.go b/etcd/etcdserver/cindex/cindex.go new file mode 100644 index 00000000000..6486359aab0 --- /dev/null +++ b/etcd/etcdserver/cindex/cindex.go @@ -0,0 +1,185 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cindex + +import ( + "encoding/binary" + "sync" + "sync/atomic" + + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" +) + +type Backend interface { + BatchTx() backend.BatchTx +} + +// ConsistentIndexer 用于处理boltdb和raftlog之间的幂等性. +type ConsistentIndexer interface { + ConsistentIndex() uint64 // 返回当前执行条目的一致索引 + SetConsistentIndex(v uint64, term uint64) // 设置当前执行条目的一致索引 + UnsafeSave(tx backend.BatchTx) // 必须在持有tx上的锁的情况下被调用. 它将一致索引保存到底层稳定存储中. + SetBackend(be Backend) // 为ConsistentIndexer设置可用的backend.BatchTx. +} + +// 当boltdb用作状态机的时候,wal和boltdb作为两个不同的实体,很有可能存在不一致的情况. 
+// 所以etcd在boltdb中存储一条记录consistent-index,来代表已经apply到bolt-db上成功的log index, +// 这样当根据wal恢复bolt-db的时候,就可以判断log index是不是已经被apply过. + +// consistentIndex implements the ConsistentIndexer interface. +type consistentIndex struct { + // consistentIndex represents the offset of an entry in a consistent replica log. + // It caches the "consistent_index" key's value. + // Accessed through atomics so必须是64-bit aligned. + consistentIndex uint64 + // term represents the RAFT term of committed entry in a consistent replica log. + // Accessed through atomics so必须是64-bit aligned. + // The value is being persisted in the backend since v3.5. + term uint64 + + // be is used for initial read consistentIndex + be Backend + // mutex is protecting be. + mutex sync.Mutex +} + +// NewConsistentIndex 返回一个一致性索引 +// 如果be is nil,必须在首次调用前执行ConsistentIndex方法 +func NewConsistentIndex(be Backend) ConsistentIndexer { + return &consistentIndex{be: be} +} + +func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) { + index := atomic.LoadUint64(&ci.consistentIndex) + term := atomic.LoadUint64(&ci.term) + UnsafeUpdateConsistentIndex(tx, index, term, true) +} + +func (ci *consistentIndex) SetBackend(be Backend) { + ci.mutex.Lock() + defer ci.mutex.Unlock() + ci.be = be + // After the backend is changed, the first access should re-read it. 
+ ci.SetConsistentIndex(0, 0) +} + +func NewFakeConsistentIndex(index uint64) ConsistentIndexer { + return &fakeConsistentIndex{index: index} +} + +type fakeConsistentIndex struct { + index uint64 + term uint64 +} + +func (f *fakeConsistentIndex) ConsistentIndex() uint64 { return f.index } + +func (f *fakeConsistentIndex) SetConsistentIndex(index uint64, term uint64) { + atomic.StoreUint64(&f.index, index) + atomic.StoreUint64(&f.term, term) +} + +func (f *fakeConsistentIndex) UnsafeSave(_ backend.BatchTx) {} +func (f *fakeConsistentIndex) SetBackend(_ Backend) {} + +func UnsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) { + if index == 0 { + // Never save 0 as it means that we didn't loaded the real index yet. + return + } + + if onlyGrow { + oldi, oldTerm := unsafeReadConsistentIndex(tx) + if term < oldTerm { + return + } + if term == oldTerm && index <= oldi { + return + } + } + + bs1 := make([]byte, 8) + binary.BigEndian.PutUint64(bs1, index) + // put the index into the underlying backend + // tx has been locked in TxnBegin, so there is no need to lock it again + tx.UnsafePut(buckets.Meta, buckets.MetaConsistentIndexKeyName, bs1) + if term > 0 { + bs2 := make([]byte, 8) + binary.BigEndian.PutUint64(bs2, term) + tx.UnsafePut(buckets.Meta, buckets.MetaTermKeyName, bs2) + } +} + +// ----------------------------------------- OVER ----------------------------------------------- + +func (ci *consistentIndex) ConsistentIndex() uint64 { + if index := atomic.LoadUint64(&ci.consistentIndex); index > 0 { + return index + } + ci.mutex.Lock() + defer ci.mutex.Unlock() + + v, term := ReadConsistentIndex(ci.be.BatchTx()) + ci.SetConsistentIndex(v, term) + return v +} + +func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) { + atomic.StoreUint64(&ci.consistentIndex, v) + atomic.StoreUint64(&ci.term, term) +} + +func UnsafeCreateMetaBucket(tx backend.BatchTx) { + tx.UnsafeCreateBucket(buckets.Meta) +} + +// 
CreateMetaBucket 创建meta bucket,如果不存在 +func CreateMetaBucket(tx backend.BatchTx) { + tx.Lock() + defer tx.Unlock() + tx.UnsafeCreateBucket(buckets.Meta) +} + +// 从bolt.db 加载一致的索引和任期 +func unsafeReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { + // consistent_index + _, vs := tx.UnsafeRange(buckets.Meta, buckets.MetaConsistentIndexKeyName, nil, 0) + if len(vs) == 0 { + return 0, 0 + } + v := binary.BigEndian.Uint64(vs[0]) + // term + _, ts := tx.UnsafeRange(buckets.Meta, buckets.MetaTermKeyName, nil, 0) + if len(ts) == 0 { + return v, 0 + } + t := binary.BigEndian.Uint64(ts[0]) + return v, t +} + +// ReadConsistentIndex 从给定的tx中加载一致的索引和任期.如果没有找到数据,返回0. +func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { + tx.Lock() + defer tx.Unlock() + return unsafeReadConsistentIndex(tx) +} + +// UpdateConsistentIndex 会写到bolt.db meta库 +func UpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) { + tx.Lock() + defer tx.Unlock() + UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow) +} diff --git a/etcd/etcdserver/cindex/doc.go b/etcd/etcdserver/cindex/doc.go new file mode 100644 index 00000000000..1631be23fde --- /dev/null +++ b/etcd/etcdserver/cindex/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cindex 提供了一个获取/保存一致索引的接口和实现. 
+package cindex diff --git a/etcd/etcdserver/cluster_util.go b/etcd/etcdserver/cluster_util.go new file mode 100644 index 00000000000..2bb7e52e25c --- /dev/null +++ b/etcd/etcdserver/cluster_util.go @@ -0,0 +1,478 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +// isMemberBootstrapped 试图检查给定的成员是否已经在给定的集群中被引导了. +func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { + // 获取非本机的peer urls + rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt) // 从远端节点获取到的集群节点信息 + if err != nil { + // 初始化时,会有err此时member 是节点名字,而cl.member里的是hash之后的值 + return false + } + id := cl.MemberByName(member).ID + m := rcl.Member(id) // 远端的 + if m == nil { + return false + } + if len(m.ClientURLs) > 0 { + return true + } + return false +} + +// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and +// attempts to construct a Cluster by accessing the members endpoint on one of +// these URLs. The first URL to provide a response is used. 
If no URLs provide +// a response, or a Cluster cannot backend successfully created from a received +// response, an error is returned. +// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, +// 10 second is enough for building connection and finishing request. +func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { + return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt) +} + +// 从远端节点获取到的集群节点信息 +func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { + if lg == nil { + lg = zap.NewNop() + } + cc := &http.Client{ + Transport: rt, + Timeout: timeout, + } + for _, u := range urls { + addr := u + "/members" + resp, err := cc.Get(addr) + if err != nil { + if logerr { + lg.Warn("获取集群响应失败", zap.String("address", addr), zap.Error(err)) + } + continue + } + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + if logerr { + lg.Warn("读取集群响应失败", zap.String("address", addr), zap.Error(err)) + } + continue + } + var membs []*membership.Member + if err = json.Unmarshal(b, &membs); err != nil { + if logerr { + lg.Warn("反序列化集群响应失败", zap.String("address", addr), zap.Error(err)) + } + continue + } + id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) + if err != nil { + if logerr { + lg.Warn( + "无法解析集群ID", + zap.String("address", addr), + zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")), + zap.Error(err), + ) + } + continue + } + + if len(membs) > 0 { + return membership.NewClusterFromMembers(lg, id, membs), nil // Construct struct + } + return nil, fmt.Errorf("无法获取raft集群节点信息从远端节点") + } + return nil, fmt.Errorf("无法从给定的URL中检索到集群信息") +} + +// getRemotePeerURLs 获取非本机的peer urls +func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { + us := make([]string, 0) + for _, m := range cl.Members() { + if m.Name == local { + 
continue + } + us = append(us, m.PeerURLs...) + } + sort.Strings(us) + return us +} + +// getVersions returns the versions of the members in the given cluster. +// The key of the returned map is the member's ID. The value of the returned map +// is the semver versions string, including etcd and cluster. +// If it fails to get the version of a member, the key will backend nil. +func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { + members := cl.Members() + vers := make(map[string]*version.Versions) + for _, m := range members { + if m.ID == local { + cv := "not_decided" + if cl.Version() != nil { + cv = cl.Version().String() + } + vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} + continue + } + ver, err := getVersion(lg, m, rt) + if err != nil { + lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err)) + vers[m.ID.String()] = nil + } else { + vers[m.ID.String()] = ver + } + } + return vers +} + +// decideClusterVersion decides the cluster version based on the versions map. +// The returned version is the min etcd version in the map, or nil if the min +// version in unknown. 
+func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version { + var cv *semver.Version + lv := semver.Must(semver.NewVersion(version.Version)) + + for mid, ver := range vers { + if ver == nil { + return nil + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + lg.Warn( + "failed to parse etcd version of remote member", + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + zap.Error(err), + ) + return nil + } + if lv.LessThan(*v) { + lg.Warn( + "leader found higher-versioned member", + zap.String("local-member-version", lv.String()), + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + ) + } + if cv == nil { + cv = v + } else if v.LessThan(*cv) { + cv = v + } + } + return cv +} + +// allowedVersionRange decides the available version range of the cluster that local etcd can join in; +// if the downgrade enabled status is true, the version window is [oneMinorHigher, oneMinorHigher] +// if the downgrade is not enabled, the version window is [MinClusterVersion, localVersion] +func allowedVersionRange(downgradeEnabled bool) (minV *semver.Version, maxV *semver.Version) { + minV = semver.Must(semver.NewVersion(version.MinClusterVersion)) + maxV = semver.Must(semver.NewVersion(version.Version)) + maxV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor} + + if downgradeEnabled { + // Todo: handle the case that downgrading from higher major version(e.g. downgrade from v4.0 to v3.x) + maxV.Minor = maxV.Minor + 1 + minV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor} + } + return minV, maxV +} + +// isCompatibleWithCluster return true if the local member has a compatible version with +// the current running cluster. +// The version is considered as compatible when at least one of the other members in the cluster has a +// cluster version in the range of [MinV, MaxV] and no known members has a cluster version +// out of the range. 
+// We set this rule since when the local member joins, another member might be offline. +func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { + vers := getVersions(lg, cl, local, rt) + minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt)) + return isCompatibleWithVers(lg, vers, local, minV, maxV) +} + +func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { + var ok bool + for id, v := range vers { + // ignore comparison with local version + if id == local.String() { + continue + } + if v == nil { + continue + } + clusterv, err := semver.NewVersion(v.Cluster) + if err != nil { + lg.Warn( + "failed to parse cluster version of remote member", + zap.String("remote-member-id", id), + zap.String("remote-member-cluster-version", v.Cluster), + zap.Error(err), + ) + continue + } + if clusterv.LessThan(*minV) { + lg.Warn( + "cluster version of remote member is not compatible; too low", + zap.String("remote-member-id", id), + zap.String("remote-member-cluster-version", clusterv.String()), + zap.String("minimum-cluster-version-supported", minV.String()), + ) + return false + } + if maxV.LessThan(*clusterv) { + lg.Warn( + "cluster version of remote member is not compatible; too high", + zap.String("remote-member-id", id), + zap.String("remote-member-cluster-version", clusterv.String()), + zap.String("maximum-cluster-version-supported", maxV.String()), + ) + return false + } + ok = true + } + return ok +} + +// getVersion returns the Versions of the given member via its +// peerURLs. Returns the last error if it fails to get the version.
+func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { + cc := &http.Client{ + Transport: rt, + } + var ( + err error + resp *http.Response + ) + + for _, u := range m.PeerURLs { + addr := u + "/version" + resp, err = cc.Get(addr) + if err != nil { + lg.Warn( + "failed to reach the peer URL", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + continue + } + var b []byte + b, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + lg.Warn( + "failed to read body of response", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + continue + } + var vers version.Versions + if err = json.Unmarshal(b, &vers); err != nil { + lg.Warn( + "failed to unmarshal response", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + continue + } + return &vers, nil + } + return nil, err +} + +func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.RoundTripper) ([]*membership.Member, error) { + cc := &http.Client{Transport: peerRt} + // TODO: refactor member http handler code + // cannot import etcdhttp, so manually construct url + requestUrl := url + "/members/promote/" + fmt.Sprintf("%d", id) + req, err := http.NewRequest("POST", requestUrl, nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusRequestTimeout { + return nil, ErrTimeout + } + if resp.StatusCode == http.StatusPreconditionFailed { + // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code + if strings.Contains(string(b), ErrLearnerNotReady.Error()) { + return nil, ErrLearnerNotReady + } + if strings.Contains(string(b), 
membership.ErrMemberNotLearner.Error()) { + return nil, membership.ErrMemberNotLearner + } + return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) + } + if resp.StatusCode == http.StatusNotFound { + return nil, membership.ErrIDNotFound + } + + if resp.StatusCode != http.StatusOK { // all other types of errors + return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) + } + + var membs []*membership.Member + if err := json.Unmarshal(b, &membs); err != nil { + return nil, err + } + return membs, nil +} + +// getDowngradeEnabledFromRemotePeers will get the downgrade enabled status of the cluster. +func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { + members := cl.Members() + + for _, m := range members { + if m.ID == local { + continue + } + enable, err := getDowngradeEnabled(lg, m, rt) + if err != nil { + lg.Warn("failed to get downgrade enabled status", zap.String("remote-member-id", m.ID.String()), zap.Error(err)) + } else { + // Since the "/downgrade/enabled" serves linearized data, + // this function can return once it gets a non-error response from the endpoint. + return enable + } + } + return false +} + +// getDowngradeEnabled returns the downgrade enabled status of the given member +// via its peerURLs. Returns the last error if it fails to get it. 
+func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (bool, error) { + cc := &http.Client{ + Transport: rt, + } + var ( + err error + resp *http.Response + ) + + for _, u := range m.PeerURLs { + addr := u + DowngradeEnabledPath + resp, err = cc.Get(addr) + if err != nil { + lg.Warn( + "failed to reach the peer URL", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + continue + } + var b []byte + b, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + lg.Warn( + "failed to read body of response", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + continue + } + var enable bool + if enable, err = strconv.ParseBool(string(b)); err != nil { + lg.Warn( + "failed to convert response", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + continue + } + return enable, nil + } + return false, err +} + +// isMatchedVersions returns true if all etcd versions are equal to target version, otherwise return false. +// It can backend used to decide the whether the cluster finishes downgrading to target version. 
+func isMatchedVersions(lg *zap.Logger, targetVersion *semver.Version, vers map[string]*version.Versions) bool { + for mid, ver := range vers { + if ver == nil { + return false + } + v, err := semver.NewVersion(ver.Cluster) + if err != nil { + lg.Warn( + "failed to parse etcd version of remote member", + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + zap.Error(err), + ) + return false + } + if !targetVersion.Equal(*v) { + lg.Warn("remotes etcd has mismatching etcd version", + zap.String("remote-member-id", mid), + zap.String("current-etcd-version", v.String()), + zap.String("target-version", targetVersion.String()), + ) + return false + } + } + return true +} + +func convertToClusterVersion(v string) (*semver.Version, error) { + ver, err := semver.NewVersion(v) + if err != nil { + // allow input version format Major.Minor + ver, err = semver.NewVersion(v + ".0") + if err != nil { + return nil, ErrWrongDowngradeVersionFormat + } + } + // cluster version only keeps major.minor, remove patch version + ver = &semver.Version{Major: ver.Major, Minor: ver.Minor} + return ver, nil +} diff --git a/etcd/etcdserver/errors.go b/etcd/etcdserver/errors.go new file mode 100644 index 00000000000..e82e539caa9 --- /dev/null +++ b/etcd/etcdserver/errors.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "errors" + "fmt" +) + +var ( + ErrUnknownMethod = errors.New("etcdserver: 未知的请求方法") + ErrStopped = errors.New("etcdserver: etcd停止") + ErrCanceled = errors.New("etcdserver: 请求取消") + ErrTimeout = errors.New("etcdserver: 请求超时") + ErrTimeoutDueToLeaderFail = errors.New("etcdserver: 请求超时,可能是由于之前的领导者失败了") + ErrTimeoutDueToConnectionLost = errors.New("etcdserver: 请求超时,可能是由于连接丢失") + ErrTimeoutLeaderTransfer = errors.New("etcdserver: 请求超时,领导者转移时间过长") + ErrLeaderChanged = errors.New("etcdserver: 领导者转移了") + ErrNotEnoughStartedMembers = errors.New("etcdserver: 由于启动的成员不足,重新配置失败") + ErrLearnerNotReady = errors.New("etcdserver: 只能提拔与leader同步的learner成员") + ErrNoLeader = errors.New("etcdserver: 没有leader") + ErrNotLeader = errors.New("etcdserver: 不是leader") + ErrRequestTooLarge = errors.New("etcdserver: 请求太多") + ErrNoSpace = errors.New("etcdserver: 没有空间") + ErrTooManyRequests = errors.New("etcdserver: 太多的请求") + ErrUnhealthy = errors.New("etcdserver: 集群不健康") + ErrKeyNotFound = errors.New("etcdserver: key没找到") + ErrCorrupt = errors.New("etcdserver: 损坏的集群") + ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") + ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") + ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") + ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version") + ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress") + ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job") +) + +type DiscoveryError struct { + Op string + Err error +} + +func (e DiscoveryError) Error() string { + return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) +} diff --git a/etcd/etcdserver/kv.go b/etcd/etcdserver/kv.go new file mode 100644 index 00000000000..d5bbc5931c1 --- /dev/null +++ b/etcd/etcdserver/kv.go @@ -0,0 +1,276 @@ +package 
etcdserver + +import ( + "context" + "fmt" + "time" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "go.uber.org/zap" +) + +// CheckInitialHashKV 在提供任何对等/客户流量之前,将初始哈希值与peer进行比较.只有当哈希值在要求的修订版上不一样时,才会出现不匹配,而压缩的修订版是相同的. +func (s *EtcdServer) CheckInitialHashKV() error { + if !s.Cfg.InitialCorruptCheck { // 没有开启数据毁坏检测功能 + return nil + } + + lg := s.Logger() + + lg.Info( + "starting initial corruption check", + zap.String("local-member-id", s.ID().String()), + zap.Duration("timeout", s.Cfg.ReqTimeout()), + ) + + h, rev, crev, err := s.kv.HashByRev(0) + if err != nil { + return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err) + } + peers := s.getPeerHashKVs(rev) + mismatch := 0 + for _, p := range peers { + if p.resp != nil { + peerID := types.ID(p.resp.Header.MemberId) + fields := []zap.Field{ + zap.String("local-member-id", s.ID().String()), + zap.Int64("local-member-revision", rev), + zap.Int64("local-member-compact-revision", crev), + zap.Uint32("local-member-hash", h), + zap.String("remote-peer-id", peerID.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Int64("remote-peer-revision", p.resp.Header.Revision), + zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision), + zap.Uint32("remote-peer-hash", p.resp.Hash), + } + + if h != p.resp.Hash { + if crev == p.resp.CompactRevision { + lg.Warn("found different hash values from remote peer", fields...) + mismatch++ + } else { + lg.Warn("found different compact revision values from remote peer", fields...) 
+				}
+			}
+
+			continue
+		}
+
+		if p.err != nil {
+			switch p.err {
+			case rpctypes.ErrFutureRev:
+				lg.Warn(
+					"cannot fetch hash from slow remote peer",
+					zap.String("local-member-id", s.ID().String()),
+					zap.Int64("local-member-revision", rev),
+					zap.Int64("local-member-compact-revision", crev),
+					zap.Uint32("local-member-hash", h),
+					zap.String("remote-peer-id", p.id.String()),
+					zap.Strings("remote-peer-endpoints", p.eps),
+					// NOTE(fix): log the peer's error; the enclosing err from
+					// HashByRev is provably nil on this path, so zap.Error(err)
+					// logged nothing useful.
+					zap.Error(p.err),
+				)
+			case rpctypes.ErrCompacted:
+				lg.Warn(
+					"cannot fetch hash from remote peer; local member is behind",
+					zap.String("local-member-id", s.ID().String()),
+					zap.Int64("local-member-revision", rev),
+					zap.Int64("local-member-compact-revision", crev),
+					zap.Uint32("local-member-hash", h),
+					zap.String("remote-peer-id", p.id.String()),
+					zap.Strings("remote-peer-endpoints", p.eps),
+					zap.Error(p.err),
+				)
+			}
+		}
+	}
+	if mismatch > 0 {
+		return fmt.Errorf("%s found data inconsistency with peers", s.ID())
+	}
+
+	lg.Info(
+		"initial corruption checking passed; no corruption",
+		zap.String("local-member-id", s.ID().String()),
+	)
+	return nil
+}
+
+// monitorKVHash runs the leader-side KV hash corruption check every
+// Cfg.CorruptCheckTime; it is disabled entirely when that duration is zero.
+// Runs until s.stopping is closed; only the current leader performs checks.
+func (s *EtcdServer) monitorKVHash() {
+	t := s.Cfg.CorruptCheckTime
+	if t == 0 {
+		return
+	}
+
+	lg := s.Logger()
+	lg.Info("启用损坏检查", zap.String("local-member-id", s.ID().String()), zap.Duration("interval", t))
+
+	for {
+		select {
+		case <-s.stopping:
+			return
+		case <-time.After(t):
+		}
+		if !s.isLeader() {
+			continue
+		}
+		if err := s.checkHashKV(); err != nil {
+			lg.Warn("failed to check hash KV", zap.Error(err))
+		}
+	}
+}
+
+// checkHashKV compares the local KV hash at the latest revision against every
+// peer's, raising a CORRUPT alarm (at most once per invocation) on mismatch.
+func (s *EtcdServer) checkHashKV() error {
+	lg := s.Logger()
+
+	h, rev, crev, err := s.kv.HashByRev(0)
+	if err != nil {
+		return err
+	}
+	peers := s.getPeerHashKVs(rev)
+
+	ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+	err = s.linearizeReadNotify(ctx)
+	cancel()
+	if err != nil {
+		return err
+	}
+
+	h2, rev2, crev2, err := s.kv.HashByRev(0)
+	if err != nil {
+		return err
+	}
+
+	alarmed := false
+	mismatch := func(id
uint64) { + if alarmed { + return + } + alarmed = true + a := &pb.AlarmRequest{ + MemberID: id, + Action: pb.AlarmRequest_ACTIVATE, // checkHashKV + Alarm: pb.AlarmType_CORRUPT, + } + s.GoAttach(func() { + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) + }) + } + + if h2 != h && rev2 == rev && crev == crev2 { + lg.Warn( + "found hash mismatch", + zap.Int64("revision-1", rev), + zap.Int64("compact-revision-1", crev), + zap.Uint32("hash-1", h), + zap.Int64("revision-2", rev2), + zap.Int64("compact-revision-2", crev2), + zap.Uint32("hash-2", h2), + ) + mismatch(uint64(s.ID())) + } + + checkedCount := 0 + for _, p := range peers { + if p.resp == nil { + continue + } + checkedCount++ + id := p.resp.Header.MemberId + + // leader expects follower's latest revision less than or equal to leader's + if p.resp.Header.Revision > rev2 { + lg.Warn( + "revision from follower必须是less than or equal to leader's", + zap.Int64("leader-revision", rev2), + zap.Int64("follower-revision", p.resp.Header.Revision), + zap.String("follower-peer-id", types.ID(id).String()), + ) + mismatch(id) + } + + // leader expects follower's latest compact revision less than or equal to leader's + if p.resp.CompactRevision > crev2 { + lg.Warn( + "compact revision from follower必须是less than or equal to leader's", + zap.Int64("leader-compact-revision", crev2), + zap.Int64("follower-compact-revision", p.resp.CompactRevision), + zap.String("follower-peer-id", types.ID(id).String()), + ) + mismatch(id) + } + + // follower's compact revision is leader's old one, then hashes must match + if p.resp.CompactRevision == crev && p.resp.Hash != h { + lg.Warn( + "same compact revision then hashes must match", + zap.Int64("leader-compact-revision", crev2), + zap.Uint32("leader-hash", h), + zap.Int64("follower-compact-revision", p.resp.CompactRevision), + zap.Uint32("follower-hash", p.resp.Hash), + zap.String("follower-peer-id", types.ID(id).String()), + ) + mismatch(id) + } + } + lg.Info("finished peer corruption 
check", zap.Int("number-of-peers-checked", checkedCount))
+	return nil
+}
+
+// peerInfo identifies a remote member and the peer URLs to reach it.
+type peerInfo struct {
+	id  types.ID
+	eps []string
+}
+
+// peerHashKVResp is the per-peer outcome of a HashKV fetch: either resp is
+// set, or err carries the last endpoint's failure.
+type peerHashKVResp struct {
+	peerInfo
+	resp *pb.HashKVResponse
+	err  error
+}
+
+// getPeerHashKVs fetches the HashKV result at rev from every other cluster
+// member, trying each of a peer's endpoints in turn and recording the last
+// error when all of them fail.
+func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
+	// TODO: handle the case when "s.cluster.Members" have not
+	// been populated (e.g. no snapshot to load from disk)
+	members := s.cluster.Members()
+	peers := make([]peerInfo, 0, len(members))
+	for _, m := range members {
+		if m.ID == s.ID() {
+			continue
+		}
+		peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs})
+	}
+
+	lg := s.Logger()
+
+	var resps []*peerHashKVResp
+	for _, p := range peers {
+		if len(p.eps) == 0 {
+			continue
+		}
+
+		respsLen := len(resps)
+		var lastErr error
+		for _, ep := range p.eps {
+			ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+			var resp *pb.HashKVResponse
+			// NOTE(fix): assign with '=' so the outer lastErr is updated; the
+			// original 'resp, lastErr :=' shadowed lastErr inside the loop,
+			// leaving the recorded per-peer error below always nil.
+			resp, lastErr = s.getPeerHashKVHTTP(ctx, ep, rev)
+			cancel()
+			if lastErr == nil {
+				resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
+				break
+			}
+			lg.Warn(
+				"failed hash kv request",
+				zap.String("local-member-id", s.ID().String()),
+				zap.Int64("requested-revision", rev),
+				zap.String("remote-peer-endpoint", ep),
+				zap.Error(lastErr),
+			)
+		}
+
+		// failed to get hashKV from all endpoints of this peer
+		if respsLen == len(resps) {
+			resps = append(resps, &peerHashKVResp{peerInfo: p, resp: nil, err: lastErr})
+		}
+	}
+	return resps
+}
diff --git a/etcd/etcdserver/over_alarms_method_overwirter.go b/etcd/etcdserver/over_alarms_method_overwirter.go
new file mode 100644
index 00000000000..0137972c864
--- /dev/null
+++ b/etcd/etcdserver/over_alarms_method_overwirter.go
@@ -0,0 +1,79 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "context" + + "github.com/ls-2018/etcd_cn/etcd/mvcc" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" +) + +type applierV3Corrupt struct { + applierV3 +} + +func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } + +func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + return nil, nil, ErrCorrupt +} + +func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { + return nil, ErrCorrupt +} + +func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + return nil, ErrCorrupt +} + +func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + return nil, nil, ErrCorrupt +} + +func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { + return nil, nil, nil, ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + return nil, ErrCorrupt +} + +type applierV3Capped struct { + applierV3 + q backendQuota +} + +func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } + +func (a 
*applierV3Capped) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + return nil, nil, ErrNoSpace +} + +func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + if a.q.Cost(r) > 0 { + return nil, nil, ErrNoSpace + } + return a.applierV3.Txn(ctx, r) +} + +func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, ErrNoSpace +} diff --git a/etcd/etcdserver/over_httpserver_access_control.go b/etcd/etcdserver/over_httpserver_access_control.go new file mode 100644 index 00000000000..e5a5d64ba44 --- /dev/null +++ b/etcd/etcdserver/over_httpserver_access_control.go @@ -0,0 +1,55 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import "sync" + +// AccessController 控制 etcd http请求的访问控制 +type AccessController struct { + corsMu sync.RWMutex + CORS map[string]struct{} + hostWhitelistMu sync.RWMutex + HostWhitelist map[string]struct{} +} + +// OriginAllowed 是否允许跨域请求 +func (ac *AccessController) OriginAllowed(origin string) bool { + ac.corsMu.RLock() + defer ac.corsMu.RUnlock() + if len(ac.CORS) == 0 { // allow all + return true + } + _, ok := ac.CORS["*"] + if ok { + return true + } + _, ok = ac.CORS[origin] + return ok +} + +// IsHostWhitelisted 返回host在不在白名单里 +func (ac *AccessController) IsHostWhitelisted(host string) bool { + ac.hostWhitelistMu.RLock() + defer ac.hostWhitelistMu.RUnlock() + if len(ac.HostWhitelist) == 0 { // allow all + return true + } + _, ok := ac.HostWhitelist["*"] + if ok { + return true + } + _, ok = ac.HostWhitelist[host] + return ok +} diff --git a/etcd/etcdserver/over_linearize_read.go b/etcd/etcdserver/over_linearize_read.go new file mode 100644 index 00000000000..4c3ca48bf6b --- /dev/null +++ b/etcd/etcdserver/over_linearize_read.go @@ -0,0 +1,189 @@ +package etcdserver + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "strconv" + "time" + + "github.com/ls-2018/etcd_cn/pkg/traceutil" + "github.com/ls-2018/etcd_cn/raft" + "go.uber.org/zap" +) + +type notifier struct { + c chan struct{} + err error +} + +// 通知 +func newNotifier() *notifier { + return ¬ifier{ + c: make(chan struct{}), + } +} + +func (nc *notifier) notify(err error) { + nc.err = err + close(nc.c) +} + +// 线性一致性读,保证强一致性 , 阻塞,直到applyid >= 当前生成的ID +func (s *EtcdServer) linearizableReadLoop() { + for { + requestId := s.reqIDGen.Next() + leaderChangedNotifier := s.LeaderChangedNotify() + select { + case <-leaderChangedNotifier: + continue + case <-s.readwaitc: + // 在client发起一次Linearizable Read的时候,会向readwaitc写入一个空的结构体作为信号 + fmt.Println("开始一次linearizableRead") + case <-s.stopping: + return + } + + // 因为一个循环可以解锁多个读数所以从Txn或Range传播追踪不是很有用. 
+ trace := traceutil.New("linearizableReadLoop", s.Logger()) + + s.readMu.Lock() + nr := s.readNotifier + s.readNotifier = newNotifier() + s.readMu.Unlock() + // 处理不同的消息 + // 这里会监听 readwaitc,发送MsgReadIndex 并等待 MsgReadIndexRsp + // 同时获取当前已提交的日志索引 + // 串行执行的 + confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestId) // MsgReadIndex 携带requestId经过raft走一圈 + if isStopped(err) { + return + } + if err != nil { + nr.notify(err) + continue + } + + trace.Step("收到要读的索引") + trace.AddField(traceutil.Field{Key: "readStateIndex", Value: confirmedIndex}) + appliedIndex := s.getAppliedIndex() + trace.AddField(traceutil.Field{Key: "appliedIndex", Value: strconv.FormatUint(appliedIndex, 10)}) + // 此处是重点 等待 apply index >= read index + if appliedIndex < confirmedIndex { + select { + case <-s.applyWait.Wait(confirmedIndex): + case <-s.stopping: + return + } + } + // 发出可以进行读取状态机的信号 + nr.notify(nil) + trace.Step("applied 索引现在低于 readState.Index") + } +} + +// 请求当前索引 +func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) { + err := s.sendReadIndex(requestId) // 线性读生成的7587861540711705347 就是异步发送一条raft的消息 + if err != nil { + return 0, err + } + + lg := s.Logger() + errorTimer := time.NewTimer(s.Cfg.ReqTimeout()) + defer errorTimer.Stop() + retryTimer := time.NewTimer(readIndexRetryTime) // 500ms + defer retryTimer.Stop() + + firstCommitInTermNotifier := s.FirstCommitInTermNotify() + + for { + select { + case rs := <-s.r.readStateC: // err := s.sendReadIndex(requestId) 经由raft会往这里发一个信号 + requestIdBytes := uint64ToBigEndianBytes(requestId) + gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIdBytes) + // rs.RequestCtxrequestIdBytes 可能是高并发情景下,下一次get请求导致的 + if !gotOwnResponse { + // 前一个请求可能超时.现在我们应该忽略它的响应,继续等待当前请求的响应. 
+ responseId := uint64(0) + if len(rs.RequestCtx) == 8 { + responseId = binary.BigEndian.Uint64(rs.RequestCtx) + } + lg.Warn( + "忽略过期的读索引响应;本地节点读取索引排队等待后端与leader同步", + zap.Uint64("sent-request-id", requestId), + zap.Uint64("received-request-id", responseId), + ) + continue + } + return rs.Index, nil // 返回的是leader已经committed的索引 + case <-leaderChangedNotifier: + return 0, ErrLeaderChanged + case <-firstCommitInTermNotifier: + firstCommitInTermNotifier = s.FirstCommitInTermNotify() + lg.Info("第一次提交:重发ReadIndex请求") + err := s.sendReadIndex(requestId) + if err != nil { + return 0, err + } + retryTimer.Reset(readIndexRetryTime) + continue + case <-retryTimer.C: + lg.Warn("等待ReadIndex响应时间过长,需要重新尝试", zap.Uint64("sent-request-id", requestId), zap.Duration("retry-timeout", readIndexRetryTime)) + err := s.sendReadIndex(requestId) + if err != nil { + return 0, err + } + retryTimer.Reset(readIndexRetryTime) + continue + case <-errorTimer.C: + lg.Warn("等待读索引响应时超时(本地节点可能有较慢的网络)", zap.Duration("timeout", s.Cfg.ReqTimeout())) + return 0, ErrTimeout + case <-s.stopping: + return 0, ErrStopped + } + } +} + +// etcdctl get 就是异步发送一条raft的消息 +func (s *EtcdServer) sendReadIndex(requestIndex uint64) error { + ctxToSend := uint64ToBigEndianBytes(requestIndex) + + cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + // 就是异步发送一条raft的消息 + err := s.r.ReadIndex(cctx, ctxToSend) // 发出去就完事了, 发到内存里 + cancel() + if err == raft.ErrStopped { + return err + } + if err != nil { + lg := s.Logger() + lg.Warn("未能从Raft获取读取索引", zap.Error(err)) + return err + } + return nil +} + +// 进行一次 线性读取准备 +func (s *EtcdServer) linearizeReadNotify(ctx context.Context) error { + s.readMu.RLock() + nc := s.readNotifier + s.readMu.RUnlock() + + select { + case s.readwaitc <- struct{}{}: // linearizableReadLoop就会开始结束阻塞开始工作 + default: + } + + // 等待读状态通知 + select { + case <-nc.c: + return nc.err + case <-ctx.Done(): + return ctx.Err() + case <-s.done: + return ErrStopped + } +} diff --git 
a/etcd/etcdserver/over_quota.go b/etcd/etcdserver/over_quota.go new file mode 100644 index 00000000000..784c5731d8f --- /dev/null +++ b/etcd/etcdserver/over_quota.go @@ -0,0 +1,152 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "sync" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" +) + +const ( + DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB 是指在超过空间配额之前后端大小可能消耗的字节数. + MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB 是建议用于后端配额的最大字节数.较大的配额可能会导致性能下降. +) + +// Quota 代表一个针对任意请求的任意配额.每个请求要花费一定的费用;如果没有足够的剩余费用那么配额内可用的资源就太少了无法应用该请求. +type Quota interface { + Available(req interface{}) bool // 判断给定的请求是否符合配额要求. + Cost(req interface{}) int // 计算对某一请求的配额的开销. + Remaining() int64 // 剩余配额 +} + +type passthroughQuota struct{} + +func (*passthroughQuota) Available(interface{}) bool { return true } +func (*passthroughQuota) Cost(interface{}) int { return 0 } +func (*passthroughQuota) Remaining() int64 { return 1 } + +type backendQuota struct { + s *EtcdServer + maxBackendBytes int64 +} + +const ( + leaseOverhead = 64 // 是对租约物的存储成本的估计. + kvOverhead = 256 // 是对存储一个密钥的元数据的成本的估计. +) + +var ( + quotaLogOnce sync.Once + DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes)) + maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes)) +) + +// NewBackendQuota 创建一个具有给定存储限制的配额层. 
+func NewBackendQuota(s *EtcdServer, name string) Quota { + lg := s.Logger() + + if s.Cfg.QuotaBackendBytes < 0 { + quotaLogOnce.Do(func() { + lg.Info("禁用后端配额", zap.String("quota-name", name), zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes)) + }) + return &passthroughQuota{} + } + + if s.Cfg.QuotaBackendBytes == 0 { + quotaLogOnce.Do(func() { + if lg != nil { + lg.Info( + "启用后端配置默认值", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", DefaultQuotaBytes), + zap.String("quota-size", DefaultQuotaSize), + ) + } + }) + return &backendQuota{s, DefaultQuotaBytes} + } + + quotaLogOnce.Do(func() { + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + lg.Warn( + "配额超过了最大值", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes), + zap.String("quota-maximum-size", maxQuotaSize), + ) + } + lg.Info( + "启用配额", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + ) + }) + return &backendQuota{s, s.Cfg.QuotaBackendBytes} +} + +// Available 粗略计算是否可以存储 +func (b *backendQuota) Available(v interface{}) bool { + return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes +} + +// Cost 操作的开销 +func (b *backendQuota) Cost(v interface{}) int { + switch r := v.(type) { + case *pb.PutRequest: + return costPut(r) + case *pb.TxnRequest: + return costTxn(r) + case *pb.LeaseGrantRequest: + return leaseOverhead + default: + panic("未知的 cost") + } +} + +func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) } + +func costTxnReq(u *pb.RequestOp) int { + r := u.GetRequestPut() + if r == nil { + return 0 + } + return costPut(r) +} + +func costTxn(r *pb.TxnRequest) int { + sizeSuccess := 0 + for _, u := range r.Success { + sizeSuccess += costTxnReq(u) + } + sizeFailure := 
0 + for _, u := range r.Failure { + sizeFailure += costTxnReq(u) + } + if sizeFailure > sizeSuccess { + return sizeFailure + } + return sizeSuccess +} + +func (b *backendQuota) Remaining() int64 { + return b.maxBackendBytes - b.s.Backend().Size() +} diff --git a/etcd/etcdserver/over_raft.go b/etcd/etcdserver/over_raft.go new file mode 100644 index 00000000000..8ee093cea73 --- /dev/null +++ b/etcd/etcdserver/over_raft.go @@ -0,0 +1,673 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "encoding/json" + "expvar" + "fmt" + "log" + "sort" + "sync" + "time" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/config" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp" + "github.com/ls-2018/etcd_cn/pkg/contention" + "github.com/ls-2018/etcd_cn/raft/raftpb" + "go.uber.org/zap" +) + +const ( + maxSizePerMsg = 1 * 1024 * 1024 // 1M + maxInflightMsgs = 4096 / 8 // 512 +) + +var ( + // protects raftStatus + raftStatusMu sync.Mutex + // indirection for expvar func interface + // expvar panics when publishing duplicate name + // expvar does not support remove a registered name + // so only register a func that calls raftStatus + // and change raftStatus as we need. + raftStatus func() raft.Status +) + +func init() { + expvar.Publish("raft.status", expvar.Func(func() interface{} { + raftStatusMu.Lock() + defer raftStatusMu.Unlock() + if raftStatus == nil { + return nil + } + return raftStatus() + })) +} + +// apply contains entries, snapshot to backend applied. Once +// an apply is consumed, the entries will backend persisted to +// to raft storage concurrently; the application must read +// raftDone before assuming the raft messages are stable. 
+type apply struct { + entries []raftpb.Entry + snapshot raftpb.Snapshot + // notifyc synchronizes etcd etcd applies with the raft node + notifyc chan struct{} +} + +type raftNodeConfig struct { + lg *zap.Logger + isIDRemoved func(id uint64) bool // to check if msg receiver is removed from cluster + raft.RaftNodeInterFace + raftStorage *raft.MemoryStorage + storage Storage + heartbeat time.Duration // for logging + // transport specifies the transport to send and receive msgs to members. + // Sending messages MUST NOT block. It is okay to drop messages, since + // clients should timeout and reissue their messages. + // If transport is nil, etcd will panic. + transport rafthttp.Transporter +} + +func newRaftNode(cfg raftNodeConfig) *raftNode { + var lg raft.Logger + if cfg.lg != nil { + lg = NewRaftLoggerZap(cfg.lg) + } else { + lcfg := logutil.DefaultZapLoggerConfig + var err error + lg, err = NewRaftLogger(&lcfg) + if err != nil { + log.Fatalf("cannot create raft logger %v", err) + } + } + raft.SetLogger(lg) + r := &raftNode{ + lg: cfg.lg, + tickMu: new(sync.Mutex), + raftNodeConfig: cfg, + // set up contention detectors for raft heartbeat message. + // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * cfg.heartbeat), + readStateC: make(chan raft.ReadState, 1), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + applyc: make(chan apply), + stopped: make(chan struct{}), + done: make(chan struct{}), + } + if r.heartbeat == 0 { + r.ticker = &time.Ticker{} + } else { + r.ticker = time.NewTicker(r.heartbeat) + } + return r +} + +// raft状态机,维护raft状态机的步进和状态迁移. 
+type raftNode struct { + lg *zap.Logger + tickMu *sync.Mutex + raftNodeConfig // 包含了node、storage等重要数据结构 + msgSnapC chan raftpb.Message // a chan to send/receive snapshot + applyc chan apply // a chan to send out apply + readStateC chan raft.ReadState // 发送readState的chan + ticker *time.Ticker // raft 中有两个时间计数器,它们分别是选举计数器 (Follower/Candidate)和心跳计数器 (Leader),它们都依靠 tick 来推进时钟 + td *contention.TimeoutDetector // contention detectors for raft heartbeat message + stopped chan struct{} + done chan struct{} +} + +// 启动节点 +func startNode(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.RaftNodeInterFace, s *raft.MemoryStorage, w *wal.WAL) { + var err error + member := cl.MemberByName(cfg.Name) + metadata := pbutil.MustMarshal( + &pb.Metadata{ + NodeID: uint64(member.ID), + ClusterID: uint64(cl.ID()), + }, + ) + if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil { + cfg.Logger.Panic("创建WAL失败", zap.Error(err)) + } + if cfg.UnsafeNoFsync { // 非安全存储 默认是 false + w.SetUnsafeNoFsync() + } + peers := make([]raft.Peer, len(ids)) + for i, id := range ids { + var ctx []byte + ctx, err = json.Marshal((*cl).Member(id)) // 本机 + if err != nil { + cfg.Logger.Panic("序列化member失败", zap.Error(err)) + } + peers[i] = raft.Peer{ID: uint64(id), Context: ctx} + } + id = member.ID // 本机ID + cfg.Logger.Info( + "启动本节点", + zap.String("local-member-id", id.String()), + zap.String("cluster-id", cl.ID().String()), + ) + s = raft.NewMemoryStorage() // 创建内存存储 + c := &raft.Config{ + ID: uint64(id), // 本机ID + ElectionTick: cfg.ElectionTicks, // 返回选举权检查对应多少次tick触发次数 + HeartbeatTick: 1, // 返回心跳检查对应多少次tick触发次数 + Storage: s, // 存储 memory ✅ + MaxSizePerMsg: maxSizePerMsg, // 每次发消息的最大size + MaxInflightMsgs: maxInflightMsgs, // 512 + CheckQuorum: true, // 检查是否是leader + PreVote: cfg.PreVote, // true // 是否启用PreVote扩展,建议开启 + Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), + } + + _ = membership.NewClusterFromURLsMap + if len(peers) == 0 { + // 不会走这里 + n = 
raft.RestartNode(c) // 不会引导peers + } else { + n = raft.StartNode(c, peers) // ✅✈️ 🚗🚴🏻😁 + } + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + return id, n, s, w +} + +func restartNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.RaftNodeInterFace, *raft.MemoryStorage, *wal.WAL) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync) + + cfg.Logger.Info( + "restarting local member", + zap.String("cluster-id", cid.String()), + zap.String("local-member-id", id.String()), + zap.Uint64("commit-index", st.Commit), + ) + cl := membership.NewCluster(cfg.Logger) + cl.SetID(id, cid) + s := raft.NewMemoryStorage() + if snapshot != nil { + s.ApplySnapshot(*snapshot) // 来还原服务宕机前的状态. + } + s.SetHardState(st) // 从持久化的内存存储中恢复出状态 + s.Append(ents) // 从持久化的内存存储中恢复出日志 + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, // 返回选举权检查对应多少次tick触发次数 + HeartbeatTick: 1, // 返回心跳检查对应多少次tick触发次数 + Storage: s, + MaxSizePerMsg: maxSizePerMsg, // 每次发消息的最大size + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + PreVote: cfg.PreVote, // PreVote 是否启用PreVote + Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), + } + + n := raft.RestartNode(c) + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + return id, cl, n, s, w +} + +func restartAsStandaloneNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.RaftNodeInterFace, *raft.MemoryStorage, *wal.WAL) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync) + + // discard the previously uncommitted entries + for i, ent := range ents { + if ent.Index > st.Commit { 
+ cfg.Logger.Info( + "discarding uncommitted WAL entries", + zap.Uint64("entry-index", ent.Index), + zap.Uint64("commit-index-from-wal", st.Commit), + zap.Int("number-of-discarded-entries", len(ents)-i), + ) + ents = ents[:i] + break + } + } + + // force append the configuration change entries + toAppEnts := createConfigChangeEnts( + cfg.Logger, + getIDs(cfg.Logger, snapshot, ents), + uint64(id), + st.Term, + st.Commit, + ) + ents = append(ents, toAppEnts...) + + // force commit newly appended entries + err := w.Save(raftpb.HardState{}, toAppEnts) + if err != nil { + cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err)) + } + if len(ents) != 0 { + st.Commit = ents[len(ents)-1].Index + } + + cfg.Logger.Info( + "forcing restart member", + zap.String("cluster-id", cid.String()), + zap.String("local-member-id", id.String()), + zap.Uint64("commit-index", st.Commit), + ) + + cl := membership.NewCluster(cfg.Logger) + cl.SetID(id, cid) + s := raft.NewMemoryStorage() + if snapshot != nil { + s.ApplySnapshot(*snapshot) // 来还原服务宕机前的状态. + } + s.SetHardState(st) // 从持久化的内存存储中恢复出状态 + s.Append(ents) // 从持久化的内存存储中恢复出日志 + c := &raft.Config{ + ID: uint64(id), + ElectionTick: cfg.ElectionTicks, // 返回选举权检查对应多少次tick触发次数 + HeartbeatTick: 1, // 返回心跳检查对应多少次tick触发次数 + Storage: s, + MaxSizePerMsg: maxSizePerMsg, // 每次发消息的最大size + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + PreVote: cfg.PreVote, // PreVote 是否启用PreVote + Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), + } + + n := raft.RestartNode(c) + raftStatus = n.Status + return id, cl, n, s, w +} + +// getIDs returns an ordered set of IDs included in the given snapshot and +// the entries. The given snapshot/entries can contain three kinds of +// ID-related entry: +// - ConfChangeAddNode, in which case the contained ID will backend added into the set. +// - ConfChangeRemoveNode, in which case the contained ID will backend removed from the set. 
+// - ConfChangeAddLearnerNode, in which the contained ID will backend added into the set. +func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { + ids := make(map[uint64]bool) + if snap != nil { + for _, id := range snap.Metadata.ConfState.Voters { + ids[id] = true + } + } + for _, e := range ents { + if e.Type != raftpb.EntryConfChange { + continue + } + var cc raftpb.ConfChangeV1 + pbutil.MustUnmarshal(&cc, e.Data) + switch cc.Type { + case raftpb.ConfChangeAddLearnerNode: + ids[cc.NodeID] = true + case raftpb.ConfChangeAddNode: + ids[cc.NodeID] = true + case raftpb.ConfChangeRemoveNode: + delete(ids, cc.NodeID) + case raftpb.ConfChangeUpdateNode: + // do nothing + default: + lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String())) + } + } + sids := make(types.Uint64Slice, 0, len(ids)) + for id := range ids { + sids = append(sids, id) + } + sort.Sort(sids) + return []uint64(sids) +} + +// createConfigChangeEnts creates a series of Raft entries (i.e. +// EntryConfChange) to remove the set of given IDs from the cluster. The ID +// `self` is _not_ removed, even if present in the set. +// If `self` is not inside the given ids, it creates a Raft entry to add a +// default member with the given `self`. +func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry { + found := false + for _, id := range ids { + if id == self { + found = true + } + } + + var ents []raftpb.Entry + next := index + 1 + + // NB: always add self first, then remove other nodes. Raft will panic if the + // set of voters ever becomes empty. 
+ if !found { + m := membership.Member{ + ID: types.ID(self), + RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, + } + ctx, err := json.Marshal(m) + if err != nil { + lg.Panic("failed to marshal member", zap.Error(err)) + } + cc := &raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeAddNode, + NodeID: self, + Context: string(ctx), + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), // ok + Term: term, + Index: next, + } + ents = append(ents, e) + next++ + } + + for _, id := range ids { + if id == self { + continue + } + cc := &raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + _ = cc.Marshal + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), // ok + Term: term, + Index: next, + } + ents = append(ents, e) + next++ + } + + return ents +} + +// raft.RaftNodeInterFace raft包中没有lock +func (r *raftNode) tick() { + r.tickMu.Lock() + r.Tick() + r.tickMu.Unlock() +} + +// 心跳触发EtcdServer定时触发 非常重要 +func (r *raftNode) start(rh *raftReadyHandler) { + internalTimeout := time.Second + + go func() { + defer r.onStop() + islead := false + + for { + select { + case <-r.ticker.C: // 推进心跳或者选举计时器 + r.tick() + // readyc = n.readyc size为0 + case rd := <-r.Ready(): // 调用Node.Ready(),从返回的channel中获取数据 + // 获取ready结构中的committedEntries,提交给Apply模块应用到后端存储中. 
+ // ReadStates不为空的处理逻辑 + if rd.SoftState != nil { + // SoftState不为空的处理逻辑 + newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead + rh.updateLead(rd.SoftState.Lead) + islead = rd.RaftState == raft.StateLeader + rh.updateLeadership(newLeader) + r.td.Reset() + } + // ReadStates不为空的处理逻辑 ,线性一致性读的标志 + if len(rd.ReadStates) != 0 { + select { + case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: + case <-time.After(internalTimeout): + r.lg.Warn("发送读状态超时", zap.Duration("timeout", internalTimeout)) + case <-r.stopped: + return + } + } + + // 生成apply请求 + notifyc := make(chan struct{}, 1) + ap := apply{ + entries: rd.CommittedEntries, + snapshot: rd.Snapshot, + notifyc: notifyc, + } + // 更新etcdServer缓存的commitIndex为最新值 + updateCommittedIndex(&ap, rh) + + select { + case r.applyc <- ap: // 将已提交日志应用到状态机 + case <-r.stopped: + return + } + + // 如果是Leader发送消息给Follower + if islead { + // 一旦这里收到rd raft 就会调用acceptReady 将 rn.raft.msgs 置空 + r.transport.Send(r.processMessages(rd.Messages)) + } + + // 如果有snapshot + if !raft.IsEmptySnap(rd.Snapshot) { + // gofail: var raftBeforeSaveSnap struct{} + if err := r.storage.SaveSnap(rd.Snapshot); err != nil { + r.lg.Fatal("failed to save Raft snapshot", zap.Error(err)) + } + // gofail: var raftAfterSaveSnap struct{} + } + + // 将hardState和日志条目保存到WAL中 + if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { + r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err)) + } + if !raft.IsEmptyHardState(rd.HardState) { + } + // gofail: var raftAfterSave struct{} + + if !raft.IsEmptySnap(rd.Snapshot) { + // Force WAL to fsync its hard state before Release() releases + // old data from the WAL. Otherwise could get an error like: + // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost? + // See https://github.com/etcd-io/etcd/issues/10219 for more details. 
+ if err := r.storage.Sync(); err != nil { // 强制wal日志落盘 + r.lg.Fatal("failed to sync Raft snapshot", zap.Error(err)) + } + + // etcdserver now claim the snapshot has been persisted onto the disk + notifyc <- struct{}{} + + // gofail: var raftBeforeApplySnap struct{} + r.raftStorage.ApplySnapshot(rd.Snapshot) // 从持久化的内存存储中恢复出快照 + r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index)) + // gofail: var raftAfterApplySnap struct{} + + if err := r.storage.Release(rd.Snapshot); err != nil { + r.lg.Fatal("failed to release Raft wal", zap.Error(err)) + } + // gofail: var raftAfterWALRelease struct{} + } + + r.raftStorage.Append(rd.Entries) // 从持久化的内存存储中恢复出日志 + + if !islead { + // 对消息封装成传输协议要求的格式,还会做超时控制 + msgs := r.processMessages(rd.Messages) + + // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots + notifyc <- struct{}{} + + // Candidate or follower needs to wait for all pending configuration + // changes to backend applied before sending messages. + // Otherwise we might incorrectly count votes (e.g. votes from removed members). + // Also slow machine's follower raft-layer could proceed to become the leader + // on its own single-node cluster, before apply-layer applies the config change. + // We simply wait for ALL pending entries to backend applied for now. + // We might improve this later on if it causes unnecessary long blocking issues. 
+ waitApply := false + for _, ent := range rd.CommittedEntries { + if ent.Type == raftpb.EntryConfChange { + waitApply = true + break + } + } + if waitApply { + // blocks until 'applyAll' calls 'applyWait.Trigger' + // to backend in sync with scheduled config-change job + // (assume notifyc has cap of 1) + select { + case notifyc <- struct{}{}: + case <-r.stopped: + return + } + } + // 将响应数据返回给对端 + r.transport.Send(msgs) + } else { + // leader already processed 'MsgSnap' and signaled + notifyc <- struct{}{} + } + // 更新raft模块的applied index和将日志从unstable转到stable中 + // 这里需要注意的是,在将已提交日志条目应用到状态机的操作是异步完成的,在Apply完成后,会将结果写到客户端调用进来时注册的channel中.这样一次完整的写操作就完成了. + r.Advance() + case <-r.stopped: + return + } + } + }() +} + +func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { + var ci uint64 + if len(ap.entries) != 0 { + ci = ap.entries[len(ap.entries)-1].Index + } + if ap.snapshot.Metadata.Index > ci { + ci = ap.snapshot.Metadata.Index + } + if ci != 0 { + rh.updateCommittedIndex(ci) + } +} + +// 对消息封装成传输协议要求的格式,还会做超时控制 +func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { + sentAppResp := false + for i := len(ms) - 1; i >= 0; i-- { + if r.isIDRemoved(ms[i].To) { + ms[i].To = 0 + } + + if ms[i].Type == raftpb.MsgAppResp { + if sentAppResp { + ms[i].To = 0 + } else { + sentAppResp = true + } + } + + if ms[i].Type == raftpb.MsgSnap { + // There are two separate data store: the store for v2, and the KV for v3. + // The msgSnap only contains the most recent snapshot of store without KV. + // So we need to redirect the msgSnap to etcd etcd main loop for merging in the + // current store snapshot and KV snapshot. + select { + case r.msgSnapC <- ms[i]: + default: + // drop msgSnap if the inflight chan if full. + } + ms[i].To = 0 + } + if ms[i].Type == raftpb.MsgHeartbeat { + ok, exceed := r.td.Observe(ms[i].To) + if !ok { + // TODO: limit request rate. 
+ r.lg.Warn( + "leader未能按时发出心跳,时间太长,可能是因为磁盘慢而过载", + zap.String("to", fmt.Sprintf("%x", ms[i].To)), + zap.Duration("heartbeat-interval", r.heartbeat), + zap.Duration("expected-duration", 2*r.heartbeat), + zap.Duration("exceeded-duration", exceed), + ) + } + } + } + return ms +} + +func (r *raftNode) apply() chan apply { + return r.applyc +} + +func (r *raftNode) stop() { + r.stopped <- struct{}{} + <-r.done +} + +func (r *raftNode) onStop() { + r.Stop() + r.ticker.Stop() + r.transport.Stop() + if err := r.storage.Close(); err != nil { + r.lg.Panic("failed to close Raft storage", zap.Error(err)) + } + close(r.done) +} + +// for testing +func (r *raftNode) pauseSending() { + p := r.transport.(rafthttp.Pausable) + p.Pause() +} + +func (r *raftNode) resumeSending() { + p := r.transport.(rafthttp.Pausable) + p.Resume() +} + +// advanceTicks advances ticks of Raft node. +// This can backend used for fast-forwarding election +// ticks in multi data-center deployments, thus +// speeding up election process. 
+func (r *raftNode) advanceTicks(ticks int) { + for i := 0; i < ticks; i++ { + r.tick() + } +} + +// Demo 凸(艹皿艹 ) 明明没有实现这个方法啊 +func (r *raftNode) Demo() { + _ = r.raftNodeConfig.RaftNodeInterFace + // 两层匿名结构体,该字段是个接口 + _ = r.Step + // var _ raft.RaftNodeInterFace = raftNode{} +} diff --git a/etcd/etcdserver/over_v3service_cluster.go b/etcd/etcdserver/over_v3service_cluster.go new file mode 100644 index 00000000000..bd5897bef22 --- /dev/null +++ b/etcd/etcdserver/over_v3service_cluster.go @@ -0,0 +1,189 @@ +package etcdserver + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/raft/raftpb" + "go.uber.org/zap" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" +) + +// LinearizableReadNotify 一致性读 +func (s *EtcdServer) LinearizableReadNotify(ctx context.Context) error { + return s.linearizeReadNotify(ctx) +} + +// AddMember ok +func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + b, err := json.Marshal(memb) + if err != nil { + return nil, err + } + + lg := s.Logger() + // 默认情况下,StrictReconfigCheck是启用的;拒绝不健康的新成员. 
+ if !s.Cfg.StrictReconfigCheck { + } else { + // 添加投票成员时保护法定人数 + if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() { + lg.Warn("拒绝成员添加申请;健康成员个数不足", zap.String("local-member-id", s.ID().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), + zap.Error(ErrNotEnoughStartedMembers), + ) + return nil, ErrNotEnoughStartedMembers + } + // 一个心跳间隔之前,是否与所有节点建立了链接 + if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) { + lg.Warn( + "拒绝成员添加请求;本地成员尚未连接到所有对等体,请重新配置中断活动仲裁", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), + zap.Error(ErrUnhealthy), + ) + return nil, ErrUnhealthy + } + } + cc := raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeAddNode, + NodeID: uint64(memb.ID), + Context: string(b), + } + + if memb.IsLearner { + cc.Type = raftpb.ConfChangeAddLearnerNode + } + + return s.configureAndSendRaft(ctx, cc) +} + +// RemoveMember ok +func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + if err := s.mayRemoveMember(types.ID(id)); err != nil { + return nil, err + } + + cc := raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + return s.configureAndSendRaft(ctx, cc) +} + +// UpdateMember ok +func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + b, merr := json.Marshal(memb) + if merr != nil { + return nil, merr + } + + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + cc := raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeUpdateNode, + NodeID: uint64(memb.ID), + Context: string(b), + } + return s.configureAndSendRaft(ctx, cc) +} + +// PromoteMember 将learner节点提升为voter +func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + // 
只有raft leader有信息,知道learner是否准备好. + resp, err := s.promoteMember(ctx, id) // raft已经同步消息了 + if err == nil { + return resp, nil + } + if err != ErrNotLeader { + return resp, err + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + // 转发到leader + for cctx.Err() == nil { + leader, err := s.waitLeader(cctx) + if err != nil { + return nil, err + } + for _, url := range leader.PeerURLs { + resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt) + if err == nil { + return resp, nil + } + if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { + return nil, err + } + } + } + + if cctx.Err() == context.DeadlineExceeded { + return nil, ErrTimeout + } + return nil, ErrCanceled +} + +// promoteMember +func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + if err := s.mayPromoteMember(types.ID(id)); err != nil { + return nil, err + } + + promoteChangeContext := membership.ConfigChangeContext{ + Member: membership.Member{ + ID: types.ID(id), + }, + IsPromote: true, + } + + b, err := json.Marshal(promoteChangeContext) + if err != nil { + return nil, err + } + + cc := raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeAddNode, + NodeID: id, + Context: string(b), + } + + return s.configureAndSendRaft(ctx, cc) +} + +// OK +func (s *EtcdServer) mayPromoteMember(id types.ID) error { + lg := s.Logger() + err := s.isLearnerReady(uint64(id)) // 检查learner的同步的数据有没有打到90% + if err != nil { + return err + } + + if !s.Cfg.StrictReconfigCheck { // 严格配置变更检查 + return nil + } + if !s.cluster.IsReadyToPromoteMember(uint64(id)) { + lg.Warn("拒绝成员提升申请;健康成员个数不足", zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-remove-id", id.String()), + zap.Error(ErrNotEnoughStartedMembers), + ) + return ErrNotEnoughStartedMembers + } + + return nil +} diff --git 
a/etcd/etcdserver/over_v3service_kv.go b/etcd/etcdserver/over_v3service_kv.go new file mode 100644 index 00000000000..2e98ace943f --- /dev/null +++ b/etcd/etcdserver/over_v3service_kv.go @@ -0,0 +1,149 @@ +package etcdserver + +import ( + "context" + "time" + + "github.com/ls-2018/etcd_cn/etcd/auth" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" +) + +type RaftKV interface { + Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) + Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) + DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) + Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) +} + +func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + if isTxnReadonly(r) { + trace := traceutil.New("transaction", s.Logger(), traceutil.Field{Key: "read_only", Value: true}) + ctx = context.WithValue(ctx, traceutil.TraceKey, trace) + if !isTxnSerializable(r) { + err := s.linearizeReadNotify(ctx) + trace.Step("在线性读之前,保持raft节点间的一致性") + if err != nil { + return nil, err + } + } + var resp *pb.TxnResponse + var err error + chk := func(ai *auth.AuthInfo) error { + return checkTxnAuth(s.authStore, ai, r) + } + + get := func() { resp, _, err = s.applyV3Base.Txn(ctx, r) } + if serr := s.doSerialize(ctx, chk, get); serr != nil { + return nil, serr + } + return resp, err + } + + ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now()) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + if err != nil { + return nil, err + } + return resp.(*pb.TxnResponse), nil +} + +func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) + if err != nil { + return nil, err + } + 
return resp.(*pb.DeleteRangeResponse), nil +} + +// Compact 压缩kv历史版本 +func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { + startTime := time.Now() + result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) + trace := traceutil.TODO() + if result != nil && result.trace != nil { + trace = result.trace + applyStart := result.trace.GetStartTime() + result.trace.SetStartTime(startTime) + trace.InsertStep(0, applyStart, "处理raft请求") + } + if r.Physical && result != nil && result.physc != nil { + <-result.physc + // 压实工作已经完成,删除了键;现在哈希已经解决了,但数据不一定被提交.如果出现崩溃, + // 如果压实工作恢复,哈希值可能会恢复到压实完成前的哈希值.强制完成的压实到 提交,这样它就不会在崩溃后恢复. + s.backend.ForceCommit() + trace.Step("物理压实") + } + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + resp := result.resp.(*pb.CompactionResponse) + if resp == nil { + resp = &pb.CompactionResponse{} + } + if resp.Header == nil { + resp.Header = &pb.ResponseHeader{} + } + resp.Header.Revision = s.kv.Rev() + trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) + return resp, nil +} + +// RaftRequest myself test +func (s *EtcdServer) RaftRequest(ctx context.Context, r pb.InternalRaftRequest) { + s.raftRequest(ctx, r) +} + +func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + trace := traceutil.New("range", s.Logger(), traceutil.Field{Key: "range_begin", Value: string(r.Key)}, traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)}) + ctx = context.WithValue(ctx, traceutil.TraceKey, trace) // trace + var resp *pb.RangeResponse + var err error + defer func(start time.Time) { + if resp != nil { + trace.AddField( + traceutil.Field{Key: "response_count", Value: len(resp.Kvs)}, + traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}, + ) + } + }(time.Now()) + // 如果需要线性一致性读,执行 linearizableReadNotify + // 此处将会一直阻塞直到 apply index >= read 
index + if !r.Serializable { + err = s.linearizeReadNotify(ctx) // 发准备信号,并等待结果 + trace.Step("在线性化读数之前,raft节点之间的一致.") + if err != nil { + return nil, err + } + } + // serializable read 会直接读取当前节点的数据返回给客户端,它并不能保证返回给客户端的数据是最新的 + chk := func(ai *auth.AuthInfo) error { + return s.authStore.IsRangePermitted(ai, []byte(r.Key), []byte(r.RangeEnd)) // health,nil + } + + get := func() { + _ = applierV3backend{} + // 执行到这里说明读请求的 apply index >= read index + // 可以安全地读 bbolt 进行 read 操作 + resp, err = s.applyV3Base.Range(ctx, nil, r) + } + if serr := s.doSerialize(ctx, chk, get); serr != nil { + err = serr + return nil, err + } + return resp, err +} + +// Put OK +func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now()) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) + if err != nil { + return nil, err + } + return resp.(*pb.PutResponse), nil +} diff --git a/etcd/etcdserver/over_v3service_lease.go b/etcd/etcdserver/over_v3service_lease.go new file mode 100644 index 00000000000..bc8ad1705d4 --- /dev/null +++ b/etcd/etcdserver/over_v3service_lease.go @@ -0,0 +1,141 @@ +package etcdserver + +import ( + "context" + "fmt" + "time" + + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/lease/leasehttp" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +// 租约 续租 +// 检索租约信息 +// 显示所有存在的租约 + +type Lessor interface { + LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) // 创建租约 + LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) // 移除租约 + LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) // 租约 续租 + LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) // 检索租约信息. 
+ LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) // 显示所有租约信息 +} + +// LeaseGrant 创建租约 +func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + // 没有提供租约ID,自己生成一个 + for r.ID == int64(lease.NoLease) { + // 只使用正的int64 id + r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) + } + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{LeaseGrant: r}) + fmt.Println("LeaseGrant--->:", resp) + + if err != nil { + return nil, err + } + return resp.(*pb.LeaseGrantResponse), nil +} + +// LeaseRevoke 移除租约 +func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) + if err != nil { + return nil, err + } + return resp.(*pb.LeaseRevokeResponse), nil +} + +// LeaseRenew 租约 续租 +func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { + ttl, err := s.lessor.Renew(id) // 已经向主要出租人(领导人)提出请求 + if err == nil { // + return ttl, nil + } + if err != lease.ErrNotPrimary { + return -1, err + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + // renew不通过raft;手动转发给leader + for cctx.Err() == nil && err != nil { + leader, lerr := s.waitLeader(cctx) + if lerr != nil { + return -1, lerr + } + for _, url := range leader.PeerURLs { + lurl := url + leasehttp.LeasePrefix + ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) + if err == nil || err == lease.ErrLeaseNotFound { + return ttl, err + } + } + // Throttle in case of e.g. connection problems. 
+ time.Sleep(50 * time.Millisecond) + } + + if cctx.Err() == context.DeadlineExceeded { + return -1, ErrTimeout + } + return -1, ErrCanceled +} + +// LeaseTimeToLive 检索租约信息 +func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { + if s.Leader() == s.ID() { + le := s.lessor.Lookup(lease.LeaseID(r.ID)) + if le == nil { + return nil, lease.ErrLeaseNotFound + } + resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()} + if r.Keys { + ks := le.Keys() + kbs := make([][]byte, len(ks)) + for i := range ks { + kbs[i] = []byte(ks[i]) + } + resp.Keys = kbs + } + return resp, nil + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + + // 转发到leader + for cctx.Err() == nil { + leader, err := s.waitLeader(cctx) + if err != nil { + return nil, err + } + for _, url := range leader.PeerURLs { + // + lurl := url + leasehttp.LeaseInternalPrefix // /leases/internal + resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt) + if err == nil { + return resp.LeaseTimeToLiveResponse, nil + } + if err == lease.ErrLeaseNotFound { + return nil, err + } + } + } + + if cctx.Err() == context.DeadlineExceeded { + return nil, ErrTimeout + } + return nil, ErrCanceled +} + +// LeaseLeases 显示所有租约信息 +func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { + ls := s.lessor.Leases() // 获取当前节点上的所有租约 + lss := make([]*pb.LeaseStatus, len(ls)) + for i := range ls { + lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)} + } + return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil +} diff --git a/etcd/etcdserver/server.go b/etcd/etcdserver/server.go new file mode 100644 index 00000000000..da96cb54a5c --- /dev/null +++ b/etcd/etcdserver/server.go @@ -0,0 +1,2229 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "context" + "encoding/json" + "expvar" + "fmt" + "math" + "math/rand" + "net/http" + "os" + "path" + "regexp" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/coreos/go-semver/semver" + humanize "github.com/dustin/go-humanize" + "github.com/ls-2018/etcd_cn/etcd/config" + "go.uber.org/zap" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2discovery" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes" + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3alarm" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3compactor" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex" + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + pb 
"github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/idutil" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/pkg/runtime" + "github.com/ls-2018/etcd_cn/pkg/schedule" + "github.com/ls-2018/etcd_cn/pkg/traceutil" + "github.com/ls-2018/etcd_cn/pkg/wait" + "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +const ( + DefaultSnapshotCount = 100000 + + // DefaultSnapshotCatchUpEntries is the number of entries for a slow follower + // to catch-up after compacting the raft storage entries. + // We expect the follower has a millisecond level latency with the leader. + // The max throughput is around 10K. Keep a 5K entries is enough for helping + // follower to catch up. + DefaultSnapshotCatchUpEntries uint64 = 5000 + + StoreClusterPrefix = "/0" + StoreKeysPrefix = "/1" + + // HealthInterval is the minimum time the cluster should backend healthy + // before accepting add member requests. + HealthInterval = 5 * time.Second + + purgeFileInterval = 30 * time.Second + + // max number of in-flight snapshot messages etcdserver allows to have + // This number is more than enough for most clusters with 5 machines. + maxInFlightMsgSnap = 16 + + releaseDelayAfterSnapshot = 30 * time.Second + + // maxPendingRevokes is the maximum number of outstanding expired lease revocations. + maxPendingRevokes = 16 + + recommendedMaxRequestBytes = 10 * 1024 * 1024 // 10M + + readyPercent = 0.9 + + DowngradeEnabledPath = "/downgrade/enabled" +) + +var ( + // monitorVersionInterval should backend smaller than the timeout + // on the connection. Or we will not backend able to reuse the connection + // (since it will timeout). 
+ monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second + + recommendedMaxRequestBytesString = humanize.Bytes(uint64(recommendedMaxRequestBytes)) + storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) +) + +func init() { + rand.Seed(time.Now().UnixNano()) + + expvar.Publish( + "file_descriptor_limit", + expvar.Func( + func() interface{} { + n, _ := runtime.FDLimit() + return n + }, + ), + ) +} + +type Response struct { + Term uint64 + Index uint64 + Event *v2store.Event + Watcher v2store.Watcher + Err error +} + +type ServerV2 interface { + Server + Leader() types.ID + // Do takes a V2 request and attempts to fulfill it, returning a Response. + Do(ctx context.Context, r pb.Request) (Response, error) + stats.Stats + ClientCertAuthEnabled() bool +} + +type ServerV3 interface { + Server + RaftStatusGetter +} + +func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled } + +type Server interface { + AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) // http 添加节点 + RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) // http 移除节点 + UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) // http 更新节点 + PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) // http 提升节点 + ClusterVersion() *semver.Version // + Cluster() api.Cluster // 返回内部集群cluster 结构体 + Alarms() []*pb.AlarmMember // + LeaderChangedNotify() <-chan struct{} // 领导者变更通知 + // 1. 当领导层发生变化时,返回的通道将被关闭. + // 2. 因此,每一个任期都需要获得新的通道. + // 3. 用户可能会因为使用这个API而失去一些连续的频道变化. +} + +// EtcdServer 整个etcd节点的功能的入口,包含etcd节点运行过程中需要的大部分成员. 
+type EtcdServer struct { + inflightSnapshots int64 // 当前正在发送的snapshot数量 + appliedIndex uint64 // 已经apply到状态机的日志index + committedIndex uint64 // 已经提交的日志index,也就是leader确认多数成员已经同步了的日志index + term uint64 + lead uint64 + consistIndex cindex.ConsistentIndexer // 已经持久化到kvstore的index + r raftNode // 重要的数据结果,存储了raft的状态机信息. + readych chan struct{} // 启动成功并注册了自己到cluster,关闭这个通道. + Cfg config.ServerConfig // 配置项 + lgMu *sync.RWMutex + lg *zap.Logger + w wait.Wait // 为了同步调用情况下让调用者阻塞等待调用结果的. + readMu sync.RWMutex // 下面3个结果都是为了实现linearizable 读使用的 + readwaitc chan struct{} // 通过向readwaitC发送一个空结构体来通知etcd服务器它正在等待读取 + readNotifier *notifier // 在没有错误时通知read goroutine 可以处理请求 + + stop chan struct{} // 停止通道 + stopping chan struct{} // 停止时关闭这个通道 + done chan struct{} // etcd的start函数中的循环退出,会关闭这个通道 + leaderChanged chan struct{} // leader变换后 通知linearizable read loop drop掉旧的读请求 + leaderChangedMu sync.RWMutex // + errorc chan error // 错误通道,用以传入不可恢复的错误,关闭raft状态机. + id types.ID // etcd实例id + attributes membership.Attributes // etcd实例属性 + cluster *membership.RaftCluster // 集群信息 + v2store v2store.Store // v2的kv存储 + snapshotter *snap.Snapshotter // 用以snapshot + applyV2 ApplierV2 // v2的applier,用于将commited index apply到raft状态机 + applyV3 applierV3 // v3的applier,用于将commited index apply到raft状态机 + applyV3Base applierV3 // 剥去了鉴权和配额功能的applyV3 + applyV3Internal applierV3Internal // v3的内部applier + applyWait wait.WaitTime // apply的等待队列,等待某个index的日志apply完成 + kv mvcc.WatchableKV // v3用的kv存储 + lessor lease.Lessor // v3用,作用是实现过期时间 + backendLock sync.Mutex // 守护后端存储的锁,改变后端存储和获取后端存储是使用 + backend backend.Backend // 后端存储 bolt.db + beHooks *backendHooks // 存储钩子 + authStore auth.AuthStore // 存储鉴权数据 + alarmStore *v3alarm.AlarmStore // 存储告警数据 + stats *stats.ServerStats // 当前节点状态 + lstats *stats.LeaderStats // leader状态 + SyncTicker *time.Ticker // v2用,实现ttl数据过期的 + compactor v3compactor.Compactor // 压缩数据的周期任务 + peerRt http.RoundTripper // 用于发送远程请求 + reqIDGen *idutil.Generator // 用于生成请求id + // wgMu blocks concurrent waitgroup 
mutation while etcd stopping + wgMu sync.RWMutex + // wg is used to wait for the goroutines that depends on the etcd state + // to exit when stopping the etcd. + wg sync.WaitGroup + ctx context.Context // 用于由etcd发起的请求这些请求可能需要在etcd关机时被后端取消. + cancel context.CancelFunc + leadTimeMu sync.RWMutex + leadElectedTime time.Time + firstCommitInTermMu sync.RWMutex + firstCommitInTermC chan struct{} // 任期内的第一次commit时创建的 + *AccessController +} + +// 后端存储钩子 +type backendHooks struct { + indexer cindex.ConsistentIndexer // 一致性存储的索引 + lg *zap.Logger + confState raftpb.ConfState // 集群当前的配置信息 + // first write changes it to 'dirty'. false by default, so + // not initialized `confState` is meaningless. + confStateDirty bool + confStateLock sync.Mutex +} + +func (bh *backendHooks) OnPreCommitUnsafe(tx backend.BatchTx) { + bh.indexer.UnsafeSave(tx) + bh.confStateLock.Lock() + defer bh.confStateLock.Unlock() + if bh.confStateDirty { + membership.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState) + // save bh.confState + bh.confStateDirty = false + } +} + +func (bh *backendHooks) SetConfState(confState *raftpb.ConfState) { + bh.confStateLock.Lock() + defer bh.confStateLock.Unlock() + bh.confState = *confState + bh.confStateDirty = true +} + +type Temp struct { + Bepath string + W *wal.WAL + N raft.RaftNodeInterFace + S *raft.MemoryStorage + ID types.ID + CL *membership.RaftCluster + Remotes []*membership.Member + Snapshot *raftpb.Snapshot + Prt http.RoundTripper + SS *snap.Snapshotter + ST v2store.Store + CI cindex.ConsistentIndexer + BeExist bool + BeHooks *backendHooks + BE backend.Backend +} + +func MySelfStartRaft(cfg config.ServerConfig) (temp *Temp, err error) { + temp = &Temp{} + temp.ST = v2store.New(StoreClusterPrefix, StoreKeysPrefix) // 创建了一个store结构体 /0 /1 + + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { // 10M + cfg.Logger.Warn( + "超过了建议的请求限度", + zap.Uint("max-request-bytes", cfg.MaxRequestBytes), + zap.String("max-request-size", 
humanize.Bytes(uint64(cfg.MaxRequestBytes))), + zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), + zap.String("recommended-request-size", recommendedMaxRequestBytesString), + ) + } + // 存在也可以 + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { + return nil, fmt.Errorf("无法访问数据目录: %v", terr) + } + + haveWAL := wal.Exist(cfg.WALDir()) // default.etcd/member/wal + // default.etcd/member/snap + if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { + cfg.Logger.Fatal( + "创建快照目录失败", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } + // 移除格式匹配的文件 + if err = fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool { + return strings.HasPrefix(fileName, "tmp") + }); err != nil { + cfg.Logger.Error( + "删除快照目录下的临时文件", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } + // 创建快照struct + temp.SS = snap.New(cfg.Logger, cfg.SnapDir()) + + temp.Bepath = cfg.BackendPath() // default.etcd/member/snap/db + temp.BeExist = fileutil.Exist(temp.Bepath) + + temp.CI = cindex.NewConsistentIndex(nil) // pointer + temp.BeHooks = &backendHooks{lg: cfg.Logger, indexer: temp.CI} + temp.BE = openBackend(cfg, temp.BeHooks) + temp.CI.SetBackend(temp.BE) + cindex.CreateMetaBucket(temp.BE.BatchTx()) + + // 启动时,判断要不要进行碎片整理 + if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 { + err := maybeDefragBackend(cfg, temp.BE) + if err != nil { + return nil, err + } + } + + defer func() { + if err != nil { + temp.BE.Close() + } + }() + // 服务端的 + temp.Prt, err = rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout()) + if err != nil { + return nil, err + } + + switch { + case !haveWAL && !cfg.NewCluster: // false true 重新加入的成员 + if err = cfg.VerifyJoinExisting(); err != nil { + return nil, err + } + temp.CL, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + if err != nil { + return nil, err + } + existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, 
getRemotePeerURLs(temp.CL, cfg.Name), temp.Prt) + if gerr != nil { + return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) + } + if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, temp.CL, existingCluster); err != nil { + return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) + } + if !isCompatibleWithCluster(cfg.Logger, temp.CL, temp.CL.MemberByName(cfg.Name).ID, temp.Prt) { + return nil, fmt.Errorf("incompatible with current running cluster") + } + + temp.Remotes = existingCluster.Members() + temp.CL.SetID(types.ID(0), existingCluster.ID()) + temp.CL.SetStore(temp.ST) + temp.CL.SetBackend(temp.BE) + temp.ID, temp.N, temp.S, temp.W = startNode(cfg, temp.CL, nil) + temp.CL.SetID(temp.ID, existingCluster.ID()) + + case !haveWAL && cfg.NewCluster: // false true 初始新成员 + if err = cfg.VerifyBootstrap(); err != nil { // 验证peer 通信地址、--initial-advertise-peer-urls" and "--initial-cluster + return nil, err + } + // 创建RaftCluster + temp.CL, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + if err != nil { + return nil, err + } + m := temp.CL.MemberByName(cfg.Name) // 返回本节点的信息 + if isMemberBootstrapped(cfg.Logger, temp.CL, cfg.Name, temp.Prt, cfg.BootstrapTimeoutEffective()) { + return nil, fmt.Errorf("成员 %s 已经引导过", m.ID) + } + // TODO 是否使用discovery 发现其他节点 + if cfg.ShouldDiscover() { + var str string + str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) + if err != nil { + return nil, &DiscoveryError{Op: "join", Err: err} + } + var urlsmap types.URLsMap + urlsmap, err = types.NewURLsMap(str) + if err != nil { + return nil, err + } + if config.CheckDuplicateURL(urlsmap) { + return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) + } + if temp.CL, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil { + return nil, err + } + } + 
temp.CL.SetStore(temp.ST) // 结构体 + temp.CL.SetBackend(temp.BE) + // 启动节点 + temp.ID, temp.N, temp.S, temp.W = startNode(cfg, temp.CL, temp.CL.MemberIDs()) // ✅✈️ 🚗🚴🏻😁 + temp.CL.SetID(temp.ID, temp.CL.ID()) + + case haveWAL: + if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { + return nil, fmt.Errorf("cannot write to member directory: %v", err) + } + + if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { + return nil, fmt.Errorf("cannot write to WAL directory: %v", err) + } + + if cfg.ShouldDiscover() { + cfg.Logger.Warn( + "discovery token is ignored since cluster already initialized; valid logs are found", + zap.String("wal-dir", cfg.WALDir()), + ) + } + + // Find a snapshot to start/restart a raft node + walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir()) + if err != nil { + return nil, err + } + // snapshot files can backend orphaned if etcd crashes after writing them but before writing the corresponding + // wal log entries + temp.Snapshot, err = temp.SS.LoadNewestAvailable(walSnaps) + if err != nil && err != snap.ErrNoSnapshot { + return nil, err + } + + if temp.Snapshot != nil { + if err = temp.ST.Recovery(temp.Snapshot.Data); err != nil { + cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err)) + } + + if err = assertNoV2StoreContent(cfg.Logger, temp.ST, cfg.V2Deprecation); err != nil { + cfg.Logger.Error("illegal v2store content", zap.Error(err)) + return nil, err + } + + cfg.Logger.Info( + "recovered v2 store from snapshot", + zap.Uint64("snapshot-index", temp.Snapshot.Metadata.Index), + zap.String("snapshot-size", humanize.Bytes(uint64(temp.Snapshot.Size()))), + ) + + if temp.BE, err = recoverSnapshotBackend(cfg, temp.BE, *temp.Snapshot, temp.BeExist, temp.BeHooks); err != nil { + cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) + } + // A snapshot db may have already been recovered, and the old db should have + // already been closed in this case, so we should set the backend 
again. + temp.CI.SetBackend(temp.BE) + s1, s2 := temp.BE.Size(), temp.BE.SizeInUse() + cfg.Logger.Info( + "recovered v3 backend from snapshot", + zap.Int64("backend-size-bytes", s1), + zap.String("backend-size", humanize.Bytes(uint64(s1))), + zap.Int64("backend-size-in-use-bytes", s2), + zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), + ) + } else { + cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!") + } + + if !cfg.ForceNewCluster { + temp.ID, temp.CL, temp.N, temp.S, temp.W = restartNode(cfg, temp.Snapshot) + } else { + temp.ID, temp.CL, temp.N, temp.S, temp.W = restartAsStandaloneNode(cfg, temp.Snapshot) + } + + temp.CL.SetStore(temp.ST) + temp.CL.SetBackend(temp.BE) + temp.CL.Recover(api.UpdateCapability) + if temp.CL.Version() != nil && !temp.CL.Version().LessThan(semver.Version{Major: 3}) && !temp.BeExist { + os.RemoveAll(temp.Bepath) + return nil, fmt.Errorf("database file (%v) of the backend is missing", temp.Bepath) + } + + default: + return nil, fmt.Errorf("不支持的引导配置") + } + + if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { + return nil, fmt.Errorf("不能访问成员目录: %v", terr) + } + + return +} + +// NewServer 根据提供的配置创建一个新的EtcdServer.在EtcdServer的生命周期内,该配置被认为是静态的. 
+func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { + temp := &Temp{} + temp, err = MySelfStartRaft(cfg) // 逻辑时钟初始化 + serverStats := stats.NewServerStats(cfg.Name, temp.ID.String()) + leaderStats := stats.NewLeaderStats(cfg.Logger, temp.ID.String()) + + heartbeat := time.Duration(cfg.TickMs) * time.Millisecond + srv = &EtcdServer{ + readych: make(chan struct{}), + Cfg: cfg, + lgMu: new(sync.RWMutex), + lg: cfg.Logger, + errorc: make(chan error, 1), + v2store: temp.ST, + snapshotter: temp.SS, + r: *newRaftNode( + raftNodeConfig{ + lg: cfg.Logger, + isIDRemoved: func(id uint64) bool { return temp.CL.IsIDRemoved(types.ID(id)) }, + RaftNodeInterFace: temp.N, + heartbeat: heartbeat, + raftStorage: temp.S, + storage: NewStorage(temp.W, temp.SS), + }, + ), + id: temp.ID, + attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, + cluster: temp.CL, + stats: serverStats, + lstats: leaderStats, + SyncTicker: time.NewTicker(500 * time.Millisecond), + peerRt: temp.Prt, + reqIDGen: idutil.NewGenerator(uint16(temp.ID), time.Now()), + AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, + consistIndex: temp.CI, + firstCommitInTermC: make(chan struct{}), + } + srv.applyV2 = NewApplierV2(cfg.Logger, srv.v2store, srv.cluster) + + srv.backend = temp.BE + srv.beHooks = temp.BeHooks + // 可能为了确保发生leader选举时,lease不会过期,最小ttl应该比选举时间长,看代码 + minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat + // 默认的情况下应该是2s, + + // 始终在KV之前恢复出租人.当我们恢复mvcc.KV时,它将把钥匙重新连接到它的租约上.如果我们先恢复mvcc.KV,它将在恢复前把钥匙附加到错误的出租人上. 
+ srv.lessor = lease.NewLessor(srv.Logger(), srv.backend, srv.cluster, lease.LessorConfig{ + MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())), + CheckpointInterval: cfg.LeaseCheckpointInterval, + CheckpointPersist: cfg.LeaseCheckpointPersist, + ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(), + }) + + tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken, // 认证格式 simple、jwt + func(index uint64) <-chan struct{} { + return srv.applyWait.Wait(index) + }, + time.Duration(cfg.TokenTTL)*time.Second, + ) + if err != nil { + cfg.Logger.Warn("创建令牌提供程序失败", zap.Error(err)) + return nil, err + } + // watch | kv ... + srv.kv = mvcc.New(srv.Logger(), srv.backend, srv.lessor, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit}) + + kvindex := temp.CI.ConsistentIndex() + srv.lg.Debug("恢复consistentIndex", zap.Uint64("index", kvindex)) + if temp.BeExist { + // TODO: remove kvindex != 0 checking when we do not expect users to upgrade + // etcd from pre-3.0 release. + if temp.Snapshot != nil && kvindex < temp.Snapshot.Metadata.Index { + if kvindex != 0 { + return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", temp.Bepath, kvindex, temp.Snapshot.Metadata.Index) + } + cfg.Logger.Warn( + "consistent index was never saved", + zap.Uint64("snapshot-index", temp.Snapshot.Metadata.Index), + ) + } + } + + srv.authStore = auth.NewAuthStore(srv.Logger(), srv.backend, tp, int(cfg.BcryptCost)) // BcryptCost 为散列身份验证密码指定bcrypt算法的成本/强度默认10 + + newSrv := srv // since srv == nil in defer if srv is returned as nil + defer func() { + // closing backend without first closing kv can cause + // resumed compactions to fail with closed tx errors + if err != nil { + newSrv.kv.Close() + } + }() + if num := cfg.AutoCompactionRetention; num != 0 { + srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv) + if err != nil { + return nil, err + } + srv.compactor.Run() + } + + srv.applyV3Base = srv.newApplierV3Backend() + 
srv.applyV3Internal = srv.newApplierV3Internal() + // 启动时重置所有警报 + if err = srv.restoreAlarms(); err != nil { + return nil, err + } + + if srv.Cfg.EnableLeaseCheckpoint { + // 通过设置checkpointer使能租期检查点功能. + srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) { + // 定期批量地将 Lease 剩余的 TTL 基于 Raft Log 同步给 Follower 节点,Follower 节点收到 CheckPoint 请求后, + // 更新内存数据结构 LeaseMap 的剩余 TTL 信息. + srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp}) + }) + } + + // TODO: move transport initialization near the definition of remote + tr := &rafthttp.Transport{ + Logger: cfg.Logger, + TLSInfo: cfg.PeerTLSInfo, + DialTimeout: cfg.PeerDialTimeout(), + ID: temp.ID, + URLs: cfg.PeerURLs, + ClusterID: temp.CL.ID(), + Raft: srv, + Snapshotter: temp.SS, + ServerStats: serverStats, + LeaderStats: leaderStats, + ErrorC: srv.errorc, + } + if err = tr.Start(); err != nil { + return nil, err + } + // add all remotes into transport + for _, m := range temp.Remotes { + if m.ID != temp.ID { + tr.AddRemote(m.ID, m.PeerURLs) + } + } + for _, m := range temp.CL.Members() { + if m.ID != temp.ID { + tr.AddPeer(m.ID, m.PeerURLs) + } + } + srv.r.transport = tr + + return srv, nil +} + +// assertNoV2StoreContent -> depending on the deprecation stage, warns or report an error +// if the v2store contains custom content. +func assertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error { + metaOnly, err := membership.IsMetaStoreOnly(st) + if err != nil { + return err + } + if metaOnly { + return nil + } + if deprecationStage.IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) { + return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage) + } + lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. 
Please remove the content.") + return nil +} + +func (s *EtcdServer) Logger() *zap.Logger { + s.lgMu.RLock() + l := s.lg + s.lgMu.RUnlock() + return l +} + +func tickToDur(ticks int, tickMs uint) string { + return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond) +} + +func (s *EtcdServer) adjustTicks() { + lg := s.Logger() + clusterN := len(s.cluster.Members()) + + // single-node fresh start, or single-node recovers from snapshot + if clusterN == 1 { + ticks := s.Cfg.ElectionTicks - 1 + lg.Info( + "started as single-node; fast-forwarding election ticks", + zap.String("local-member-id", s.ID().String()), + zap.Int("forward-ticks", ticks), + zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), + zap.Int("election-ticks", s.Cfg.ElectionTicks), + zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)), + ) + s.r.advanceTicks(ticks) + return + } + + if !s.Cfg.InitialElectionTickAdvance { + lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) + return + } + lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) + + // retry up to "rafthttp.ConnReadTimeout", which is 5-sec + // until peer connection reports; otherwise: + // 1. all connections failed, or + // 2. no active peers, or + // 3. 
restarted single-node with no snapshot + // then, do nothing, because advancing ticks would have no effect + waitTime := rafthttp.ConnReadTimeout + itv := 50 * time.Millisecond + for i := int64(0); i < int64(waitTime/itv); i++ { + select { + case <-time.After(itv): + case <-s.stopping: + return + } + + peerN := s.r.transport.ActivePeers() + if peerN > 1 { + // multi-node received peer connection reports + // adjust ticks, in case slow leader message receive + ticks := s.Cfg.ElectionTicks - 2 + + lg.Info( + "initialized peer connections; fast-forwarding election ticks", + zap.String("local-member-id", s.ID().String()), + zap.Int("forward-ticks", ticks), + zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), + zap.Int("election-ticks", s.Cfg.ElectionTicks), + zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)), + zap.Int("active-remote-members", peerN), + ) + + s.r.advanceTicks(ticks) + return + } + } +} + +func (s *EtcdServer) Start() { + s.start() + s.GoAttach(func() { s.adjustTicks() }) + s.GoAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) + s.GoAttach(s.purgeFile) + s.GoAttach(s.monitorVersions) + s.GoAttach(s.linearizableReadLoop) + s.GoAttach(s.monitorKVHash) + s.GoAttach(s.monitorDowngrade) +} + +func (s *EtcdServer) start() { + lg := s.Logger() + + if s.Cfg.SnapshotCount == 0 { // 触发一次磁盘快照的提交事务的次数 + lg.Info("更新快照数量为默认值", + zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount), // 触发一次磁盘快照的提交事务的次数 + zap.Uint64("updated-snapshot-count", DefaultSnapshotCount), + ) + s.Cfg.SnapshotCount = DefaultSnapshotCount // 触发一次磁盘快照的提交事务的次数 + } + if s.Cfg.SnapshotCatchUpEntries == 0 { + lg.Info("将快照追赶条目更新为默认条目", + zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries), + zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries), + ) + s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries + } + + s.w = wait.New() + s.applyWait = wait.NewTimeList() + s.done = make(chan struct{}) + s.stop = 
make(chan struct{}) + s.stopping = make(chan struct{}, 1) + s.ctx, s.cancel = context.WithCancel(context.Background()) + s.readwaitc = make(chan struct{}, 1) + s.readNotifier = newNotifier() + s.leaderChanged = make(chan struct{}) + if s.ClusterVersion() != nil { + lg.Info("启动etcd", zap.String("local-member-id", s.ID().String()), + zap.String("local-etcd-version", version.Version), + zap.String("cluster-id", s.Cluster().ID().String()), + zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())), + ) + } else { + lg.Info("启动etcd", zap.String("local-member-id", s.ID().String()), + zap.String("local-etcd-version", version.Version), zap.String("cluster-version", "to_be_decided")) + } + + go s.run() +} + +func (s *EtcdServer) purgeFile() { + lg := s.Logger() + var dberrc, serrc, werrc <-chan error + var dbdonec, sdonec, wdonec <-chan struct{} + if s.Cfg.MaxSnapFiles > 0 { + dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping) + sdonec, serrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping) + } + if s.Cfg.MaxWALFiles > 0 { + wdonec, werrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping) + } + + select { + case e := <-dberrc: + lg.Fatal("failed to purge snap db file", zap.Error(e)) + case e := <-serrc: + lg.Fatal("failed to purge snap file", zap.Error(e)) + case e := <-werrc: + lg.Fatal("failed to purge wal file", zap.Error(e)) + case <-s.stopping: + if dbdonec != nil { + <-dbdonec + } + if sdonec != nil { + <-sdonec + } + if wdonec != nil { + <-wdonec + } + return + } +} + +type ServerPeer interface { + ServerV2 + RaftHandler() http.Handler + LeaseHandler() http.Handler +} + +func (s *EtcdServer) RaftHandler() http.Handler { + return s.r.transport.Handler() +} + +type ServerPeerV2 interface { + ServerPeer + HashKVHandler() http.Handler + 
DowngradeEnabledHandler() http.Handler +} + +func (s *EtcdServer) ReportUnreachable(id uint64) { + s.r.ReportUnreachable(id) +} + +// ReportSnapshot reports snapshot sent status to the raft state machine, +// and clears the used snapshot from the snapshot store. +func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) { + s.r.ReportSnapshot(id, status) +} + +type etcdProgress struct { + confState raftpb.ConfState + snapi uint64 + appliedt uint64 + appliedi uint64 +} + +// raftReadyHandler contains a set of EtcdServer operations to backend called by raftNode, +// and helps decouple state machine logic from Raft algorithms. +// TODO: add a state machine interface to apply the commit entries and do snapshot/recover +type raftReadyHandler struct { + getLead func() (lead uint64) + updateLead func(lead uint64) + updateLeadership func(newLeader bool) + updateCommittedIndex func(uint64) +} + +func (s *EtcdServer) run() { + lg := s.Logger() + + sn, err := s.r.raftStorage.Snapshot() + if err != nil { + lg.Panic("从Raft存储获取快照失败", zap.Error(err)) + } + + // asynchronously accept apply packets, dispatch progress in-order + sched := schedule.NewFIFOScheduler() + + var ( + smu sync.RWMutex + syncC <-chan time.Time + ) + setSyncC := func(ch <-chan time.Time) { + smu.Lock() + syncC = ch + smu.Unlock() + } + getSyncC := func() (ch <-chan time.Time) { + smu.RLock() + ch = syncC + smu.RUnlock() + return + } + rh := &raftReadyHandler{ + getLead: func() (lead uint64) { return s.getLead() }, + updateLead: func(lead uint64) { s.setLead(lead) }, + updateLeadership: func(newLeader bool) { + if !s.isLeader() { + // 自己不是leader了 + if s.lessor != nil { + s.lessor.Demote() // 持久化所有租约 + } + if s.compactor != nil { + s.compactor.Pause() + } + setSyncC(nil) + } else { + if newLeader { + t := time.Now() + s.leadTimeMu.Lock() + s.leadElectedTime = t + s.leadTimeMu.Unlock() + } + setSyncC(s.SyncTicker.C) + if s.compactor != nil { + s.compactor.Resume() + } + } + if newLeader { + 
s.leaderChangedMu.Lock() + lc := s.leaderChanged + s.leaderChanged = make(chan struct{}) + close(lc) + s.leaderChangedMu.Unlock() + } + // TODO: remove the nil checking + // current test utility does not provide the stats + if s.stats != nil { + s.stats.BecomeLeader() + } + }, + updateCommittedIndex: func(ci uint64) { + cci := s.getCommittedIndex() + if ci > cci { + s.setCommittedIndex(ci) + } + }, + } + s.r.start(rh) + + ep := etcdProgress{ + confState: sn.Metadata.ConfState, + snapi: sn.Metadata.Index, + appliedt: sn.Metadata.Term, + appliedi: sn.Metadata.Index, + } + + defer func() { + s.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping + close(s.stopping) + s.wgMu.Unlock() + s.cancel() + sched.Stop() + + // wait for gouroutines before closing raft so wal stays open + s.wg.Wait() + + s.SyncTicker.Stop() + + // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines + // by adding a peer after raft stops the transport + s.r.stop() + + s.Cleanup() + + close(s.done) + }() + var expiredLeaseC <-chan []*lease.Lease // 返回一个用于接收过期租约的CHAN. 
+ if s.lessor != nil { // v3用,作用是实现过期时间 + expiredLeaseC = s.lessor.ExpiredLeasesC() + } + + for { + select { + case ap := <-s.r.apply(): + // 集群启动时,会先apply两条消息 + // index1:EntryConfChange {"Type":0,"NodeID":10276657743932975437,"Context":"{\"id\":10276657743932975437,\"peerURLs\":[\"http://localhost:2380\"],\"name\":\"default\"}","ID":0} + // index2:EntryNormal nil 用于任期内第一次commit + // index3:EntryNormal {"ID":7587861549007417858,"Method":"PUT","Path":"/0/members/8e9e05c52164694d/attributes","Val":"{\"name\":\"default\",\"clientURLs\":[\"http://localhost:2379\"]}","Dir":false,"PrevValue":"","PrevIndex":0,"Expiration":0,"Wait":false,"Since":0,"Recursive":false,"Sorted":false,"Quorum":false,"Time":0,"Stream":false} + // 读取 放入applyc的消息 + f := func(context.Context) { + s.applyAll(&ep, &ap) + } + sched.Schedule(f) + case leases := <-expiredLeaseC: + s.GoAttach(func() { + // 通过并行化增加过期租约删除过程的吞吐量 + c := make(chan struct{}, maxPendingRevokes) // 控制每一批 并发数为16 + for _, lease := range leases { + select { + case c <- struct{}{}: + case <-s.stopping: + return + } + lid := lease.ID + s.GoAttach(func() { + ctx := s.authStore.WithRoot(s.ctx) + _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + if lerr == nil { + } else { + lg.Warn("移除租约失败", zap.String("lease-id", fmt.Sprintf("%016x", lid)), zap.Error(lerr)) + } + <-c + }) + } + }) + case err := <-s.errorc: + lg.Warn("etcd error", zap.Error(err)) + lg.Warn("本机使用的data-dir必须移除") + return + case <-getSyncC(): + if s.v2store.HasTTLKeys() { + s.sync(s.Cfg.ReqTimeout()) + } + case <-s.stop: + return + } + } +} + +// Cleanup removes allocated objects by EtcdServer.NewServer in +// situation that EtcdServer::Start was not called (that takes care of cleanup). +func (s *EtcdServer) Cleanup() { + // kv, lessor and backend can backend nil if running without v3 enabled + // or running unit tests. 
+ if s.lessor != nil { + s.lessor.Stop() + } + if s.kv != nil { + s.kv.Close() + } + if s.authStore != nil { + s.authStore.Close() + } + if s.backend != nil { + s.backend.Close() + } + if s.compactor != nil { + s.compactor.Stop() + } +} + +func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { + s.applySnapshot(ep, apply) // 从持久化的内存存储中恢复出快照 + s.applyEntries(ep, apply) + + s.applyWait.Trigger(ep.appliedi) + + // wait for the raft routine to finish the disk writes before triggering a + // snapshot. or applied index might backend greater than the last index in raft + // storage, since the raft routine might backend slower than apply routine. + <-apply.notifyc + + s.triggerSnapshot(ep) + select { + // snapshot requested via send() + case m := <-s.r.msgSnapC: + merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState) + s.sendMergedSnap(merged) + default: + } +} + +func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { + if raft.IsEmptySnap(apply.snapshot) { + return + } + + lg := s.Logger() + lg.Info("开始应用快照", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + ) + defer func() { + lg.Info("已应用快照", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + ) + }() + + if apply.snapshot.Metadata.Index <= ep.appliedi { + lg.Panic("意外得到 来自过时索引的领导者快照", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + ) + } + + // 
等待raftnode持久化快找到硬盘上 + <-apply.notifyc + + newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot, s.beHooks) + if err != nil { + lg.Panic("failed to open snapshot backend", zap.Error(err)) + } + + // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. + // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. + if s.lessor != nil { + lg.Info("restoring lease store") + + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write(traceutil.TODO()) }) + + lg.Info("restored lease store") + } + + lg.Info("restoring mvcc store") + + if err := s.kv.Restore(newbe); err != nil { + lg.Panic("failed to restore mvcc store", zap.Error(err)) + } + + s.consistIndex.SetBackend(newbe) + lg.Info("restored mvcc store", zap.Uint64("consistent-index", s.consistIndex.ConsistentIndex())) + + // Closing old backend might block until all the txns + // on the backend are finished. + // We do not want to wait on closing the old backend. 
+ s.backendLock.Lock() + oldbe := s.backend + go func() { + lg.Info("closing old backend file") + defer func() { + lg.Info("closed old backend file") + }() + if err := oldbe.Close(); err != nil { + lg.Panic("failed to close old backend", zap.Error(err)) + } + }() + + s.backend = newbe + s.backendLock.Unlock() + + lg.Info("restoring alarm store") + + if err := s.restoreAlarms(); err != nil { + lg.Panic("failed to restore alarm store", zap.Error(err)) + } + + lg.Info("restored alarm store") + + if s.authStore != nil { + lg.Info("restoring auth store") + + s.authStore.Recover(newbe) + + lg.Info("restored auth store") + } + + lg.Info("restoring v2 store") + if err := s.v2store.Recovery(apply.snapshot.Data); err != nil { + lg.Panic("failed to restore v2 store", zap.Error(err)) + } + + if err := assertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil { + lg.Panic("illegal v2store content", zap.Error(err)) + } + + lg.Info("restored v2 store") + + s.cluster.SetBackend(newbe) + + lg.Info("restoring cluster configuration") + + s.cluster.Recover(api.UpdateCapability) + + lg.Info("restored cluster configuration") + lg.Info("removing old peers from network") + + // recover raft transport + s.r.transport.RemoveAllPeers() + + lg.Info("removed old peers from network") + lg.Info("adding peers from new cluster configuration") + + for _, m := range s.cluster.Members() { + if m.ID == s.ID() { + continue + } + s.r.transport.AddPeer(m.ID, m.PeerURLs) + } + + lg.Info("added peers from new cluster configuration") + + ep.appliedt = apply.snapshot.Metadata.Term + ep.appliedi = apply.snapshot.Metadata.Index + ep.snapi = ep.appliedi + ep.confState = apply.snapshot.Metadata.ConfState +} + +func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { + if len(apply.entries) == 0 { + return + } + firsti := apply.entries[0].Index + if firsti > ep.appliedi+1 { + lg := s.Logger() + lg.Panic("意外的 已提交索引", + zap.Uint64("current-applied-index", ep.appliedi), + 
zap.Uint64("first-committed-entry-index", firsti), + ) + } + var ents []raftpb.Entry + if ep.appliedi+1-firsti < uint64(len(apply.entries)) { + ents = apply.entries[ep.appliedi+1-firsti:] + } + if len(ents) == 0 { + return + } + var shouldstop bool + if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { + go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("")) + } +} + +func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { + if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount { // 触发一次磁盘快照的提交事务的次数 + return + } + + lg := s.Logger() + lg.Info("触发打快照", + zap.String("local-member-id", s.ID().String()), + zap.Uint64("local-member-applied-index", ep.appliedi), + zap.Uint64("local-member-snapshot-index", ep.snapi), + zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount), + ) + + s.snapshot(ep.appliedi, ep.confState) + ep.snapi = ep.appliedi +} + +func (s *EtcdServer) hasMultipleVotingMembers() bool { + return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1 +} + +func (s *EtcdServer) isLeader() bool { + return uint64(s.ID()) == s.Lead() +} + +// MoveLeader leader转移 +func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error { + if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner { + return ErrBadLeaderTransferee + } + + now := time.Now() + interval := time.Duration(s.Cfg.TickMs) * time.Millisecond + + lg := s.Logger() + lg.Info( + "开始leader转移", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(lead).String()), + zap.String("transferee-member-id", types.ID(transferee).String()), + ) + + s.r.TransferLeadership(ctx, lead, transferee) // 开始leader转移 + for s.Lead() != transferee { + select { + case <-ctx.Done(): // time out + return ErrTimeoutLeaderTransfer + case <-time.After(interval): + } + } + + // 耗尽所有请求,或者驱逐掉所有就leader的消息 + lg.Info( + "leader转移完成", + zap.String("local-member-id", s.ID().String()), + 
zap.String("old-leader-member-id", types.ID(lead).String()), + zap.String("new-leader-member-id", types.ID(transferee).String()), + zap.Duration("took", time.Since(now)), + ) + return nil +} + +func (s *EtcdServer) TransferLeadership() error { + lg := s.Logger() + if !s.isLeader() { + lg.Info( + "skipped leadership transfer; local etcd is not leader", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(s.Lead()).String()), + ) + return nil + } + + if !s.hasMultipleVotingMembers() { + lg.Info( + "skipped leadership transfer for single voting member cluster", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(s.Lead()).String()), + ) + return nil + } + + transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs()) + if !ok { + return ErrUnhealthy + } + + tm := s.Cfg.ReqTimeout() + ctx, cancel := context.WithTimeout(s.ctx, tm) + err := s.MoveLeader(ctx, s.Lead(), uint64(transferee)) + cancel() + return err +} + +// HardStop 在不与集群中其他成员协调的情况下停止etcd. +func (s *EtcdServer) HardStop() { + select { + case s.stop <- struct{}{}: + case <-s.done: + return + } + <-s.done +} + +// Stop 优雅停止本节点, 如果是leader要等leader转移 +func (s *EtcdServer) Stop() { + lg := s.Logger() + if err := s.TransferLeadership(); err != nil { + lg.Warn("leader转移失败", zap.String("local-member-id", s.ID().String()), zap.Error(err)) + } + s.HardStop() +} + +// ReadyNotify 当etcd 准备好服务请求后,会关闭ready ch +func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych } + +func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { + select { + case <-time.After(d): + case <-s.done: + } + select { + case s.errorc <- err: + default: + } +} + +// StopNotify 当etcd停止时、会往此channel发送 empty struct +// when the etcd is stopped. +func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } + +// StoppingNotify returns a channel that receives a empty struct +// when the etcd is being stopped. 
+func (s *EtcdServer) StoppingNotify() <-chan struct{} { return s.stopping } + +func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } + +func (s *EtcdServer) LeaderStats() []byte { + lead := s.getLead() + if lead != uint64(s.id) { + return nil + } + return s.lstats.JSON() +} + +func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() } + +// 检查节点操作的权限 +func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { + _ = auth.NewAuthStore + if s.authStore == nil { + // 在普通的etcd进程中,s.authStore永远不会为零.这个分支是为了处理server_test.go中的情况 + return nil + } + + // 请注意,这个权限检查是在API层完成的,所以TOCTOU问题可能会在这样的时间表中引起: + // 更新用户A的会员资格------撤销A的根角色------在状态机层应用会员资格的改变 + // 然而,会员资格的改变和角色管理都需要根权限.所以管理员的谨慎操作可以防止这个问题. + authInfo, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + + return s.AuthStore().IsAdminPermitted(authInfo) +} + +// 检查learner是否追上了leader +// 注意:如果在集群中没有找到成员,或者成员不是学习者,它将返回nil. +// 这两个条件将在后面的应用阶段之前进行后台检查. +func (s *EtcdServer) isLearnerReady(id uint64) error { + rs := s.raftStatus() + if rs.Progress == nil { + return ErrNotLeader + } + + var learnerMatch uint64 + isFound := false + leaderID := rs.ID + for memberID, progress := range rs.Progress { + if id == memberID { + learnerMatch = progress.Match + isFound = true + break + } + } + + if isFound { + leaderMatch := rs.Progress[leaderID].Match + // learner的进度还没有赶上领导者 + if float64(learnerMatch) < float64(leaderMatch)*readyPercent { + return ErrLearnerNotReady + } + } + return nil +} + +func (s *EtcdServer) mayRemoveMember(id types.ID) error { + if !s.Cfg.StrictReconfigCheck { // 严格配置变更检查 + return nil + } + + lg := s.Logger() + isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner + // no need to check quorum when removing non-voting member + if isLearner { + return nil + } + + if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) { + lg.Warn( + "rejecting member remove request; not enough healthy members", + zap.String("local-member-id", 
s.ID().String()), + zap.String("requested-member-remove-id", id.String()), + zap.Error(ErrNotEnoughStartedMembers), + ) + return ErrNotEnoughStartedMembers + } + + // downed member is safe to remove since it's not part of the active quorum + if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { + return nil + } + + // protect quorum if some members are down + m := s.cluster.VotingMembers() + active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) + if (active - 1) < 1+((len(m)-1)/2) { + lg.Warn( + "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-remove", id.String()), + zap.Int("active-peers", active), + zap.Error(ErrUnhealthy), + ) + return ErrUnhealthy + } + + return nil +} + +// FirstCommitInTermNotify +// 任期内第一次commit会往这个channel发个信号,这是新leader回答只读请求所必需的 +// Leader不能响应任何只读请求,只要线性语义是必需的 +func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} { + s.firstCommitInTermMu.RLock() + defer s.firstCommitInTermMu.RUnlock() + return s.firstCommitInTermC +} + +type confChangeResponse struct { + membs []*membership.Member + err error +} + +// configureAndSendRaft 通过raft发送配置变更,然后等待后端应用到etcd.它将阻塞,直到更改执行或出现错误. 
+func (s *EtcdServer) configureAndSendRaft(ctx context.Context, cc raftpb.ConfChangeV1) ([]*membership.Member, error) { + lg := s.Logger() + cc.ID = s.reqIDGen.Next() + ch := s.w.Register(cc.ID) + + start := time.Now() + if err := s.r.ProposeConfChange(ctx, cc); err != nil { + s.w.Trigger(cc.ID, nil) + return nil, err + } + + select { + case x := <-ch: + if x == nil { + lg.Panic("配置失败") + } + resp := x.(*confChangeResponse) + lg.Info( + "通过raft应用配置更改", + zap.String("local-member-id", s.ID().String()), + zap.String("raft-conf-change", cc.Type.String()), + zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()), + ) + return resp.membs, resp.err + + case <-ctx.Done(): + s.w.Trigger(cc.ID, nil) // GC wait + return nil, s.parseProposeCtxErr(ctx.Err(), start) + + case <-s.stopping: + return nil, ErrStopped + } +} + +// sync proposes a SYNC request and is non-blocking. +// This makes no guarantee that the request will backend proposed or performed. +// The request will backend canceled after the given timeout. +func (s *EtcdServer) sync(timeout time.Duration) { + req := pb.Request{ + Method: "SYNC", + ID: s.reqIDGen.Next(), + Time: time.Now().UnixNano(), + } + data := pbutil.MustMarshal(&req) + // There is no promise that node has leader when do SYNC request, + // so it uses goroutine to propose. + ctx, cancel := context.WithTimeout(s.ctx, timeout) + s.GoAttach(func() { + s.r.Propose(ctx, data) + cancel() + }) +} + +// publish registers etcd information into the cluster. The information +// is the JSON representation of this etcd's member struct, updated with the +// static clientURLs of the etcd. +// The function keeps attempting to register until it succeeds, +// or its etcd is stopped. +// +// Use v2 store to encode member attributes, and apply through Raft +// but does not go through v2 API endpoint, which means even with v2 +// client handler disabled (e.g. 
--enable-v2=false), cluster can still +// process publish requests through rafthttp +// TODO: Remove in 3.6 (start using publishV3) +func (s *EtcdServer) publish(timeout time.Duration) { + lg := s.Logger() + b, err := json.Marshal(s.attributes) + if err != nil { + lg.Panic("failed to marshal JSON", zap.Error(err)) + return + } + req := pb.Request{ + Method: "PUT", + Path: membership.MemberAttributesStorePath(s.id), + Val: string(b), + } + + for { + ctx, cancel := context.WithTimeout(s.ctx, timeout) + _, err := s.Do(ctx, req) + cancel() + switch err { + case nil: + close(s.readych) + lg.Info( + "published local member to cluster through raft", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.String("request-path", req.Path), + zap.String("cluster-id", s.cluster.ID().String()), + zap.Duration("publish-timeout", timeout), + ) + return + + case ErrStopped: + lg.Warn( + "stopped publish because etcd is stopped", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.Duration("publish-timeout", timeout), + zap.Error(err), + ) + return + + default: + lg.Warn( + "通过raft发布本机信息失败", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.String("request-path", req.Path), + zap.Duration("publish-timeout", timeout), + zap.Error(err), + ) + } + } +} + +func (s *EtcdServer) sendMergedSnap(merged snap.Message) { + atomic.AddInt64(&s.inflightSnapshots, 1) + + lg := s.Logger() + fields := []zap.Field{ + zap.String("from", s.ID().String()), + zap.String("to", types.ID(merged.To).String()), + zap.Int64("bytes", merged.TotalSize), + zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), + } + + now := time.Now() + s.r.transport.SendSnapshot(merged) + lg.Info("sending merged snapshot", fields...) 
+ + s.GoAttach(func() { + select { + case ok := <-merged.CloseNotify(): + // delay releasing inflight snapshot for another 30 seconds to + // block log compaction. + // If the follower still fails to catch up, it is probably just too slow + // to catch up. We cannot avoid the snapshot cycle anyway. + if ok { + select { + case <-time.After(releaseDelayAfterSnapshot): + case <-s.stopping: + } + } + + atomic.AddInt64(&s.inflightSnapshots, -1) + + lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...) + + case <-s.stopping: + lg.Warn("canceled sending merged snapshot; etcd stopping", fields...) + return + } + }) +} + +// apply 从raft 获取到committed ---> applying +func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { + // confState 当前快照中的 集群配置 + s.lg.Debug("开始应用日志", zap.Int("num-entries", len(es))) + for i := range es { + e := es[i] + s.lg.Debug("开始应用日志", zap.Uint64("index", e.Index), zap.Uint64("term", e.Term), zap.Stringer("type", e.Type)) + switch e.Type { + case raftpb.EntryNormal: + s.applyEntryNormal(&e) + s.setAppliedIndex(e.Index) + s.setTerm(e.Term) + + case raftpb.EntryConfChange: + shouldApplyV3 := membership.ApplyV2storeOnly // false + if e.Index > s.consistIndex.ConsistentIndex() { // 查找 bolt.db meta 库里的 consistent_index、term + s.consistIndex.SetConsistentIndex(e.Index, e.Term) // 更新内存里的 + shouldApplyV3 = membership.ApplyBoth // true + } + + var cc raftpb.ConfChangeV1 + _ = cc.Unmarshal + pbutil.MustUnmarshal(&cc, e.Data) + // ConfChangeAddNode {"id":10276657743932975437,"peerURLs":["http://localhost:2380"],"name":"default"} + removedSelf, err := s.applyConfChange(cc, confState, shouldApplyV3) + s.setAppliedIndex(e.Index) + s.setTerm(e.Term) + shouldStop = shouldStop || removedSelf + s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) + + default: + lg := s.Logger() + lg.Panic( + "未知的日志类型;必须是 EntryNormal 或 EntryConfChange", + 
zap.String("type", e.Type.String()), + ) + } + appliedi, appliedt = e.Index, e.Term + } + return appliedt, appliedi, shouldStop +} + +// TODO: non-blocking snapshot +func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { + clone := s.v2store.Clone() + // commit kv to write metadata (for example: consistent index) to disk. + // + // This guarantees that Backend's consistent_index is >= index of last snapshot. + // + // KV().commit() updates the consistent index in backend. + // All operations that update consistent index必须是called sequentially + // from applyAll function. + // So KV().Commit() cannot run in parallel with apply. It has to backend called outside + // the go routine created below. + s.KV().Commit() + + s.GoAttach(func() { + lg := s.Logger() + + d, err := clone.SaveNoCopy() + // TODO: current store will never fail to do a snapshot + // what should we do if the store might fail? + if err != nil { + lg.Panic("failed to save v2 store", zap.Error(err)) + } + snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) + if err != nil { + // the snapshot was done asynchronously with the progress of raft. + // raft might have already got a newer snapshot. + if err == raft.ErrSnapOutOfDate { + return + } + lg.Panic("failed to create snapshot", zap.Error(err)) + } + // SaveSnap saves the snapshot to file and appends the corresponding WAL entry. + if err = s.r.storage.SaveSnap(snap); err != nil { + lg.Panic("failed to save snapshot", zap.Error(err)) + } + if err = s.r.storage.Release(snap); err != nil { + lg.Panic("failed to release wal", zap.Error(err)) + } + + lg.Info( + "saved snapshot", + zap.Uint64("snapshot-index", snap.Metadata.Index), + ) + + // When sending a snapshot, etcd will pause compaction. + // After receives a snapshot, the slow follower needs to get all the entries right after + // the snapshot sent to catch up. 
If we do not pause compaction, the log entries right after + // the snapshot sent might already backend compacted. It happens when the snapshot takes long time + // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. + if atomic.LoadInt64(&s.inflightSnapshots) != 0 { + lg.Info("skip compaction since there is an inflight snapshot") + return + } + + // keep some in memory log entries for slow followers. + compacti := uint64(1) + if snapi > s.Cfg.SnapshotCatchUpEntries { + compacti = snapi - s.Cfg.SnapshotCatchUpEntries // 保留一定数量的日志,为了follower可以追赶 + } + + err = s.r.raftStorage.Compact(compacti) + if err != nil { + // the compaction was done asynchronously with the progress of raft. + // raft log might already been compact. + if err == raft.ErrCompacted { + return + } + lg.Panic("failed to compact", zap.Error(err)) + } + lg.Info( + "compacted Raft logs", + zap.Uint64("compact-index", compacti), + ) + }) +} + +// CutPeer drops messages to the specified peer. +func (s *EtcdServer) CutPeer(id types.ID) { + tr, ok := s.r.transport.(*rafthttp.Transport) + if ok { + tr.CutPeer(id) + } +} + +// MendPeer recovers the message dropping behavior of the given peer. +func (s *EtcdServer) MendPeer(id types.ID) { + tr, ok := s.r.transport.(*rafthttp.Transport) + if ok { + tr.MendPeer(id) + } +} + +func (s *EtcdServer) PauseSending() { s.r.pauseSending() } + +func (s *EtcdServer) ResumeSending() { s.r.resumeSending() } + +// monitorVersions checks the member's version every monitorVersionInterval. +// It updates the cluster version if all members agrees on a higher one. +// It prints out log if there is a member with a higher version than the +// local version. 
+func (s *EtcdServer) monitorVersions() { + for { + select { + case <-s.FirstCommitInTermNotify(): + case <-time.After(monitorVersionInterval): + case <-s.stopping: + return + } + + if s.Leader() != s.ID() { + continue + } + + v := decideClusterVersion(s.Logger(), getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) + if v != nil { + // only keep major.minor version for comparison + v = &semver.Version{ + Major: v.Major, + Minor: v.Minor, + } + } + + // if the current version is nil: + // 1. use the decided version if possible + // 2. or use the min cluster version + if s.cluster.Version() == nil { + verStr := version.MinClusterVersion + if v != nil { + verStr = v.String() + } + s.GoAttach(func() { s.updateClusterVersionV2(verStr) }) + continue + } + + if v != nil && membership.IsValidVersionChange(s.cluster.Version(), v) { + s.GoAttach(func() { s.updateClusterVersionV2(v.String()) }) + } + } +} + +func (s *EtcdServer) updateClusterVersionV2(ver string) { + lg := s.Logger() + if s.cluster.Version() == nil { + lg.Info("使用v2 API 设置初始集群版本", zap.String("cluster-version", version.Cluster(ver))) + } else { + lg.Info("使用v2 API 更新初始集群版本", zap.String("from", version.Cluster(s.cluster.Version().String())), zap.String("to", version.Cluster(ver))) + } + + req := pb.Request{ + Method: "PUT", + Path: membership.StoreClusterVersionKey(), // /0/version + Val: ver, + } + + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) + fmt.Println("start", time.Now()) + _, err := s.Do(ctx, req) + fmt.Println("end", time.Now()) + cancel() + + switch err { + case nil: + lg.Info("集群版本已更新", zap.String("cluster-version", version.Cluster(ver))) + return + + case ErrStopped: + lg.Warn("终止集群版本更新;etcd被停止了", zap.Error(err)) + return + + default: + lg.Warn("集群版本更新失败", zap.Error(err)) + } +} + +func (s *EtcdServer) monitorDowngrade() { + t := s.Cfg.DowngradeCheckTime + if t == 0 { + return + } + lg := s.Logger() + for { + select { + case <-time.After(t): + case <-s.stopping: + return + } + + if 
!s.isLeader() { + continue + } + + d := s.cluster.DowngradeInfo() + if !d.Enabled { + continue + } + + targetVersion := d.TargetVersion + v := semver.Must(semver.NewVersion(targetVersion)) + if isMatchedVersions(s.Logger(), v, getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) { + lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion)) + ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + if _, err := s.downgradeCancel(ctx); err != nil { + lg.Warn("failed to cancel downgrade", zap.Error(err)) + } + cancel() + } + } +} + +func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { + switch err { + case context.Canceled: + return ErrCanceled + + case context.DeadlineExceeded: + s.leadTimeMu.RLock() + curLeadElected := s.leadElectedTime + s.leadTimeMu.RUnlock() + prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) + if start.After(prevLeadLost) && start.Before(curLeadElected) { + return ErrTimeoutDueToLeaderFail + } + lead := types.ID(s.getLead()) + switch lead { + case types.ID(raft.None): + // 当前没有leader + case s.ID(): // leader是自己 + if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { // 检查是否与大多数节点建立连接 + return ErrTimeoutDueToConnectionLost + } + default: + // 检查是否自给定时间以后,与该节点建立连接 + if !isConnectedSince(s.r.transport, start, lead) { + return ErrTimeoutDueToConnectionLost + } + } + return ErrTimeout + + default: + return err + } +} + +func (s *EtcdServer) Backend() backend.Backend { + s.backendLock.Lock() + defer s.backendLock.Unlock() + return s.backend +} + +func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } + +// 启动时重置所有警报 +func (s *EtcdServer) restoreAlarms() error { + s.applyV3 = s.newApplierV3() + as, err := v3alarm.NewAlarmStore(s.lg, s) + if err != nil { + return err + } + s.alarmStore = as + // 警报只有这两种类型 + if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { + s.applyV3 = 
newApplierV3Capped(s.applyV3) + } + if len(as.Get(pb.AlarmType_CORRUPT)) > 0 { + s.applyV3 = newApplierV3Corrupt(s.applyV3) + } + return nil +} + +// ----------------------------------------- OVER -------------------------------------------------------------- + +// GoAttach 启动一个协程干活 +func (s *EtcdServer) GoAttach(f func()) { + s.wgMu.RLock() // stopping 关闭 是加锁操作 + defer s.wgMu.RUnlock() + select { + case <-s.stopping: + lg := s.Logger() + lg.Warn("etcd 已停止; 跳过 GoAttach") + return + default: + } + + // 现在可以安全添加因为等待组的等待还没有开始. + s.wg.Add(1) + go func() { + defer s.wg.Done() + f() + }() +} + +// applyEntryNormal 将日志应用到raft内部 +func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { + shouldApplyV3 := membership.ApplyV2storeOnly + index := s.consistIndex.ConsistentIndex() + if e.Index > index { + // 设置当前entry的一致性索引 + s.consistIndex.SetConsistentIndex(e.Index, e.Term) + shouldApplyV3 = membership.ApplyBoth // v2store 、bolt.db 都存储数据 + } + s.lg.Debug("应用日志", zap.Uint64("consistent-index", index), + zap.Uint64("entry-index", e.Index), + zap.Bool("should-applyV3", bool(shouldApplyV3))) + + // 当leader确认时raft状态机可能会产生noop条目. 提前跳过它以避免将来出现一些潜在的错误. + if len(e.Data) == 0 { + s.notifyAboutFirstCommitInTerm() // 被任期内 第一次commit更新channel + // 当本地成员是leader 并完成了上一任期的所有条目时促进follower. 
+ if s.isLeader() { + // 成为leader时,初始化租约管理器 + s.lessor.Promote(s.Cfg.ElectionTimeout()) + } + return + } + // e.Data 是由 pb.InternalRaftRequest、 序列化得到的 + var raftReq pb.InternalRaftRequest + if pbutil.MaybeUnmarshal(&raftReq, e.Data) { + } else { + // 如果不能不能反序列化 + // {"ID":7587861231285799684,"Method":"PUT","Path":"/0/version","Val":"3.5.0","Dir":false,"PrevValue":"","PrevIndex":0,"Expiration":0,"Wait":false,"Since":0,"Recursive":false,"Sorted":false,"Quorum":false,"Time":0,"Stream":false} + var r pb.Request + rp := &r + pbutil.MustUnmarshal(rp, e.Data) + s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp), shouldApplyV3)) + fmt.Println("pbutil.MustUnmarshal return") + return + } + // 如果能 + //{"header":{"ID":7587861231285799685},"put":{"key":"YQ==","value":"Yg=="}} + if raftReq.V2 != nil { + req := (*RequestV2)(raftReq.V2) + s.w.Trigger(req.ID, s.applyV2Request(req, shouldApplyV3)) + return + } + + id := raftReq.ID + if id == 0 { + id = raftReq.Header.ID + } + + var ar *applyResult + needResult := s.w.IsRegistered(id) + if needResult || !noSideEffect(&raftReq) { + if !needResult && raftReq.Txn != nil { + removeNeedlessRangeReqs(raftReq.Txn) + } + ar = s.applyV3.Apply(&raftReq, shouldApplyV3) + } + + if !shouldApplyV3 { // 是否存储到bolt.db + return + } + + if ar == nil { + return + } + + if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { + s.w.Trigger(id, ar) + return + } + + lg := s.Logger() + lg.Warn("消息超过了后端配额;发出警报", zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Error(ar.err), + ) + + s.GoAttach(func() { + a := &pb.AlarmRequest{ + MemberID: uint64(s.ID()), + Action: pb.AlarmRequest_ACTIVATE, // 日志应用时, 激活警报 + Alarm: pb.AlarmType_NOSPACE, + } + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) + s.w.Trigger(id, ar) + }) +} + +// 通知关于任期内的第一次commit +func (s *EtcdServer) notifyAboutFirstCommitInTerm() { + newNotifier := make(chan struct{}) + 
s.firstCommitInTermMu.Lock() + notifierToClose := s.firstCommitInTermC + // 同于响应只读请求的 + s.firstCommitInTermC = newNotifier + s.firstCommitInTermMu.Unlock() + close(notifierToClose) +} + +// IsLearner 当前节点是不是 raft learner +func (s *EtcdServer) IsLearner() bool { + return s.cluster.IsLocalMemberLearner() +} + +// IsMemberExist returns if the member with the given id exists in cluster. +func (s *EtcdServer) IsMemberExist(id types.ID) bool { + return s.cluster.IsMemberExist(id) +} + +// raftStatus 返回当前节点的raft状态 +func (s *EtcdServer) raftStatus() raft.Status { + return s.r.RaftNodeInterFace.Status() +} + +// 碎片整理 +func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error { + size := be.Size() + sizeInUse := be.SizeInUse() + freeableMemory := uint(size - sizeInUse) // 剩余 + thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024 + if freeableMemory < thresholdBytes { + cfg.Logger.Info("跳过碎片整理", + zap.Int64("current-db-size-bytes", size), + zap.String("current-db-size", humanize.Bytes(uint64(size))), + zap.Int64("current-db-size-in-use-bytes", sizeInUse), + zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))), + zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes), + zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))), + ) + return nil + } + return be.Defrag() +} + +// applyConfChange 将一个confChange作用到当前raft,它必须已经committed +func (s *EtcdServer) applyConfChange(cc raftpb.ConfChangeV1, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) { + if err := s.cluster.ValidateConfigurationChange(cc); err != nil { + cc.NodeID = raft.None // 这种,不会处理的 + s.r.ApplyConfChange(cc) + return false, err + } + + lg := s.Logger() + *confState = *s.r.ApplyConfChange(cc) // 生效之后的配置 + s.beHooks.SetConfState(confState) + switch cc.Type { + // 集群里记录的quorum.JointConfig与peer信息已经更新 + case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode: + 
confChangeContext := new(membership.ConfigChangeContext) + if err := json.Unmarshal([]byte(cc.Context), confChangeContext); err != nil { + lg.Panic("发序列化成员失败", zap.Error(err)) + } + if cc.NodeID != uint64(confChangeContext.Member.ID) { + lg.Panic("得到不同的成员ID", + zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), + zap.String("member-id-from-message", confChangeContext.Member.ID.String()), + ) + } + if confChangeContext.IsPromote { // 是否角色提升 + s.cluster.PromoteMember(confChangeContext.Member.ID, shouldApplyV3) + } else { + s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3) // 添加节点 /0/members/8e9e05c52164694d + if confChangeContext.Member.ID != s.id { // 不是本实例 + s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs) + } + } + + case raftpb.ConfChangeRemoveNode: + id := types.ID(cc.NodeID) + s.cluster.RemoveMember(id, shouldApplyV3) // ✅ + if id == s.id { + return true, nil + } + s.r.transport.RemovePeer(id) + + case raftpb.ConfChangeUpdateNode: + m := new(membership.Member) + if err := json.Unmarshal([]byte(cc.Context), m); err != nil { + lg.Panic("反序列化失败", zap.Error(err)) + } + if cc.NodeID != uint64(m.ID) { + lg.Panic("得到了一个不同的ID", + zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), + zap.String("member-id-from-message", m.ID.String()), + ) + } + s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3) + if m.ID != s.id { + s.r.transport.UpdatePeer(m.ID, m.PeerURLs) + } + } + return false, nil +} + +// Alarms 获取所有的警报, +func (s *EtcdServer) Alarms() []*pb.AlarmMember { + return s.alarmStore.Get(pb.AlarmType_NONE) +} + +func (s *EtcdServer) setCommittedIndex(v uint64) { + atomic.StoreUint64(&s.committedIndex, v) +} + +func (s *EtcdServer) getCommittedIndex() uint64 { + return atomic.LoadUint64(&s.committedIndex) +} + +func (s *EtcdServer) setAppliedIndex(v uint64) { + atomic.StoreUint64(&s.appliedIndex, v) +} + +func (s *EtcdServer) getAppliedIndex() uint64 { + 
return atomic.LoadUint64(&s.appliedIndex) +} + +func (s *EtcdServer) setTerm(v uint64) { + atomic.StoreUint64(&s.term, v) +} + +func (s *EtcdServer) getTerm() uint64 { + return atomic.LoadUint64(&s.term) +} + +func (s *EtcdServer) setLead(v uint64) { + atomic.StoreUint64(&s.lead, v) +} + +func (s *EtcdServer) getLead() uint64 { + return atomic.LoadUint64(&s.lead) +} + +func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) } + +func (s *EtcdServer) Cluster() api.Cluster { return s.cluster } + +func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} { + s.leaderChangedMu.RLock() + defer s.leaderChangedMu.RUnlock() + return s.leaderChanged +} + +func (s *EtcdServer) KV() mvcc.WatchableKV { return s.kv } + +// Process 接收一个raft信息并将其应用于etcd的raft状态机,使用ctx的超时. +func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { + lg := s.Logger() + // 判断该消息的来源有没有被删除 + if s.cluster.IsIDRemoved(types.ID(m.From)) { + lg.Warn("拒绝来自被删除的成员的raft的信息", + zap.String("local-member-id", s.ID().String()), + zap.String("removed-member-id", types.ID(m.From).String()), + ) + return httptypes.NewHTTPError(http.StatusForbidden, "无法处理来自被删除成员的信息") + } + // 操作日志【复制、配置变更 req】 + if m.Type == raftpb.MsgApp { + s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) + } + var _ raft.RaftNodeInterFace = raftNode{} + //_ = raftNode{}.Step + return s.r.Step(ctx, m) +} + +func (s *EtcdServer) ClusterVersion() *semver.Version { + if s.cluster == nil { + return nil + } + return s.cluster.Version() +} diff --git a/server/etcdserver/snapshot_merge.go b/etcd/etcdserver/snapshot_merge.go similarity index 90% rename from server/etcdserver/snapshot_merge.go rename to etcd/etcdserver/snapshot_merge.go index 963ead5a7e2..0e2f7785893 100644 --- a/server/etcdserver/snapshot_merge.go +++ b/etcd/etcdserver/snapshot_merge.go @@ -17,9 +17,9 @@ package etcdserver import ( "io" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - 
"go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/raft/v3/raftpb" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/raft/raftpb" humanize "github.com/dustin/go-humanize" "go.uber.org/zap" @@ -39,7 +39,7 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi // commit kv to write metadata(for example: consistent index). s.KV().Commit() - dbsnap := s.be.Snapshot() + dbsnap := s.backend.Snapshot() // get a snapshot of v3 KV as readCloser rc := newSnapshotReaderCloser(lg, dbsnap) @@ -53,9 +53,7 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi }, Data: d, } - m.Snapshot = &snapshot - - verifySnapshotIndex(snapshot, s.consistIndex.ConsistentIndex()) + m.Snapshot = snapshot return *snap.NewMessage(m, rc, dbsnap.Size()) } diff --git a/etcd/etcdserver/storage.go b/etcd/etcdserver/storage.go new file mode 100644 index 00000000000..d8a2678008d --- /dev/null +++ b/etcd/etcdserver/storage.go @@ -0,0 +1,122 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "io" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "go.uber.org/zap" +) + +type Storage interface { + // Save function saves ents and state to the underlying stable storage. + // Save MUST block until st and ents are on stable storage. + Save(st raftpb.HardState, ents []raftpb.Entry) error + // SaveSnap function saves snapshot to the underlying stable storage. + SaveSnap(snap raftpb.Snapshot) error + // Close closes the Storage and performs finalization. + Close() error + // Release releases the locked wal files older than the provided snapshot. + Release(snap raftpb.Snapshot) error + // Sync WAL + Sync() error +} + +// 静态存储实际上是保存到磁盘中,Storage是对WAL和Snapshot的封装 +type storage struct { + *wal.WAL + *snap.Snapshotter +} + +func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage { + return &storage{w, s} +} + +// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry. +func (st *storage) SaveSnap(snap raftpb.Snapshot) error { + walsnap := walpb.Snapshot{ + Index: snap.Metadata.Index, + Term: snap.Metadata.Term, + ConfState: &snap.Metadata.ConfState, + } + // save the snapshot file before writing the snapshot to the wal. + // This makes it possible for the snapshot file to become orphaned, but prevents + // a WAL snapshot entry from having no corresponding snapshot file. + err := st.Snapshotter.SaveSnap(snap) + if err != nil { + return err + } + // gofail: var raftBeforeWALSaveSnaphot struct{} + + return st.WAL.SaveSnapshot(walsnap) +} + +// Release releases resources older than the given snap and are no longer needed: +// - releases the locks to the wal files that are older than the provided wal for the given snap. 
+// - deletes any .snap.db files that are older than the given snap. +func (st *storage) Release(snap raftpb.Snapshot) error { + if err := st.WAL.ReleaseLockTo(snap.Metadata.Index); err != nil { + return err + } + return st.Snapshotter.ReleaseSnapDBs(snap) +} + +// readWAL reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear +// after the position of the given snap in the WAL. +// The snap must have been previously saved to the WAL, or this call will panic. +func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot, unsafeNoFsync bool) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { + var ( + err error + wmetadata []byte + ) + + repaired := false + for { + if w, err = wal.Open(lg, waldir, snap); err != nil { + lg.Fatal("failed to open WAL", zap.Error(err)) + } + if unsafeNoFsync { + w.SetUnsafeNoFsync() + } + if wmetadata, st, ents, err = w.ReadAll(); err != nil { + w.Close() + // we can only repair ErrUnexpectedEOF and we never repair twice. + if repaired || err != io.ErrUnexpectedEOF { + lg.Fatal("failed to read WAL, cannot backend repaired", zap.Error(err)) + } + if !wal.Repair(lg, waldir) { + lg.Fatal("failed to repair WAL", zap.Error(err)) + } else { + lg.Info("repaired WAL", zap.Error(err)) + repaired = true + } + continue + } + break + } + var metadata pb.Metadata + pbutil.MustUnmarshal(&metadata, wmetadata) + id = types.ID(metadata.NodeID) + cid = types.ID(metadata.ClusterID) + return w, id, cid, st, ents +} diff --git a/etcd/etcdserver/util.go b/etcd/etcdserver/util.go new file mode 100644 index 00000000000..ddeefb6789e --- /dev/null +++ b/etcd/etcdserver/util.go @@ -0,0 +1,100 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "reflect" + "time" + + "github.com/golang/protobuf/proto" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp" + "go.uber.org/zap" +) + +// isConnectedToQuorumSince 检查本地成员是否在给定的时间后连接到集群的法定人数. +func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { + return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 // 2.5 +} + +// isConnectedSince 检查是否自给定时间以后,是否与该节点建立连接 +func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool { + t := transport.ActiveSince(remote) + return !t.IsZero() && t.Before(since) +} + +// isConnectedFullySince 检查本机是否与所有成员都建立了链接,从给定的时间 +func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { + return numConnectedSince(transport, since, self, members) == len(members) +} + +// longestConnected chooses the member with longest active-since-time. +// It returns false, if nothing is active. 
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
+	var longest types.ID
+	var oldest time.Time
+	for _, id := range membs {
+		tm := tp.ActiveSince(id)
+		if tm.IsZero() { // inactive
+			continue
+		}
+
+		if oldest.IsZero() { // first longest candidate
+			oldest = tm
+			longest = id
+		}
+
+		// an earlier ActiveSince time means a longer-standing connection
+		if tm.Before(oldest) {
+			oldest = tm
+			longest = id
+		}
+	}
+	// longest is still the zero ID when no member was active
+	if uint64(longest) == 0 {
+		return longest, false
+	}
+	return longest, true
+}
+
+// warnOfFailedRequest logs a warning for a request that failed to apply,
+// including how long it ran and the size of any partial response.
+func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+	var resp string
+	if !isNil(respMsg) {
+		resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+	}
+	d := time.Since(now)
+	lg.Warn(
+		"failed to apply request",
+		zap.Duration("took", d),
+		zap.String("request", reqStringer.String()),
+		zap.String("response", resp),
+		zap.Error(err),
+	)
+}
+
+// isNil reports whether msg is nil, including a typed-nil proto.Message
+// (a non-nil interface wrapping a nil pointer), which a plain == nil
+// comparison would miss.
+func isNil(msg proto.Message) bool {
+	return msg == nil || reflect.ValueOf(msg).IsNil()
+}
+
+// numConnectedSince counts how many members have been connected to the local member since the given time.
+func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int { + connectedNum := 0 + for _, m := range members { + if m.ID == self || isConnectedSince(transport, since, m.ID) { + connectedNum++ + } + } + return connectedNum +} diff --git a/server/etcdserver/v2_server.go b/etcd/etcdserver/v2_server.go similarity index 84% rename from server/etcdserver/v2_server.go rename to etcd/etcdserver/v2_server.go index 517d7ca7f70..89b007b21f0 100644 --- a/server/etcdserver/v2_server.go +++ b/etcd/etcdserver/v2_server.go @@ -16,12 +16,12 @@ package etcdserver import ( "context" + "fmt" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/errors" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" ) type RequestV2 pb.Request @@ -100,26 +100,27 @@ func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *Requ if err != nil { return Response{}, err } + /* 注册并且创建一个channel, 此处ID每次请求都会重新生成*/ ch := a.s.w.Register(r.ID) start := time.Now() a.s.r.Propose(ctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - + _ = a.s.applyEntryNormal select { case x := <-ch: resp := x.(Response) return resp, resp.Err case <-ctx.Done(): - proposalsFailed.Inc() - a.s.w.Trigger(r.ID, nil) // GC wait + a.s.w.Trigger(r.ID, nil) return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) - case <-a.s.stopping: + case x := <-a.s.stopping: + fmt.Println("<-a.s.stopping", x) } - return Response{}, errors.ErrStopped + return Response{}, ErrStopped } +// --------------- over ------------------------ + func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { r.ID = s.reqIDGen.Next() h := &reqV2HandlerEtcdServer{ @@ -135,11 +136,6 @@ 
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { return resp, err } -// Handle interprets r and performs an operation on s.store according to r.Method -// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with -// Quorum == true, r will be sent through consensus before performing its -// respective operation. Do will block until an action is performed or there is -// an error. func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) { if r.Method == "GET" && r.Quorum { r.Method = "QGET" @@ -158,7 +154,7 @@ func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Respons case "HEAD": return v2api.Head(ctx, r) } - return Response{}, errors.ErrUnknownMethod + return Response{}, ErrUnknownMethod } func (r *RequestV2) String() string { diff --git a/etcd/etcdserver/v3_server.go b/etcd/etcdserver/v3_server.go new file mode 100644 index 00000000000..e9b66e747b3 --- /dev/null +++ b/etcd/etcdserver/v3_server.go @@ -0,0 +1,452 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/etcd/auth" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/mvcc" + "github.com/ls-2018/etcd_cn/offical/api/v3/membershippb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" + + "github.com/gogo/protobuf/proto" + "go.uber.org/zap" +) + +const ( + // In the health case, there might be a small gap (10s of entries) between + // the applied index and committed index. + // However, if the committed entries are very heavy to apply, the gap might grow. + // We should stop accepting new proposals if the gap grows to a certain point. + maxGapBetweenApplyAndCommitIndex = 5000 + readIndexRetryTime = 500 * time.Millisecond +) + +type Authenticator interface { + AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) + AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) + AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) + Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) + UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) + UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) + UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) + UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest)
(*pb.AuthRoleAddResponse, error) + RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) + RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) + RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) + RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) + UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) + RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) +} + +func isTxnSerializable(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + return true +} + +func isTxnReadonly(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil { + return false + } + } + return true +} + +// Watchable returns a watchable interface attached to the etcdserver. 
+func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } + +func isStopped(err error) bool { + return err == raft.ErrStopped || err == ErrStopped +} + +func uint64ToBigEndianBytes(number uint64) []byte { + byteResult := make([]byte, 8) + binary.BigEndian.PutUint64(byteResult, number) + return byteResult +} + +func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { + switch r.Action { + case pb.DowngradeRequest_VALIDATE: + return s.downgradeValidate(ctx, r.Version) + case pb.DowngradeRequest_ENABLE: + return s.downgradeEnable(ctx, r) + case pb.DowngradeRequest_CANCEL: + return s.downgradeCancel(ctx) + default: + return nil, ErrUnknownMethod + } +} + +func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.DowngradeResponse, error) { + resp := &pb.DowngradeResponse{} + + targetVersion, err := convertToClusterVersion(v) + if err != nil { + return nil, err + } + + // gets leaders commit index and wait for local store to finish applying that index + // to avoid using stale downgrade information + err = s.linearizeReadNotify(ctx) + if err != nil { + return nil, err + } + + cv := s.ClusterVersion() + if cv == nil { + return nil, ErrClusterVersionUnavailable + } + resp.Version = cv.String() + + allowedTargetVersion := membership.AllowedDowngradeVersion(cv) + if !targetVersion.Equal(*allowedTargetVersion) { + return nil, ErrInvalidDowngradeTargetVersion + } + + downgradeInfo := s.cluster.DowngradeInfo() + if downgradeInfo.Enabled { + // Todo: return the downgrade status along with the error msg + return nil, ErrDowngradeInProcess + } + return resp, nil +} + +func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { + // validate downgrade capability before starting downgrade + v := r.Version + lg := s.Logger() + if resp, err := s.downgradeValidate(ctx, v); err != nil { + lg.Warn("reject downgrade request", zap.Error(err)) + return 
resp, err + } + targetVersion, err := convertToClusterVersion(v) + if err != nil { + lg.Warn("reject downgrade request", zap.Error(err)) + return nil, err + } + + raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()} + _, err = s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + if err != nil { + lg.Warn("reject downgrade request", zap.Error(err)) + return nil, err + } + resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()} + return &resp, nil +} + +func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) { + // gets leaders commit index and wait for local store to finish applying that index + // to avoid using stale downgrade information + if err := s.linearizeReadNotify(ctx); err != nil { + return nil, err + } + + downgradeInfo := s.cluster.DowngradeInfo() + if !downgradeInfo.Enabled { + return nil, ErrNoInflightDowngrade + } + + raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false} + _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + if err != nil { + return nil, err + } + resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()} + return &resp, nil +} + +// ---------------------------------------- OVER ------------------------------------------------------------ + +// AuthInfoFromCtx 获取认证信息 +func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { + authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) // 用户认证 + if authInfo != nil || err != nil { + return authInfo, err + } + if !s.Cfg.ClientCertAuthEnabled { // 是否验证客户端证书 + return nil, nil + } + authInfo = s.AuthStore().AuthInfoFromTLS(ctx) + return authInfo, nil +} + +// doSerialize 为序列化的请求“get"处理认证逻辑,并由“chk"检查权限.身份验证失败时返回一个非空错误. 
+func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { + trace := traceutil.Get(ctx) // 从上下文获取trace + ai, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return err + } + if ai == nil { + // chk期望非nil AuthInfo;使用空的凭证 + ai = &auth.AuthInfo{} + } + // 检查权限 + if err = chk(ai); err != nil { + return err + } + trace.Step("获取认证元数据") + // 获取序列化请求的响应 + get() + // 如果在处理请求时更新了身份验证存储,请检查过时的令牌修订情况. + if ai.Revision != 0 && ai.Revision != s.authStore.Revision() { + // 节点在 Apply 流程的时候,会判断 Raft 日志条目中的请求鉴权版本号是否小于当前鉴权版本号,如果小于就拒绝写入. + // 请求认证的版本小于当前节点认证的版本 + return auth.ErrAuthOldRevision + } + return nil +} + +// OK 对外提供的接口 +func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + return s.raftRequestOnce(ctx, r) +} + +// ok +func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + result, err := s.processInternalRaftRequestOnce(ctx, r) + if err != nil { + return nil, err + } + if result.err != nil { + return nil, result.err + } + // startTime + startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time) + if ok && result.trace != nil { + applyStart := result.trace.GetStartTime() + result.trace.SetStartTime(startTime) + result.trace.InsertStep(0, applyStart, "处理raft请求") + } + marshal, _ := json.Marshal(result.trace) + fmt.Println("trace--->", string(marshal)) + return result.resp, nil +} + +// 当客户端提交一条数据变更请求时 +func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { + // 判断已提交未apply的记录是否超过限制 + ai := s.getAppliedIndex() + ci := s.getCommittedIndex() + if ci > ai+maxGapBetweenApplyAndCommitIndex { + return nil, ErrTooManyRequests + } + + r.Header = &pb.RequestHeader{ + ID: s.reqIDGen.Next(), // 生成一个requestID + } + + // 检查authinfo是否不是InternalAuthenticateRequest + if r.Authenticate == nil { + authInfo, err := s.AuthInfoFromCtx(ctx) + if err != nil { + return nil, 
err + } + if authInfo != nil { + r.Header.Username = authInfo.Username + r.Header.AuthRevision = authInfo.Revision + } + } + // 序列化请求数据 + + data, err := r.Marshal() + if err != nil { + return nil, err + } + + if len(data) > int(s.Cfg.MaxRequestBytes) { + return nil, ErrRequestTooLarge + } + + id := r.ID // 0 + if id == 0 { + id = r.Header.ID + } + ch := s.w.Register(id) // 注册一个channel,等待处理完成 + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) // 设置请求超时 + // cctx, cancel := context.WithTimeout(ctx, time.Second*1000) // 设置请求超时 + defer cancel() + + start := time.Now() + _ = s.applyEntryNormal + err = s.r.Propose(cctx, data) // 调用raft模块的Propose处理请求,存入到了待发送队列 + if err != nil { + s.w.Trigger(id, nil) + return nil, err + } + + select { + // 等待收到apply结果返回给客户端 + case x := <-ch: + return x.(*applyResult), nil + case <-cctx.Done(): + s.w.Trigger(id, nil) + return nil, s.parseProposeCtxErr(cctx.Err(), start) + case <-s.done: + return nil, ErrStopped + } +} + +// Apply 入口函数,负责处理内部的消息 +func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { + ar := &applyResult{} + defer func(start time.Time) { + success := ar.err == nil || ar.err == mvcc.ErrCompacted + if !success { + warnOfFailedRequest(a.s.Logger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) + } + }(time.Now()) + + switch { + case r.ClusterVersionSet != nil: // 3.5.x 实现 + // 设置集群版本 + a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3) + return nil + case r.ClusterMemberAttrSet != nil: + // 集群成员属性 + a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3) + return nil + case r.DowngradeInfoSet != nil: + // 成员降级 + a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3) + return nil + } + + if !shouldApplyV3 { + return nil + } + + switch { + case r.Range != nil: + ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range) // ✅ + case r.Put != nil: + ar.resp, ar.trace,
ar.err = a.s.applyV3.Put(context.TODO(), nil, r.Put) // ✅ + case r.DeleteRange != nil: + ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) // ✅ + case r.Txn != nil: + ar.resp, ar.trace, ar.err = a.s.applyV3.Txn(context.TODO(), r.Txn) + case r.Compaction != nil: + ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction) // ✅ 压缩kv 历史事件 + case r.LeaseGrant != nil: + ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) // ✅ 创建租约 + case r.LeaseRevoke != nil: + ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) // ✅ 删除租约 + case r.LeaseCheckpoint != nil: + // 避免 leader 变更时,导致的租约重置 + ar.resp, ar.err = a.s.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) // ✅ + case r.Alarm != nil: + ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) // ✅ + case r.Authenticate != nil: + ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) // ✅ + case r.AuthEnable != nil: + ar.resp, ar.err = a.s.applyV3.AuthEnable() // ✅ + case r.AuthDisable != nil: + ar.resp, ar.err = a.s.applyV3.AuthDisable() // ✅ + case r.AuthStatus != nil: + ar.resp, ar.err = a.s.applyV3.AuthStatus() // ✅ + case r.AuthUserAdd != nil: + ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) // ✅ + case r.AuthUserDelete != nil: + ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) // ✅ + case r.AuthUserChangePassword != nil: + ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) // ✅ + case r.AuthUserGrantRole != nil: + ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) // ✅ + case r.AuthUserGet != nil: + ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) // ✅ + case r.AuthUserRevokeRole != nil: + ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) // ✅ + case r.AuthUserList != nil: + ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) // ✅ + case r.AuthRoleAdd != nil: + ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) // ✅ + case r.AuthRoleGrantPermission != nil: + ar.resp, ar.err = 
a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) // ✅ + case r.AuthRoleGet != nil: + ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) // ✅ + case r.AuthRoleRevokePermission != nil: + ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) // ✅ + case r.AuthRoleDelete != nil: + ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) // ✅ + case r.AuthRoleList != nil: + ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) // ✅ + default: + a.s.lg.Panic("没有实现应用", zap.Stringer("raft-request", r)) + } + return ar +} + +// 等待leader就绪 +func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { + leader := s.cluster.Member(s.Leader()) + for leader == nil { + // 等待选举超时 + dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond + select { + case <-time.After(dur): + leader = s.cluster.Member(s.Leader()) + case <-s.stopping: + return nil, ErrStopped + case <-ctx.Done(): + return nil, ErrNoLeader + } + } + if leader == nil || len(leader.PeerURLs) == 0 { + return nil, ErrNoLeader + } + return leader, nil +} + +// Alarm 发送警报信息 +func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { + req := pb.InternalRaftRequest{Alarm: r} + // marshal, _ := json.Marshal(req) + // fmt.Println("marshal-->",string(marshal)) + resp, err := s.raftRequestOnce(ctx, req) + if err != nil { + return nil, err + } + return resp.(*pb.AlarmResponse), nil +} diff --git a/etcd/etcdserver/v3service_auth.go b/etcd/etcdserver/v3service_auth.go new file mode 100644 index 00000000000..83e62bb5ae2 --- /dev/null +++ b/etcd/etcdserver/v3service_auth.go @@ -0,0 +1,230 @@ +package etcdserver + +import ( + "context" + "encoding/base64" + + "github.com/golang/protobuf/proto" + "github.com/ls-2018/etcd_cn/etcd/auth" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "go.uber.org/zap" + "golang.org/x/crypto/bcrypt" + "google.golang.org/grpc" +) + +type AuthClient interface { + 
Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error) + AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error) + AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error) + AuthStatus(ctx context.Context, in *pb.AuthStatusRequest, opts ...grpc.CallOption) (*pb.AuthStatusResponse, error) + UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error) + UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error) + UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error) + UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error) + UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error) + UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error) + UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error) + RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error) + RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error) + RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error) + RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error) + RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error) 
+ RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error) +} + +func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthEnable: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthEnableResponse), nil +} + +func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthDisableResponse), nil +} + +func (s *EtcdServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthStatus: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthStatusResponse), nil +} + +func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { + if err := s.linearizeReadNotify(ctx); err != nil { + return nil, err + } + + lg := s.Logger() + + var resp proto.Message + for { + checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) + if err != nil { + if err != auth.ErrAuthNotEnabled { + lg.Warn( + "invalid authentication was requested", + zap.String("user", r.Name), + zap.Error(err), + ) + } + return nil, err + } + + st, err := s.AuthStore().GenTokenPrefix() + if err != nil { + return nil, err + } + + // internalReq doesn't need to have Password because the above s.AuthStore().CheckPassword() already did it. + // In addition, it will let a WAL entry not record password as a plain text. 
+ internalReq := &pb.InternalAuthenticateRequest{ + Name: r.Name, + SimpleToken: st, + } + + resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) + if err != nil { + return nil, err + } + if checkedRevision == s.AuthStore().Revision() { + break + } + + lg.Info("revision when password checked became stale; retrying") + } + + return resp.(*pb.AuthenticateResponse), nil +} + +// ------------------------------------------- OVER ---------------------------------------------------------vv + +func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + if r.Options == nil || !r.Options.NoPassword { + hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost()) + if err != nil { + return nil, err + } + r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword) + r.Password = "" + } + + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserAddResponse), nil +} + +func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserDeleteResponse), nil +} + +func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + if r.Password != "" { + hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost()) + if err != nil { + return nil, err + } + r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword) + r.Password = "" + } + + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserChangePasswordResponse), nil +} + +func (s *EtcdServer) 
UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserGrantRoleResponse), nil +} + +func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserGetResponse), nil +} + +func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserListResponse), nil +} + +func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthUserRevokeRoleResponse), nil +} + +// ------------------------------------------- OVER ---------------------------------------------------------vv + +func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleGrantPermissionResponse), nil +} + +func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleGetResponse), nil +} + +func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + 
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleListResponse), nil +} + +func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleRevokePermissionResponse), nil +} + +func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleDeleteResponse), nil +} + +func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleAddResponse), nil +} diff --git a/server/etcdserver/zap_raft.go b/etcd/etcdserver/zap_raft.go similarity index 94% rename from server/etcdserver/zap_raft.go rename to etcd/etcdserver/zap_raft.go index 66dd3caad0d..69540c71189 100644 --- a/server/etcdserver/zap_raft.go +++ b/etcd/etcdserver/zap_raft.go @@ -17,7 +17,7 @@ package etcdserver import ( "errors" - "go.etcd.io/raft/v3" + "github.com/ls-2018/etcd_cn/raft" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -37,8 +37,7 @@ func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) { // NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger". 
func NewRaftLoggerZap(lg *zap.Logger) raft.Logger { - skipCallerLg := lg.WithOptions(zap.AddCallerSkip(1)) - return &zapRaftLogger{lg: skipCallerLg, sugar: skipCallerLg.Sugar()} + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} } // NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core" diff --git a/etcd/lease/leasehttp/over_http.go b/etcd/lease/leasehttp/over_http.go new file mode 100644 index 00000000000..88da7bebe59 --- /dev/null +++ b/etcd/lease/leasehttp/over_http.go @@ -0,0 +1,242 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package leasehttp + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/lease/leasepb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/httputil" +) + +var ( + LeasePrefix = "/leases" + LeaseInternalPrefix = "/leases/internal" + applyTimeout = time.Second + ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out") +) + +func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler { + return &leaseHandler{l, waitch} +} + +type leaseHandler struct { + l lease.Lessor + waitch func() <-chan struct{} +} + +func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, "error reading body", http.StatusBadRequest) + return + } + + var v []byte + switch r.URL.Path { + case LeasePrefix: + lreq := pb.LeaseKeepAliveRequest{} + if uerr := lreq.Unmarshal(b); uerr != nil { + http.Error(w, "反序列失败", http.StatusBadRequest) + return + } + select { + case <-h.waitch(): + case <-time.After(applyTimeout): + http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) + return + } + ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID)) + if rerr != nil { + if rerr == lease.ErrLeaseNotFound { + http.Error(w, rerr.Error(), http.StatusNotFound) + return + } + + http.Error(w, rerr.Error(), http.StatusBadRequest) + return + } + // 填写ResponseHeader + resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl} + v, err = resp.Marshal() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + case LeaseInternalPrefix: + lreq := leasepb.LeaseInternalRequest{} + if lerr := lreq.Unmarshal(b); lerr != nil { + http.Error(w, "error unmarshalling 
request", http.StatusBadRequest) + return + } + select { + case <-h.waitch(): + case <-time.After(applyTimeout): + http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) + return + } + l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID)) + if l == nil { + http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound) + return + } + resp := &leasepb.LeaseInternalResponse{ + LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{ + Header: &pb.ResponseHeader{}, + ID: lreq.LeaseTimeToLiveRequest.ID, + TTL: int64(l.Remaining().Seconds()), + GrantedTTL: l.TTL(), + }, + } + if lreq.LeaseTimeToLiveRequest.Keys { + ks := l.Keys() + kbs := make([][]byte, len(ks)) + for i := range ks { + kbs[i] = []byte(ks[i]) + } + resp.LeaseTimeToLiveResponse.Keys = kbs + } + + v, err = resp.Marshal() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + default: + http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/protobuf") + w.Write(v) +} + +func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) { + lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal() + if err != nil { + return -1, err + } + + cc := &http.Client{Transport: rt} + req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) + if err != nil { + return -1, err + } + req.Header.Set("Content-Type", "application/protobuf") + req.Cancel = ctx.Done() + + resp, err := cc.Do(req) + if err != nil { + return -1, err + } + b, err := readResponse(resp) + if err != nil { + return -1, err + } + + if resp.StatusCode == http.StatusRequestTimeout { + return -1, ErrLeaseHTTPTimeout + } + + if resp.StatusCode == http.StatusNotFound { + return -1, lease.ErrLeaseNotFound + } + + if resp.StatusCode != http.StatusOK { + return -1, fmt.Errorf("lease: unknown error(%s)", string(b)) + } + + lresp := 
&pb.LeaseKeepAliveResponse{} + if err := lresp.Unmarshal(b); err != nil { + return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) + } + if lresp.ID != int64(id) { + return -1, fmt.Errorf("lease: renew id mismatch") + } + return lresp.TTL, nil +} + +func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) { + // will post lreq protobuf to leader + lreq, err := (&leasepb.LeaseInternalRequest{ + LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{ + ID: int64(id), + Keys: keys, + }, + }).Marshal() + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/protobuf") + + req = req.WithContext(ctx) + + cc := &http.Client{Transport: rt} + var b []byte + // perform the request synchronously; cancellation is handled by the context attached above + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + b, err = readResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode == http.StatusRequestTimeout { + return nil, ErrLeaseHTTPTimeout + } + if resp.StatusCode == http.StatusNotFound { + return nil, lease.ErrLeaseNotFound + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("lease: unknown error(%s)", string(b)) + } + + lresp := &leasepb.LeaseInternalResponse{} + if err := lresp.Unmarshal(b); err != nil { + return nil, fmt.Errorf(`lease: %v.
data = "%s"`, err, string(b)) + } + if lresp.LeaseTimeToLiveResponse.ID != int64(id) { + return nil, fmt.Errorf("lease: renew id mismatch") + } + return lresp, nil +} + +func readResponse(resp *http.Response) (b []byte, err error) { + b, err = ioutil.ReadAll(resp.Body) + httputil.GracefulClose(resp) + return +} diff --git a/etcd/lease/leasepb/lease.pb.go b/etcd/lease/leasepb/lease.pb.go new file mode 100644 index 00000000000..ca416b03ff9 --- /dev/null +++ b/etcd/lease/leasepb/lease.pb.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-gogo. +// source: lease.proto + +package leasepb + +import ( + "encoding/json" + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + etcdserverpb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Lease struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` + RemainingTTL int64 `protobuf:"varint,3,opt,name=RemainingTTL,proto3" json:"RemainingTTL,omitempty"` +} + +func (m *Lease) Reset() { *m = Lease{} } +func (m *Lease) String() string { return proto.CompactTextString(m) } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_3dd57e402472b33a, []int{0} +} + +type LeaseInternalRequest struct { + LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest,proto3" json:"LeaseTimeToLiveRequest,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} } +func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseInternalRequest) ProtoMessage() {} +func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3dd57e402472b33a, []int{1} +} + +type LeaseInternalResponse struct { + LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse,proto3" json:"LeaseTimeToLiveResponse,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} } +func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseInternalResponse) ProtoMessage() {} +func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3dd57e402472b33a, []int{2} +} + +func init() { + proto.RegisterType((*Lease)(nil), "leasepb.Lease") + 
proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest") + proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse") +} + +func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) } + +var fileDescriptor_3dd57e402472b33a = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, + 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, + 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x3e, 0xb5, 0x24, 0x39, 0x45, + 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2, 0x2f, + 0x2a, 0x48, 0x86, 0x28, 0x50, 0xf2, 0xe5, 0x62, 0xf5, 0x01, 0x99, 0x20, 0xc4, 0xc7, 0xc5, 0xe4, + 0xe9, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x1c, 0xc4, 0xe4, 0xe9, 0x22, 0x24, 0xc0, 0xc5, 0x1c, + 0x12, 0xe2, 0x23, 0xc1, 0x04, 0x16, 0x00, 0x31, 0x85, 0x94, 0xb8, 0x78, 0x82, 0x52, 0x73, 0x13, + 0x33, 0xf3, 0x32, 0xf3, 0xd2, 0x41, 0x52, 0xcc, 0x60, 0x29, 0x14, 0x31, 0xa5, 0x12, 0x2e, 0x11, + 0xb0, 0x71, 0x9e, 0x79, 0x25, 0xa9, 0x45, 0x79, 0x89, 0x39, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, + 0x25, 0x42, 0x31, 0x5c, 0x62, 0x60, 0xf1, 0x90, 0xcc, 0xdc, 0xd4, 0x90, 0x7c, 0x9f, 0xcc, 0xb2, + 0x54, 0xa8, 0x0c, 0xd8, 0x46, 0x6e, 0x23, 0x15, 0x3d, 0x64, 0xf7, 0xe9, 0x61, 0x57, 0x1b, 0x84, + 0xc3, 0x0c, 0xa5, 0x0a, 0x2e, 0x51, 0x34, 0x5b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe2, + 0xb9, 0xc4, 0x31, 0xb4, 0x40, 0xa4, 0xa0, 0xf6, 0xaa, 0x12, 0xb0, 0x17, 0xa2, 0x38, 0x08, 0x97, + 0x29, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x0e, + 0x5f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x8a, 0x94, 0xb9, 0xae, 0x01, 0x00, 0x00, +} + +func (m *Lease) Marshal() (dAtA []byte, err error) { + return 
json.Marshal(m) +} + +func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *Lease) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *LeaseInternalRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *LeaseInternalResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Lease) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +var ( + ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") +) diff --git a/server/lease/leasepb/lease.proto b/etcd/lease/leasepb/lease.proto similarity index 100% rename from server/lease/leasepb/lease.proto rename to etcd/lease/leasepb/lease.proto diff --git a/etcd/lease/over_lease_queue.go b/etcd/lease/over_lease_queue.go new file mode 100644 index 00000000000..38aa5b6829c --- /dev/null +++ b/etcd/lease/over_lease_queue.go @@ -0,0 +1,111 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package lease + +import ( + "container/heap" + "time" +) + +type LeaseWithTime struct { + id LeaseID + time time.Time + index int +} + +type LeaseQueue []*LeaseWithTime + +func (pq LeaseQueue) Len() int { return len(pq) } + +func (pq LeaseQueue) Less(i, j int) bool { + return pq[i].time.Before(pq[j].time) +} + +func (pq LeaseQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *LeaseQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*LeaseWithTime) + item.index = n + *pq = append(*pq, item) +} + +func (pq *LeaseQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +var _ heap.Interface = &LeaseQueue{} + +// ExpiredNotifier 一个租约只能保存一个key,`Register`将更新相应的租约时间. +// 用于通知lessor移除过期租约的队列 +type ExpiredNotifier struct { + m map[LeaseID]*LeaseWithTime + queue LeaseQueue +} + +// 租约到期通知器 +func newLeaseExpiredNotifier() *ExpiredNotifier { + return &ExpiredNotifier{ + m: make(map[LeaseID]*LeaseWithTime), + queue: make(LeaseQueue, 0), + } +} + +// Init ok +func (mq *ExpiredNotifier) Init() { + heap.Init(&mq.queue) + mq.m = make(map[LeaseID]*LeaseWithTime) + for _, item := range mq.queue { + mq.m[item.id] = item + } +} + +// RegisterOrUpdate 注册或更新管理的租约 +func (mq *ExpiredNotifier) RegisterOrUpdate(item *LeaseWithTime) { + if old, ok := mq.m[item.id]; ok { + old.time = item.time // 过期时间 + heap.Fix(&mq.queue, old.index) // 当元素值,发生改变, Fix会重新调整顺序 + } else { + heap.Push(&mq.queue, item) // 创建 + mq.m[item.id] = item + } +} + +func (mq *ExpiredNotifier) Unregister() *LeaseWithTime { + item := heap.Pop(&mq.queue).(*LeaseWithTime) + delete(mq.m, item.id) + return item +} + +// Poll 获取第一个要快要过期的租约 +func (mq *ExpiredNotifier) Poll() *LeaseWithTime { + if mq.Len() == 0 { + return nil + } + return mq.queue[0] +} + +func (mq 
*ExpiredNotifier) Len() int { + return len(mq.m) +} diff --git a/etcd/lease/over_lessor.go b/etcd/lease/over_lessor.go new file mode 100644 index 00000000000..53f7fc7fda9 --- /dev/null +++ b/etcd/lease/over_lessor.go @@ -0,0 +1,866 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lease + +import ( + "container/heap" + "context" + "encoding/binary" + "errors" + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/ls-2018/etcd_cn/etcd/lease/leasepb" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "go.uber.org/zap" +) + +const ( + NoLease = LeaseID(0) // 是一个特殊的LeaseID,表示没有租约. + MaxLeaseTTL = 9000000000 +) + +var v3_6 = semver.Version{Major: 3, Minor: 6} + +var ( + forever = time.Time{} + leaseRevokeRate = 1000 // 每秒撤销租约的最大数量;可为测试配置 + leaseCheckpointRate = 1000 // 每秒记录在共识日志中的最大租约快照数量;可对测试进行配置 + defaultLeaseCheckpointInterval = 5 * time.Minute // 租约快照的默认时间间隔 + maxLeaseCheckpointBatchSize = 1000 // 租约快照的最大数量,以批处理为一个单一的共识日志条目 + defaultExpiredleaseRetryInterval = 3 * time.Second // 检查过期租约是否被撤销的默认时间间隔. 
+ ErrNotPrimary = errors.New("不是主 lessor") + ErrLeaseNotFound = errors.New("lease没有发现") + ErrLeaseExists = errors.New("lease已存在") + ErrLeaseTTLTooLarge = errors.New("过大的TTL") +) + +type TxnDelete interface { + DeleteRange(key, end []byte) (n, rev int64) + End() +} + +type RangeDeleter func() TxnDelete + +// Checkpointer 允许对租约剩余ttl的检查点到 wal日志.这里定义是为了避免与mvcc的循环依赖. +type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest) + +type LeaseID int64 + +// Lessor 创建、移除、更新租约 +type Lessor interface { + // SetRangeDeleter lets the lessor create TxnDeletes to the store. + // Lessor deletes the items in the revoked or expired lease by creating + // new TxnDeletes. + SetRangeDeleter(rd RangeDeleter) + SetCheckpointer(cp Checkpointer) + Grant(id LeaseID, ttl int64) (*Lease, error) // 创建一个制定了过期时间的租约 + Revoke(id LeaseID) error // 移除租约 + Checkpoint(id LeaseID, remainingTTL int64) error // 更新租约的剩余时间到其他节点 + Attach(id LeaseID, items []LeaseItem) error // + GetLease(item LeaseItem) LeaseID // 返回给定项目的LeaseID.如果没有找到租约,则返回NoLease值. + Detach(id LeaseID, items []LeaseItem) error // 将租约从key上移除 + Promote(extend time.Duration) // 推动lessor成为主lessor.主lessor管理租约的到期和续期.新晋升的lessor更新所有租约的ttl 以延长先前的ttl + Demote() // leader变更,触发 + Renew(id LeaseID) (int64, error) // 重新计算过期时间 + Lookup(id LeaseID) *Lease + Leases() []*Lease // 获取当前节点上的所有租约 + ExpiredLeasesC() <-chan []*Lease // 返回一个用于接收过期租约的CHAN. + Recover(b backend.Backend, rd RangeDeleter) + Stop() +} + +type lessor struct { + mu sync.RWMutex + demotec chan struct{} // 当lessor成为主时,会被设置.当被降级时,会被关闭 + leaseMap map[LeaseID]*Lease // 存储了所有的租约 + leaseExpiredNotifier *ExpiredNotifier // 租约到期管理 + leaseCheckpointHeap LeaseQueue // 记录检查点,租约ID + itemMap map[LeaseItem]LeaseID // key 关联到了哪个租约 + rd RangeDeleter // 租约过期时,使用范围删除 + cp Checkpointer // 当一个租约的最后期限应该被持久化,以保持跨领袖选举和重启的剩余TTL,出租人将通过Checkpointer对租约进行检查. + b backend.Backend // 持久化租约到bolt.db. + minLeaseTTL int64 // 是可授予租约的最小租期TTL.任何缩短TTL的请求都被扩展到最小TTL. 
+ expiredC chan []*Lease // 发送一批已经过期的租约 + // stopC is a channel whose closure indicates that the lessor should be stopped. + stopC chan struct{} + doneC chan struct{} // close表明lessor被停止. + lg *zap.Logger + checkpointInterval time.Duration // 租约快照的默认时间间隔 + expiredLeaseRetryInterval time.Duration // 检查过期租约是否被撤销的默认时间间隔 + checkpointPersist bool // lessor是否应始终保持剩余的TTL(在v3.6中始终启用). + cluster cluster // 基于集群版本 调整lessor逻辑 +} +type Lease struct { + ID LeaseID // 租约ID , 自增得到的, + ttl int64 // 租约的生存时间,以秒为单位 + remainingTTL int64 // 剩余生存时间,以秒为单位,如果为零,则视为未设置,应使用完整的tl. + expiryMu sync.RWMutex // 保护并发的访问 + expiry time.Time // 是租约到期的时间.当expiry.IsZero()为真时,永久存在. + mu sync.RWMutex // 保护并发的访问 itemSet + itemSet map[LeaseItem]struct{} // 哪些租约附加到了key + revokec chan struct{} // 租约被删除、到期 关闭此channel,触发后续逻辑 +} + +type cluster interface { + Version() *semver.Version // 是整个集群的最小major.minor版本. +} + +type LessorConfig struct { + MinLeaseTTL int64 // 是可授予租约的最小租期TTL.任何缩短TTL的请求都被扩展到最小TTL. + CheckpointInterval time.Duration // 租约快照的默认时间间隔 + ExpiredLeasesRetryInterval time.Duration // 租约快照的默认时间间隔 + CheckpointPersist bool // lessor是否应始终保持剩余的TTL(在v3.6中始终启用). 
+} + +func NewLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) Lessor { + return newLessor(lg, b, cluster, cfg) +} + +func (le *lessor) shouldPersistCheckpoints() bool { + cv := le.cluster.Version() + return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, v3_6)) +} + +func greaterOrEqual(first, second semver.Version) bool { + return !first.LessThan(second) +} + +// Promote 当节点成为leader时 +func (le *lessor) Promote(extend time.Duration) { + // extend 是选举超时 + le.mu.Lock() + defer le.mu.Unlock() + + le.demotec = make(chan struct{}) + + // 刷新所有租约的过期时间 + for _, l := range le.leaseMap { + l.refresh(extend) + item := &LeaseWithTime{id: l.ID, time: l.expiry} + le.leaseExpiredNotifier.RegisterOrUpdate(item) // 开始监听租约过期 + le.scheduleCheckpointIfNeeded(l) + } + + if len(le.leaseMap) < leaseRevokeRate { + // 没有租约堆积的可能性 + return + } + + // 如果有重叠,请调整过期时间 + leases := le.unsafeLeases() + sort.Sort(leasesByExpiry(leases)) + + baseWindow := leases[0].Remaining() // 剩余存活时间 + nextWindow := baseWindow + time.Second + expires := 0 // 到期 + // 失效期限少于总失效率,所以堆积的租约不会消耗整个失效限制 + targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 + for _, l := range leases { + remaining := l.Remaining() + if remaining > nextWindow { + baseWindow = remaining + nextWindow = baseWindow + time.Second + expires = 1 + continue + } + expires++ + if expires <= targetExpiresPerSecond { + continue + } + rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) + // 如果租期延长n秒,则比基本窗口早n秒的租期只应延长1秒. 
+ rateDelay -= float64(remaining - baseWindow) + delay := time.Duration(rateDelay) + nextWindow = baseWindow + delay + l.refresh(delay + extend) + item := &LeaseWithTime{id: l.ID, time: l.expiry} + le.leaseExpiredNotifier.RegisterOrUpdate(item) + le.scheduleCheckpointIfNeeded(l) + } +} + +type leasesByExpiry []*Lease + +func (le leasesByExpiry) Len() int { return len(le) } +func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } +func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } + +func (le *lessor) GetLease(item LeaseItem) LeaseID { + le.mu.RLock() + id := le.itemMap[item] // 找不到就是永久 + le.mu.RUnlock() + return id +} + +func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) { + le.mu.Lock() + defer le.mu.Unlock() + + le.b = b + le.rd = rd + le.leaseMap = make(map[LeaseID]*Lease) + le.itemMap = make(map[LeaseItem]LeaseID) + le.initAndRecover() +} + +func (le *lessor) Stop() { + close(le.stopC) + <-le.doneC +} + +// --------------------------------------------- OVER ----------------------------------------------------------------- + +// FakeLessor is a fake implementation of Lessor interface. +// Used for testing only. 
+type FakeLessor struct{} + +func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {} + +func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {} + +func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil } + +func (fl *FakeLessor) Revoke(id LeaseID) error { return nil } + +func (fl *FakeLessor) Checkpoint(id LeaseID, remainingTTL int64) error { return nil } + +func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil } + +func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 } + +func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil } + +func (fl *FakeLessor) Promote(extend time.Duration) {} + +func (fl *FakeLessor) Demote() {} + +func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil } + +func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil } + +func (fl *FakeLessor) Leases() []*Lease { return nil } + +func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil } + +func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {} + +func (fl *FakeLessor) Stop() {} + +type FakeTxnDelete struct { + backend.BatchTx +} + +func (ftd *FakeTxnDelete) DeleteRange(key, end []byte) (n, rev int64) { return 0, 0 } +func (ftd *FakeTxnDelete) End() { ftd.Unlock() } + +// --------------------------------------------- OVER ----------------------------------------------------------------- + +// Grant 创建租约 +func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) { + if id == NoLease { + return nil, ErrLeaseNotFound + } + + if ttl > MaxLeaseTTL { + return nil, ErrLeaseTTLTooLarge + } + + // lessor在高负荷时,应延长租期,以减少续租. 
+ l := &Lease{ + ID: id, + ttl: ttl, + itemSet: make(map[LeaseItem]struct{}), + revokec: make(chan struct{}), // 租约被删除、到期 关闭此channel,触发后续逻辑 + } + + le.mu.Lock() + defer le.mu.Unlock() + + if _, ok := le.leaseMap[id]; ok { + return nil, ErrLeaseExists + } + + if l.ttl < le.minLeaseTTL { + l.ttl = le.minLeaseTTL + } + + if le.isPrimary() { // 是否还是主lessor + l.refresh(0) // 刷新租约的过期时间 + } else { + l.forever() + } + + le.leaseMap[id] = l + l.persistTo(le.b) + + if le.isPrimary() { + item := &LeaseWithTime{id: l.ID, time: l.expiry} + le.leaseExpiredNotifier.RegisterOrUpdate(item) + le.scheduleCheckpointIfNeeded(l) + } + + return l, nil +} + +// expireExists 返回是否有已过期的租约, next 表明它可能在下次尝试中存在. +func (le *lessor) expireExists() (l *Lease, ok bool, next bool) { + if le.leaseExpiredNotifier.Len() == 0 { + return nil, false, false + } + + item := le.leaseExpiredNotifier.Poll() // 获取第一个,不会从堆中剔除 + l = le.leaseMap[item.id] + if l == nil { + // 租约已过期或 已经被移除,不需要再次移除 + le.leaseExpiredNotifier.Unregister() // O(log N) 弹出第一个 + return nil, false, true + } + now := time.Now() + if now.Before(item.time) { + // 判断时间有没有过期 + return l, false, false + } + + // recheck if revoke is complete after retry interval + item.time = now.Add(le.expiredLeaseRetryInterval) + le.leaseExpiredNotifier.RegisterOrUpdate(item) + return l, true, false +} + +// findExpiredLeases 在leaseExpiredNotifier中的小顶堆中删除过期的lease,有数量限制 +func (le *lessor) findExpiredLeases(limit int) []*Lease { + leases := make([]*Lease, 0, 16) + + for { + l, ok, next := le.expireExists() // 获取一个已过期的 租约,以及之后是否可能仍然存在 + if !ok && !next { + // 当前没有,以后不存在 + break + } + if !ok { + // 当前没有 + continue + } + if next { + // 以后存在 + continue + } + // + if l.expired() { + leases = append(leases, l) + if len(leases) == limit { + break + } + } + } + + return leases +} + +// 查找所有过期的租约,并将其发送到过期的通道中等待撤销. 
+func (le *lessor) revokeExpiredLeases() { + var ls []*Lease + + // 每秒撤销租约的最大数量, 500ms调用一次,那么限制应该改为 /2 + revokeLimit := leaseRevokeRate / 2 + + le.mu.RLock() + if le.isPrimary() { // 主 + // 在leaseExpiredNotifier中的小顶堆中删除过期的lease,有数量限制 + ls = le.findExpiredLeases(revokeLimit) + } + le.mu.RUnlock() + + if len(ls) != 0 { + select { + case <-le.stopC: + return + case le.expiredC <- ls: + default: + // expiredC的接收器可能正在忙着处理其他东西,下次500ms后再试 + } + } +} + +// ExpiredLeasesC 返回一批已过期的租约 +func (le *lessor) ExpiredLeasesC() <-chan []*Lease { + return le.expiredC +} + +// Revoke 从kvindex以及bolt.db中删除 +func (le *lessor) Revoke(id LeaseID) error { + le.mu.Lock() + + l := le.leaseMap[id] + if l == nil { + le.mu.Unlock() + return ErrLeaseNotFound + } + defer close(l.revokec) + le.mu.Unlock() + // mvcc.newWatchableStore + if le.rd == nil { + return nil + } + + txn := le.rd() + + // 对键进行排序,以便在所有成员中以相同的顺序删除,否则后台的哈希值将是不同的. + keys := l.Keys() // 返回当前组约绑定到了哪些key + sort.StringSlice(keys).Sort() + for _, key := range keys { // 该租约附加到了哪些key上 + fmt.Printf("租约:%d到期 删除key:%s \n", id, key) + txn.DeleteRange([]byte(key), nil) // 从内存 kvindex 中 删除 + } + + le.mu.Lock() + defer le.mu.Unlock() + delete(le.leaseMap, l.ID) + // 租约的删除需要与kv的删除在同一个后台事务中.否则,如果 etcdserver 在两者之间发生故障,我们可能会出现不执行撤销或不删除钥匙的结果. 
+ le.b.BatchTx().UnsafeDelete(buckets.Lease, int64ToBytes(int64(l.ID))) // 删除bolt.db 里的key + txn.End() + return nil +} + +// Remaining 返回剩余时间 +func (l *Lease) Remaining() time.Duration { + l.expiryMu.RLock() + defer l.expiryMu.RUnlock() + if l.expiry.IsZero() { + return time.Duration(math.MaxInt64) + } + return time.Until(l.expiry) +} + +type LeaseItem struct { + Key string +} + +func int64ToBytes(n int64) []byte { + bytes := make([]byte, 8) + binary.BigEndian.PutUint64(bytes, uint64(n)) + return bytes +} + +// 是否已过期 +func (l *Lease) expired() bool { + return l.Remaining() <= 0 +} + +// 持久化租约 +func (l *Lease) persistTo(b backend.Backend) { + key := int64ToBytes(int64(l.ID)) + + lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL} + val, err := lpb.Marshal() + if err != nil { + panic("序列化lease消息失败") + } + + b.BatchTx().Lock() + b.BatchTx().UnsafePut(buckets.Lease, key, val) + b.BatchTx().Unlock() +} + +func (l *Lease) TTL() int64 { + return l.ttl +} + +// Keys 返回当前组约绑定到了哪些key +func (l *Lease) Keys() []string { + l.mu.RLock() + keys := make([]string, 0, len(l.itemSet)) + for k := range l.itemSet { + keys = append(keys, k.Key) + } + l.mu.RUnlock() + return keys +} + +// getRemainingTTL returns the last checkpointed remaining TTL of the lease. 
+func (l *Lease) getRemainingTTL() int64 { + if l.remainingTTL > 0 { + return l.remainingTTL + } + return l.ttl +} + +// 创建租约管理器 +func newLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) *lessor { + checkpointInterval := cfg.CheckpointInterval + expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval + if checkpointInterval == 0 { + checkpointInterval = defaultLeaseCheckpointInterval + } + if expiredLeaseRetryInterval == 0 { + expiredLeaseRetryInterval = defaultExpiredleaseRetryInterval + } + l := &lessor{ + leaseMap: make(map[LeaseID]*Lease), + itemMap: make(map[LeaseItem]LeaseID), + leaseExpiredNotifier: newLeaseExpiredNotifier(), // 租约到期移除的队列 + leaseCheckpointHeap: make(LeaseQueue, 0), + b: b, // bolt.db + minLeaseTTL: cfg.MinLeaseTTL, // 是可授予租约的最小租期TTL.任何缩短TTL的请求都被扩展到最小TTL. + checkpointInterval: checkpointInterval, // 租约快照的默认时间间隔 + expiredLeaseRetryInterval: expiredLeaseRetryInterval, // 检查过期租约是否被撤销的默认时间间隔 + checkpointPersist: cfg.CheckpointPersist, // lessor是否应始终保持剩余的TTL(在v3.6中始终启用). + expiredC: make(chan []*Lease, 16), // 避免不必要的阻塞 + stopC: make(chan struct{}), + doneC: make(chan struct{}), + lg: lg, + cluster: cluster, + } + l.initAndRecover() // 从bolt.db恢复租约信息 + + go l.runLoop() // 开始检查是否有快过期的租约 + + return l +} + +// isPrimary 表示该出租人是否为主要出租人.主出租人负责管理租约的到期和更新. +// 在etcd中,raft leader是主要的.因此,在同一时间可能有两个主要的领导者(raft允许同时存在的领导者,但任期不同),最多是一个领导者选举超时. +// 旧的主要领导者不能影响正确性,因为它的提议有一个较小的期限,不会被提交. +// TODO:raft的跟随者不转发租约管理提案.可能会有一个非常小的窗口(通常在一秒钟之内,这取决于调度),在raft领导者降级和lessor降级之间 +// 通常情况下,这不应该是一个问题.租约对时间不应该那么敏感. 
+func (le *lessor) isPrimary() bool { + return le.demotec != nil +} + +// SetRangeDeleter 主要是设置一个用于获取delete的写事务的函数 +func (le *lessor) SetRangeDeleter(rd RangeDeleter) { + le.mu.Lock() + defer le.mu.Unlock() + le.rd = rd +} + +// Renew 在租约有效期内,重新计算过期时间 +func (le *lessor) Renew(id LeaseID) (int64, error) { + le.mu.RLock() + if !le.isPrimary() { + le.mu.RUnlock() + return -1, ErrNotPrimary + } + + demotec := le.demotec + + l := le.leaseMap[id] + if l == nil { + le.mu.RUnlock() + return -1, ErrLeaseNotFound + } + // 清空剩余时间 + clearRemainingTTL := le.cp != nil && l.remainingTTL > 0 + + le.mu.RUnlock() + if l.expired() { // 租约过期了 + select { + case <-l.revokec: // 当租约到期,会关闭 + return -1, ErrLeaseNotFound + case <-demotec: + return -1, ErrNotPrimary + case <-le.stopC: + return -1, ErrNotPrimary + } + } + + // Clear remaining TTL when we renew if it is set + // By applying a RAFT entry only when the remainingTTL is already set, we limit the number + // of RAFT entries written per lease to a max of 2 per checkpoint interval. + if clearRemainingTTL { + // 定期批量地将 Lease 剩余的 TTL 基于 Raft Log 同步给 Follower 节点,Follower 节点收到 CheckPoint 请求后, + // 更新内存数据结构 LeaseMap 的剩余 TTL 信息. 
+ le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), RemainingTtl: 0}}}) + } + + le.mu.Lock() + l.refresh(0) + item := &LeaseWithTime{id: l.ID, time: l.expiry} + le.leaseExpiredNotifier.RegisterOrUpdate(item) + le.mu.Unlock() + + return l.ttl, nil +} + +// Lookup 查找租约 +func (le *lessor) Lookup(id LeaseID) *Lease { + le.mu.RLock() + defer le.mu.RUnlock() + return le.leaseMap[id] +} + +// Leases 获取当前节点上的所有租约 +func (le *lessor) Leases() []*Lease { + le.mu.RLock() + ls := le.unsafeLeases() + le.mu.RUnlock() + sort.Sort(leasesByExpiry(ls)) + return ls +} + +// Attach 将一些key附加到租约上 +func (le *lessor) Attach(id LeaseID, items []LeaseItem) error { + le.mu.Lock() + defer le.mu.Unlock() + + l := le.leaseMap[id] + if l == nil { + return ErrLeaseNotFound + } + + l.mu.Lock() + for _, it := range items { + l.itemSet[it] = struct{}{} + le.itemMap[it] = id + } + l.mu.Unlock() + return nil +} + +// Detach 将一些key从租约上移除 +func (le *lessor) Detach(id LeaseID, items []LeaseItem) error { + le.mu.Lock() + defer le.mu.Unlock() + + l := le.leaseMap[id] + if l == nil { + return ErrLeaseNotFound + } + + l.mu.Lock() + for _, it := range items { + delete(l.itemSet, it) + delete(le.itemMap, it) + } + l.mu.Unlock() + return nil +} + +// refresh 刷新租约的过期时间 +func (l *Lease) refresh(extend time.Duration) { + newExpiry := time.Now().Add(extend + time.Duration(l.getRemainingTTL())*time.Second) + l.expiryMu.Lock() + defer l.expiryMu.Unlock() + l.expiry = newExpiry +} + +// Demote leader不是自己时,会触发 +func (le *lessor) Demote() { + le.mu.Lock() + defer le.mu.Unlock() + + // 将所有租约的有效期设置为永久 + for _, l := range le.leaseMap { + l.forever() // 内存中 + } + // 清空租约 检查点信息 + le.clearScheduledLeasesCheckpoints() + // 重置 租约到期通知器 + le.clearLeaseExpiredNotifier() + + if le.demotec != nil { + close(le.demotec) + le.demotec = nil + } +} + +// forever 设置永久过期时间,当不是主lessor +func (l *Lease) forever() { + l.expiryMu.Lock() + defer l.expiryMu.Unlock() + l.expiry = forever +} 
+ +// 返回所有租约 +func (le *lessor) unsafeLeases() []*Lease { + leases := make([]*Lease, 0, len(le.leaseMap)) + for _, l := range le.leaseMap { + leases = append(leases, l) + } + return leases +} + +// 清空租约 检查点信息 +func (le *lessor) clearScheduledLeasesCheckpoints() { + le.leaseCheckpointHeap = make(LeaseQueue, 0) +} + +// OK +func (le *lessor) clearLeaseExpiredNotifier() { + le.leaseExpiredNotifier = newLeaseExpiredNotifier() +} + +// 从bolt.db恢复租约信息 +func (le *lessor) initAndRecover() { + tx := le.b.BatchTx() + tx.Lock() + + tx.UnsafeCreateBucket(buckets.Lease) + _, vs := tx.UnsafeRange(buckets.Lease, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0) + for i := range vs { + var lpb leasepb.Lease + err := lpb.Unmarshal(vs[i]) + if err != nil { + tx.Unlock() + panic("反序列化lease 消息失败") + } + ID := LeaseID(lpb.ID) + if lpb.TTL < le.minLeaseTTL { + lpb.TTL = le.minLeaseTTL + } + le.leaseMap[ID] = &Lease{ + ID: ID, + ttl: lpb.TTL, + // itemSet将在恢复键值对将过期时间设置为永久 ,提升时刷新 + itemSet: make(map[LeaseItem]struct{}), + expiry: forever, + revokec: make(chan struct{}), + remainingTTL: lpb.RemainingTTL, + } + } + le.leaseExpiredNotifier.Init() // 填充mq.m + heap.Init(&le.leaseCheckpointHeap) + tx.Unlock() + + le.b.ForceCommit() +} + +func (le *lessor) SetCheckpointer(cp Checkpointer) { + le.mu.Lock() + defer le.mu.Unlock() + le.cp = cp +} + +// OK +func (le *lessor) runLoop() { + defer close(le.doneC) + for { + // 查找所有过期的租约,并将其发送到过期的通道中等待撤销. + le.revokeExpiredLeases() + // 查找所有到期的预定租约检查点将它们提交给检查点以将它们持久化到共识日志中. + le.checkpointScheduledLeases() // 定时触发更新 Lease 的剩余到期时间的操作. + + select { + case <-time.After(500 * time.Millisecond): + case <-le.stopC: + return + } + } +} + +// 查找所有到期的预定租约检查点将它们提交给检查点以将它们持久化到共识日志中. 
+func (le *lessor) checkpointScheduledLeases() { + var cps []*pb.LeaseCheckpoint + + // rate limit + for i := 0; i < leaseCheckpointRate/2; i++ { + le.mu.Lock() + if le.isPrimary() { + cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize) + } + le.mu.Unlock() + + if len(cps) != 0 { + // 定期批量地将 Lease 剩余的 TTL 基于 Raft Log 同步给 Follower 节点,Follower 节点收到 CheckPoint 请求后, + // 更新内存数据结构 LeaseMap 的剩余 TTL 信息. + // srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp}) + // case r.LeaseCheckpoint != nil: + le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps}) + } + if len(cps) < maxLeaseCheckpointBatchSize { + return + } + } +} + +// 开始执行检查 ,leader 变更时,防止ttl重置 +// 租约创建时、成为leader后、收到checkpoint 共识消息后 +func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) { + if le.cp == nil { + return + } + // 剩余存活时间,大于 checkpointInterval + le.checkpointInterval = time.Second * 20 + if lease.getRemainingTTL() > int64(le.checkpointInterval.Seconds()) { + if le.lg != nil { + le.lg.Info("开始调度 租约 检查", zap.Int64("leaseID", int64(lease.ID)), zap.Duration("intervalSeconds", le.checkpointInterval)) + } + heap.Push(&le.leaseCheckpointHeap, &LeaseWithTime{ + id: lease.ID, + time: time.Now().Add(le.checkpointInterval), // 300 秒后租约到期, 检查这个租约 + }) + le.lg.Info("租约", zap.Int("checkpoint", len(le.leaseCheckpointHeap)), zap.Int("lease", len(le.leaseMap))) + } +} + +// 查找到期的检查点 +func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCheckpoint { + if le.cp == nil { + return nil + } + + now := time.Now() + var cps []*pb.LeaseCheckpoint + for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit { + lt := le.leaseCheckpointHeap[0] + if lt.time.After(now) { // 过了 检查点的时间 + return cps + } + heap.Pop(&le.leaseCheckpointHeap) + var l *Lease + var ok bool + if l, ok = le.leaseMap[lt.id]; !ok { + continue + } + if !now.Before(l.expiry) { + continue + } + remainingTTL := int64(math.Ceil(l.expiry.Sub(now).Seconds())) // 剩余时间 + if 
remainingTTL >= l.ttl { + continue + } + if le.lg != nil { + le.lg.Debug("检查租约ing", zap.Int64("leaseID", int64(lt.id)), zap.Int64("remainingTTL", remainingTTL)) + } + cps = append(cps, &pb.LeaseCheckpoint{ID: int64(lt.id), RemainingTtl: remainingTTL}) + } + return cps +} + +// Checkpoint 更新租约的剩余时间 +func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error { + le.mu.Lock() + defer le.mu.Unlock() + + if l, ok := le.leaseMap[id]; ok { + // 当检查点时,我们只更新剩余的TTL,Promote 负责将其应用于租赁到期. + l.remainingTTL = remainingTTL + if le.shouldPersistCheckpoints() { // true + l.persistTo(le.b) + } + if le.isPrimary() { + // 根据需要,安排下一个检查点 + le.scheduleCheckpointIfNeeded(l) + } + } + return nil +} diff --git a/etcd/main.go b/etcd/main.go new file mode 100644 index 00000000000..dd85e316440 --- /dev/null +++ b/etcd/main.go @@ -0,0 +1,33 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package main is a simple wrapper of the real etcd entrypoint package +// (located at github.com/ls-2018/etcd_cn/etcdmain) to ensure that etcd is still +// "go getable"; e.g. `go get go.etcd.io/etcd` works as expected and +// builds a binary in $GOBIN/etcd +// +// This package should NOT be extended or modified in any way; to modify the +// etcd binary, work in the `github.com/ls-2018/etcd_cn/etcdmain` package. 
+// +package main + +import ( + "os" + + "github.com/ls-2018/etcd_cn/etcd/etcdmain" +) + +func main() { + etcdmain.Main(os.Args) +} diff --git a/etcd/mvcc/backend/backend_bolt.go b/etcd/mvcc/backend/backend_bolt.go new file mode 100644 index 00000000000..2accec71929 --- /dev/null +++ b/etcd/mvcc/backend/backend_bolt.go @@ -0,0 +1,580 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + humanize "github.com/dustin/go-humanize" + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +var ( + defaultBatchLimit = 10000 + defaultBatchInterval = 100 * time.Millisecond + + defragLimit = 10000 + + // initialMmapSize is the initial size of the mmapped region. Setting this larger than + // the potential max db size can prevent writer from blocking reader. + // This only works for linux. + initialMmapSize = uint64(10 * 1024 * 1024 * 1024) + + // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. 
+ minSnapshotWarningTimeout = 30 * time.Second +) + +type Backend interface { + ReadTx() ReadTx // // ReadTx 返回一个读事务.它被主数据路径中的 ConcurrentReadTx 替换 + BatchTx() BatchTx // 开启写事务 + ConcurrentReadTx() ReadTx // 主流程中都是使用的这个并发读事务 + Snapshot() Snapshot // 对db做快照 + Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error) + Size() int64 // DB占用的物理磁盘大小,空间可以预分配,所以不是实际数据大小 + SizeInUse() int64 // 实际使用的磁盘空间 + OpenReadTxN() int64 // 返回当前读事务个数 + Defrag() error // 数据文件整理,会回收已删除key和已更新的key旧版本占用的磁盘 + ForceCommit() // 强制当前的批处理tx提交 + Close() error +} + +type Snapshot interface { + Size() int64 // 快照的大小 + WriteTo(w io.Writer) (n int64, err error) // 写快照数据 + Close() error // 关闭快照 +} + +type txReadBufferCache struct { + mu sync.Mutex + buf *txReadBuffer + bufVersion uint64 +} + +type ( + MyBackend = backend + backend struct { + size int64 // 已经占用的磁盘大小 + sizeInUse int64 // 实际使用的大小 + commits int64 // 已提交事务数 + openReadTxN int64 // 当前开启的读事务数 + mlock bool // mlock prevents backend database file to be swapped + boltdbMu sync.RWMutex // 这里的锁也是隔离下面的db对象;正常的创建bolt.DB事务只需要读锁;但是做 defrag 时候需要写锁隔离 + db *bolt.DB // 底层存储为boltDB + batchInterval time.Duration // 批量写提交间隔 默认100ms + batchLimit int // 批量写最大事务数 10000 + batchTx *batchTxBuffered // 负责写请求 + readTx *readTx // 负责读请求 + // txReadBufferCache mirrors "txReadBuffer" within "readTx" -- readTx.baseReadTx.buf. + // When creating "concurrentReadTx": + // - if the cache is up-to-date, "readTx.baseReadTx.buf" copy can be skipped + // - if the cache is empty or outdated, "readTx.baseReadTx.buf" copy is required + txReadBufferCache txReadBufferCache + stopc chan struct{} + donec chan struct{} + hooks Hooks + lg *zap.Logger + } +) + +type BackendConfig struct { + Path string // 是指向后端文件的文件路径. + BatchInterval time.Duration // 是冲刷BatchTx之前的最长时间 + BatchLimit int // 是冲刷BatchTx之前的最大puts数 + BackendFreelistType bolt.FreelistType // 是后端boltdb的freelist类型 + MmapSize uint64 // 是为后端提供的mmap的字节数. 
+ Logger *zap.Logger // + UnsafeNoFsync bool `json:"unsafe-no-fsync"` // 禁用所有fsync的使用. + Mlock bool // 防止后端数据库文件被调换 + Hooks Hooks // 在后端事务的生命周期中被执行 +} + +func DefaultBackendConfig() BackendConfig { + return BackendConfig{ + BatchInterval: defaultBatchInterval, + BatchLimit: defaultBatchLimit, + MmapSize: initialMmapSize, + } +} + +func New(bcfg BackendConfig) Backend { + return newBackend(bcfg) +} + +func NewDefaultBackend(path string) Backend { + bcfg := DefaultBackendConfig() + bcfg.Path = path + return newBackend(bcfg) +} + +func newBackend(bcfg BackendConfig) *backend { + if bcfg.Logger == nil { + bcfg.Logger = zap.NewNop() + } + + bopts := &bolt.Options{} + if boltOpenOptions != nil { + *bopts = *boltOpenOptions + } + bopts.InitialMmapSize = bcfg.mmapSize() + bopts.FreelistType = bcfg.BackendFreelistType + bopts.NoSync = bcfg.UnsafeNoFsync + bopts.NoGrowSync = bcfg.UnsafeNoFsync + bopts.Mlock = bcfg.Mlock + + db, err := bolt.Open(bcfg.Path, 0o600, bopts) + if err != nil { + bcfg.Logger.Panic("打开数据库失败", zap.String("path", bcfg.Path), zap.Error(err)) + } + + b := &backend{ + db: db, + + batchInterval: bcfg.BatchInterval, + batchLimit: bcfg.BatchLimit, + mlock: bcfg.Mlock, + + readTx: &readTx{ + baseReadTx: baseReadTx{ + buf: txReadBuffer{ + txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)}, + bufVersion: 0, + }, + buckets: make(map[BucketID]*bolt.Bucket), + txWg: new(sync.WaitGroup), + txMu: new(sync.RWMutex), + }, + }, + txReadBufferCache: txReadBufferCache{ + mu: sync.Mutex{}, + bufVersion: 0, + buf: nil, + }, + + stopc: make(chan struct{}), + donec: make(chan struct{}), + + lg: bcfg.Logger, + } + + b.batchTx = newBatchTxBuffered(b) + b.hooks = bcfg.Hooks + + go b.run() + return b +} + +// BatchTx 返回当前的批次tx.该tx可以用于读和写操作. +// 写入的结果可以立即在同一个tx中被检索到. +// 写入的结果与其他tx隔离直到当前的tx被提交. +func (b *backend) BatchTx() BatchTx { + return b.batchTx +} + +func (b *backend) ReadTx() ReadTx { return b.readTx } + +// ConcurrentReadTx 创建并返回一个新的 ReadTx它. 
+// A) 创建并保留backend.readTx.txReadBuffer的副本. +// B) 引用当前批次间隔的 boltdb read Tx(和它的桶缓存). +func (b *backend) ConcurrentReadTx() ReadTx { + // 这里需要读 readTx 的buffer 所以需要读锁 这里的锁占用时间是很低的 + b.readTx.RLock() + defer b.readTx.RUnlock() + b.readTx.txWg.Add(1) + + b.txReadBufferCache.mu.Lock() + + curCache := b.txReadBufferCache.buf // 当前的缓存 + curCacheVer := b.txReadBufferCache.bufVersion // 缓存里的版本 + curBufVer := b.readTx.buf.bufVersion // 当前的版本 + + isEmptyCache := curCache == nil + isStaleCache := curCacheVer != curBufVer // 是不是陈旧的缓存 + + var buf *txReadBuffer + switch { + case isEmptyCache: // 缓冲为空 + // 当持有b.txReadBufferCache.boltdbMu.Lock时执行安全的缓冲区拷贝这只应该运行一次,所以不会有太多的开销 + curBuf := b.readTx.buf.unsafeCopy() + buf = &curBuf + case isStaleCache: + // 最大化并发,尝试不安全的缓冲区拷贝在复制buffer时释放锁——cache可能会再次失效 + // 被其他人覆盖.因此,我们需要再次检查readTx缓冲区版本 + b.txReadBufferCache.mu.Unlock() + curBuf := b.readTx.buf.unsafeCopy() + b.txReadBufferCache.mu.Lock() + buf = &curBuf + default: + // 既不为空也不过时的缓存,只使用当前缓冲区 + buf = curCache + } + if isEmptyCache || curCacheVer == b.txReadBufferCache.bufVersion { + b.txReadBufferCache.buf = buf + b.txReadBufferCache.bufVersion = curBufVer + } + + b.txReadBufferCache.mu.Unlock() + // concurrentReadTx 不应该写入它的 txReadBuffer + return &concurrentReadTx{ + baseReadTx: baseReadTx{ + buf: *buf, // copy一份backend的readTx.buf, 这样就可以不用持有readTx.mu对buffer的保护从而提升读的性能 这里就是空间换时间(锁的竞争) + txMu: b.readTx.txMu, + tx: b.readTx.tx, + buckets: b.readTx.buckets, + txWg: b.readTx.txWg, + }, + } +} + +// ForceCommit 强制当前的批处理tx提交. +func (b *backend) ForceCommit() { + b.batchTx.Commit() +} + +// Snapshot 获取一个blot.db快照结构体 +func (b *backend) Snapshot() Snapshot { + b.batchTx.Commit() + + b.boltdbMu.RLock() + defer b.boltdbMu.RUnlock() + tx, err := b.db.Begin(false) // 读事务 + if err != nil { + b.lg.Fatal("开启读事务失败", zap.Error(err)) + } + + stopc, donec := make(chan struct{}), make(chan struct{}) + dbBytes := tx.Size() // 返回该事务所看到的当前数据库大小(以字节为单位). 
+ go func() { + defer close(donec) + // sendRateBytes基于1千兆/秒的连接传输快照数据,假设tcp最小吞吐量为100MB/s. + var sendRateBytes int64 = 100 * 1024 * 1024 + warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second))) + if warningTimeout < minSnapshotWarningTimeout { + warningTimeout = minSnapshotWarningTimeout + } + start := time.Now() + ticker := time.NewTicker(warningTimeout) + defer ticker.Stop() + for { + select { + case <-ticker.C: + b.lg.Warn("快照传输时间过长", zap.Duration("taking", time.Since(start)), + zap.Int64("bytes", dbBytes), + zap.String("size", humanize.Bytes(uint64(dbBytes))), + ) + case <-stopc: + return + } + } + }() + + return &snapshot{tx, stopc, donec} +} + +func (b *backend) Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error) { + h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + + b.boltdbMu.RLock() + defer b.boltdbMu.RUnlock() + err := b.db.View(func(tx *bolt.Tx) error { + c := tx.Cursor() + for next, _ := c.First(); next != nil; next, _ = c.Next() { + b := tx.Bucket(next) + if b == nil { + return fmt.Errorf("获取桶的hash失败 %s", string(next)) + } + h.Write(next) + b.ForEach(func(k, v []byte) error { + if ignores != nil && !ignores(next, k) { + fmt.Println(string(k), string(v)) + h.Write(k) + h.Write(v) + } + return nil + }) + } + return nil + }) + if err != nil { + return 0, err + } + + return h.Sum32(), nil +} + +func (b *backend) Size() int64 { + return atomic.LoadInt64(&b.size) +} + +func (b *backend) SizeInUse() int64 { + return atomic.LoadInt64(&b.sizeInUse) +} + +// 提交bolt事务 +func (b *backend) run() { + defer close(b.donec) + t := time.NewTimer(b.batchInterval) // 100ms 定时提交事务 + defer t.Stop() + for { + select { + case <-t.C: + case <-b.stopc: + b.batchTx.CommitAndStop() + return + } + if b.batchTx.safePending() != 0 { + b.batchTx.Commit() + } + t.Reset(b.batchInterval) // 使其重新触发 + } +} + +func (b *backend) Close() error { + close(b.stopc) + <-b.donec + return b.db.Close() +} + +// Commits returns 
total number of commits since start +func (b *backend) Commits() int64 { + return atomic.LoadInt64(&b.commits) +} + +// Defrag 碎片整理 +func (b *backend) Defrag() error { + return b.defrag() +} + +// 碎片整理 +func (b *backend) defrag() error { + now := time.Now() + + // 锁定batchTx以确保没有人在使用以前的tx然后关闭以前正在进行的tx. + b.batchTx.Lock() + defer b.batchTx.Unlock() + + // 锁定数据库后锁定tx以避免死锁. + b.boltdbMu.Lock() + defer b.boltdbMu.Unlock() + + // 阻止并发的读请求同时重置TX. + b.readTx.Lock() + defer b.readTx.Unlock() + + b.batchTx.unsafeCommit(true) + + b.batchTx.tx = nil + + // Create a temporary file to ensure we start with a clean slate. + // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup. + dir := filepath.Dir(b.db.Path()) + temp, err := ioutil.TempFile(dir, "db.tmp.*") + if err != nil { + return err + } + options := bolt.Options{} + if boltOpenOptions != nil { + options = *boltOpenOptions + } + options.OpenFile = func(_ string, _ int, _ os.FileMode) (file *os.File, err error) { + return temp, nil + } + // 不管打开选项是什么,都不要加载tmp db到内存中 + options.Mlock = false + tdbp := temp.Name() + tmpdb, err := bolt.Open(tdbp, 0o600, &options) + if err != nil { + return err + } + + dbp := b.db.Path() + size1, sizeInUse1 := b.Size(), b.SizeInUse() + if b.lg != nil { + b.lg.Info( + "內存碎片清理中", + zap.String("path", dbp), + zap.Int64("current-db-size-bytes", size1), + zap.String("current-db-size", humanize.Bytes(uint64(size1))), + zap.Int64("current-db-size-in-use-bytes", sizeInUse1), + zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))), + ) + } + err = defragdb(b.db, tmpdb, defragLimit) + if err != nil { + tmpdb.Close() + if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil { + b.lg.Error("在碎片整理完成后未能删除db.tmp", zap.Error(rmErr)) + } + return err + } + + err = b.db.Close() + if err != nil { + b.lg.Fatal("关闭数据库失败", zap.Error(err)) + } + err = tmpdb.Close() + if err != nil { + b.lg.Fatal("关闭tmp数据库失败", zap.Error(err)) + } + err = os.Rename(tdbp, dbp) + if err != nil { 
+ b.lg.Fatal("重命名tmp数据库失败", zap.Error(err)) + } + + defragmentedBoltOptions := bolt.Options{} + if boltOpenOptions != nil { + defragmentedBoltOptions = *boltOpenOptions + } + defragmentedBoltOptions.Mlock = b.mlock + + b.db, err = bolt.Open(dbp, 0o600, &defragmentedBoltOptions) + if err != nil { + b.lg.Fatal("打开数据库失败", zap.String("path", dbp), zap.Error(err)) + } + b.batchTx.tx = b.unsafeBegin(true) + + b.readTx.reset() + b.readTx.tx = b.unsafeBegin(false) + + size := b.readTx.tx.Size() + db := b.readTx.tx.DB() + atomic.StoreInt64(&b.size, size) + atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize))) + + took := time.Since(now) + + size2, sizeInUse2 := b.Size(), b.SizeInUse() + if b.lg != nil { + b.lg.Info( + "完成了目录碎片整理工作", + zap.String("path", dbp), + zap.Int64("current-db-size-bytes-diff", size2-size1), + zap.Int64("current-db-size-bytes", size2), + zap.String("current-db-size", humanize.Bytes(uint64(size2))), + zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1), + zap.Int64("current-db-size-in-use-bytes", sizeInUse2), + zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))), + zap.Duration("took", took), + ) + } + return nil +} + +func defragdb(odb, tmpdb *bolt.DB, limit int) error { + // open a tx on tmpdb for writes + tmptx, err := tmpdb.Begin(true) + if err != nil { + return err + } + defer func() { + if err != nil { + tmptx.Rollback() + } + }() + + // open a tx on old db for read + tx, err := odb.Begin(false) + if err != nil { + return err + } + defer tx.Rollback() + + c := tx.Cursor() + + count := 0 + for next, _ := c.First(); next != nil; next, _ = c.Next() { + b := tx.Bucket(next) + if b == nil { + return fmt.Errorf("backend: cannot defrag bucket %s", string(next)) + } + + tmpb, berr := tmptx.CreateBucketIfNotExists(next) + if berr != nil { + return berr + } + tmpb.FillPercent = 0.9 // for bucket2seq write in for each + + if err = b.ForEach(func(k, v []byte) error { + count++ + 
if count > limit { + err = tmptx.Commit() + if err != nil { + return err + } + tmptx, err = tmpdb.Begin(true) + if err != nil { + return err + } + tmpb = tmptx.Bucket(next) + tmpb.FillPercent = 0.9 // for bucket2seq write in for each + + count = 0 + } + return tmpb.Put(k, v) + }); err != nil { + return err + } + } + + return tmptx.Commit() +} + +func (b *backend) begin(write bool) *bolt.Tx { + b.boltdbMu.RLock() + tx := b.unsafeBegin(write) + b.boltdbMu.RUnlock() + + size := tx.Size() // 返回该事务所看到的当前数据库大小(字节). 24576 + db := tx.DB() + stats := db.Stats() + atomic.StoreInt64(&b.size, size) + atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize))) // 24576-2*4096 + atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN)) // 当前的的读事务数 + + return tx +} + +// 开启写事务? +func (b *backend) unsafeBegin(write bool) *bolt.Tx { + tx, err := b.db.Begin(write) + if err != nil { + b.lg.Fatal("开启事务失败", zap.Error(err)) + } + return tx +} + +func (b *backend) OpenReadTxN() int64 { + return atomic.LoadInt64(&b.openReadTxN) +} + +type snapshot struct { + *bolt.Tx + stopc chan struct{} + donec chan struct{} +} + +func (s *snapshot) Close() error { + close(s.stopc) + <-s.donec + return s.Tx.Rollback() +} diff --git a/etcd/mvcc/backend/bolt_batch_tx.go b/etcd/mvcc/backend/bolt_batch_tx.go new file mode 100644 index 00000000000..b3375929c90 --- /dev/null +++ b/etcd/mvcc/backend/bolt_batch_tx.go @@ -0,0 +1,226 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "math" + "sync" + "sync/atomic" + + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +type BucketID int + +type Bucket interface { + ID() BucketID // ID返回一个水桶的唯一标识符.该ID必须不被持久化并且可以在内存地图中作为轻量级的标识符使用. + Name() []byte + String() string + // IsSafeRangeBucket 是一种避免无意中读取重复key的方法;bucket上的覆盖应该只取limit=1,但已知safeerangebucket永远不会覆盖任何键,所以range是安全的. + IsSafeRangeBucket() bool // 不要在非键桶上使用unsafeRange +} + +// BatchTx 负责读请求 +type BatchTx interface { + ReadTx + UnsafeCreateBucket(bucket Bucket) + UnsafeDeleteBucket(bucket Bucket) + UnsafePut(bucket Bucket, key []byte, value []byte) + UnsafeSeqPut(bucket Bucket, key []byte, value []byte) + UnsafeDelete(bucket Bucket, key []byte) + Commit() // Commit commits a previous tx and begins a new writable one. + CommitAndStop() // CommitAndStop commits the previous tx and does not create a new one. +} + +type batchTx struct { + sync.Mutex + tx *bolt.Tx + backend *backend + pending int // 当前事务中的写入次数 +} + +func (t *batchTx) Lock() { + t.Mutex.Lock() +} + +func (t *batchTx) Unlock() { + if t.pending >= t.backend.batchLimit { + t.commit(false) + } + t.Mutex.Unlock() +} + +func (t *batchTx) RLock() { + panic("unexpected RLock") +} + +func (t *batchTx) RUnlock() { + panic("unexpected RUnlock") +} + +func (t *batchTx) UnsafeCreateBucket(bucket Bucket) { + _, err := t.tx.CreateBucket(bucket.Name()) + if err != nil && err != bolt.ErrBucketExists { + t.backend.lg.Fatal("创建bucket", zap.Stringer("bucket-name", bucket), zap.Error(err)) + } + t.pending++ +} + +func (t *batchTx) UnsafePut(bucket Bucket, key []byte, value []byte) { + t.unsafePut(bucket, key, value, false) +} + +// UnsafeSeqPut OK +func (t *batchTx) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) { + t.unsafePut(bucket, key, value, true) +} + +// OK +func (t *batchTx) unsafePut(bucketType Bucket, key []byte, value []byte, seq bool) { + 
bucket := t.tx.Bucket(bucketType.Name()) + if bucket == nil { + t.backend.lg.Fatal("找不到bolt.db里的桶", zap.Stringer("bucket-name", bucketType), zap.Stack("stack")) + } + if seq { + // 当工作负载大多为仅附加时,增加填充百分比是很有用的.这可以延迟页面分割和减少空间使用. + // 告诉bolt 当页面已满时,它应该告诉它做一个 90-10 拆分,而不是 50-50 拆分,这更适合于顺序插入.这样可以让其体积稍小. + // 一个例子:使用 FillPercent = 0.9 之前是 103MB,使用之后是64MB,实际数据是22MB. + bucket.FillPercent = 0.9 + } + if err := bucket.Put(key, value); err != nil { + t.backend.lg.Fatal( + "桶写数据失败", zap.Stringer("bucket-name", bucketType), zap.Error(err), + ) + } + t.pending++ +} + +// UnsafeRange 调用法必须持锁 +func (t *batchTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + bucket := t.tx.Bucket(bucketType.Name()) + if bucket == nil { + t.backend.lg.Fatal("无法找到bucket", zap.Stringer("bucket-name", bucketType), zap.Stack("stack")) + } + return unsafeRange(bucket.Cursor(), key, endKey, limit) +} + +// 从bolt.db 查找k,v +func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) { + if limit <= 0 { + limit = math.MaxInt64 + } + var isMatch func(b []byte) bool + if len(endKey) > 0 { + // 判断是不是相等 + isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 } + } else { + isMatch = func(b []byte) bool { return bytes.Equal(b, key) } + limit = 1 + } + + for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() { + vs = append(vs, cv) + keys = append(keys, ck) + if limit == int64(len(keys)) { + break + } + } + return keys, vs +} + +// UnsafeDelete 调用方必须持锁 +func (t *batchTx) UnsafeDelete(bucketType Bucket, key []byte) { + bucket := t.tx.Bucket(bucketType.Name()) + if bucket == nil { + t.backend.lg.Fatal( + "查找桶失败", + zap.Stringer("bucket-name", bucketType), + zap.Stack("stack"), + ) + } + err := bucket.Delete(key) + if err != nil { + t.backend.lg.Fatal( + "删除一个key失败", + zap.Stringer("bucket-name", bucketType), + zap.Error(err), + ) + } + t.pending++ +} + +// Commit commits a previous tx and begins a new writable 
one. +func (t *batchTx) Commit() { + t.Lock() + t.commit(false) + t.Unlock() +} + +// CommitAndStop commits the previous tx and does not create a new one. +func (t *batchTx) CommitAndStop() { + t.Lock() + t.commit(true) + t.Unlock() +} + +func (t *batchTx) safePending() int { + t.Mutex.Lock() + defer t.Mutex.Unlock() + return t.pending +} + +func (t *batchTx) commit(stop bool) { + // 提交最新的事务 + if t.tx != nil { + if t.pending == 0 && !stop { + return + } + err := t.tx.Commit() // bolt.Commit + atomic.AddInt64(&t.backend.commits, 1) + + t.pending = 0 + if err != nil { + t.backend.lg.Fatal("提交事务失败", zap.Error(err)) + } + } + if !stop { + t.tx = t.backend.begin(true) + } +} + +// -------------------------------------------- OVER ------------------------------------------------------------- + +// UnsafeForEach 调用方必须持锁 +func (t *batchTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error { + return unsafeForEach(t.tx, bucket, visitor) +} + +func unsafeForEach(tx *bolt.Tx, bucket Bucket, visitor func(k, v []byte) error) error { + if b := tx.Bucket(bucket.Name()); b != nil { + return b.ForEach(visitor) + } + return nil +} + +// UnsafeDeleteBucket 删除桶 +func (t *batchTx) UnsafeDeleteBucket(bucket Bucket) { + err := t.tx.DeleteBucket(bucket.Name()) + if err != nil && err != bolt.ErrBucketNotFound { + t.backend.lg.Fatal("删除桶失败", zap.Stringer("bucket-name", bucket), zap.Error(err)) + } + t.pending++ +} diff --git a/etcd/mvcc/backend/bolt_batch_tx_buffered.go b/etcd/mvcc/backend/bolt_batch_tx_buffered.go new file mode 100644 index 00000000000..496db8b53b3 --- /dev/null +++ b/etcd/mvcc/backend/bolt_batch_tx_buffered.go @@ -0,0 +1,104 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "sync" + + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +type batchTxBuffered struct { + batchTx + buf txWriteBuffer +} + +func newBatchTxBuffered(backend *backend) *batchTxBuffered { + tx := &batchTxBuffered{ + batchTx: batchTx{backend: backend}, + buf: txWriteBuffer{ + txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)}, + bucket2seq: make(map[BucketID]bool), + }, + } + tx.Commit() + return tx +} + +func (t *batchTxBuffered) Unlock() { + if t.pending != 0 { + t.backend.readTx.Lock() // blocks txReadBuffer for writing. + t.buf.writeback(&t.backend.readTx.buf) + t.backend.readTx.Unlock() + if t.pending >= t.backend.batchLimit { + t.commit(false) + } + } + t.batchTx.Unlock() +} + +func (t *batchTxBuffered) Commit() { + t.Lock() + t.commit(false) + t.Unlock() +} + +func (t *batchTxBuffered) CommitAndStop() { + t.Lock() + t.commit(true) + t.Unlock() +} + +func (t *batchTxBuffered) commit(stop bool) { + if t.backend.hooks != nil { + t.backend.hooks.OnPreCommitUnsafe(t) + } + + // 所有read tx必须是关闭的以获取boltdb提交的rwlock. 
+ t.backend.readTx.Lock() + t.unsafeCommit(stop) + t.backend.readTx.Unlock() +} + +func (t *batchTxBuffered) unsafeCommit(stop bool) { + if t.backend.readTx.tx != nil { + // 等待所有使用当前boltdb tx的存储读取事务完成,然后关闭boltdb tx + go func(tx *bolt.Tx, wg *sync.WaitGroup) { + wg.Wait() + if err := tx.Rollback(); err != nil { + t.backend.lg.Fatal("回滚tx失败", zap.Error(err)) + } + }(t.backend.readTx.tx, t.backend.readTx.txWg) + t.backend.readTx.reset() + } + + t.batchTx.commit(stop) + + if !stop { + t.backend.readTx.tx = t.backend.begin(false) + } +} + +func (t *batchTxBuffered) UnsafePut(bucket Bucket, key []byte, value []byte) { + t.batchTx.UnsafePut(bucket, key, value) + t.buf.put(bucket, key, value) +} + +// UnsafeSeqPut OK +func (t *batchTxBuffered) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) { // ✅ + t.batchTx.UnsafeSeqPut(bucket, key, value) + t.buf.putSeq(bucket, key, value) +} diff --git a/server/storage/backend/config_default.go b/etcd/mvcc/backend/config_default.go similarity index 96% rename from server/storage/backend/config_default.go rename to etcd/mvcc/backend/config_default.go index fd57c7ca84c..847bd10fd78 100644 --- a/server/storage/backend/config_default.go +++ b/etcd/mvcc/backend/config_default.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !linux && !windows +// +build !linux,!windows package backend diff --git a/server/storage/backend/config_linux.go b/etcd/mvcc/backend/config_linux.go similarity index 100% rename from server/storage/backend/config_linux.go rename to etcd/mvcc/backend/config_linux.go diff --git a/server/storage/backend/config_windows.go b/etcd/mvcc/backend/config_windows.go similarity index 98% rename from server/storage/backend/config_windows.go rename to etcd/mvcc/backend/config_windows.go index 7bb42f3a289..ba6e5a1284c 100644 --- a/server/storage/backend/config_windows.go +++ b/etcd/mvcc/backend/config_windows.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build windows +// +build windows package backend diff --git a/server/storage/backend/doc.go b/etcd/mvcc/backend/doc.go similarity index 100% rename from server/storage/backend/doc.go rename to etcd/mvcc/backend/doc.go diff --git a/etcd/mvcc/backend/hooks.go b/etcd/mvcc/backend/hooks.go new file mode 100644 index 00000000000..5d3695cc604 --- /dev/null +++ b/etcd/mvcc/backend/hooks.go @@ -0,0 +1,34 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +type HookFunc func(tx BatchTx) + +// Hooks 允许事务有效期内执行的额外逻辑. +type Hooks interface { + OnPreCommitUnsafe(tx BatchTx) // 事务提交前执行的钩子 +} + +type hooks struct { + onPreCommitUnsafe HookFunc +} + +func (h hooks) OnPreCommitUnsafe(tx BatchTx) { + h.onPreCommitUnsafe(tx) +} + +func NewHooks(onPreCommitUnsafe HookFunc) Hooks { + return hooks{onPreCommitUnsafe: onPreCommitUnsafe} +} diff --git a/etcd/mvcc/backend/read_tx.go b/etcd/mvcc/backend/read_tx.go new file mode 100644 index 00000000000..e39011de674 --- /dev/null +++ b/etcd/mvcc/backend/read_tx.go @@ -0,0 +1,155 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "math" + "sync" + + bolt "go.etcd.io/bbolt" +) + +// IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys; +// overwrites on a bucket should only fetch with limit=1, but IsSafeRangeBucket +// is known to never overwrite any key so range is safe. +// IsSafeRangeBucket是一个黑科技,用来避免无意中读取重复的键. +// 对一个桶的覆盖应该只在limit=1的情况下获取,但IsSafeRangeBucket是已知的,永远不会覆盖任何键,所以范围是安全的. + +// ReadTx 负责读请求 +type ReadTx interface { + Lock() + Unlock() + RLock() + RUnlock() + UnsafeRange(bucket Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) + UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error // 对指定的桶,所有k,v遍历 +} + +// baseReadTx的访问是并发的所以需要读写锁来保护. +type baseReadTx struct { + // 写事务执行End时候需要获取这个写锁然后把写事务的更新写到 baseReadTx 的buffer里面; + // 创建 concurrentReadTx 时候需要获取读锁因为需要拷贝buffer + mu sync.RWMutex // 保护 txReadBuffer 的访问 + buf txReadBuffer // 用于加速读效率的缓存 blot.db的记录 + txMu *sync.RWMutex // 保护下面的tx和buckets + tx *bolt.Tx // ? + buckets map[BucketID]*bolt.Bucket // 底层bolt.db 每个bucket 的引用 + txWg *sync.WaitGroup // txWg 保护 tx 在批处理间隔结束时不会被回滚直到使用此 tx 的所有读取完成. 
+} + +func (baseReadTx *baseReadTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error { + dups := make(map[string]struct{}) + getDups := func(k, v []byte) error { + dups[string(k)] = struct{}{} + return nil + } + visitNoDup := func(k, v []byte) error { + if _, ok := dups[string(k)]; ok { + return nil + } + return visitor(k, v) + } + if err := baseReadTx.buf.ForEach(bucket, getDups); err != nil { + return err + } + baseReadTx.txMu.Lock() + err := unsafeForEach(baseReadTx.tx, bucket, visitNoDup) + baseReadTx.txMu.Unlock() + if err != nil { + return err + } + return baseReadTx.buf.ForEach(bucket, visitor) +} + +// UnsafeRange 从blot.db 查找键值对 +func (baseReadTx *baseReadTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if endKey == nil || len(endKey) == 0 { + // forbid duplicates for single keys + limit = 1 + } + if limit <= 0 { + limit = math.MaxInt64 + } + if limit > 1 && !bucketType.IsSafeRangeBucket() { + panic("不要在非keys桶上使用unsafeRange") + } + // 首先从缓存中查询键值对 + keys, vals := baseReadTx.buf.Range(bucketType, key, endKey, limit) + // 检测缓存中返回的键值对是否达到Limit的要求如果达到Limit的指定上限直接返回缓存的查询结果 + if int64(len(keys)) == limit { + return keys, vals + } + + // 查找、创建桶 + bn := bucketType.ID() // key桶的ID是1 + baseReadTx.txMu.RLock() + bucket, ok := baseReadTx.buckets[bn] + baseReadTx.txMu.RUnlock() + lockHeld := false + if !ok { + baseReadTx.txMu.Lock() + lockHeld = true + bucket = baseReadTx.tx.Bucket(bucketType.Name()) // 创建一个桶 + baseReadTx.buckets[bn] = bucket + } + + // 忽略丢失的桶,因为可能已在此批处理中创建 + if bucket == nil { // 在等锁的时候,另外一个调用创建了该桶,低概率事件 + if lockHeld { + baseReadTx.txMu.Unlock() + } + return keys, vals + } + if !lockHeld { + baseReadTx.txMu.Lock() + } + c := bucket.Cursor() + baseReadTx.txMu.Unlock() + // 将查询缓存的结采与查询 BlotDB 的结果合并 然后返回 + k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys))) // 刨除在缓存中找到的,剩余的从bolt.db中查找 + return append(k2, keys...), append(v2, vals...) 
+} + +// 负责读请求 +type readTx struct { + baseReadTx +} + +func (rt *readTx) Lock() { rt.mu.Lock() } +func (rt *readTx) Unlock() { rt.mu.Unlock() } +func (rt *readTx) RLock() { rt.mu.RLock() } +func (rt *readTx) RUnlock() { rt.mu.RUnlock() } + +func (rt *readTx) reset() { + rt.buf.reset() + rt.buckets = make(map[BucketID]*bolt.Bucket) + rt.tx = nil + rt.txWg = new(sync.WaitGroup) +} + +type concurrentReadTx struct { + baseReadTx +} + +func (rt *concurrentReadTx) Lock() {} +func (rt *concurrentReadTx) Unlock() {} + +// RLock is no-op. concurrentReadTx does not need to be locked after it is created. +func (rt *concurrentReadTx) RLock() {} + +// RUnlock signals the end of concurrentReadTx. +func (rt *concurrentReadTx) RUnlock() { + rt.txWg.Done() +} diff --git a/server/storage/backend/testing/betesting.go b/etcd/mvcc/backend/testing/betesting.go similarity index 84% rename from server/storage/backend/testing/betesting.go rename to etcd/mvcc/backend/testing/betesting.go index e42908f9365..cde78b290bd 100644 --- a/server/storage/backend/testing/betesting.go +++ b/etcd/mvcc/backend/testing/betesting.go @@ -15,19 +15,18 @@ package betesting import ( - "os" + "io/ioutil" "path/filepath" "testing" "time" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" "github.com/stretchr/testify/assert" "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/storage/backend" ) func NewTmpBackendFromCfg(t testing.TB, bcfg backend.BackendConfig) (backend.Backend, string) { - dir, err := os.MkdirTemp(t.TempDir(), "etcd_backend_test") + dir, err := ioutil.TempDir(t.TempDir(), "etcd_backend_test") if err != nil { panic(err) } @@ -39,13 +38,13 @@ func NewTmpBackendFromCfg(t testing.TB, bcfg backend.BackendConfig) (backend.Bac // NewTmpBackend creates a backend implementation for testing. 
func NewTmpBackend(t testing.TB, batchInterval time.Duration, batchLimit int) (backend.Backend, string) { - bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t)) + bcfg := backend.DefaultBackendConfig() bcfg.BatchInterval, bcfg.BatchLimit = batchInterval, batchLimit return NewTmpBackendFromCfg(t, bcfg) } func NewDefaultTmpBackend(t testing.TB) (backend.Backend, string) { - return NewTmpBackendFromCfg(t, backend.DefaultBackendConfig(zaptest.NewLogger(t))) + return NewTmpBackendFromCfg(t, backend.DefaultBackendConfig()) } func Close(t testing.TB, b backend.Backend) { diff --git a/server/storage/backend/tx_buffer.go b/etcd/mvcc/backend/tx_buffer.go similarity index 81% rename from server/storage/backend/tx_buffer.go rename to etcd/mvcc/backend/tx_buffer.go index 779255b7320..47d587d6699 100644 --- a/server/storage/backend/tx_buffer.go +++ b/etcd/mvcc/backend/tx_buffer.go @@ -16,12 +16,13 @@ package backend import ( "bytes" + "fmt" "sort" ) const bucketBufferInitialSize = 512 -// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer. +// txBuffer 处理txWriteBuffer和txReadBuffer之间共享的功能. type txBuffer struct { buckets map[BucketID]*bucketBuffer } @@ -49,20 +50,6 @@ func (txw *txWriteBuffer) put(bucket Bucket, k, v []byte) { txw.putInternal(bucket, k, v) } -func (txw *txWriteBuffer) putSeq(bucket Bucket, k, v []byte) { - // TODO: Add (in tests?) verification whether k>b[len(b)] - txw.putInternal(bucket, k, v) -} - -func (txw *txWriteBuffer) putInternal(bucket Bucket, k, v []byte) { - b, ok := txw.buckets[bucket.ID()] - if !ok { - b = newBucketBuffer() - txw.buckets[bucket.ID()] = b - } - b.add(k, v) -} - func (txw *txWriteBuffer) reset() { txw.txBuffer.reset() for k := range txw.bucket2seq { @@ -94,18 +81,9 @@ func (txw *txWriteBuffer) writeback(txr *txReadBuffer) { txr.bufVersion++ } -// txReadBuffer accesses buffered updates. 
type txReadBuffer struct { txBuffer - // bufVersion is used to check if the buffer is modified recently - bufVersion uint64 -} - -func (txr *txReadBuffer) Range(bucket Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - if b := txr.buckets[bucket.ID()]; b != nil { - return b.Range(key, endKey, limit) - } - return nil, nil + bufVersion uint64 // 用于检查缓存最新是否更新了 } func (txr *txReadBuffer) ForEach(bucket Bucket, visitor func(k, v []byte) error) error { @@ -115,65 +93,20 @@ func (txr *txReadBuffer) ForEach(bucket Bucket, visitor func(k, v []byte) error) return nil } -// unsafeCopy returns a copy of txReadBuffer, caller should acquire backend.readTx.RLock() -func (txr *txReadBuffer) unsafeCopy() txReadBuffer { - txrCopy := txReadBuffer{ - txBuffer: txBuffer{ - buckets: make(map[BucketID]*bucketBuffer, len(txr.txBuffer.buckets)), - }, - bufVersion: 0, - } - for bucketName, bucket := range txr.txBuffer.buckets { - txrCopy.txBuffer.buckets[bucketName] = bucket.Copy() - } - return txrCopy -} - type kv struct { key []byte - val []byte + val string } -// bucketBuffer buffers key-value pairs that are pending commit. +// bucketBuffer 缓存了等待提交的k-v键值对 type bucketBuffer struct { - buf []kv - // used tracks number of elements in use so buf can be reused without reallocation. 
- used int -} - -func newBucketBuffer() *bucketBuffer { - return &bucketBuffer{buf: make([]kv, bucketBufferInitialSize), used: 0} -} - -func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { - f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 } - idx := sort.Search(bb.used, f) - if idx < 0 || idx >= bb.used { - return nil, nil - } - if len(endKey) == 0 { - if bytes.Equal(key, bb.buf[idx].key) { - keys = append(keys, bb.buf[idx].key) - vals = append(vals, bb.buf[idx].val) - } - return keys, vals - } - if bytes.Compare(endKey, bb.buf[idx].key) <= 0 { - return nil, nil - } - for i := idx; i < bb.used && int64(len(keys)) < limit; i++ { - if bytes.Compare(endKey, bb.buf[i].key) <= 0 { - break - } - keys = append(keys, bb.buf[i].key) - vals = append(vals, bb.buf[i].val) - } - return keys, vals + buf []kv + used int // 跟踪使用中的元素数量,这样buf可以重用而不需要重新分配. } func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error { for i := 0; i < bb.used; i++ { - if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil { + if err := visitor(bb.buf[i].key, []byte(bb.buf[i].val)); err != nil { return err } } @@ -181,7 +114,7 @@ func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error { } func (bb *bucketBuffer) add(k, v []byte) { - bb.buf[bb.used].key, bb.buf[bb.used].val = k, v + bb.buf[bb.used].key, bb.buf[bb.used].val = k, string(v) bb.used++ if bb.used == len(bb.buf) { buf := make([]kv, (3*len(bb.buf))/2) @@ -193,7 +126,7 @@ func (bb *bucketBuffer) add(k, v []byte) { // merge merges data from bbsrc into bb. 
func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) { for i := 0; i < bbsrc.used; i++ { - bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val) + bb.add(bbsrc.buf[i].key, []byte(bbsrc.buf[i].val)) } if bb.used == bbsrc.used { return @@ -229,3 +162,76 @@ func (bb *bucketBuffer) Copy() *bucketBuffer { copy(bbCopy.buf, bb.buf) return &bbCopy } + +// unsafeCopy 读缓冲区拷贝 +func (txr *txReadBuffer) unsafeCopy() txReadBuffer { + txrCopy := txReadBuffer{ + txBuffer: txBuffer{ + buckets: make(map[BucketID]*bucketBuffer, len(txr.txBuffer.buckets)), + }, + bufVersion: 0, + } + for bucketName, bucket := range txr.txBuffer.buckets { + txrCopy.txBuffer.buckets[bucketName] = bucket.Copy() + } + return txrCopy +} + +func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { + f := func(i int) bool { + return bytes.Compare(bb.buf[i].key, key) >= 0 + } + idx := sort.Search(bb.used, f) // 找到第一个返回TRUE的索引 + if idx < 0 { // 没招到 + return nil, nil + } + if len(endKey) == 0 { + if bytes.Equal(key, bb.buf[idx].key) { + keys = append(keys, bb.buf[idx].key) + vals = append(vals, []byte(bb.buf[idx].val)) + } + fmt.Println(fmt.Sprintf("---->get %s:%s", string(bb.buf[idx].key), bb.buf[idx].val)) + return keys, vals + } + // 缓存中没有对应的key + // bb.buf[idx].key > endKey + if bytes.Compare(endKey, bb.buf[idx].key) <= 0 { + return nil, nil + } + for i := idx; i < bb.used && int64(len(keys)) < limit; i++ { + // bb.buf[idx].key > endKey + if bytes.Compare(endKey, bb.buf[i].key) <= 0 { + break + } + keys = append(keys, bb.buf[i].key) + vals = append(vals, []byte(bb.buf[i].val)) + } + return keys, vals +} + +// Range OK +func (txr *txReadBuffer) Range(bucket Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if b := txr.buckets[bucket.ID()]; b != nil { + return b.Range(key, endKey, limit) + } + return nil, nil +} + +// 将k,v写入到db之后,会写入到缓存中 +func (txw *txWriteBuffer) putSeq(bucket Bucket, k, v []byte) { + txw.putInternal(bucket, k, v) +} + +// 创建缓存结构 +func (txw 
*txWriteBuffer) putInternal(bucket Bucket, k, v []byte) { + b, ok := txw.buckets[bucket.ID()] + if !ok { + b = newBucketBuffer() + txw.buckets[bucket.ID()] = b + } + b.add(k, v) +} + +func newBucketBuffer() *bucketBuffer { // 512 + return &bucketBuffer{buf: make([]kv, bucketBufferInitialSize), used: 0} +} diff --git a/etcd/mvcc/buckets/over_bucket.go b/etcd/mvcc/buckets/over_bucket.go new file mode 100644 index 00000000000..dfc649912c1 --- /dev/null +++ b/etcd/mvcc/buckets/over_bucket.go @@ -0,0 +1,62 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package buckets + +import ( + "bytes" + + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" +) + +var ( + Key = backend.Bucket(bucket{id: 1, name: []byte("key"), safeRangeBucket: true}) + Meta = backend.Bucket(bucket{id: 2, name: []byte("meta"), safeRangeBucket: false}) + Lease = backend.Bucket(bucket{id: 3, name: []byte("lease"), safeRangeBucket: false}) + Alarm = backend.Bucket(bucket{id: 4, name: []byte("alarm"), safeRangeBucket: false}) + Cluster = backend.Bucket(bucket{id: 5, name: []byte("cluster"), safeRangeBucket: false}) + + Members = backend.Bucket(bucket{id: 10, name: []byte("members"), safeRangeBucket: false}) + MembersRemoved = backend.Bucket(bucket{id: 11, name: []byte("members_removed"), safeRangeBucket: false}) + + Auth = backend.Bucket(bucket{id: 20, name: []byte("auth"), safeRangeBucket: false}) + AuthUsers = backend.Bucket(bucket{id: 21, name: []byte("authUsers"), safeRangeBucket: false}) + AuthRoles = backend.Bucket(bucket{id: 22, name: []byte("authRoles"), safeRangeBucket: false}) + + Test = backend.Bucket(bucket{id: 100, name: []byte("test"), safeRangeBucket: false}) +) + +type bucket struct { + id backend.BucketID + name []byte + safeRangeBucket bool +} + +func (b bucket) ID() backend.BucketID { return b.id } +func (b bucket) Name() []byte { return b.name } +func (b bucket) String() string { return string(b.Name()) } +func (b bucket) IsSafeRangeBucket() bool { return b.safeRangeBucket } + +var ( + MetaConsistentIndexKeyName = []byte("consistent_index") + MetaTermKeyName = []byte("term") +) + +// DefaultIgnores 定义在哈希检查中要忽略的桶和键. +func DefaultIgnores(bucket, key []byte) bool { + // consistent index & term might be changed due to v2 internal sync, which + // is not controllable by the user. 
+ return bytes.Compare(bucket, Meta.Name()) == 0 && + (bytes.Compare(key, MetaTermKeyName) == 0 || bytes.Compare(key, MetaConsistentIndexKeyName) == 0) +} diff --git a/etcd/mvcc/index.go b/etcd/mvcc/index.go new file mode 100644 index 00000000000..b018016bcf2 --- /dev/null +++ b/etcd/mvcc/index.go @@ -0,0 +1,291 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "encoding/json" + "fmt" + "sort" + "sync" + + "github.com/google/btree" + "go.uber.org/zap" +) + +type index interface { + Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) + Range(key, end []byte, atRev int64) ([][]byte, []revision) + Revisions(key, end []byte, atRev int64, limit int) ([]revision, int) + CountRevisions(key, end []byte, atRev int64) int + Put(key []byte, rev revision) + Tombstone(key []byte, rev revision) error + RangeSince(key, end []byte, rev int64) []revision + Compact(rev int64) map[revision]struct{} + Keep(rev int64) map[revision]struct{} + Equal(b index) bool + Insert(ki *keyIndex) + KeyIndex(ki *keyIndex) *keyIndex +} + +type treeIndex struct { + sync.RWMutex + tree *btree.BTree + lg *zap.Logger +} + +func newTreeIndex(lg *zap.Logger) index { + return &treeIndex{ + tree: btree.New(32), + lg: lg, + } +} + +func (ti *treeIndex) Put(key []byte, rev revision) { + keyi := &keyIndex{Key: string(key)} + + ti.Lock() + defer ti.Unlock() + item := ti.tree.Get(keyi) + if item == nil { + 
keyi.put(ti.lg, rev.Main, rev.Sub) + ti.tree.ReplaceOrInsert(keyi) + return + } + okeyi := item.(*keyIndex) + okeyi.put(ti.lg, rev.Main, rev.Sub) + marshal, _ := json.Marshal(okeyi) + fmt.Println(string(marshal)) +} + +// 遍历 +func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex) bool) { + keyi, endi := &keyIndex{Key: string(key)}, &keyIndex{Key: string(end)} + + ti.RLock() + defer ti.RUnlock() + // 对树中[pivot, last]范围内的每个值调用迭代器,直到迭代器返回false. + // 假如获取前缀为b 那么结束就是c , 因为是自增的 + + ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { + if len(endi.Key) > 0 && !item.Less(endi) { + return false + } + fmt.Println("keyIndex ---->:", item.(*keyIndex).Key) + if !f(item.(*keyIndex)) { + return false + } + return true + }) +} + +func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64) int { + if end == nil || len(end) == 0 { + _, _, _, err := ti.Get(key, atRev) + if err != nil { + return 0 + } + return 1 + } + total := 0 + ti.visit(key, end, func(ki *keyIndex) bool { + if _, _, _, err := ki.get(ti.lg, atRev); err == nil { + total++ + } + return true + }) + return total +} + +func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { + if end == nil || len(end) == 0 { + rev, _, _, err := ti.Get(key, atRev) + if err != nil { + return nil, nil + } + return [][]byte{key}, []revision{rev} + } + ti.visit(key, end, func(ki *keyIndex) bool { + if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { + revs = append(revs, rev) + keys = append(keys, []byte(ki.Key)) + } + return true + }) + return keys, revs +} + +func (ti *treeIndex) Tombstone(key []byte, rev revision) error { + keyi := &keyIndex{Key: string(key)} + + ti.Lock() + defer ti.Unlock() + item := ti.tree.Get(keyi) + if item == nil { + return ErrRevisionNotFound + } + + ki := item.(*keyIndex) + return ki.tombstone(ti.lg, rev.Main, rev.Sub) +} + +// RangeSince returns all revisions from Key(including) to end(excluding) +// at or after the given rev. 
The returned slice is sorted in the order +// of revision. +func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { + keyi := &keyIndex{Key: string(key)} + + ti.RLock() + defer ti.RUnlock() + + if end == nil || len(end) == 0 { + item := ti.tree.Get(keyi) + if item == nil { + return nil + } + keyi = item.(*keyIndex) + return keyi.since(ti.lg, rev) + } + + endi := &keyIndex{Key: string(end)} + var revs []revision + ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { + if len(endi.Key) > 0 && !item.Less(endi) { + return false + } + curKeyi := item.(*keyIndex) + revs = append(revs, curKeyi.since(ti.lg, rev)...) + return true + }) + sort.Sort(revisions(revs)) + + return revs +} + +func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { + available := make(map[revision]struct{}) + ti.lg.Info("compact tree index", zap.Int64("revision", rev)) + ti.Lock() + // 为了避免压缩工作影响读写性能, + clone := ti.tree.Clone() + ti.Unlock() + + clone.Ascend(func(item btree.Item) bool { + keyi := item.(*keyIndex) + // Lock is needed here to prevent modification to the keyIndex while + // compaction is going on or revision added to empty before deletion + ti.Lock() + keyi.compact(ti.lg, rev, available) + if keyi.isEmpty() { + item := ti.tree.Delete(keyi) + if item == nil { + ti.lg.Panic("failed to delete during compaction") + } + } + ti.Unlock() + return true + }) + return available +} + +// Keep 查找在给定版本之后的所有修订. 
+func (ti *treeIndex) Keep(rev int64) map[revision]struct{} { + available := make(map[revision]struct{}) + ti.RLock() + defer ti.RUnlock() + ti.tree.Ascend(func(i btree.Item) bool { + keyi := i.(*keyIndex) + keyi.keep(rev, available) + return true + }) + return available +} + +func (ti *treeIndex) Equal(bi index) bool { + b := bi.(*treeIndex) + + if ti.tree.Len() != b.tree.Len() { + return false + } + + equal := true + + ti.tree.Ascend(func(item btree.Item) bool { + aki := item.(*keyIndex) + bki := b.tree.Get(item).(*keyIndex) + if !aki.equal(bki) { + equal = false + return false + } + return true + }) + + return equal +} + +// ---------------------------------------- OVER -------------------------------------------------------------- + +// Get 获取某个key的某个版本的索引号 , +func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { + keyi := &keyIndex{Key: string(key)} + ti.RLock() + defer ti.RUnlock() + // 判断key 在不在btree里 + if keyi = ti.keyIndex(keyi); keyi == nil { + return revision{}, revision{}, 0, ErrRevisionNotFound + } + return keyi.get(ti.lg, atRev) // 获取修订版本 +} + +// Revisions 获取所有修正版本 +func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []revision, total int) { + if end == nil || len(end) == 0 { + rev, _, _, err := ti.Get(key, atRev) + if err != nil { + return nil, 0 + } + return []revision{rev}, 1 + } + // 指定了end + ti.visit(key, end, func(ki *keyIndex) bool { + if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { + if limit <= 0 || len(revs) < limit { + revs = append(revs, rev) + } + total++ + } + return true + }) + return revs, total +} + +func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { + ti.RLock() + defer ti.RUnlock() + return ti.keyIndex(keyi) +} + +// 判断有没这个key, +func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { + if item := ti.tree.Get(keyi); item != nil { + return item.(*keyIndex) + } + return nil +} + +func (ti *treeIndex) Insert(ki *keyIndex) { + ti.Lock() + defer 
ti.Unlock() + ti.tree.ReplaceOrInsert(ki) +} diff --git a/etcd/mvcc/key_index.go b/etcd/mvcc/key_index.go new file mode 100644 index 00000000000..bb4eec7b6af --- /dev/null +++ b/etcd/mvcc/key_index.go @@ -0,0 +1,337 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "errors" + "fmt" + "strings" + + "github.com/google/btree" + "go.uber.org/zap" +) + +var ErrRevisionNotFound = errors.New("mvcc: 修订版本没有找到") + +// keyIndex +// key的删除操作将在末尾追加删除版本 +// 当前代,并创建一个新的空代. +type keyIndex struct { + Key string // Key + Modified revision // 一个key 最新修改的revision . + Generations []generation // 每次新建都会创建一个,删除然后新建也会生成一个 +} + +// generation 包含一个key的多个版本. +type generation struct { + VersionCount int64 // 记录对当前key 有几个版本 + Created revision // 第一次创建时的索引信息 + Revs []revision // 当值存在以后,对该值的修改记录 +} + +type revision struct { + Main int64 // 一个全局递增的主版本号,随put/txn/delete事务递增,一个事务内的key main版本号是一致的 + Sub int64 // 一个事务内的子版本号,从0开始随事务内put/delete操作递增 +} + +func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) { + if len(ki.Generations) != 0 { + lg.Panic( + "'restore' got an unexpected non-empty Generations", + zap.Int("Generations-size", len(ki.Generations)), + ) + } + ki.Modified = modified + g := generation{Created: created, VersionCount: ver, Revs: []revision{modified}} + ki.Generations = append(ki.Generations, g) +} + +// tombstone puts a revision, pointing to a tombstone, to the keyIndex. 
+// It also creates a new empty generation in the keyIndex. +// It returns ErrRevisionNotFound when tombstone on an empty generation. +func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error { + // 当然如果 keyIndex 中的最大版本号被打了删除标记 (tombstone), 就会从 treeIndex 中删除这个 keyIndex,否则会出现内存泄露. + if ki.isEmpty() { + lg.Panic( + "'tombstone' got an unexpected empty keyIndex", + zap.String("Key", string(ki.Key)), + ) + } + if ki.Generations[len(ki.Generations)-1].isEmpty() { + return ErrRevisionNotFound + } + ki.put(lg, main, sub) + ki.Generations = append(ki.Generations, generation{}) + return nil +} + +// since returns revisions since the given rev. Only the revision with the +// largest Sub revision will be returned if multiple revisions have the same +// Main revision. +func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { + if ki.isEmpty() { + lg.Panic("'since' 得到一个意外的空keyIndex", zap.String("Key", ki.Key)) + } + since := revision{rev, 0} + var gi int + // find the Generations to start checking + for gi = len(ki.Generations) - 1; gi > 0; gi-- { + g := ki.Generations[gi] + if g.isEmpty() { + continue + } + if since.GreaterThan(g.Created) { + break + } + } + + var revs []revision + var last int64 + for ; gi < len(ki.Generations); gi++ { + for _, r := range ki.Generations[gi].Revs { + if since.GreaterThan(r) { + continue + } + if r.Main == last { + // replace the revision with a new one that has higher Sub value, + // because the original one should not be seen by external + revs[len(revs)-1] = r + continue + } + revs = append(revs, r) + last = r.Main + } + } + return revs +} + +// compact compacts a keyIndex by removing the versions with smaller or equal +// revision than the given atRev except the largest one (If the largest one is +// a tombstone, it will not be kept). +// If a generation becomes empty during compaction, it will be removed. 
+func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) { + if ki.isEmpty() { + lg.Panic( + "'compact' got an unexpected empty keyIndex", + zap.String("Key", string(ki.Key)), + ) + } + + genIdx, revIndex := ki.doCompact(atRev, available) + + g := &ki.Generations[genIdx] + if !g.isEmpty() { + // remove the previous contents. + if revIndex != -1 { + g.Revs = g.Revs[revIndex:] + } + // remove any tombstone + if len(g.Revs) == 1 && genIdx != len(ki.Generations)-1 { + delete(available, g.Revs[0]) + genIdx++ + } + } + + // remove the previous Generations. + ki.Generations = ki.Generations[genIdx:] +} + +// keep finds the revision to be kept if compact is called at given atRev. +func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) { + if ki.isEmpty() { + return + } + + genIdx, revIndex := ki.doCompact(atRev, available) + g := &ki.Generations[genIdx] + if !g.isEmpty() { + // remove any tombstone + if revIndex == len(g.Revs)-1 && genIdx != len(ki.Generations)-1 { + delete(available, g.Revs[revIndex]) + } + } +} + +func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) { + // walk until reaching the first revision smaller or equal to "atRev", + // and add the revision to the available map + f := func(rev revision) bool { + if rev.Main <= atRev { + available[rev] = struct{}{} + return false + } + return true + } + + genIdx, g := 0, &ki.Generations[0] + // find first generation includes atRev or Created after atRev + for genIdx < len(ki.Generations)-1 { + if tomb := g.Revs[len(g.Revs)-1].Main; tomb > atRev { + break + } + genIdx++ + g = &ki.Generations[genIdx] + } + + revIndex = g.walk(f) + + return genIdx, revIndex +} + +// --------------------------------------------- OVER --------------------------------------------------------------- + +// get 获取满足给定atRev的键的修改、创建的revision和版本.Rev必须大于或等于给定的atRev. 
+func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) { + if ki.isEmpty() { // 判断有没有修订版本 + lg.Panic("'get'得到一个意外的空keyIndex", zap.String("Key", ki.Key)) + } + g := ki.findGeneration(atRev) + if g.isEmpty() { + return revision{}, revision{}, 0, ErrRevisionNotFound + } + + n := g.walk(func(rev revision) bool { return rev.Main > atRev }) // 返回第一个小于等于 该修订版本的最新的索引 + if n != -1 { + return g.Revs[n], g.Created, g.VersionCount - int64(len(g.Revs)-n-1), nil + } + + return revision{}, revision{}, 0, ErrRevisionNotFound +} + +func (ki *keyIndex) Less(b btree.Item) bool { + return strings.Compare(ki.Key, b.(*keyIndex).Key) == -1 +} + +func (ki *keyIndex) equal(b *keyIndex) bool { + if !strings.EqualFold(ki.Key, b.Key) { + return false + } + if ki.Modified != b.Modified { + return false + } + if len(ki.Generations) != len(b.Generations) { + return false + } + for i := range ki.Generations { + ag, bg := ki.Generations[i], b.Generations[i] + if !ag.equal(bg) { + return false + } + } + return true +} + +func (ki *keyIndex) String() string { + var s string + for _, g := range ki.Generations { + s += g.String() + } + return s +} + +func (g *generation) isEmpty() bool { return g == nil || len(g.Revs) == 0 } + +// 遍历返回符合条件的索引,倒序遍历 +func (g *generation) walk(f func(rev revision) bool) int { + l := len(g.Revs) + for i := range g.Revs { + ok := f(g.Revs[l-i-1]) + if !ok { + return l - i - 1 + } + } + return -1 +} + +func (g generation) equal(b generation) bool { + if g.VersionCount != b.VersionCount { + return false + } + if len(g.Revs) != len(b.Revs) { + return false + } + + for i := range g.Revs { + ar, br := g.Revs[i], b.Revs[i] + if ar != br { + return false + } + } + return true +} + +func (g *generation) String() string { + return fmt.Sprintf("g: 创建[%d] 版本数[%d], 修订记录 %#v\n", g.Created, g.VersionCount, g.Revs) +} + +// OK +func (ki *keyIndex) isEmpty() bool { + // 只有一个历史版本,且 + return len(ki.Generations) == 1 && 
ki.Generations[0].isEmpty() +} + +// findGeneration 找到给定rev所属的keyIndex的生成.如果给定的rev在两代之间,这意味着在给定的rev上键不存在,它将返回nil. +// 如果修订版本是接下来要写的,就返回当前代 +func (ki *keyIndex) findGeneration(rev int64) *generation { + lastg := len(ki.Generations) - 1 + cg := lastg + // 倒着查找 + for cg >= 0 { + if len(ki.Generations[cg].Revs) == 0 { + cg-- + continue + } + g := ki.Generations[cg] + if cg != lastg { + // 每次生成的key的最新修订版本 + // 不是最新的一组,但最大的都比 rev + if tomb := g.Revs[len(g.Revs)-1].Main; tomb <= rev { + // tomb应该是删除的代数 + return nil + } + } + // 0 rev last + if g.Revs[0].Main <= rev { + // 找到对应修订版本 所属的gen版本 + return &ki.Generations[cg] + } + cg-- + } + return nil +} + +// put 将一个修订放到keyIndex中. +func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) { + rev := revision{Main: main, Sub: sub} + if !rev.GreaterThan(ki.Modified) { + lg.Panic( + "'put'有一个意想不到的小修改", + zap.Int64("given-revision-Main", rev.Main), + zap.Int64("given-revision-Sub", rev.Sub), + zap.Int64("Modified-revision-Main", ki.Modified.Main), + zap.Int64("Modified-revision-Sub", ki.Modified.Sub), + ) + } + if len(ki.Generations) == 0 { + ki.Generations = append(ki.Generations, generation{}) + } + g := &ki.Generations[len(ki.Generations)-1] + if len(g.Revs) == 0 { // create a new Key + g.Created = rev + } + g.Revs = append(g.Revs, rev) + g.VersionCount++ + ki.Modified = rev +} diff --git a/etcd/mvcc/kv2.go b/etcd/mvcc/kv2.go new file mode 100644 index 00000000000..c61a9e90d4b --- /dev/null +++ b/etcd/mvcc/kv2.go @@ -0,0 +1 @@ +package mvcc diff --git a/etcd/mvcc/kvstore.go b/etcd/mvcc/kvstore.go new file mode 100644 index 00000000000..e8a91b03955 --- /dev/null +++ b/etcd/mvcc/kvstore.go @@ -0,0 +1,537 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "context" + "errors" + "fmt" + "hash/crc32" + "math" + "sync" + + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/pkg/schedule" + "github.com/ls-2018/etcd_cn/pkg/traceutil" + + "go.uber.org/zap" +) + +var ( + scheduledCompactKeyName = []byte("scheduledCompactRev") + finishedCompactKeyName = []byte("finishedCompactRev") + + ErrCompacted = errors.New("mvcc: 指定的修订版本已被压缩") + ErrFutureRev = errors.New("mvcc: 指定的修订版本还没有") +) + +const ( + // markedRevBytesLen is the byte length of marked revision. + // The first `revBytesLen` bytes represents a normal revision. The last + // one byte is the mark. + markedRevBytesLen = revBytesLen + 1 + markBytePosition = markedRevBytesLen - 1 + markTombstone byte = 't' +) + +var ( + restoreChunkKeys = 10000 // non-const for testing + defaultCompactBatchLimit = 1000 +) + +type StoreConfig struct { + CompactionBatchLimit int +} + +type store struct { + ReadView + WriteView + cfg StoreConfig + // mu read locks for txns and write locks for non-txn store changes. + mu sync.RWMutex + b backend.Backend + kvindex index + le lease.Lessor // 租约管理器 + revMu sync.RWMutex // 保护currentRev和compactMainRev + currentRev int64 // 是最后一个已完成事务的修订 + compactMainRev int64 + fifoSched schedule.Scheduler + stopc chan struct{} + lg *zap.Logger +} + +// NewStore returns a new store. It is useful to create a store inside +// mvcc pkg. 
It should only be used for testing externally. +func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store { + if lg == nil { + lg = zap.NewNop() + } + if cfg.CompactionBatchLimit == 0 { + cfg.CompactionBatchLimit = defaultCompactBatchLimit + } + s := &store{ + cfg: cfg, + b: b, + kvindex: newTreeIndex(lg), + + le: le, + + currentRev: 1, + compactMainRev: -1, + + fifoSched: schedule.NewFIFOScheduler(), + + stopc: make(chan struct{}), + + lg: lg, + } + s.ReadView = &readView{s} + s.WriteView = &writeView{s} + if s.le != nil { + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) }) + } + + tx := s.b.BatchTx() + tx.Lock() + tx.UnsafeCreateBucket(buckets.Key) + tx.UnsafeCreateBucket(buckets.Meta) + tx.Unlock() + s.b.ForceCommit() + + s.mu.Lock() + defer s.mu.Unlock() + if err := s.restore(); err != nil { + // TODO: return the error instead of panic here? + panic("failed to recover store from backend") + } + + return s +} + +// 返回读事务、 并发读,串行读 +func (s *store) Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead { + s.mu.RLock() + s.revMu.RLock() + // 对于只读的工作负载,我们通过复制事务读缓冲区来使用共享缓冲区提高并发性 + // 对于写/写/读事务,我们使用共享缓冲区 + // 而不是复制事务读缓冲区,以避免事务开销. + var tx backend.ReadTx + if mode == ConcurrentReadTxMode { + tx = s.b.ConcurrentReadTx() + } else { + tx = s.b.ReadTx() + } + + tx.RLock() + firstRev, rev := s.compactMainRev, s.currentRev + s.revMu.RUnlock() + return &storeTxnRead{s, tx, firstRev, rev, trace} +} + +func (s *store) Write(trace *traceutil.Trace) TxnWrite { + s.mu.RLock() + tx := s.b.BatchTx() + tx.Lock() + tw := &storeTxnWrite{ + storeTxnRead: storeTxnRead{s, tx, 0, 0, trace}, + tx: tx, + beginRev: s.currentRev, + changes: make([]mvccpb.KeyValue, 0, 4), + } + return tw +} + +func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { + if ctx == nil || ctx.Err() != nil { + select { + case <-s.stopc: + default: + // fix deadlock in mvcc,for more information, please refer to pr 11817. 
+ // s.stopc is only updated in restore operation, which is called by apply + // snapshot call, compaction and apply snapshot requests are serialized by + // raft, and do not happen at the same time. + s.mu.Lock() + f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } + s.fifoSched.Schedule(f) + s.mu.Unlock() + } + return + } + close(ch) +} + +// Hash OK +func (s *store) Hash() (hash uint32, revision int64, err error) { + // TODO: hash和revision可能不一致,一个可能的解决方案是在函数的开头添加s.revMu.RLock(),这是昂贵的 + s.b.ForceCommit() + h, err := s.b.Hash(buckets.DefaultIgnores) + + return h, s.currentRev, err +} + +// HashByRev 计算所有MVCC修订到给定修订的哈希值. +func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) { + s.mu.RLock() + s.revMu.RLock() + compactRev, currentRev = s.compactMainRev, s.currentRev + s.revMu.RUnlock() + + if rev > 0 && rev <= compactRev { + s.mu.RUnlock() + return 0, 0, compactRev, ErrCompacted + } else if rev > 0 && rev > currentRev { + s.mu.RUnlock() + return 0, currentRev, 0, ErrFutureRev + } + + if rev == 0 { + rev = currentRev + } + keep := s.kvindex.Keep(rev) + + tx := s.b.ReadTx() + tx.RLock() + defer tx.RUnlock() + s.mu.RUnlock() + + upper := revision{Main: rev + 1} + lower := revision{Main: compactRev + 1} + h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + + h.Write(buckets.Key.Name()) + err = tx.UnsafeForEach(buckets.Key, func(k, v []byte) error { + kr := bytesToRev(k) + if !upper.GreaterThan(kr) { + return nil + } + // skip revisions that are scheduled for deletion + // due to compacting; don't skip if there isn't one. 
+ if lower.GreaterThan(kr) && len(keep) > 0 { + if _, ok := keep[kr]; !ok { + return nil + } + } + h.Write(k) + h.Write(v) + return nil + }) + hash = h.Sum32() + + return hash, currentRev, compactRev, err +} + +func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) { + s.revMu.Lock() + if rev <= s.compactMainRev { + ch := make(chan struct{}) + f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } + s.fifoSched.Schedule(f) + s.revMu.Unlock() + return ch, ErrCompacted + } + if rev > s.currentRev { + s.revMu.Unlock() + return nil, ErrFutureRev + } + + s.compactMainRev = rev + + rbytes := newRevBytes() + revToBytes(revision{Main: rev}, rbytes) + + tx := s.b.BatchTx() + tx.Lock() + tx.UnsafePut(buckets.Meta, scheduledCompactKeyName, rbytes) + tx.Unlock() + // ensure that desired compaction is persisted + s.b.ForceCommit() + + s.revMu.Unlock() + + return nil, nil +} + +func (s *store) compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) { + ch := make(chan struct{}) + j := func(ctx context.Context) { + if ctx.Err() != nil { + s.compactBarrier(ctx, ch) + return + } + keep := s.kvindex.Compact(rev) + if !s.scheduleCompaction(rev, keep) { // 删除bolt.db中旧版本 + s.compactBarrier(context.TODO(), ch) + return + } + close(ch) + } + + s.fifoSched.Schedule(j) + trace.Step("schedule compaction") + return ch, nil +} + +func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) { + ch, err := s.updateCompactRev(rev) + if err != nil { + return ch, err + } + + return s.compact(traceutil.TODO(), rev) +} + +func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) { + s.mu.Lock() + + ch, err := s.updateCompactRev(rev) + trace.Step("check and update compact revision") + if err != nil { + s.mu.Unlock() + return ch, err + } + s.mu.Unlock() + + return s.compact(trace, rev) // Compact +} + +func (s *store) Commit() { + s.mu.Lock() + defer s.mu.Unlock() + s.b.ForceCommit() +} + +func (s *store) Restore(b backend.Backend) error 
{ + s.mu.Lock() + defer s.mu.Unlock() + + close(s.stopc) + s.fifoSched.Stop() + + s.b = b + s.kvindex = newTreeIndex(s.lg) + + { + // During restore the metrics might report 'special' values + s.revMu.Lock() + s.currentRev = 1 + s.compactMainRev = -1 + s.revMu.Unlock() + } + + s.fifoSched = schedule.NewFIFOScheduler() + s.stopc = make(chan struct{}) + + return s.restore() +} + +func (s *store) restore() error { + min, max := newRevBytes(), newRevBytes() + revToBytes(revision{Main: 1}, min) + revToBytes(revision{Main: math.MaxInt64, Sub: math.MaxInt64}, max) + + keyToLease := make(map[string]lease.LeaseID) + + // restore index + tx := s.b.BatchTx() + tx.Lock() + + _, finishedCompactBytes := tx.UnsafeRange(buckets.Meta, finishedCompactKeyName, nil, 0) + if len(finishedCompactBytes) != 0 { + s.revMu.Lock() + s.compactMainRev = bytesToRev(finishedCompactBytes[0]).Main + + s.lg.Info( + "restored last compact revision", + zap.Stringer("meta-bucket-name", buckets.Meta), + zap.String("meta-bucket-name-Key", string(finishedCompactKeyName)), + zap.Int64("restored-compact-revision", s.compactMainRev), + ) + s.revMu.Unlock() + } + _, scheduledCompactBytes := tx.UnsafeRange(buckets.Meta, scheduledCompactKeyName, nil, 0) + scheduledCompact := int64(0) + if len(scheduledCompactBytes) != 0 { + scheduledCompact = bytesToRev(scheduledCompactBytes[0]).Main + } + + // index keys concurrently as they're loaded in from tx + rkvc, revc := restoreIntoIndex(s.lg, s.kvindex) + for { + keys, vals := tx.UnsafeRange(buckets.Key, min, max, int64(restoreChunkKeys)) + if len(keys) == 0 { + break + } + // rkvc blocks if the total pending keys exceeds the restore + // chunk size to keep keys from consuming too much memory. 
+ restoreChunk(s.lg, rkvc, keys, vals, keyToLease) + if len(keys) < restoreChunkKeys { + // partial set implies final set + break + } + // next set begins after where this one ended + newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) + newMin.Sub++ + revToBytes(newMin, min) + } + close(rkvc) + + { + s.revMu.Lock() + s.currentRev = <-revc + + // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. + // the correct revision should be set to compaction revision in the case, not the largest revision + // we have seen. + if s.currentRev < s.compactMainRev { + s.currentRev = s.compactMainRev + } + s.revMu.Unlock() + } + + if scheduledCompact <= s.compactMainRev { + scheduledCompact = 0 + } + + for key, lid := range keyToLease { + if s.le == nil { + tx.Unlock() + panic("no lessor to attach lease") + } + err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) + if err != nil { + s.lg.Error( + "failed to attach a lease", + zap.String("lease-id", fmt.Sprintf("%016x", lid)), + zap.Error(err), + ) + } + } + + tx.Unlock() + + s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev)) + + if scheduledCompact != 0 { + if _, err := s.compactLockfree(scheduledCompact); err != nil { + s.lg.Warn("compaction encountered error", zap.Error(err)) + } + + s.lg.Info( + "resume scheduled compaction", + zap.Stringer("meta-bucket-name", buckets.Meta), + zap.String("meta-bucket-name-Key", string(scheduledCompactKeyName)), + zap.Int64("scheduled-compact-revision", scheduledCompact), + ) + } + + return nil +} + +type revKeyValue struct { + key []byte + kv mvccpb.KeyValue + kstr string +} + +func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) { + rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) + go func() { + currentRev := int64(1) + defer func() { revc <- currentRev }() + // restore the tree index from streaming the unordered index. 
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys) + for rkv := range rkvc { + ki, ok := kiCache[rkv.kstr] + // purge kiCache if many keys but still missing in the cache + if !ok && len(kiCache) >= restoreChunkKeys { + i := 10 + for k := range kiCache { + delete(kiCache, k) + if i--; i == 0 { + break + } + } + } + // cache miss, fetch from tree index if there + if !ok { + ki = &keyIndex{Key: rkv.kv.Key} + if idxKey := idx.KeyIndex(ki); idxKey != nil { + kiCache[rkv.kstr], ki = idxKey, idxKey + ok = true + } + } + rev := bytesToRev(rkv.key) + currentRev = rev.Main + if ok { + if isTombstone(rkv.key) { + if err := ki.tombstone(lg, rev.Main, rev.Sub); err != nil { + lg.Warn("tombstone encountered error", zap.Error(err)) + } + continue + } + ki.put(lg, rev.Main, rev.Sub) + } else if !isTombstone(rkv.key) { + ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + idx.Insert(ki) + kiCache[rkv.kstr] = ki + } + } + }() + return rkvc, revc +} + +func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { + for i, key := range keys { + rkv := revKeyValue{key: key} + if err := rkv.kv.Unmarshal(vals[i]); err != nil { + lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err)) + } + rkv.kstr = string(rkv.kv.Key) + if isTombstone(key) { + delete(keyToLease, rkv.kstr) + } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { + keyToLease[rkv.kstr] = lid + } else { + delete(keyToLease, rkv.kstr) + } + kvc <- rkv + } +} + +func (s *store) Close() error { + close(s.stopc) + s.fifoSched.Stop() + return nil +} + +// appendMarkTombstone appends tombstone mark to normal revision bytes. 
+func appendMarkTombstone(lg *zap.Logger, b []byte) []byte { + if len(b) != revBytesLen { + lg.Panic( + "cannot append tombstone mark to non-normal revision bytes", + zap.Int("expected-revision-bytes-size", revBytesLen), + zap.Int("given-revision-bytes-size", len(b)), + ) + } + return append(b, markTombstone) +} + +// isTombstone checks whether the revision bytes is a tombstone. +func isTombstone(b []byte) bool { + return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone +} diff --git a/etcd/mvcc/kvstore_compaction.go b/etcd/mvcc/kvstore_compaction.go new file mode 100644 index 00000000000..0a655ca0ec0 --- /dev/null +++ b/etcd/mvcc/kvstore_compaction.go @@ -0,0 +1,77 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "encoding/binary" + "time" + + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "go.uber.org/zap" +) + +// scheduleCompaction 任务遍历、删除 Key 的过程可能会对 boltdb 造成压力,为了不影响正常读写请求,它在执行过程中会通过参数控制每次遍历、 +// 删除的 Key 数(默认为 100,每批间隔 10ms),分批完成 boltdb Key 的删除操作. 
+func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { + totalStart := time.Now() + keyCompactions := 0 + + end := make([]byte, 8) + binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) + + last := make([]byte, 8+1+8) + for { + var rev revision + + tx := s.b.BatchTx() + tx.Lock() + keys, _ := tx.UnsafeRange(buckets.Key, last, end, int64(s.cfg.CompactionBatchLimit)) + for _, key := range keys { + rev = bytesToRev(key) + if _, ok := keep[rev]; !ok { + tx.UnsafeDelete(buckets.Key, key) + keyCompactions++ + } + } + + if len(keys) < s.cfg.CompactionBatchLimit { + rbytes := make([]byte, 8+1+8) + revToBytes(revision{Main: compactMainRev}, rbytes) + tx.UnsafePut(buckets.Meta, finishedCompactKeyName, rbytes) + tx.Unlock() + s.lg.Info( + "finished scheduled compaction", + zap.Int64("compact-revision", compactMainRev), + zap.Duration("took", time.Since(totalStart)), + ) + return true + } + + // update last + revToBytes(revision{Main: rev.Main, Sub: rev.Sub + 1}, last) + tx.Unlock() + // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer + s.b.ForceCommit() + + select { + case <-time.After(10 * time.Millisecond): + case <-s.stopc: + return false + } + } +} + +// 当我们通过 boltdb 删除大量的 Key,在事务提交后 B+ tree 经过分裂、平衡,会释放出若干 branch/leaf page 页面,然而 boltdb 并不会将其释放给磁盘, +// 调整 db 大小操作是昂贵的,会对性能有较大的损害. diff --git a/etcd/mvcc/kvstore_txn.go b/etcd/mvcc/kvstore_txn.go new file mode 100644 index 00000000000..c9ec85b1c90 --- /dev/null +++ b/etcd/mvcc/kvstore_txn.go @@ -0,0 +1,68 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" +) + +type storeTxnRead struct { + s *store + tx backend.ReadTx + firstRev int64 + rev int64 // 总的修订版本 + trace *traceutil.Trace +} + +func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } + +func (tr *storeTxnRead) Rev() int64 { + return tr.rev +} + +func (tr *storeTxnRead) End() { + tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx. + tr.s.mu.RUnlock() +} + +type storeTxnWrite struct { + storeTxnRead + tx backend.BatchTx + beginRev int64 // 是TXN开始时的修订版本;它将写到下次修订. + changes []mvccpb.KeyValue // 写事务接收到的k,v 包含修订版本数据 +} + +func (tw *storeTxnWrite) Rev() int64 { + return tw.beginRev +} + +func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } + +// End 主要是用来解锁 +func (tw *storeTxnWrite) End() { + // 只有在Txn修改了Mvcc状态时才会更新索引. + if len(tw.changes) != 0 { + // 保持revMu锁,以防止新的读Txns打开,直到写回. + tw.s.revMu.Lock() + tw.s.currentRev++ + } + tw.tx.Unlock() + if len(tw.changes) != 0 { + tw.s.revMu.Unlock() + } + tw.s.mu.RUnlock() +} diff --git a/etcd/mvcc/over_kv.go b/etcd/mvcc/over_kv.go new file mode 100644 index 00000000000..7620d2a9d4c --- /dev/null +++ b/etcd/mvcc/over_kv.go @@ -0,0 +1,86 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "context" + + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" +) + +// RangeOptions 请求参数 +type RangeOptions struct { + Limit int64 // 用户限制的数据量 + Rev int64 // 指定的修订版本 + Count bool // 是否统计修订版本数 +} + +// RangeResult 响应 +type RangeResult struct { + KVs []mvccpb.KeyValue + Rev int64 // 最新的修订版本 + Count int // 统计当前的 修订版本数 +} + +type ReadView interface { + // FirstRev + // before cur + // compact + // rev rev rev + FirstRev() int64 // 在打开txn时返回第一个KV修订.在压实之后,第一个修订增加到压实修订. + Rev() int64 // 在打开txn时返回KV的修订. + Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) // 读取数据 +} + +// TxnRead 只读事务,不会锁住其他只读事务 +type TxnRead interface { + ReadView + End() // 标记事务已完成 并且准备提交 +} + +type WriteView interface { + DeleteRange(key, end []byte) (n, rev int64) // 删除指定范围的数据 + // Put 将给定的k v放入存储区.Put还接受额外的参数lease,将lease作为元数据附加到键值对上.KV实现 不验证租约id. + // put还会增加存储的修订版本,并在事件历史中生成一个事件.返回的修订版本是执行操作时KV的当前修订版本. + Put(key, value []byte, lease lease.LeaseID) (rev int64) +} + +type TxnWrite interface { + TxnRead + WriteView + // Changes 获取打开write txn后所做的更改. + Changes() []mvccpb.KeyValue +} + +// txnReadWrite 读事务-->写事务,对任何写操作都感到恐慌. 
+type txnReadWrite struct { + TxnRead +} + +func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } +func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + panic("unexpected Put") +} +func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } + +func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } + +type ReadTxMode uint32 + +const ( + ConcurrentReadTxMode = ReadTxMode(1) // 缓冲区拷贝,提高性能 并发ReadTx模式 + SharedBufReadTxMode = ReadTxMode(2) +) diff --git a/etcd/mvcc/over_kv_del.go b/etcd/mvcc/over_kv_del.go new file mode 100644 index 00000000000..ce4b8c75d1b --- /dev/null +++ b/etcd/mvcc/over_kv_del.go @@ -0,0 +1,78 @@ +package mvcc + +import ( + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" + "go.uber.org/zap" +) + +// DeleteRange 1 +func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { + tw := wv.kv.Write(traceutil.TODO()) + defer tw.End() + + return tw.(*storeTxnWrite).DeleteRange(key, end) +} + +// DeleteRange 2 +func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { + if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { + return n, tw.beginRev + 1 + } + return 0, tw.beginRev +} + +// 从k,v index中删除 +func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { + rrev := tw.beginRev + if len(tw.changes) > 0 { + rrev++ + } + keys, _ := tw.s.kvindex.Range(key, end, rrev) + if len(keys) == 0 { + return 0 + } + for _, key := range keys { + tw.delete(key) + } // 4.20 号 + return int64(len(keys)) +} + +// bolt.db 删除数据 +func (tw *storeTxnWrite) delete(key []byte) { + indexBytes := newRevBytes() + idxRev := revision{Main: tw.beginRev + 1, Sub: int64(len(tw.changes))} + revToBytes(idxRev, indexBytes) + + indexBytes = appendMarkTombstone(tw.storeTxnRead.s.lg, indexBytes) + + kv := 
mvccpb.KeyValue{Key: string(key)} + + d, err := kv.Marshal() + if err != nil { + tw.storeTxnRead.s.lg.Fatal("序列化失败 mvccpb.KeyValue", zap.Error(err)) + } + + tw.tx.UnsafeSeqPut(buckets.Key, indexBytes, d) + err = tw.s.kvindex.Tombstone(key, idxRev) + if err != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to tombstone an existing Key", + zap.String("Key", string(key)), + zap.Error(err), + ) + } + tw.changes = append(tw.changes, kv) + + item := lease.LeaseItem{Key: string(key)} + leaseID := tw.s.le.GetLease(item) + + if leaseID != lease.NoLease { + err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) + if err != nil { + tw.storeTxnRead.s.lg.Error("未能从key上分离出旧租约", zap.Error(err)) + } + } +} diff --git a/etcd/mvcc/over_kv_get.go b/etcd/mvcc/over_kv_get.go new file mode 100644 index 00000000000..e507bf46865 --- /dev/null +++ b/etcd/mvcc/over_kv_get.go @@ -0,0 +1,77 @@ +package mvcc + +import ( + "context" + + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "go.uber.org/zap" +) + +// Range OK +func (tr *storeTxnRead) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + return tr.rangeKeys(ctx, key, end, tr.Rev(), ro) +} + +func (tw *storeTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + rev := tw.beginRev + if len(tw.changes) > 0 { + rev++ + } + return tw.rangeKeys(ctx, key, end, rev, ro) +} + +// OK +func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { + rev := ro.Rev // 指定修订版本 + if rev > curRev { + return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev + } + if rev <= 0 { + rev = curRev + } + if rev < tr.s.compactMainRev { + return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted + } + if ro.Count { // 是否统计修订版本数 + total := tr.s.kvindex.CountRevisions(key, end, rev) + tr.trace.Step("从内存索引树中统计修订数") + return &RangeResult{KVs: nil, 
Count: total, Rev: curRev}, nil + } + // 获取版本数据 + revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit)) + tr.trace.Step("从内存索引树中获取指定范围的keys") + if len(revpairs) == 0 { + return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil + } + + limit := int(ro.Limit) + if limit <= 0 || limit > len(revpairs) { + limit = len(revpairs) // 实际收到的数据量 + } + + kvs := make([]mvccpb.KeyValue, limit) + revBytes := newRevBytes() // len 为17的数组 + // 拿着索引数据去bolt.db 查数据 + for i, revpair := range revpairs[:len(kvs)] { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + revToBytes(revpair, revBytes) + // 根据修订版本获取数据 + _, vs := tr.tx.UnsafeRange(buckets.Key, revBytes, nil, 0) + if len(vs) != 1 { + tr.s.lg.Fatal("Range找不到修订对", zap.Int64("revision-Main", revpair.Main), zap.Int64("revision-Sub", revpair.Sub)) + } + if err := kvs[i].Unmarshal([]byte(vs[0])); err != nil { + tr.s.lg.Fatal( + "反序列失败 mvccpb.KeyValue", + zap.Error(err), + ) + } + } + tr.trace.Step("从bolt.db 中range Key") + return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil +} diff --git a/etcd/mvcc/over_kv_interface.go b/etcd/mvcc/over_kv_interface.go new file mode 100644 index 00000000000..87be597ad80 --- /dev/null +++ b/etcd/mvcc/over_kv_interface.go @@ -0,0 +1,28 @@ +package mvcc + +import ( + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/pkg/traceutil" +) + +type KV interface { + ReadView + WriteView + Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead // 创建读事务 + Write(trace *traceutil.Trace) TxnWrite // 创建写事务 + Hash() (hash uint32, revision int64, err error) // 计算kv存储的hash值 + HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error) // 计算所有MVCC修订到给定修订的哈希值. + Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) // 释放所有被替换的修订数小于rev的键. + Commit() // 将未完成的TXNS提交到底层后端. 
+ Restore(b backend.Backend) error + Close() error +} + +type WatchableKV interface { + KV + Watchable +} + +type Watchable interface { + NewWatchStream() WatchStream +} diff --git a/etcd/mvcc/over_kv_put.go b/etcd/mvcc/over_kv_put.go new file mode 100644 index 00000000000..c0899ba4b0c --- /dev/null +++ b/etcd/mvcc/over_kv_put.go @@ -0,0 +1,74 @@ +package mvcc + +import ( + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "go.uber.org/zap" +) + +// OK +func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { + rev := tw.beginRev + 1 // 事务开始时有一个ID,写这个操作,对应的ID应+1 + c := rev + oldLease := lease.NoLease + + // 如果该键之前存在,使用它之前创建的并获取它之前的leaseID + _, created, beforeVersion, err := tw.s.kvindex.Get(key, rev) // 0,0,nil <= rev的最新修改 + if err == nil { + c = created.Main + oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) + tw.trace.Step("获取键先前的created_revision和leaseID") + } + indexBytes := newRevBytes() + idxRev := revision{Main: rev, Sub: int64(len(tw.changes))} // 当前请求的修订版本 + revToBytes(idxRev, indexBytes) + + kv := mvccpb.KeyValue{ + Key: string(key), + Value: string(value), + CreateRevision: c, // 当前代,创建时的修订版本 + ModRevision: rev, // 修订版本 + Version: beforeVersion + 1, // Version是key的版本.删除键会将该键的版本重置为0,对键的任何修改都会增加它的版本. 
+ Lease: int64(leaseID), // 租约ID + } + + d, err := kv.Marshal() + if err != nil { + tw.storeTxnRead.s.lg.Fatal("序列化失败 mvccpb.KeyValue", zap.Error(err)) + } + + tw.trace.Step("序列化 mvccpb.KeyValue") + tw.tx.UnsafeSeqPut(buckets.Key, indexBytes, d) // ✅ 写入db,buf + _ = (&treeIndex{}).Put + tw.s.kvindex.Put(key, idxRev) // 当前请求的修订版本 + tw.changes = append(tw.changes, kv) + tw.trace.Step("存储键值对到bolt.db") + + // 如果用户没穿,就是 NoLease + if oldLease != lease.NoLease { + if tw.s.le == nil { + panic("没找到租约") + } + // 分离旧的租约 + err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + tw.storeTxnRead.s.lg.Error("从key中分离旧的租约失败", zap.Error(err)) + } + } + if leaseID != lease.NoLease { + if tw.s.le == nil { + panic("没找到租约") + } + err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + panic("租约附加失败") + } + } + tw.trace.Step("附加租约到key") +} + +func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { + tw.put(key, value, lease) + return tw.beginRev + 1 +} diff --git a/etcd/mvcc/over_kv_view.go b/etcd/mvcc/over_kv_view.go new file mode 100644 index 00000000000..250e472bcf1 --- /dev/null +++ b/etcd/mvcc/over_kv_view.go @@ -0,0 +1,50 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "context" + + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/pkg/traceutil" +) + +type readView struct{ kv KV } + +func (rv *readView) FirstRev() int64 { + tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO()) + defer tr.End() + return tr.FirstRev() +} + +func (rv *readView) Rev() int64 { + tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO()) // 并发ReadTx模式 + defer tr.End() + return tr.Rev() +} + +func (rv *readView) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO()) + defer tr.End() + return tr.Range(ctx, key, end, ro) +} + +type writeView struct{ kv KV } + +func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw := wv.kv.Write(traceutil.TODO()) + defer tw.End() + return tw.Put(key, value, lease) +} diff --git a/etcd/mvcc/over_revision.go b/etcd/mvcc/over_revision.go new file mode 100644 index 00000000000..f3618377670 --- /dev/null +++ b/etcd/mvcc/over_revision.go @@ -0,0 +1,53 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import "encoding/binary" + +// revBytesLen 正常修订版本的长度 +const revBytesLen = 8 + 1 + 8 + +func (a revision) GreaterThan(b revision) bool { + if a.Main > b.Main { + return true + } + if a.Main < b.Main { + return false + } + return a.Sub > b.Sub +} + +func newRevBytes() []byte { + return make([]byte, revBytesLen, markedRevBytesLen) +} + +func revToBytes(rev revision, bytes []byte) { + binary.BigEndian.PutUint64(bytes, uint64(rev.Main)) + bytes[8] = '_' + binary.BigEndian.PutUint64(bytes[9:], uint64(rev.Sub)) +} + +func bytesToRev(bytes []byte) revision { + return revision{ + Main: int64(binary.BigEndian.Uint64(bytes[0:8])), + Sub: int64(binary.BigEndian.Uint64(bytes[9:])), + } +} + +type revisions []revision + +func (a revisions) Len() int { return len(a) } +func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) } +func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/etcd/mvcc/over_watchable_store.go b/etcd/mvcc/over_watchable_store.go new file mode 100644 index 00000000000..8e38bffc4a1 --- /dev/null +++ b/etcd/mvcc/over_watchable_store.go @@ -0,0 +1,503 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mvcc + +import ( + "sync" + "time" + + "github.com/ls-2018/etcd_cn/etcd/lease" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" + + "go.uber.org/zap" +) + +var ( + // chanBufLen is the length of the buffered chan + // for sending out watched events. + // See https://github.com/etcd-io/etcd/issues/11906 for more detail. + chanBufLen = 128 + + // maxWatchersPerSync is the number of watchers to sync in a single batch + maxWatchersPerSync = 512 +) + +type watchable interface { + watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) + progress(w *watcher) + rev() int64 +} + +type watchableStore struct { + *store + mu sync.RWMutex + victims []watcherBatch // 因为channel阻塞而暂存的 + victimc chan struct{} // 如果watcher实例关联的ch通道被阻塞了,则对应的watcherBatch实例会暂时记录到该字段中 + unsynced watcherGroup // 用于存储未同步完成的实例 + synced watcherGroup // 用于存储同步完成的实例 + stopc chan struct{} + wg sync.WaitGroup +} + +// cancelFunc updates unsynced and synced maps when running +// cancel operations. 
+type cancelFunc func() + +func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) WatchableKV { + return newWatchableStore(lg, b, le, cfg) +} + +func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore { + if lg == nil { + lg = zap.NewNop() + } + s := &watchableStore{ + store: NewStore(lg, b, le, cfg), + victimc: make(chan struct{}, 1), // 如果watcher实例关联的ch通道被阻塞了,则对应的watcherBatch实例会暂时记录到该字段中 + unsynced: newWatcherGroup(), // 用于存储未同步完成的实例 + synced: newWatcherGroup(), // 用于存储已经同步完成的实例 + stopc: make(chan struct{}), + } + s.store.ReadView = &readView{s} // 调用storage中全局view查询 + s.store.WriteView = &writeView{s} // 调用storage中全局view查询 + if s.le != nil { + // 使用此存储作为删除器,因此撤销触发器监听事件 + s.le.SetRangeDeleter(func() lease.TxnDelete { + return s.Write(traceutil.TODO()) + }) + } + s.wg.Add(2) + go s.syncWatchersLoop() + go s.syncVictimsLoop() // 用于循环清除watchableStore中的victims + return s +} + +func (s *watchableStore) Close() error { + close(s.stopc) + s.wg.Wait() + return s.store.Close() +} + +func (s *watchableStore) NewWatchStream() WatchStream { + return &watchStream{ + watchable: s, + ch: make(chan WatchResponse, chanBufLen), + cancels: make(map[WatchID]cancelFunc), + watchers: make(map[WatchID]*watcher), + } +} + +// watcher 初始化 +func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { + wa := &watcher{ + key: string(key), + end: string(end), + minRev: startRev, + id: id, + ch: ch, // 将变更事件塞进去,可能会与其他watcher 共享 + filterFuncs: fcs, + } + + s.mu.Lock() + s.revMu.RLock() + // 为0 或者是超过最新的修订版本 , 都设置为下一个修订版本 + synced := startRev > s.store.currentRev || startRev == 0 + if synced { + wa.minRev = s.store.currentRev + 1 + if startRev > wa.minRev { + wa.minRev = startRev + } + s.synced.add(wa) // 把当前watcher 定义为已经同步完的 + } else { + s.unsynced.add(wa) // 把当前watcher 定义为 没有同步完的 + } + s.revMu.RUnlock() + s.mu.Unlock() + + return wa, 
func() { s.cancelWatcher(wa) } +} + +// 移除watcher +func (s *watchableStore) cancelWatcher(wa *watcher) { + for { + s.mu.Lock() + if s.unsynced.delete(wa) { + break + } else if s.synced.delete(wa) { + break + } else if wa.compacted { // 因日志压缩,该watcher已被移除 + break + } else if wa.ch == nil { // 判断是否还能发送数据 + // already canceled (e.g., cancel/close race) + break + } + + if !wa.victim { + s.mu.Unlock() + panic("观察者不是受害者,但不在观察组中") + } + + var victimBatch watcherBatch + for _, wb := range s.victims { + if wb[wa] != nil { + victimBatch = wb + break + } + } + if victimBatch != nil { + delete(victimBatch, wa) + break + } + + // victim being processed so not accessible; retry + s.mu.Unlock() + time.Sleep(time.Millisecond) + } + + wa.ch = nil + s.mu.Unlock() +} + +func (s *watchableStore) Restore(b backend.Backend) error { + s.mu.Lock() + defer s.mu.Unlock() + err := s.store.Restore(b) + if err != nil { + return err + } + + for wa := range s.synced.watchers { + wa.restore = true + s.unsynced.add(wa) + } + s.synced = newWatcherGroup() + return nil +} + +// syncWatchersLoop 每隔100ms ,将所有未通知的事件通知给所有的监听者. +func (s *watchableStore) syncWatchersLoop() { + defer s.wg.Done() + + for { + s.mu.RLock() + st := time.Now() + lastUnsyncedWatchers := s.unsynced.size() // 获取当前的unsynced watcherGroup的大小 + s.mu.RUnlock() + + unsyncedWatchers := 0 + if lastUnsyncedWatchers > 0 { + // 存在需要进行同步的watcher实例,调用syncWatchers()方法对unsynced watcherGroup中的watcher进行批量同步 + // 会尝试发送 + unsyncedWatchers = s.syncWatchers() + } + syncDuration := time.Since(st) + + waitDuration := 100 * time.Millisecond + // 阻塞中的worker + if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { + waitDuration = syncDuration + } + + select { + case <-time.After(waitDuration): + case <-s.stopc: + return + } + } +} + +// syncVictimsLoop 尝试将因通道阻塞的数据 重新发送,如失败,可将watcher重新放入unsyncedGroup, +// 如果发送成功,则根据相应的watcher的同步情况,将watcher实例迁移到(un)synced watcherGroup中. 
+func (s *watchableStore) syncVictimsLoop() { + defer s.wg.Done() + + for { + for s.moveVictims() != 0 { + // 尝试更新有问题的watcher 受损的 + } + s.mu.RLock() + isEmpty := len(s.victims) == 0 + s.mu.RUnlock() + + var tickc <-chan time.Time + if !isEmpty { + tickc = time.After(10 * time.Millisecond) + } + + select { + case <-tickc: + case <-s.victimc: // 读数据,表示不再受损 + case <-s.stopc: + return + } + } +} + +// 尝试更新阻塞中的事件数据 +func (s *watchableStore) moveVictims() (moved int) { + s.mu.Lock() + victims := s.victims + s.victims = nil + s.mu.Unlock() + + var newVictim watcherBatch + for _, wb := range victims { + // try to send responses again + // 尝试发送受损的响应【因通道阻塞导致的】 + for w, eb := range wb { + rev := w.minRev - 1 + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { + } else { + if newVictim == nil { + newVictim = make(watcherBatch) + } + newVictim[w] = eb + continue + } + moved++ + } + + s.mu.Lock() + s.store.revMu.RLock() + curRev := s.store.currentRev + for w, eb := range wb { + if newVictim != nil && newVictim[w] != nil { + // 不能发送响应数据的,继续保持受损状态 + continue + } + w.victim = false + if eb.moreRev != 0 { + w.minRev = eb.moreRev + } + // 当该watcher不再受损 ,通过watcher的修订版本与全局的修订版本,判断该watcher是存入synced还是unsynced + if w.minRev <= curRev { + s.unsynced.add(w) + } else { + s.synced.add(w) + } + } + s.store.revMu.RUnlock() + s.mu.Unlock() + } + + if len(newVictim) > 0 { + s.mu.Lock() + s.victims = append(s.victims, newVictim) + s.mu.Unlock() + } + + return moved +} + +// syncWatchers 向所有未同步完成的watcher 发消息 +// 1. choose a set of watchers from the unsynced watcher group +// 2. iterate over the set to get the minimum revision and remove compacted watchers +// 3. use minimum revision to get all Key-value pairs and send those events to watchers +// 4. 
remove synced watchers in set from unsynced group and move to synced group +func (s *watchableStore) syncWatchers() int { + s.mu.Lock() + defer s.mu.Unlock() + + if s.unsynced.size() == 0 { + return 0 + } + + s.store.revMu.RLock() + defer s.store.revMu.RUnlock() + + curRev := s.store.currentRev + compactionRev := s.store.compactMainRev + // 根据unsynced watcherGroup中记录的watcher个数对其进行分批返回,同时获取该批watcher实例中查找最小的minRev字段,maxWatchersPerSync默认为512 + wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) + minBytes, maxBytes := newRevBytes(), newRevBytes() + revToBytes(revision{Main: minRev}, minBytes) + revToBytes(revision{Main: curRev + 1}, maxBytes) + + tx := s.store.b.ReadTx() + tx.RLock() + revs, vs := tx.UnsafeRange(buckets.Key, minBytes, maxBytes, 0) // 对key Bucket进行范围查找 + evs := kvsToEvents(s.store.lg, wg, revs, vs) // 负责将BoltDB中查询的键值对信息转换成相应的event实例 + tx.RUnlock() + + var victims watcherBatch + wb := newWatcherBatch(wg, evs) + for w := range wg.watchers { // 事件发送值每一个watcher对应的Channel中 + w.minRev = curRev + 1 + + eb, ok := wb[w] + if !ok { + // bring un-notified watcher to synced + s.synced.add(w) + s.unsynced.delete(w) + continue + } + + if eb.moreRev != 0 { + w.minRev = eb.moreRev + } + + if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { + // 往通道里发送的消息 + } else { + // case 1 确实是发送失败 + // case 2 通道阻塞了,暂时标记位victim + if victims == nil { + victims = make(watcherBatch) + } + w.victim = true + } + + if w.victim { + victims[w] = eb + } else { + if eb.moreRev != 0 { + continue + } + s.synced.add(w) + } + s.unsynced.delete(w) + } + s.addVictim(victims) + + return s.unsynced.size() +} + +// 将BoltDB中查询的键值对信息转换成相应的Event实例,通过判断BoltDB查询的键值对是否存在于watcherGroup的key中,记录mvccpb.PUT or mvccpb.DELETE +func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { + for i, v := range vals { + var kv mvccpb.KeyValue + if err := kv.Unmarshal(v); err != nil { + lg.Panic("failed to unmarshal mvccpb.KeyValue", 
zap.Error(err)) + } + + if !wg.contains(string(kv.Key)) { + continue + } + + ty := mvccpb.PUT + if isTombstone(revs[i]) { + ty = mvccpb.DELETE + // patch in mod revision so watchers won't skip + kv.ModRevision = bytesToRev(revs[i]).Main + } + evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) + } + return evs +} + +// notify 当前的修订版本,当前的变更事件 用于通知对应的watcher +func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { + var victim watcherBatch + // type watcherBatch map[*watcher]*eventBatch + // 找到所有的watch,synced使用了map和红黑树来快速找到监听的key + for watcher, eb := range newWatcherBatch(&s.synced, evs) { + if eb.revs != 1 { + s.store.lg.Panic("在watch通知中出现多次修订", zap.Int("number-of-revisions", eb.revs)) + } + if watcher.send(WatchResponse{WatchID: watcher.id, Events: eb.evs, Revision: rev}) { + } else { + // 移动缓慢的观察者到victim + watcher.minRev = rev + 1 + if victim == nil { + victim = make(watcherBatch) + } + watcher.victim = true + victim[watcher] = eb + s.synced.delete(watcher) + } + } + s.addVictim(victim) // 将因为chan满没发出的消息缓存,然后使用unsynced再将消息发送出去 +} + +// 添加不健康的事件 watcher:待发送的消息 +func (s *watchableStore) addVictim(victim watcherBatch) { + if victim == nil { + return + } + s.victims = append(s.victims, victim) + select { + case s.victimc <- struct{}{}: + default: + } +} + +func (s *watchableStore) rev() int64 { + return s.store.Rev() +} + +func (s *watchableStore) progress(w *watcher) { + s.mu.RLock() + defer s.mu.RUnlock() + + if _, ok := s.synced.watchers[w]; ok { + w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) + // If the ch is full, this watcher is receiving events. + // We do not need to send progress at all. + } +} + +type watcher struct { + key string + end string + victim bool // 当ch被阻塞时,并正在进行受害者处理时被设置. 
+ compacted bool // 当 watcher 因为压缩而被移除时,compacted被设置 + // restore is true when the watcher is being restored from leader snapshot + // which means that this watcher has just been moved from "synced" to "unsynced" + // watcher group, possibly with a future revision when it was first added + // to the synced watcher + // "unsynced" watcher revision must always be <= current revision, + // except when the watcher were to be moved from "synced" watcher group + restore bool + minRev int64 // 开始监听的修订版本 + id WatchID // watcher id + filterFuncs []FilterFunc // 事件过滤 + ch chan<- WatchResponse // 将变更事件塞进去,可能会与其他watcher 共享 +} + +// 向客户端发送事件 +func (w *watcher) send(wr WatchResponse) bool { + progressEvent := len(wr.Events) == 0 + + if len(w.filterFuncs) != 0 { + ne := make([]mvccpb.Event, 0, len(wr.Events)) + for i := range wr.Events { + filtered := false + for _, filter := range w.filterFuncs { + if filter(wr.Events[i]) { + filtered = true + break + } + } + if !filtered { + ne = append(ne, wr.Events[i]) + } + } + wr.Events = ne + } + + // 所有的事件都被过滤掉了 + if !progressEvent && len(wr.Events) == 0 { + return true + } + select { + case w.ch <- wr: // 正常的事件 + return true + default: + return false + } +} diff --git a/etcd/mvcc/over_watchable_store_txn.go b/etcd/mvcc/over_watchable_store_txn.go new file mode 100644 index 00000000000..8ff28785289 --- /dev/null +++ b/etcd/mvcc/over_watchable_store_txn.go @@ -0,0 +1,55 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/pkg/traceutil" +) + +func (tw *watchableStoreTxnWrite) End() { + changes := tw.Changes() + if len(changes) == 0 { + tw.TxnWrite.End() + return + } + + rev := tw.Rev() + 1 + evs := make([]mvccpb.Event, len(changes)) + for i, change := range changes { + evs[i].Kv = &changes[i] + if change.CreateRevision == 0 { + evs[i].Type = mvccpb.DELETE + evs[i].Kv.ModRevision = rev + } else { + evs[i].Type = mvccpb.PUT + } + } + + // 当异步事件post检查当前存储版本时,在可观察存储锁下写入TXN,因此更新是可见的 + tw.s.mu.Lock() + tw.s.notify(rev, evs) // 事务结束时, 通知watcher + tw.TxnWrite.End() + tw.s.mu.Unlock() +} + +type watchableStoreTxnWrite struct { + TxnWrite + s *watchableStore +} + +func (s *watchableStore) Write(trace *traceutil.Trace) TxnWrite { + return &watchableStoreTxnWrite{s.store.Write(trace), s} +} diff --git a/etcd/mvcc/over_watcher.go b/etcd/mvcc/over_watcher.go new file mode 100644 index 00000000000..a4630a734af --- /dev/null +++ b/etcd/mvcc/over_watcher.go @@ -0,0 +1,179 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "bytes" + "errors" + "sync" + + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" +) + +// AutoWatchID is the watcher ID passed in WatchStream.Watch when no +// user-provided ID is available. 
If pass, an ID will automatically be assigned. +const AutoWatchID WatchID = 0 + +var ( + ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") + ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty") + ErrWatcherDuplicateID = errors.New("mvcc: 在WatchStream上提供的重复的watch ID") +) + +type WatchID int64 + +// FilterFunc returns true if the given event should be filtered out. +type FilterFunc func(e mvccpb.Event) bool + +type WatchStream interface { + // Watch 创建watch id 默认为0 , 范围监听 起始的修订版本 事件过滤 + Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) + + Chan() <-chan WatchResponse // 所有watch的响应会会被塞入返回的channel + + // RequestProgress requests the progress of the watcher with given ID. The response + // will only be sent if the watcher is currently synced. + // The responses will be sent through the WatchRespone Chan attached + // with this stream to ensure correct ordering. + // The responses contains no events. The revision in the response is the progress + // of the watchers since the watcher is currently synced. + RequestProgress(id WatchID) + + // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be + // returned. + Cancel(id WatchID) error + + // Close closes Chan and release all related resources. + Close() + Rev() int64 // 返回当前watch指定的修订版本 +} + +type WatchResponse struct { + // WatchID is the WatchID of the watcher this response sent to. + WatchID WatchID + + // Events contains all the events that needs to send. + Events []mvccpb.Event + + // Revision is the revision of the KV when the watchResponse is Created. + // For a normal response, the revision should be the same as the last + // Modified revision inside Events. For a delayed response to a unsynced + // watcher, the revision is greater than the last Modified revision + // inside Events. + Revision int64 + + // CompactRevision is set when the watcher is cancelled due to compaction. 
+ CompactRevision int64 +} + +// watchStream contains a collection of watchers that share +// one streaming chan to send out watched events and other control events. +// watchers的一写信息 +type watchStream struct { + watchable watchable + ch chan WatchResponse // 用于传递watch 响应的通道 + mu sync.Mutex // guards fields below it + nextID WatchID // 预先分配给这个流中的下一个新的观察者 ,第一次是0 + closed bool + cancels map[WatchID]cancelFunc // 用于取消特定的watcher + watchers map[WatchID]*watcher // 记录watcher事件及其Id +} + +// Watch 在当前stream创建watcher并返回 WatchID. +func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) { + // 防止键>按字典顺序结束的错误范围 + // 监视请求'WithFromKey'有空字节范围结束 + if len(end) != 0 && bytes.Compare(key, end) != -1 { + return -1, ErrEmptyWatcherRange + } + + ws.mu.Lock() + defer ws.mu.Unlock() + if ws.closed { + return -1, ErrEmptyWatcherRange + } + + if id == AutoWatchID { + // 因为是从0开始,每次执行都是为了获取一个自增的WatchId 从1开始 + for ws.watchers[ws.nextID] != nil { + ws.nextID++ + } + id = ws.nextID + ws.nextID++ + } else if _, ok := ws.watchers[id]; ok { + return -1, ErrWatcherDuplicateID + } + + w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...) + ws.cancels[id] = c // 回调函数用于删除watcher + ws.watchers[id] = w // 记录watcher事件及其Id + return id, nil +} + +func (ws *watchStream) Chan() <-chan WatchResponse { + return ws.ch +} + +func (ws *watchStream) Cancel(id WatchID) error { + ws.mu.Lock() + cancel, ok := ws.cancels[id] + w := ws.watchers[id] + ok = ok && !ws.closed + ws.mu.Unlock() + + if !ok { + return ErrWatcherNotExist + } + cancel() + + ws.mu.Lock() + // The watch isn't removed until cancel so that if Close() is called, + // it will wait for the cancel. Otherwise, Close() could close the + // watch channel while the store is still posting events. 
+ if ww := ws.watchers[id]; ww == w { + delete(ws.cancels, id) + delete(ws.watchers, id) + } + ws.mu.Unlock() + + return nil +} + +func (ws *watchStream) Close() { + ws.mu.Lock() + defer ws.mu.Unlock() + + for _, cancel := range ws.cancels { + cancel() + } + ws.closed = true + close(ws.ch) +} + +func (ws *watchStream) Rev() int64 { + ws.mu.Lock() + defer ws.mu.Unlock() + return ws.watchable.rev() +} + +func (ws *watchStream) RequestProgress(id WatchID) { + ws.mu.Lock() + w, ok := ws.watchers[id] + ws.mu.Unlock() + if !ok { + return + } + ws.watchable.progress(w) +} diff --git a/etcd/mvcc/over_watcher_group.go b/etcd/mvcc/over_watcher_group.go new file mode 100644 index 00000000000..5f3c352e45a --- /dev/null +++ b/etcd/mvcc/over_watcher_group.go @@ -0,0 +1,293 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "fmt" + "math" + + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/pkg/adt" +) + +// watchBatchMaxRevs is the maximum distinct revisions that +// may be sent to an unsynced watcher at a time. Declared as +// var instead of const for testing purposes. 
+var watchBatchMaxRevs = 1000 + +type eventBatch struct { + // evs is a batch of revision-ordered events + evs []mvccpb.Event + // revs is the minimum unique revisions observed for this batch + revs int + // moreRev is first revision with more events following this batch + moreRev int64 +} + +// OK +func (eb *eventBatch) add(ev mvccpb.Event) { + if eb.revs > watchBatchMaxRevs { + // maxed out batch size + return + } + + if len(eb.evs) == 0 { + // base case + eb.revs = 1 + eb.evs = append(eb.evs, ev) + return + } + + // revision accounting + ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision + evRev := ev.Kv.ModRevision + if evRev > ebRev { + eb.revs++ + if eb.revs > watchBatchMaxRevs { + eb.moreRev = evRev + return + } + } + + eb.evs = append(eb.evs, ev) +} + +type watcherBatch map[*watcher]*eventBatch // 记录了每个watcher 待返回的事件[批] + +// 给watcher发送一批事件,存储响应 +func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) { + eb := wb[w] + if eb == nil { + eb = &eventBatch{} + wb[w] = eb + } + eb.add(ev) +} + +// newWatcherBatch 当收到一批事件后,去watchGroup组找匹配的watcher ,然后发送出去 +func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch { + if len(wg.watchers) == 0 { // 没有watcher + return nil + } + wb := make(watcherBatch) // 给watcher发送一批事件 + for _, ev := range evs { + for w := range wg.watcherSetByKey(ev.Kv.Key) { + if ev.Kv.ModRevision >= w.minRev { + // 不要重复通知 + wb.add(w, ev) + } + } + } + return wb +} + +type watcherSet map[*watcher]struct{} + +func (w watcherSet) add(wa *watcher) { + if _, ok := w[wa]; ok { + panic("添加同一个watcher两次!") + } + w[wa] = struct{}{} +} + +// 合并watcher +func (w watcherSet) union(ws watcherSet) { + for wa := range ws { + w.add(wa) + } +} + +func (w watcherSet) delete(wa *watcher) { + if _, ok := w[wa]; !ok { + panic("要移除的watcher 已丢失!") + } + delete(w, wa) +} + +type watcherSetByKey map[string]watcherSet // 监听的key + +func (w watcherSetByKey) add(wa *watcher) { + set := w[wa.key] + if set == nil { + set = make(watcherSet) + w[wa.key] = set + } + 
set.add(wa) +} + +func (w watcherSetByKey) delete(wa *watcher) bool { + k := wa.key + if v, ok := w[k]; ok { + if _, ok := v[wa]; ok { + delete(v, wa) + if len(v) == 0 { + // remove the set; nothing left + delete(w, k) + } + return true + } + } + return false +} + +// watcher的集合 +type watcherGroup struct { + keyWatchers watcherSetByKey // 监听单个key的watcher + ranges adt.IntervalTree // 红黑树 按照间隔排序 + watchers watcherSet +} + +// 用于存储同步完成、未同步完成的实例 +func newWatcherGroup() watcherGroup { + return watcherGroup{ + keyWatchers: make(watcherSetByKey), + ranges: adt.NewIntervalTree(), + watchers: make(watcherSet), // 元素集 + } +} + +// 添加一个watcher +func (wg *watcherGroup) add(wa *watcher) { + wg.watchers.add(wa) + if wa.end == "" || len(wa.end) == 0 { + wg.keyWatchers.add(wa) + return + } + // 范围监听 + // 已经注册了interval ? + // 红黑树里存储了范围key + ivl := adt.NewStringAffineInterval(wa.key, wa.end) + if iv := wg.ranges.Find(ivl); iv != nil { + iv.Val.(watcherSet).add(wa) + return + } + + ws := make(watcherSet) + ws.add(wa) + wg.ranges.Insert(ivl, ws) +} + +// 监听的key在watcherGroup中是否有一个watcher +func (wg *watcherGroup) contains(key string) bool { + _, ok := wg.keyWatchers[key] + return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) // 是否有元素与key重叠 +} + +// size 返回当前group里有多少元素 +func (wg *watcherGroup) size() int { return len(wg.watchers) } + +// 删除watcher +func (wg *watcherGroup) delete(wa *watcher) bool { + if _, ok := wg.watchers[wa]; !ok { + return false + } + wg.watchers.delete(wa) + if wa.end == "" || len(wa.end) == 0 { + wg.keyWatchers.delete(wa) + return true + } + + ivl := adt.NewStringAffineInterval(wa.key, wa.end) + iv := wg.ranges.Find(ivl) + if iv == nil { + return false + } + + ws := iv.Val.(watcherSet) + delete(ws, wa) + if len(ws) == 0 { + // remove interval missing watchers + if ok := wg.ranges.Delete(ivl); !ok { + panic("could not remove watcher from interval tree") + } + } + + return true +} + +// choose selects watchers from the watcher group to update +func (wg 
*watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) { + if len(wg.watchers) < maxWatchers { + return wg, wg.chooseAll(curRev, compactRev) + } + ret := newWatcherGroup() + for w := range wg.watchers { + if maxWatchers <= 0 { + break + } + maxWatchers-- + ret.add(w) + } + return &ret, ret.chooseAll(curRev, compactRev) +} + +func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 { + minRev := int64(math.MaxInt64) + for w := range wg.watchers { + if w.minRev > curRev { + // after network partition, possibly choosing future revision watcher from restore operation + // with watch Key "proxy-namespace__lostleader" and revision "math.MaxInt64 - 2" + // do not panic when such watcher had been moved from "synced" watcher during restore operation + if !w.restore { + panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev)) + } + + // mark 'restore' done, since it's chosen + w.restore = false + } + if w.minRev < compactRev { + select { + case w.ch <- WatchResponse{ + WatchID: w.id, + CompactRevision: compactRev, + }: + w.compacted = true + wg.delete(w) + default: + // retry next time + } + continue + } + if minRev > w.minRev { + minRev = w.minRev + } + } + return minRev +} + +// watcherSetByKey gets the set of watchers that receive events on the given Key. 
+func (wg *watcherGroup) watcherSetByKey(key string) watcherSet { + wkeys := wg.keyWatchers[key] + wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key)) + + // zero-copy cases + switch { + case len(wranges) == 0: + // no need to merge ranges or copy; reuse single-Key set + return wkeys + case len(wranges) == 0 && len(wkeys) == 0: + return nil + case len(wranges) == 1 && len(wkeys) == 0: + return wranges[0].Val.(watcherSet) + } + + // copy case + ret := make(watcherSet) + ret.union(wg.keyWatchers[key]) + for _, item := range wranges { + ret.union(item.Val.(watcherSet)) + } + return ret +} diff --git a/server/proxy/grpcproxy/adapter/auth_client_adapter.go b/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go similarity index 98% rename from server/proxy/grpcproxy/adapter/auth_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/auth_client_adapter.go index 140212b9620..912c04a54d4 100644 --- a/server/proxy/grpcproxy/adapter/auth_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" grpc "google.golang.org/grpc" ) diff --git a/server/proxy/grpcproxy/adapter/chan_stream.go b/etcd/proxy/grpcproxy/adapter/chan_stream.go similarity index 98% rename from server/proxy/grpcproxy/adapter/chan_stream.go rename to etcd/proxy/grpcproxy/adapter/chan_stream.go index 1af514b1fdd..f202879c1cf 100644 --- a/server/proxy/grpcproxy/adapter/chan_stream.go +++ b/etcd/proxy/grpcproxy/adapter/chan_stream.go @@ -140,7 +140,7 @@ func (s *chanStream) RecvMsg(m interface{}) error { } func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream { - // ch1 is buffered so server can send error on close + // ch1 is buffered so etcd can send error on close ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 
1) diff --git a/server/proxy/grpcproxy/adapter/cluster_client_adapter.go b/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go similarity index 96% rename from server/proxy/grpcproxy/adapter/cluster_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go index c1fff054de4..54a5040085a 100644 --- a/server/proxy/grpcproxy/adapter/cluster_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "google.golang.org/grpc" ) diff --git a/etcd/proxy/grpcproxy/adapter/doc.go b/etcd/proxy/grpcproxy/adapter/doc.go new file mode 100644 index 00000000000..e6fd2c9ca8c --- /dev/null +++ b/etcd/proxy/grpcproxy/adapter/doc.go @@ -0,0 +1,17 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package adapter provides gRPC adapters between client and etcd +// gRPC interfaces without needing to go through a gRPC connection. 
+package adapter diff --git a/server/proxy/grpcproxy/adapter/election_client_adapter.go b/etcd/proxy/grpcproxy/adapter/election_client_adapter.go similarity index 97% rename from server/proxy/grpcproxy/adapter/election_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/election_client_adapter.go index 81d7434474a..f8a17132512 100644 --- a/server/proxy/grpcproxy/adapter/election_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/election_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb" "google.golang.org/grpc" ) @@ -60,6 +60,7 @@ type es2ecServerStream struct{ chanServerStream } func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error { return s.SendMsg(rr) } + func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { var v interface{} if err := s.RecvMsg(&v); err != nil { @@ -71,6 +72,7 @@ func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error { return s.SendMsg(rr) } + func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) { var v interface{} if err := s.RecvMsg(&v); err != nil { diff --git a/server/proxy/grpcproxy/adapter/kv_client_adapter.go b/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go similarity index 96% rename from server/proxy/grpcproxy/adapter/kv_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/kv_client_adapter.go index ddb6ada4732..f5ff992b7d9 100644 --- a/server/proxy/grpcproxy/adapter/kv_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" grpc "google.golang.org/grpc" ) diff --git a/server/proxy/grpcproxy/adapter/lease_client_adapter.go 
b/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go similarity index 97% rename from server/proxy/grpcproxy/adapter/lease_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/lease_client_adapter.go index 6640d1d39e3..7292966b4ad 100644 --- a/server/proxy/grpcproxy/adapter/lease_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "google.golang.org/grpc" ) @@ -62,6 +62,7 @@ type ls2lcServerStream struct{ chanServerStream } func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error { return s.SendMsg(rr) } + func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) { var v interface{} if err := s.RecvMsg(&v); err != nil { @@ -73,6 +74,7 @@ func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) { func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error { return s.SendMsg(rr) } + func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) { var v interface{} if err := s.RecvMsg(&v); err != nil { diff --git a/server/proxy/grpcproxy/adapter/lock_client_adapter.go b/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go similarity index 94% rename from server/proxy/grpcproxy/adapter/lock_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/lock_client_adapter.go index a3ceaf26dae..8e1187adba0 100644 --- a/server/proxy/grpcproxy/adapter/lock_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb" "google.golang.org/grpc" ) diff --git a/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go similarity index 98% rename from 
server/proxy/grpcproxy/adapter/maintenance_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go index 6369a16d8b4..02c57b76a40 100644 --- a/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "google.golang.org/grpc" ) @@ -72,6 +72,7 @@ type ss2scServerStream struct{ chanServerStream } func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error { return s.SendMsg(rr) } + func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) { var v interface{} if err := s.RecvMsg(&v); err != nil { @@ -83,6 +84,7 @@ func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) { func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error { return s.SendMsg(rr) } + func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) { var v interface{} if err := s.RecvMsg(&v); err != nil { diff --git a/server/proxy/grpcproxy/adapter/watch_client_adapter.go b/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go similarity index 97% rename from server/proxy/grpcproxy/adapter/watch_client_adapter.go rename to etcd/proxy/grpcproxy/adapter/watch_client_adapter.go index 2a93e29e01c..5719942d1e5 100644 --- a/server/proxy/grpcproxy/adapter/watch_client_adapter.go +++ b/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go @@ -18,9 +18,8 @@ import ( "context" "errors" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "google.golang.org/grpc" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" ) var errAlreadySentHeader = errors.New("adapter: already sent header") @@ -47,6 +46,7 @@ type ws2wcServerStream struct{ chanServerStream } func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { return s.SendMsg(wr) } + func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { var v interface{} if err := s.RecvMsg(&v); err 
!= nil { @@ -58,6 +58,7 @@ func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { return s.SendMsg(wr) } + func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { var v interface{} if err := s.RecvMsg(&v); err != nil { diff --git a/etcd/proxy/grpcproxy/auth.go b/etcd/proxy/grpcproxy/auth.go new file mode 100644 index 00000000000..7861b3d2052 --- /dev/null +++ b/etcd/proxy/grpcproxy/auth.go @@ -0,0 +1,116 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package grpcproxy + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +type AuthProxy struct { + client *clientv3.Client +} + +func NewAuthProxy(c *clientv3.Client) pb.AuthServer { + return &AuthProxy{client: c} +} + +func (ap *AuthProxy) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).AuthEnable(ctx, r) +} + +func (ap *AuthProxy) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).AuthDisable(ctx, r) +} + +func (ap *AuthProxy) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).AuthStatus(ctx, r) +} + +func (ap *AuthProxy) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).Authenticate(ctx, r) +} + +func (ap *AuthProxy) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).RoleAdd(ctx, r) +} + +func (ap *AuthProxy) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).RoleDelete(ctx, r) +} + +func (ap *AuthProxy) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).RoleGet(ctx, r) +} + +func (ap *AuthProxy) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).RoleList(ctx, r) +} + +func (ap 
*AuthProxy) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).RoleRevokePermission(ctx, r) +} + +func (ap *AuthProxy) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).RoleGrantPermission(ctx, r) +} + +func (ap *AuthProxy) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).UserAdd(ctx, r) +} + +func (ap *AuthProxy) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).UserDelete(ctx, r) +} + +func (ap *AuthProxy) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).UserGet(ctx, r) +} + +func (ap *AuthProxy) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).UserList(ctx, r) +} + +func (ap *AuthProxy) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).UserGrantRole(ctx, r) +} + +func (ap *AuthProxy) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + conn := ap.client.ActiveConnection() + return pb.NewAuthClient(conn).UserRevokeRole(ctx, r) +} + +func (ap *AuthProxy) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { + conn := ap.client.ActiveConnection() + return 
pb.NewAuthClient(conn).UserChangePassword(ctx, r) +} diff --git a/etcd/proxy/grpcproxy/cache/store.go b/etcd/proxy/grpcproxy/cache/store.go new file mode 100644 index 00000000000..606ad4e3a81 --- /dev/null +++ b/etcd/proxy/grpcproxy/cache/store.go @@ -0,0 +1,171 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cache exports functionality for efficiently caching and mapping +// `RangeRequest`s to corresponding `RangeResponse`s. +package cache + +import ( + "errors" + "sync" + + "github.com/golang/groupcache/lru" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/adt" +) + +var ( + DefaultMaxEntries = 2048 + ErrCompacted = rpctypes.ErrGRPCCompacted +) + +type Cache interface { + Add(req *pb.RangeRequest, resp *pb.RangeResponse) + Get(req *pb.RangeRequest) (*pb.RangeResponse, error) + Compact(revision int64) + Invalidate(key []byte, endkey []byte) + Size() int + Close() +} + +// keyFunc returns the key of a request, which is used to look up its caching response in the cache. 
+func keyFunc(req *pb.RangeRequest) string { + b, err := req.Marshal() + if err != nil { + panic(err) + } + return string(b) +} + +func NewCache(maxCacheEntries int) Cache { + return &cache{ + lru: lru.New(maxCacheEntries), + cachedRanges: adt.NewIntervalTree(), + compactedRev: -1, + } +} + +func (c *cache) Close() {} + +// cache implements Cache +type cache struct { + mu sync.RWMutex + lru *lru.Cache + + // a reverse index for cache invalidation + cachedRanges adt.IntervalTree + + compactedRev int64 +} + +// Add adds the response of a request to the cache if its revision is larger than the compacted revision of the cache. +func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { + key := keyFunc(req) + + c.mu.Lock() + defer c.mu.Unlock() + + if req.Revision > c.compactedRev { + c.lru.Add(key, resp) + } + // we do not need to invalidate a request with a revision specified. + // so we do not need to add it into the reverse index. + if req.Revision != 0 { + return + } + + var ( + iv *adt.IntervalValue + ivl adt.Interval + ) + if len(req.RangeEnd) != 0 { + ivl = adt.NewStringAffineInterval(string(req.Key), string(req.RangeEnd)) + } else { + ivl = adt.NewStringAffinePoint(string(req.Key)) + } + + iv = c.cachedRanges.Find(ivl) + + if iv == nil { + val := map[string]struct{}{key: {}} + c.cachedRanges.Insert(ivl, val) + } else { + val := iv.Val.(map[string]struct{}) + val[key] = struct{}{} + iv.Val = val + } +} + +// Get looks up the caching response for a given request. +// Get is also responsible for lazy eviction when accessing compacted entries. 
+func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) { + key := keyFunc(req) + + c.mu.Lock() + defer c.mu.Unlock() + + if req.Revision > 0 && req.Revision < c.compactedRev { + c.lru.Remove(key) + return nil, ErrCompacted + } + + if resp, ok := c.lru.Get(key); ok { + return resp.(*pb.RangeResponse), nil + } + return nil, errors.New("not exist") +} + +// Invalidate invalidates the cache entries that intersecting with the given range from key to endkey. +func (c *cache) Invalidate(key, endkey []byte) { + c.mu.Lock() + defer c.mu.Unlock() + + var ( + ivs []*adt.IntervalValue + ivl adt.Interval + ) + if len(endkey) == 0 { + ivl = adt.NewStringAffinePoint(string(key)) + } else { + ivl = adt.NewStringAffineInterval(string(key), string(endkey)) + } + + ivs = c.cachedRanges.Stab(ivl) + for _, iv := range ivs { + keys := iv.Val.(map[string]struct{}) + for key := range keys { + c.lru.Remove(key) + } + } + // delete after removing all keys since it is destructive to 'ivs' + c.cachedRanges.Delete(ivl) +} + +// Compact invalidate all caching response before the given rev. +// Replace with the invalidation is lazy. The actual removal happens when the entries is accessed. +func (c *cache) Compact(revision int64) { + c.mu.Lock() + defer c.mu.Unlock() + + if revision > c.compactedRev { + c.compactedRev = revision + } +} + +func (c *cache) Size() int { + c.mu.RLock() + defer c.mu.RUnlock() + return c.lru.Len() +} diff --git a/etcd/proxy/grpcproxy/cluster.go b/etcd/proxy/grpcproxy/cluster.go new file mode 100644 index 00000000000..0baca27a4f0 --- /dev/null +++ b/etcd/proxy/grpcproxy/cluster.go @@ -0,0 +1,214 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "context" + "errors" + "fmt" + "os" + "sync" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "golang.org/x/time/rate" + + "go.uber.org/zap" +) + +// allow maximum 1 retry per second +const resolveRetryRate = 1 + +type clusterProxy struct { + lg *zap.Logger + clus clientv3.Cluster + ctx context.Context + + // advertise client URL + advaddr string + prefix string + + em endpoints.Manager + + umu sync.RWMutex + umap map[string]endpoints.Endpoint +} + +// NewClusterProxy takes optional prefix to fetch grpc-proxy member endpoints. +// The returned channel is closed when there is grpc-proxy endpoint registered +// and the client's context is canceled so the 'register' loop returns. 
+// TODO: Expand the API to report creation errors +func NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{}) { + if lg == nil { + lg = zap.NewNop() + } + + var em endpoints.Manager + if advaddr != "" && prefix != "" { + var err error + if em, err = endpoints.NewManager(c, prefix); err != nil { + lg.Error("failed to provision endpointsManager", zap.String("prefix", prefix), zap.Error(err)) + return nil, nil + } + } + + cp := &clusterProxy{ + lg: lg, + clus: c.Cluster, + ctx: c.Ctx(), + + advaddr: advaddr, + prefix: prefix, + umap: make(map[string]endpoints.Endpoint), + em: em, + } + + donec := make(chan struct{}) + if em != nil { + go func() { + defer close(donec) + cp.establishEndpointWatch(prefix) + }() + return cp, donec + } + + close(donec) + return cp, donec +} + +func (cp *clusterProxy) establishEndpointWatch(prefix string) { + rm := rate.NewLimiter(rate.Limit(resolveRetryRate), resolveRetryRate) + for rm.Wait(cp.ctx) == nil { + wc, err := cp.em.NewWatchChannel(cp.ctx) + if err != nil { + cp.lg.Warn("failed to establish endpoint watch", zap.String("prefix", prefix), zap.Error(err)) + continue + } + cp.monitor(wc) + } +} + +func (cp *clusterProxy) monitor(wa endpoints.WatchChannel) { + for { + select { + case <-cp.ctx.Done(): + cp.lg.Info("watching endpoints interrupted", zap.Error(cp.ctx.Err())) + return + case updates := <-wa: + cp.umu.Lock() + for _, up := range updates { + switch up.Op { + case endpoints.Add: + cp.umap[up.Endpoint.Addr] = up.Endpoint + case endpoints.Delete: + delete(cp.umap, up.Endpoint.Addr) + } + } + cp.umu.Unlock() + } + } +} + +func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { + if r.IsLearner { + return cp.memberAddAsLearner(ctx, r.PeerURLs) + } + return cp.memberAdd(ctx, r.PeerURLs) +} + +func (cp *clusterProxy) memberAdd(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) { + mresp, err 
:= cp.clus.MemberAdd(ctx, peerURLs) + if err != nil { + return nil, err + } + resp := (pb.MemberAddResponse)(*mresp) + return &resp, err +} + +func (cp *clusterProxy) memberAddAsLearner(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) { + mresp, err := cp.clus.MemberAddAsLearner(ctx, peerURLs) + if err != nil { + return nil, err + } + resp := (pb.MemberAddResponse)(*mresp) + return &resp, err +} + +func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { + mresp, err := cp.clus.MemberRemove(ctx, r.ID) + if err != nil { + return nil, err + } + resp := (pb.MemberRemoveResponse)(*mresp) + return &resp, err +} + +func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { + mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs) + if err != nil { + return nil, err + } + resp := (pb.MemberUpdateResponse)(*mresp) + return &resp, err +} + +func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) { + cp.umu.RLock() + defer cp.umu.RUnlock() + mbs := make([]*pb.Member, 0, len(cp.umap)) + for addr, upt := range cp.umap { + m, err := decodeMeta(fmt.Sprint(upt.Metadata)) + if err != nil { + return nil, err + } + mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}}) + } + return mbs, nil +} + +// MemberList wraps member list API with following rules: +// - If 'advaddr' is not empty and 'prefix' is not empty, return registered member lists via resolver +// - If 'advaddr' is not empty and 'prefix' is not empty and registered grpc-proxy members haven't been fetched, return the 'advaddr' +// - If 'advaddr' is not empty and 'prefix' is empty, return 'advaddr' without forcing it to 'register' +// - If 'advaddr' is empty, forward to member list API +func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { + if cp.advaddr != "" { + if cp.prefix != "" { + 
mbs, err := cp.membersFromUpdates() + if err != nil { + return nil, err + } + if len(mbs) > 0 { + return &pb.MemberListResponse{Members: mbs}, nil + } + } + // prefix is empty or no grpc-proxy members haven't been registered + hostname, _ := os.Hostname() + return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil + } + mresp, err := cp.clus.MemberList(ctx) + if err != nil { + return nil, err + } + resp := (pb.MemberListResponse)(*mresp) + return &resp, err +} + +func (cp *clusterProxy) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) { + // TODO: implement + return nil, errors.New("not implemented") +} diff --git a/server/proxy/grpcproxy/doc.go b/etcd/proxy/grpcproxy/doc.go similarity index 100% rename from server/proxy/grpcproxy/doc.go rename to etcd/proxy/grpcproxy/doc.go diff --git a/etcd/proxy/grpcproxy/election.go b/etcd/proxy/grpcproxy/election.go new file mode 100644 index 00000000000..5a35019fa18 --- /dev/null +++ b/etcd/proxy/grpcproxy/election.go @@ -0,0 +1,66 @@ +// Copyright 2017 The etcd Lockors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package grpcproxy + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb" +) + +type electionProxy struct { + client *clientv3.Client +} + +func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer { + return &electionProxy{client: client} +} + +func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req) +} + +func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req) +} + +func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req) +} + +func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error { + conn := ep.client.ActiveConnection() + ctx, cancel := context.WithCancel(s.Context()) + defer cancel() + sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req) + if err != nil { + return err + } + for { + rr, err := sc.Recv() + if err != nil { + return err + } + if err = s.Send(rr); err != nil { + return err + } + } +} + +func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req) +} diff --git a/etcd/proxy/grpcproxy/health.go b/etcd/proxy/grpcproxy/health.go new file mode 100644 index 00000000000..b1e6081c754 --- /dev/null +++ b/etcd/proxy/grpcproxy/health.go @@ -0,0 +1,78 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + "go.uber.org/zap" +) + +// HandleHealth registers health handler on '/health'. +func HandleHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) { + if lg == nil { + lg = zap.NewNop() + } + mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkHealth(c) })) +} + +// HandleProxyHealth registers health handler on '/proxy/health'. 
+func HandleProxyHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) { + if lg == nil { + lg = zap.NewNop() + } + mux.Handle(etcdhttp.PathProxyHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkProxyHealth(c) })) +} + +func checkHealth(c *clientv3.Client) etcdhttp.Health { + h := etcdhttp.Health{Health: "false"} + ctx, cancel := context.WithTimeout(c.Ctx(), time.Second) + _, err := c.Get(ctx, "a") + cancel() + if err == nil || err == rpctypes.ErrPermissionDenied { + h.Health = "true" + } else { + h.Reason = fmt.Sprintf("GET ERROR:%s", err) + } + return h +} + +func checkProxyHealth(c *clientv3.Client) etcdhttp.Health { + if c == nil { + return etcdhttp.Health{Health: "false", Reason: "no connection to proxy"} + } + h := checkHealth(c) + if h.Health != "true" { + return h + } + ctx, cancel := context.WithTimeout(c.Ctx(), time.Second*3) + ch := c.Watch(ctx, "a", clientv3.WithCreatedNotify()) + select { + case <-ch: + case <-ctx.Done(): + h.Health = "false" + h.Reason = "WATCH TIMEOUT" + } + cancel() + return h +} diff --git a/etcd/proxy/grpcproxy/kv.go b/etcd/proxy/grpcproxy/kv.go new file mode 100644 index 00000000000..3a9cc94138b --- /dev/null +++ b/etcd/proxy/grpcproxy/kv.go @@ -0,0 +1,234 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package grpcproxy + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy/cache" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +type kvProxy struct { + kv clientv3.KV + cache cache.Cache +} + +func NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) { + kv := &kvProxy{ + kv: c.KV, + cache: cache.NewCache(cache.DefaultMaxEntries), + } + donec := make(chan struct{}) + close(donec) + return kv, donec +} + +func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + if r.Serializable { + resp, err := p.cache.Get(r) + switch err { + case nil: + return resp, nil + case cache.ErrCompacted: + return nil, err + } + + } + + resp, err := p.kv.Do(ctx, RangeRequestToOp(r)) + if err != nil { + return nil, err + } + + // cache linearizable as serializable + req := *r + req.Serializable = true + gresp := (*pb.RangeResponse)(resp.Get()) + p.cache.Add(&req, gresp) + + return gresp, nil +} + +func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + p.cache.Invalidate([]byte(r.Key), nil) + + resp, err := p.kv.Do(ctx, PutRequestToOp(r)) + return (*pb.PutResponse)(resp.Put()), err +} + +func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + p.cache.Invalidate([]byte(r.Key), []byte(r.RangeEnd)) + + resp, err := p.kv.Do(ctx, DelRequestToOp(r)) + return (*pb.DeleteRangeResponse)(resp.Del()), err +} + +func (p *kvProxy) txnToCache(reqs []*pb.RequestOp, resps []*pb.ResponseOp) { + for i := range resps { + if resps[i].ResponseOp_ResponsePut != nil { + p.cache.Invalidate([]byte(reqs[i].GetRequestPut().Key), nil) + } + + if resps[i].ResponseOp_ResponseDeleteRange != nil { + rdr := reqs[i].GetRequestDeleteRange() + p.cache.Invalidate([]byte(rdr.Key), []byte(rdr.RangeEnd)) + } + if resps[i].ResponseOp_ResponseRange != nil { + tv := resps[i].ResponseOp_ResponseRange + 
req := *(reqs[i].GetRequestRange()) + req.Serializable = true + p.cache.Add(&req, tv.ResponseRange) + } + + } +} + +func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { + op := TxnRequestToOp(r) + opResp, err := p.kv.Do(ctx, op) + if err != nil { + return nil, err + } + resp := opResp.Txn() + + // txn may claim an outdated key is updated; be safe and invalidate + for _, cmp := range r.Compare { + p.cache.Invalidate([]byte(cmp.Key), []byte(cmp.RangeEnd)) + } + // update any fetched keys + if resp.Succeeded { + p.txnToCache(r.Success, resp.Responses) + } else { + p.txnToCache(r.Failure, resp.Responses) + } + + return (*pb.TxnResponse)(resp), nil +} + +func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { + var opts []clientv3.CompactOption + if r.Physical { + opts = append(opts, clientv3.WithCompactPhysical()) + } + + resp, err := p.kv.Compact(ctx, r.Revision, opts...) + if err == nil { + p.cache.Compact(r.Revision) + } + + return (*pb.CompactionResponse)(resp), err +} + +func requestOpToOp(union *pb.RequestOp) clientv3.Op { + if union.RequestOp_RequestRange != nil { + tv := union.RequestOp_RequestRange + if tv.RequestRange != nil { + return RangeRequestToOp(tv.RequestRange) + } + } + if union.RequestOp_RequestPut != nil { + tv := union.RequestOp_RequestPut + if tv.RequestPut != nil { + return PutRequestToOp(tv.RequestPut) + } + } + if union.RequestOp_RequestDeleteRange != nil { + tv := union.RequestOp_RequestDeleteRange + if tv.RequestDeleteRange != nil { + return DelRequestToOp(tv.RequestDeleteRange) + } + } + if union.RequestOp_RequestTxn != nil { + tv := union.RequestOp_RequestTxn + if tv.RequestTxn != nil { + return TxnRequestToOp(tv.RequestTxn) + } + } + + panic("unknown request") +} + +func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { + var opts []clientv3.OpOption + if len(r.RangeEnd) != 0 { + opts = append(opts, clientv3.WithRange(string(r.RangeEnd))) + } + opts = 
append(opts, clientv3.WithRev(r.Revision)) + opts = append(opts, clientv3.WithLimit(r.Limit)) + opts = append(opts, clientv3.WithSort( + clientv3.SortTarget(r.SortTarget), + clientv3.SortOrder(r.SortOrder)), + ) + opts = append(opts, clientv3.WithMaxCreateRev(r.MaxCreateRevision)) + opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision)) + opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision)) + opts = append(opts, clientv3.WithMinModRev(r.MinModRevision)) + if r.CountOnly { + opts = append(opts, clientv3.WithCountOnly()) + } + if r.KeysOnly { + opts = append(opts, clientv3.WithKeysOnly()) + } + if r.Serializable { + opts = append(opts, clientv3.WithSerializable()) + } + + return clientv3.OpGet(string(r.Key), opts...) +} + +func PutRequestToOp(r *pb.PutRequest) clientv3.Op { + opts := []clientv3.OpOption{} + opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease))) + if r.IgnoreValue { + opts = append(opts, clientv3.WithIgnoreValue()) + } + if r.IgnoreLease { + opts = append(opts, clientv3.WithIgnoreLease()) + } + if r.PrevKv { + opts = append(opts, clientv3.WithPrevKV()) + } + return clientv3.OpPut(string(r.Key), string(r.Value), opts...) +} + +func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op { + opts := []clientv3.OpOption{} + if len(r.RangeEnd) != 0 { + opts = append(opts, clientv3.WithRange(string(r.RangeEnd))) + } + if r.PrevKv { + opts = append(opts, clientv3.WithPrevKV()) + } + return clientv3.OpDelete(string(r.Key), opts...) 
+} + +func TxnRequestToOp(r *pb.TxnRequest) clientv3.Op { + cmps := make([]clientv3.Cmp, len(r.Compare)) + thenops := make([]clientv3.Op, len(r.Success)) + elseops := make([]clientv3.Op, len(r.Failure)) + for i := range r.Compare { + cmps[i] = (clientv3.Cmp)(*r.Compare[i]) + } + for i := range r.Success { + thenops[i] = requestOpToOp(r.Success[i]) + } + for i := range r.Failure { + elseops[i] = requestOpToOp(r.Failure[i]) + } + return clientv3.OpTxn(cmps, thenops, elseops) +} diff --git a/server/proxy/grpcproxy/leader.go b/etcd/proxy/grpcproxy/leader.go similarity index 97% rename from server/proxy/grpcproxy/leader.go rename to etcd/proxy/grpcproxy/leader.go index 158e3ee8814..75c270f5dae 100644 --- a/server/proxy/grpcproxy/leader.go +++ b/etcd/proxy/grpcproxy/leader.go @@ -19,7 +19,7 @@ import ( "math" "sync" - clientv3 "go.etcd.io/etcd/client/v3" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" "golang.org/x/time/rate" ) diff --git a/etcd/proxy/grpcproxy/lease.go b/etcd/proxy/grpcproxy/lease.go new file mode 100644 index 00000000000..19f809e296e --- /dev/null +++ b/etcd/proxy/grpcproxy/lease.go @@ -0,0 +1,384 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package grpcproxy + +import ( + "context" + "io" + "sync" + "sync/atomic" + "time" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type leaseProxy struct { + // leaseClient handles req from LeaseGrant() that requires a lease ID. + leaseClient pb.LeaseClient + + lessor clientv3.Lease + + ctx context.Context + + leader *leader + + // mu protects adding outstanding leaseProxyStream through wg. + mu sync.RWMutex + + // wg waits until all outstanding leaseProxyStream quit. + wg sync.WaitGroup +} + +func NewLeaseProxy(ctx context.Context, c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) { + cctx, cancel := context.WithCancel(ctx) + lp := &leaseProxy{ + leaseClient: pb.NewLeaseClient(c.ActiveConnection()), + lessor: c.Lease, + ctx: cctx, + leader: newLeader(cctx, c.Watcher), + } + ch := make(chan struct{}) + go func() { + defer close(ch) + <-lp.leader.stopNotify() + lp.mu.Lock() + select { + case <-lp.ctx.Done(): + case <-lp.leader.disconnectNotify(): + cancel() + } + <-lp.ctx.Done() + lp.mu.Unlock() + lp.wg.Wait() + }() + return lp, ch +} + +func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + lp.leader.gotLeader() + return rp, nil +} + +func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID)) + if err != nil { + return nil, err + } + lp.leader.gotLeader() + return (*pb.LeaseRevokeResponse)(r), nil +} + +func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) 
(*pb.LeaseTimeToLiveResponse, error) { + var ( + r *clientv3.LeaseTimeToLiveResponse + err error + ) + if rr.Keys { + r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys()) + } else { + r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID)) + } + if err != nil { + return nil, err + } + rp := &pb.LeaseTimeToLiveResponse{ + Header: r.ResponseHeader, + ID: int64(r.ID), + TTL: r.TTL, + GrantedTTL: r.GrantedTTL, + Keys: r.Keys, + } + return rp, err +} + +func (lp *leaseProxy) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { + r, err := lp.lessor.Leases(ctx) + if err != nil { + return nil, err + } + leases := make([]*pb.LeaseStatus, len(r.Leases)) + for i := range r.Leases { + leases[i] = &pb.LeaseStatus{ID: int64(r.Leases[i].ID)} + } + rp := &pb.LeaseLeasesResponse{ + Header: r.ResponseHeader, + Leases: leases, + } + return rp, err +} + +func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { + lp.mu.Lock() + select { + case <-lp.ctx.Done(): + lp.mu.Unlock() + return lp.ctx.Err() + default: + lp.wg.Add(1) + } + lp.mu.Unlock() + + ctx, cancel := context.WithCancel(stream.Context()) + lps := leaseProxyStream{ + stream: stream, + lessor: lp.lessor, + keepAliveLeases: make(map[int64]*atomicCounter), + respc: make(chan *pb.LeaseKeepAliveResponse), + ctx: ctx, + cancel: cancel, + } + + errc := make(chan error, 2) + + var lostLeaderC <-chan struct{} + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { + v := md[rpctypes.MetadataRequireLeaderKey] + if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { + lostLeaderC = lp.leader.lostNotify() + // if leader is known to be lost at creation time, avoid + // letting events through at all + select { + case <-lostLeaderC: + lp.wg.Done() + return rpctypes.ErrNoLeader + default: + } + } + } + stopc := make(chan struct{}, 3) + go func() { + defer func() { stopc <- struct{}{} }() + if err := lps.recvLoop(); err != 
nil { + errc <- err + } + }() + + go func() { + defer func() { stopc <- struct{}{} }() + if err := lps.sendLoop(); err != nil { + errc <- err + } + }() + + // tears down LeaseKeepAlive stream if leader goes down or entire leaseProxy is terminated. + go func() { + defer func() { stopc <- struct{}{} }() + select { + case <-lostLeaderC: + case <-ctx.Done(): + case <-lp.ctx.Done(): + } + }() + + var err error + select { + case <-stopc: + stopc <- struct{}{} + case err = <-errc: + } + cancel() + + // recv/send may only shutdown after function exits; + // this goroutine notifies lease proxy that the stream is through + go func() { + <-stopc + <-stopc + <-stopc + lps.close() + close(errc) + lp.wg.Done() + }() + + select { + case <-lostLeaderC: + return rpctypes.ErrNoLeader + case <-lp.leader.disconnectNotify(): + return status.Error(codes.Canceled, "the client connection is closing") + default: + if err != nil { + return err + } + return ctx.Err() + } +} + +type leaseProxyStream struct { + stream pb.Lease_LeaseKeepAliveServer + + lessor clientv3.Lease + // wg tracks keepAliveLoop goroutines + wg sync.WaitGroup + // mu protects keepAliveLeases + mu sync.RWMutex + // keepAliveLeases tracks how many outstanding keepalive requests which need responses are on a lease. 
+ keepAliveLeases map[int64]*atomicCounter + // respc receives lease keepalive responses from etcd backend + respc chan *pb.LeaseKeepAliveResponse + + ctx context.Context + cancel context.CancelFunc +} + +func (lps *leaseProxyStream) recvLoop() error { + for { + rr, err := lps.stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + lps.mu.Lock() + neededResps, ok := lps.keepAliveLeases[rr.ID] + if !ok { + neededResps = &atomicCounter{} + lps.keepAliveLeases[rr.ID] = neededResps + lps.wg.Add(1) + go func() { + defer lps.wg.Done() + if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil { + lps.cancel() + } + }() + } + neededResps.add(1) + lps.mu.Unlock() + } +} + +func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error { + cctx, ccancel := context.WithCancel(lps.ctx) + defer ccancel() + respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID)) + if err != nil { + return err + } + // ticker expires when loop hasn't received keepalive within TTL + var ticker <-chan time.Time + for { + select { + case <-ticker: + lps.mu.Lock() + // if there are outstanding keepAlive reqs at the moment of ticker firing, + // don't close keepAliveLoop(), let it continuing to process the KeepAlive reqs. 
+ if neededResps.get() > 0 { + lps.mu.Unlock() + ticker = nil + continue + } + delete(lps.keepAliveLeases, leaseID) + lps.mu.Unlock() + return nil + case rp, ok := <-respc: + if !ok { + lps.mu.Lock() + delete(lps.keepAliveLeases, leaseID) + lps.mu.Unlock() + if neededResps.get() == 0 { + return nil + } + ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID)) + if err != nil { + return err + } + r := &pb.LeaseKeepAliveResponse{ + Header: ttlResp.ResponseHeader, + ID: int64(ttlResp.ID), + TTL: ttlResp.TTL, + } + for neededResps.get() > 0 { + select { + case lps.respc <- r: + neededResps.add(-1) + case <-lps.ctx.Done(): + return nil + } + } + return nil + } + if neededResps.get() == 0 { + continue + } + ticker = time.After(time.Duration(rp.TTL) * time.Second) + r := &pb.LeaseKeepAliveResponse{ + Header: rp.ResponseHeader, + ID: int64(rp.ID), + TTL: rp.TTL, + } + lps.replyToClient(r, neededResps) + } + } +} + +func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) { + timer := time.After(500 * time.Millisecond) + for neededResps.get() > 0 { + select { + case lps.respc <- r: + neededResps.add(-1) + case <-timer: + return + case <-lps.ctx.Done(): + return + } + } +} + +func (lps *leaseProxyStream) sendLoop() error { + for { + select { + case lrp, ok := <-lps.respc: + if !ok { + return nil + } + if err := lps.stream.Send(lrp); err != nil { + return err + } + case <-lps.ctx.Done(): + return lps.ctx.Err() + } + } +} + +func (lps *leaseProxyStream) close() { + lps.cancel() + lps.wg.Wait() + // only close respc channel if all the keepAliveLoop() goroutines have finished + // this ensures those goroutines don't send resp to a closed resp channel + close(lps.respc) +} + +type atomicCounter struct { + counter int64 +} + +func (ac *atomicCounter) add(delta int64) { + atomic.AddInt64(&ac.counter, delta) +} + +func (ac *atomicCounter) get() int64 { + return atomic.LoadInt64(&ac.counter) +} diff --git 
a/etcd/proxy/grpcproxy/lock.go b/etcd/proxy/grpcproxy/lock.go new file mode 100644 index 00000000000..35c83d640e9 --- /dev/null +++ b/etcd/proxy/grpcproxy/lock.go @@ -0,0 +1,38 @@ +// Copyright 2017 The etcd Lockors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "context" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockProxy struct { + client *clientv3.Client +} + +func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer { + return &lockProxy{client: client} +} + +func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req) +} + +func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req) +} diff --git a/etcd/proxy/grpcproxy/maintenance.go b/etcd/proxy/grpcproxy/maintenance.go new file mode 100644 index 00000000000..bfe69cc3db9 --- /dev/null +++ b/etcd/proxy/grpcproxy/maintenance.go @@ -0,0 +1,96 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "context" + "io" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" +) + +type maintenanceProxy struct { + client *clientv3.Client +} + +func NewMaintenanceProxy(c *clientv3.Client) pb.MaintenanceServer { + return &maintenanceProxy{ + client: c, + } +} + +func (mp *maintenanceProxy) Defragment(ctx context.Context, dr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).Defragment(ctx, dr) +} + +func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenance_SnapshotServer) error { + conn := mp.client.ActiveConnection() + ctx, cancel := context.WithCancel(stream.Context()) + defer cancel() + + ctx = withClientAuthToken(ctx, stream.Context()) + + sc, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, sr) + if err != nil { + return err + } + + for { + rr, err := sc.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + err = stream.Send(rr) + if err != nil { + return err + } + } +} + +func (mp *maintenanceProxy) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).Hash(ctx, r) +} + +func (mp *maintenanceProxy) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).HashKV(ctx, r) +} + +func (mp *maintenanceProxy) Alarm(ctx 
context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).Alarm(ctx, r) +} + +func (mp *maintenanceProxy) Status(ctx context.Context, r *pb.StatusRequest) (*pb.StatusResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).Status(ctx, r) +} + +func (mp *maintenanceProxy) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).MoveLeader(ctx, r) +} + +func (mp *maintenanceProxy) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).Downgrade(ctx, r) +} diff --git a/server/proxy/grpcproxy/register.go b/etcd/proxy/grpcproxy/register.go similarity index 88% rename from server/proxy/grpcproxy/register.go rename to etcd/proxy/grpcproxy/register.go index 4fafb481022..505a73a8ea9 100644 --- a/server/proxy/grpcproxy/register.go +++ b/etcd/proxy/grpcproxy/register.go @@ -18,9 +18,10 @@ import ( "encoding/json" "os" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/client/v3/naming/endpoints" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints" "go.uber.org/zap" "golang.org/x/time/rate" @@ -29,7 +30,7 @@ import ( // allow maximum 1 retry per second const registerRetryRate = 1 -// Register registers itself as a grpc-proxy server by writing prefixed-key +// Register registers itself as a grpc-proxy etcd by writing prefixed-key // with session of specified TTL (in seconds). The returned channel is closed // when the client's context is canceled. 
func Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{} { @@ -51,7 +52,7 @@ func Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, tt return case <-ss.Done(): - lg.Warn("session expired; possible network partition or server restart") + lg.Warn("session expired; possible network partition or etcd restart") lg.Warn("creating a new session to rejoin") continue } diff --git a/etcd/proxy/grpcproxy/util.go b/etcd/proxy/grpcproxy/util.go new file mode 100644 index 00000000000..f3cef2a5d38 --- /dev/null +++ b/etcd/proxy/grpcproxy/util.go @@ -0,0 +1,75 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package grpcproxy + +import ( + "context" + + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func getAuthTokenFromClient(ctx context.Context) string { + md, ok := metadata.FromIncomingContext(ctx) + if ok { + ts, ok := md[rpctypes.TokenFieldNameGRPC] + if ok { + return ts[0] + } + } + return "" +} + +func withClientAuthToken(ctx, ctxWithToken context.Context) context.Context { + token := getAuthTokenFromClient(ctxWithToken) + if token != "" { + ctx = context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token) + } + return ctx +} + +type proxyTokenCredential struct { + token string +} + +func (cred *proxyTokenCredential) RequireTransportSecurity() bool { + return false +} + +func (cred *proxyTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { + return map[string]string{ + rpctypes.TokenFieldNameGRPC: cred.token, + }, nil +} + +func AuthUnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + token := getAuthTokenFromClient(ctx) + if token != "" { + tokenCred := &proxyTokenCredential{token} + opts = append(opts, grpc.PerRPCCredentials(tokenCred)) + } + return invoker(ctx, method, req, reply, cc, opts...) +} + +func AuthStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + tokenif := ctx.Value(rpctypes.TokenFieldNameGRPC) + if tokenif != nil { + tokenCred := &proxyTokenCredential{tokenif.(string)} + opts = append(opts, grpc.PerRPCCredentials(tokenCred)) + } + return streamer(ctx, desc, cc, method, opts...) 
+} diff --git a/etcd/proxy/grpcproxy/watch.go b/etcd/proxy/grpcproxy/watch.go new file mode 100644 index 00000000000..45a1ec74fab --- /dev/null +++ b/etcd/proxy/grpcproxy/watch.go @@ -0,0 +1,315 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "context" + "sync" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + + "go.uber.org/zap" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type watchProxy struct { + cw clientv3.Watcher + ctx context.Context + + leader *leader + + ranges *watchRanges + + // mu protects adding outstanding watch servers through wg. + mu sync.Mutex + + // wg waits until all outstanding watch servers quit. 
+ wg sync.WaitGroup + + // kv is used for permission checking + kv clientv3.KV + lg *zap.Logger +} + +func NewWatchProxy(ctx context.Context, lg *zap.Logger, c *clientv3.Client) (pb.WatchServer, <-chan struct{}) { + cctx, cancel := context.WithCancel(ctx) + wp := &watchProxy{ + cw: c.Watcher, + ctx: cctx, + leader: newLeader(cctx, c.Watcher), + + kv: c.KV, // for permission checking + lg: lg, + } + wp.ranges = newWatchRanges(wp) + ch := make(chan struct{}) + go func() { + defer close(ch) + <-wp.leader.stopNotify() + wp.mu.Lock() + select { + case <-wp.ctx.Done(): + case <-wp.leader.disconnectNotify(): + cancel() + } + <-wp.ctx.Done() + wp.mu.Unlock() + wp.wg.Wait() + wp.ranges.stop() + }() + return wp, ch +} + +func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { + wp.mu.Lock() + select { + case <-wp.ctx.Done(): + wp.mu.Unlock() + select { + case <-wp.leader.disconnectNotify(): + return status.Error(codes.Canceled, "the client connection is closing") + default: + return wp.ctx.Err() + } + default: + wp.wg.Add(1) + } + wp.mu.Unlock() + + ctx, cancel := context.WithCancel(stream.Context()) + wps := &watchProxyStream{ + ranges: wp.ranges, + watchers: make(map[int64]*watcher), + stream: stream, + watchCh: make(chan *pb.WatchResponse, 1024), + ctx: ctx, + cancel: cancel, + kv: wp.kv, + lg: wp.lg, + } + + var lostLeaderC <-chan struct{} + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { + v := md[rpctypes.MetadataRequireLeaderKey] + if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { + lostLeaderC = wp.leader.lostNotify() + // if leader is known to be lost at creation time, avoid + // letting events through at all + select { + case <-lostLeaderC: + wp.wg.Done() + return rpctypes.ErrNoLeader + default: + } + } + } + + // post to stopc => terminate etcd stream; can't use a waitgroup + // since all goroutines will only terminate after Watch() exits. 
+ stopc := make(chan struct{}, 3) + go func() { + defer func() { stopc <- struct{}{} }() + wps.recvLoop() + }() + go func() { + defer func() { stopc <- struct{}{} }() + wps.sendLoop() + }() + // tear down watch if leader goes down or entire watch proxy is terminated + go func() { + defer func() { stopc <- struct{}{} }() + select { + case <-lostLeaderC: + case <-ctx.Done(): + case <-wp.ctx.Done(): + } + }() + + <-stopc + cancel() + + // recv/send may only shutdown after function exits; + // goroutine notifies proxy that stream is through + go func() { + <-stopc + <-stopc + wps.close() + wp.wg.Done() + }() + + select { + case <-lostLeaderC: + return rpctypes.ErrNoLeader + case <-wp.leader.disconnectNotify(): + return status.Error(codes.Canceled, "the client connection is closing") + default: + return wps.ctx.Err() + } +} + +// watchProxyStream forwards etcd watch events to a proxied client stream. +type watchProxyStream struct { + ranges *watchRanges + + // mu protects watchers and nextWatcherID + mu sync.Mutex + // watchers receive events from watch broadcast. + watchers map[int64]*watcher + // nextWatcherID is the id to assign the next watcher on this stream. + nextWatcherID int64 + + stream pb.Watch_WatchServer + + // watchCh receives watch responses from the watchers. + watchCh chan *pb.WatchResponse + + ctx context.Context + cancel context.CancelFunc + + // kv is used for permission checking + kv clientv3.KV + lg *zap.Logger +} + +func (wps *watchProxyStream) close() { + var wg sync.WaitGroup + wps.cancel() + wps.mu.Lock() + wg.Add(len(wps.watchers)) + for _, wpsw := range wps.watchers { + go func(w *watcher) { + wps.ranges.delete(w) + wg.Done() + }(wpsw) + } + wps.watchers = nil + wps.mu.Unlock() + + wg.Wait() + + close(wps.watchCh) +} + +func (wps *watchProxyStream) checkPermissionForWatch(key, rangeEnd []byte) error { + if len(key) == 0 { + // If the length of the key is 0, we need to obtain full range. 
+ // look at clientv3.WithPrefix() + key = []byte{0} + rangeEnd = []byte{0} + } + req := &pb.RangeRequest{ + Serializable: true, + Key: string(key), + RangeEnd: string(rangeEnd), + CountOnly: true, + Limit: 1, + } + _, err := wps.kv.Do(wps.ctx, RangeRequestToOp(req)) + return err +} + +func (wps *watchProxyStream) recvLoop() error { + for { + req, err := wps.stream.Recv() + if err != nil { + return err + } + if req.WatchRequest_CreateRequest != nil { + uv := req.WatchRequest_CreateRequest + cr := uv.CreateRequest + + if err := wps.checkPermissionForWatch([]byte(cr.Key), []byte(cr.RangeEnd)); err != nil { + wps.watchCh <- &pb.WatchResponse{ + Header: &pb.ResponseHeader{}, + WatchId: -1, + Created: true, + Canceled: true, + CancelReason: err.Error(), + } + continue + } + + wps.mu.Lock() + w := &watcher{ + wr: watchRange{string(cr.Key), string(cr.RangeEnd)}, + id: wps.nextWatcherID, + wps: wps, + + nextrev: cr.StartRevision, + progress: cr.ProgressNotify, + prevKV: cr.PrevKv, + filters: v3rpc.FiltersFromRequest(cr), + } + if !w.wr.valid() { + w.post(&pb.WatchResponse{WatchId: -1, Created: true, Canceled: true}) + wps.mu.Unlock() + continue + } + wps.nextWatcherID++ + w.nextrev = cr.StartRevision + wps.watchers[w.id] = w + wps.ranges.add(w) + wps.mu.Unlock() + wps.lg.Debug("create watcher", zap.String("key", w.wr.key), zap.String("end", w.wr.end), zap.Int64("watcherId", wps.nextWatcherID)) + } else if req.WatchRequest_CancelRequest != nil { + uv := req.WatchRequest_CancelRequest + wps.delete(uv.CancelRequest.WatchId) + wps.lg.Debug("cancel watcher", zap.Int64("watcherId", uv.CancelRequest.WatchId)) + } else { + // Panic or Fatalf would allow to network clients to crash the serve remotely. 
+ wps.lg.Error("not supported request type by gRPC proxy", zap.Stringer("request", req)) + } + } +} + +func (wps *watchProxyStream) sendLoop() { + for { + select { + case wresp, ok := <-wps.watchCh: + if !ok { + return + } + if err := wps.stream.Send(wresp); err != nil { + return + } + case <-wps.ctx.Done(): + return + } + } +} + +func (wps *watchProxyStream) delete(id int64) { + wps.mu.Lock() + defer wps.mu.Unlock() + + w, ok := wps.watchers[id] + if !ok { + return + } + wps.ranges.delete(w) + delete(wps.watchers, id) + resp := &pb.WatchResponse{ + Header: &w.lastHeader, + WatchId: id, + Canceled: true, + } + wps.watchCh <- resp +} diff --git a/server/proxy/grpcproxy/watch_broadcast.go b/etcd/proxy/grpcproxy/watch_broadcast.go similarity index 91% rename from server/proxy/grpcproxy/watch_broadcast.go rename to etcd/proxy/grpcproxy/watch_broadcast.go index 1d9a43df143..acd277c01bd 100644 --- a/server/proxy/grpcproxy/watch_broadcast.go +++ b/etcd/proxy/grpcproxy/watch_broadcast.go @@ -19,15 +19,15 @@ import ( "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" "go.uber.org/zap" ) -// watchBroadcast broadcasts a server watcher to many client watchers. +// watchBroadcast broadcasts a etcd watcher to many client watchers. type watchBroadcast struct { - // cancel stops the underlying etcd server watcher and closes ch. + // cancel stops the underlying etcd etcd watcher and closes ch. 
cancel context.CancelFunc donec chan struct{} @@ -88,7 +88,6 @@ func (wb *watchBroadcast) bcast(wr clientv3.WatchResponse) { r.send(wr) } if len(wb.receivers) > 0 { - eventsCoalescing.Add(float64(len(wb.receivers) - 1)) } } @@ -122,10 +121,10 @@ func (wb *watchBroadcast) add(w *watcher) bool { return false } wb.receivers[w] = struct{}{} - watchersCoalescing.Inc() return true } + func (wb *watchBroadcast) delete(w *watcher) { wb.mu.Lock() defer wb.mu.Unlock() @@ -135,7 +134,6 @@ func (wb *watchBroadcast) delete(w *watcher) { delete(wb.receivers, w) if len(wb.receivers) > 0 { // do not dec the only left watcher for coalescing. - watchersCoalescing.Dec() } } @@ -150,7 +148,6 @@ func (wb *watchBroadcast) empty() bool { return wb.size() == 0 } func (wb *watchBroadcast) stop() { if !wb.empty() { // do not dec the only left watcher for coalescing. - watchersCoalescing.Sub(float64(wb.size() - 1)) } wb.cancel() diff --git a/server/proxy/grpcproxy/watch_broadcasts.go b/etcd/proxy/grpcproxy/watch_broadcasts.go similarity index 97% rename from server/proxy/grpcproxy/watch_broadcasts.go rename to etcd/proxy/grpcproxy/watch_broadcasts.go index dacd3007d1d..2dddea26a10 100644 --- a/server/proxy/grpcproxy/watch_broadcasts.go +++ b/etcd/proxy/grpcproxy/watch_broadcasts.go @@ -63,7 +63,7 @@ func (wbs *watchBroadcasts) coalesce(wb *watchBroadcast) { wbswb.mu.Lock() // 1. check if wbswb is behind wb so it won't skip any events in wb // 2. ensure wbswb started; nextrev == 0 may mean wbswb is waiting - // for a current watcher and expects a create event from the server. + // for a current watcher and expects a create event from the etcd. 
// watchRange is the [key, end) interval a single watcher observes.
type watchRange struct {
	key, end string
}

// valid reports whether the range is well-formed: a single-key watch
// (empty end), a proper interval (end > key), or the open-ended
// "from key" sentinel end "\x00".
func (wr *watchRange) valid() bool {
	switch {
	case len(wr.end) == 0:
		return true // single-key watch
	case wr.end == "\x00":
		return true // open-ended range sentinel
	default:
		return wr.end > wr.key
	}
}
// watcher tracks one client watch registration on a proxy stream and
// filters the coalesced broadcast feed down to what that client asked for.
type watcher struct {
	// user configuration

	wr       watchRange
	filters  []mvcc.FilterFunc
	progress bool
	prevKV   bool

	// id is the id returned to the client on its watch stream.
	id int64
	// nextrev is the minimum expected next event revision.
	nextrev int64
	// lastHeader has the last header sent over the stream.
	lastHeader pb.ResponseHeader

	// wps is the parent.
	wps *watchProxyStream
}

// send filters out repeated events by discarding revisions older
// than the last one sent over the watch channel.
func (w *watcher) send(wr clientv3.WatchResponse) {
	// drop progress notifications unless the client opted in
	if wr.IsProgressNotify() && !w.progress {
		return
	}
	// entire batch precedes what was already delivered; nothing new
	if w.nextrev > wr.Header.Revision && len(wr.Events) > 0 {
		return
	}
	if w.nextrev == 0 {
		// current watch; expect updates following this revision
		w.nextrev = wr.Header.Revision + 1
	}

	events := make([]*mvccpb.Event, 0, len(wr.Events))

	var lastRev int64
	for i := range wr.Events {
		ev := (*mvccpb.Event)(wr.Events[i])
		if ev.Kv.ModRevision < w.nextrev {
			continue
		} else {
			// We cannot update w.rev here.
			// txn can have multiple events with the same rev.
			// If w.nextrev updates here, it would skip events in the same txn.
			lastRev = ev.Kv.ModRevision
		}

		filtered := false
		for _, filter := range w.filters {
			if filter(*ev) {
				filtered = true
				break
			}
		}
		if filtered {
			continue
		}

		if !w.prevKV {
			// strip PrevKv on a copy so the shared broadcast event is not mutated
			evCopy := *ev
			evCopy.PrevKv = nil
			ev = &evCopy
		}
		events = append(events, ev)
	}

	// advance past the whole txn only after all its events were examined
	if lastRev >= w.nextrev {
		w.nextrev = lastRev + 1
	}

	// all events are filtered out?
	if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 {
		return
	}

	w.lastHeader = wr.Header
	w.post(&pb.WatchResponse{
		Header:          &wr.Header,
		Created:         wr.Created,
		CompactRevision: wr.CompactRevision,
		Canceled:        wr.Canceled,
		WatchId:         w.id,
		Events:          events,
	})
}

// post puts a watch response on the watcher's proxy stream channel.
// NOTE(review): a 50ms send timeout cancels the entire proxy stream —
// confirm this aggressive deadline is intentional for slow clients.
func (w *watcher) post(wr *pb.WatchResponse) bool {
	select {
	case w.wps.watchCh <- wr:
	case <-time.After(50 * time.Millisecond):
		w.wps.cancel()
		w.wps.lg.Error("failed to put a watch response on the watcher's proxy stream channel,err is timeout")
		return false
	}
	return true
}
+const defaultRefreshInterval = 30000 * time.Millisecond + +var once sync.Once + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func newDirector(lg *zap.Logger, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) *director { + if lg == nil { + lg = zap.NewNop() + } + d := &director{ + lg: lg, + uf: urlsFunc, + failureWait: failureWait, + } + d.refresh() + go func() { + // In order to prevent missing proxy endpoints in the first try: + // when given refresh interval of defaultRefreshInterval or greater + // and whenever there is no available proxy endpoints, + // give 1-second refreshInterval. + for { + es := d.endpoints() + ri := refreshInterval + if ri >= defaultRefreshInterval { + if len(es) == 0 { + ri = time.Second + } + } + if len(es) > 0 { + once.Do(func() { + var sl []string + for _, e := range es { + sl = append(sl, e.URL.String()) + } + lg.Info("endpoints found", zap.Strings("endpoints", sl)) + }) + } + time.Sleep(ri) + d.refresh() + } + }() + return d +} + +type director struct { + sync.Mutex + lg *zap.Logger + ep []*endpoint + uf GetProxyURLs + failureWait time.Duration +} + +func (d *director) refresh() { + urls := d.uf() + d.Lock() + defer d.Unlock() + var endpoints []*endpoint + for _, u := range urls { + uu, err := url.Parse(u) + if err != nil { + d.lg.Info("upstream URL invalid", zap.Error(err)) + continue + } + endpoints = append(endpoints, newEndpoint(d.lg, *uu, d.failureWait)) + } + + // shuffle array to avoid connections being "stuck" to a single endpoint + for i := range endpoints { + j := rand.Intn(i + 1) + endpoints[i], endpoints[j] = endpoints[j], endpoints[i] + } + + d.ep = endpoints +} + +func (d *director) endpoints() []*endpoint { + d.Lock() + defer d.Unlock() + filtered := make([]*endpoint, 0) + for _, ep := range d.ep { + if ep.Available { + filtered = append(filtered, ep) + } + } + + return filtered +} + +func newEndpoint(lg *zap.Logger, u url.URL, failureWait time.Duration) *endpoint { + ep := 
endpoint{ + lg: lg, + URL: u, + Available: true, + failFunc: timedUnavailabilityFunc(failureWait), + } + + return &ep +} + +type endpoint struct { + sync.Mutex + + lg *zap.Logger + URL url.URL + Available bool + + failFunc func(ep *endpoint) +} + +func (ep *endpoint) Failed() { + ep.Lock() + if !ep.Available { + ep.Unlock() + return + } + + ep.Available = false + ep.Unlock() + + if ep.lg != nil { + ep.lg.Info("marked endpoint unavailable", zap.String("endpoint", ep.URL.String())) + } + + if ep.failFunc == nil { + if ep.lg != nil { + ep.lg.Info( + "no failFunc defined, endpoint will be unavailable forever", + zap.String("endpoint", ep.URL.String()), + ) + } + return + } + + ep.failFunc(ep) +} + +func timedUnavailabilityFunc(wait time.Duration) func(*endpoint) { + return func(ep *endpoint) { + time.AfterFunc(wait, func() { + ep.Available = true + if ep.lg != nil { + ep.lg.Info( + "marked endpoint available, to retest connectivity", + zap.String("endpoint", ep.URL.String()), + ) + } + }) + } +} diff --git a/etcd/proxy/httpproxy/doc.go b/etcd/proxy/httpproxy/doc.go new file mode 100644 index 00000000000..7a45099120c --- /dev/null +++ b/etcd/proxy/httpproxy/doc.go @@ -0,0 +1,18 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httpproxy implements etcd httpproxy. The etcd proxy acts as a reverse +// http proxy forwarding client requests to active etcd cluster members, and does +// not participate in consensus. 
+package httpproxy diff --git a/etcd/proxy/httpproxy/proxy.go b/etcd/proxy/httpproxy/proxy.go new file mode 100644 index 00000000000..c8f27bf01df --- /dev/null +++ b/etcd/proxy/httpproxy/proxy.go @@ -0,0 +1,121 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httpproxy + +import ( + "encoding/json" + "net/http" + "strings" + "time" + + "go.uber.org/zap" + "golang.org/x/net/http2" +) + +const ( + // DefaultMaxIdleConnsPerHost indicates the default maximum idle connection + // count maintained between proxy and each member. We set it to 128 to + // let proxy handle 128 concurrent requests in long term smoothly. + // If the number of concurrent requests is bigger than this value, + // proxy needs to create one new connection when handling each request in + // the delta, which is bad because the creation consumes resource and + // may eat up ephemeral ports. + DefaultMaxIdleConnsPerHost = 128 +) + +// GetProxyURLs is a function which should return the current set of URLs to +// which client requests should be proxied. This function will be queried +// periodically by the proxy Handler to refresh the set of available +// backends. +type GetProxyURLs func() []string + +// NewHandler creates a new HTTP handler, listening on the given transport, +// which will proxy requests to an etcd cluster. +// The handler will periodically update its view of the cluster. 
+func NewHandler(lg *zap.Logger, t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler { + if lg == nil { + lg = zap.NewNop() + } + if t.TLSClientConfig != nil { + // Enable http2, see Issue 5033. + err := http2.ConfigureTransport(t) + if err != nil { + lg.Info("Error enabling Transport HTTP/2 support", zap.Error(err)) + } + } + + p := &reverseProxy{ + lg: lg, + director: newDirector(lg, urlsFunc, failureWait, refreshInterval), + transport: t, + } + + mux := http.NewServeMux() + mux.Handle("/", p) + mux.HandleFunc("/v2/config/local/proxy", p.configHandler) + + return mux +} + +// NewReadonlyHandler wraps the given HTTP handler to allow only GET requests +func NewReadonlyHandler(hdlr http.Handler) http.Handler { + readonly := readonlyHandlerFunc(hdlr) + return http.HandlerFunc(readonly) +} + +func readonlyHandlerFunc(next http.Handler) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, req *http.Request) { + if req.Method != "GET" { + w.WriteHeader(http.StatusNotImplemented) + return + } + + next.ServeHTTP(w, req) + } +} + +func (p *reverseProxy) configHandler(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r.Method, "GET") { + return + } + + eps := p.director.endpoints() + epstr := make([]string, len(eps)) + for i, e := range eps { + epstr[i] = e.URL.String() + } + + proxyConfig := struct { + Endpoints []string `json:"endpoints"` + }{ + Endpoints: epstr, + } + + json.NewEncoder(w).Encode(proxyConfig) +} + +// allowMethod verifies that the given method is one of the allowed methods, +// and if not, it writes an error to w. A boolean is returned indicating +// whether or not the method is allowed. 
+func allowMethod(w http.ResponseWriter, m string, ms ...string) bool { + for _, meth := range ms { + if m == meth { + return true + } + } + w.Header().Set("Allow", strings.Join(ms, ",")) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return false +} diff --git a/etcd/proxy/httpproxy/reverse.go b/etcd/proxy/httpproxy/reverse.go new file mode 100644 index 00000000000..c005fa36358 --- /dev/null +++ b/etcd/proxy/httpproxy/reverse.go @@ -0,0 +1,218 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httpproxy + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "sync/atomic" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes" + + "go.uber.org/zap" +) + +// Hop-by-hop headers. These are removed when sent to the backend. 
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html +// This list of headers borrowed from stdlib httputil.ReverseProxy +var singleHopHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Authenticate", + "Proxy-Authorization", + "Te", // canonicalized version of "TE" + "Trailers", + "Transfer-Encoding", + "Upgrade", +} + +func removeSingleHopHeaders(hdrs *http.Header) { + for _, h := range singleHopHeaders { + hdrs.Del(h) + } +} + +type reverseProxy struct { + lg *zap.Logger + director *director + transport http.RoundTripper +} + +func (p *reverseProxy) ServeHTTP(rw http.ResponseWriter, clientreq *http.Request) { + proxyreq := new(http.Request) + *proxyreq = *clientreq + + var ( + proxybody []byte + err error + ) + + if clientreq.Body != nil { + proxybody, err = ioutil.ReadAll(clientreq.Body) + if err != nil { + msg := fmt.Sprintf("failed to read request body: %v", err) + p.lg.Info("failed to read request body", zap.Error(err)) + e := httptypes.NewHTTPError(http.StatusInternalServerError, "httpproxy: "+msg) + if we := e.WriteTo(rw); we != nil { + p.lg.Debug( + "error writing HTTPError to remote addr", + zap.String("remote-addr", clientreq.RemoteAddr), + zap.Error(we), + ) + } + return + } + } + + // deep-copy the headers, as these will be modified below + proxyreq.Header = make(http.Header) + copyHeader(proxyreq.Header, clientreq.Header) + + normalizeRequest(proxyreq) + removeSingleHopHeaders(&proxyreq.Header) + maybeSetForwardedFor(proxyreq) + + endpoints := p.director.endpoints() + if len(endpoints) == 0 { + msg := "zero endpoints currently available" + + // TODO: limit the rate of the error logging. 
+ p.lg.Info(msg) + e := httptypes.NewHTTPError(http.StatusServiceUnavailable, "httpproxy: "+msg) + if we := e.WriteTo(rw); we != nil { + p.lg.Debug( + "error writing HTTPError to remote addr", + zap.String("remote-addr", clientreq.RemoteAddr), + zap.Error(we), + ) + } + return + } + + var requestClosed int32 + completeCh := make(chan bool, 1) + closeNotifier, ok := rw.(http.CloseNotifier) + ctx, cancel := context.WithCancel(context.Background()) + proxyreq = proxyreq.WithContext(ctx) + defer cancel() + if ok { + closeCh := closeNotifier.CloseNotify() + go func() { + select { + case <-closeCh: + atomic.StoreInt32(&requestClosed, 1) + p.lg.Info( + "client closed request prematurely", + zap.String("remote-addr", clientreq.RemoteAddr), + ) + cancel() + case <-completeCh: + } + }() + + defer func() { + completeCh <- true + }() + } + + var res *http.Response + + for _, ep := range endpoints { + if proxybody != nil { + proxyreq.Body = ioutil.NopCloser(bytes.NewBuffer(proxybody)) + } + redirectRequest(proxyreq, ep.URL) + + res, err = p.transport.RoundTrip(proxyreq) + if atomic.LoadInt32(&requestClosed) == 1 { + return + } + if err != nil { + p.lg.Info( + "failed to direct request", + zap.String("url", ep.URL.String()), + zap.Error(err), + ) + ep.Failed() + continue + } + + break + } + + if res == nil { + // TODO: limit the rate of the error logging. 
+ msg := fmt.Sprintf("unable to get response from %d endpoint(s)", len(endpoints)) + p.lg.Info(msg) + e := httptypes.NewHTTPError(http.StatusBadGateway, "httpproxy: "+msg) + if we := e.WriteTo(rw); we != nil { + p.lg.Debug( + "error writing HTTPError to remote addr", + zap.String("remote-addr", clientreq.RemoteAddr), + zap.Error(we), + ) + } + return + } + + defer res.Body.Close() + removeSingleHopHeaders(&res.Header) + copyHeader(rw.Header(), res.Header) + + rw.WriteHeader(res.StatusCode) + io.Copy(rw, res.Body) +} + +func copyHeader(dst, src http.Header) { + for k, vv := range src { + for _, v := range vv { + dst.Add(k, v) + } + } +} + +func redirectRequest(req *http.Request, loc url.URL) { + req.URL.Scheme = loc.Scheme + req.URL.Host = loc.Host +} + +func normalizeRequest(req *http.Request) { + req.Proto = "HTTP/1.1" + req.ProtoMajor = 1 + req.ProtoMinor = 1 + req.Close = false +} + +func maybeSetForwardedFor(req *http.Request) { + clientIP, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + return + } + + // If we aren't the first proxy retain prior + // X-Forwarded-For information as a comma+space + // separated list and fold multiple headers into one. 
+ if prior, ok := req.Header["X-Forwarded-For"]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + req.Header.Set("X-Forwarded-For", clientIP) +} diff --git a/server/proxy/tcpproxy/doc.go b/etcd/proxy/tcpproxy/doc.go similarity index 100% rename from server/proxy/tcpproxy/doc.go rename to etcd/proxy/tcpproxy/doc.go diff --git a/server/proxy/tcpproxy/userspace.go b/etcd/proxy/tcpproxy/userspace.go similarity index 89% rename from server/proxy/tcpproxy/userspace.go rename to etcd/proxy/tcpproxy/userspace.go index a109c447c86..7011e33f03c 100644 --- a/server/proxy/tcpproxy/userspace.go +++ b/etcd/proxy/tcpproxy/userspace.go @@ -19,10 +19,11 @@ import ( "io" "math/rand" "net" - "strings" "sync" "time" + "github.com/ls-2018/etcd_cn/code_debug/conn" + "go.uber.org/zap" ) @@ -70,33 +71,17 @@ type TCPProxy struct { pickCount int // for round robin } -// The parameter host is returned by net.SplitHostPort previously, -// so it must be a valid host. This function is only to check whether -// it's an IPv6 IP address. -func isIPv6(host string) bool { - return strings.IndexRune(host, ':') != -1 -} - -// A literal IPv6 address in hostport must be enclosed in square -// brackets, as in "[::1]:80", "[::1%lo0]:80". 
-func formatAddr(host string, port uint16) string { - if isIPv6(host) { - return fmt.Sprintf("[%s]:%d", host, port) - } - return fmt.Sprintf("%s:%d", host, port) -} - func (tp *TCPProxy) Run() error { tp.donec = make(chan struct{}) if tp.MonitorInterval == 0 { tp.MonitorInterval = 5 * time.Minute } for _, srv := range tp.Endpoints { - addr := formatAddr(srv.Target, srv.Port) + addr := fmt.Sprintf("%s:%d", srv.Target, srv.Port) tp.remotes = append(tp.remotes, &remote{srv: srv, addr: addr}) } - var eps []string + eps := []string{} for _, ep := range tp.Endpoints { eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port)) } @@ -110,7 +95,7 @@ func (tp *TCPProxy) Run() error { if err != nil { return err } - + conn.PrintConn("TCPProxy", in) go tp.serve(in) } } diff --git a/server/verify/doc.go b/etcd/verify/doc.go similarity index 100% rename from server/verify/doc.go rename to etcd/verify/doc.go diff --git a/etcd/verify/over_verify.go b/etcd/verify/over_verify.go new file mode 100644 index 00000000000..eba7d2bef70 --- /dev/null +++ b/etcd/verify/over_verify.go @@ -0,0 +1,147 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package verify + +import ( + "fmt" + "os" + + "github.com/ls-2018/etcd_cn/etcd/datadir" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + wal2 "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/raft/raftpb" + "go.uber.org/zap" +) + +const ( + ENV_VERIFY = "ETCD_VERIFY" + ENV_VERIFY_ALL_VALUE = "all" +) + +type Config struct { + // DataDir is a root directory where the data being verified are stored. + DataDir string + + // ExactIndex requires consistent_index in backend exactly match the last committed WAL entry. + // Usually backend's consistent_index needs to be <= WAL.commit, but for backups the match + // is expected to be exact. + ExactIndex bool + + Logger *zap.Logger +} + +// Verify performs consistency checks of given etcd data-directory. +// The errors are reported as the returned error, but for some situations +// the function can also panic. +// The function is expected to work on not-in-use data model, i.e. +// no file-locks should be taken. Verify does not modified the data. 
+func Verify(cfg Config) error { + lg := cfg.Logger + if lg == nil { + lg = zap.NewNop() + } + + var err error + lg.Info("verification of persisted state", zap.String("data-dir", cfg.DataDir)) + defer func() { + if err != nil { + lg.Error("verification of persisted state failed", + zap.String("data-dir", cfg.DataDir), + zap.Error(err)) + } else if r := recover(); r != nil { + lg.Error("verification of persisted state failed", + zap.String("data-dir", cfg.DataDir)) + panic(r) + } else { + lg.Info("verification of persisted state successful", zap.String("data-dir", cfg.DataDir)) + } + }() + + beConfig := backend.DefaultBackendConfig() + beConfig.Path = datadir.ToBackendFileName(cfg.DataDir) + beConfig.Logger = cfg.Logger + + be := backend.New(beConfig) + defer be.Close() + + snapshot, hardstate, err := validateWal(cfg) + if err != nil { + return err + } + + // TODO: Perform validation of consistency of membership between + // backend/members & WAL confstate (and maybe storev2 if still exists). + + return validateConsistentIndex(cfg, hardstate, snapshot, be) +} + +// VerifyIfEnabled 根据ETCD_VERIFY环境设置执行校验. +func VerifyIfEnabled(cfg Config) error { + if os.Getenv(ENV_VERIFY) == ENV_VERIFY_ALL_VALUE { + return Verify(cfg) + } + return nil +} + +// MustVerifyIfEnabled 根据ETCD_VERIFY环境设置执行验证,发现问题就退出. 
+func MustVerifyIfEnabled(cfg Config) { + if err := VerifyIfEnabled(cfg); err != nil { + cfg.Logger.Fatal("验证失败", + zap.String("data-dir", cfg.DataDir), + zap.Error(err)) + } +} + +func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error { + tx := be.BatchTx() + index, term := cindex.ReadConsistentIndex(tx) + if cfg.ExactIndex && index != hardstate.Commit { + return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit) + } + if cfg.ExactIndex && term != hardstate.Term { + return fmt.Errorf("backend.Term (%v) expected == WAL.HardState.term, (%v)", term, hardstate.Term) + } + if index > hardstate.Commit { + return fmt.Errorf("backend.ConsistentIndex (%v)必须是<= WAL.HardState.commit (%v)", index, hardstate.Commit) + } + if term > hardstate.Term { + return fmt.Errorf("backend.Term (%v)必须是<= WAL.HardState.term, (%v)", term, hardstate.Term) + } + + if index < snapshot.Index { + return fmt.Errorf("backend.ConsistentIndex (%v)必须是>= last snapshot index (%v)", index, snapshot.Index) + } + + cfg.Logger.Info("verification: consistentIndex OK", zap.Uint64("backend-consistent-index", index), zap.Uint64("hardstate-commit", hardstate.Commit)) + return nil +} + +func validateWal(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) { + walDir := datadir.ToWalDir(cfg.DataDir) + + walSnaps, err := wal2.ValidSnapshotEntries(cfg.Logger, walDir) + if err != nil { + return nil, nil, err + } + + snapshot := walSnaps[len(walSnaps)-1] + hardstate, err := wal2.Verify(cfg.Logger, walDir, snapshot) + if err != nil { + return nil, nil, err + } + return &snapshot, hardstate, nil +} diff --git a/etcd/wal/over_decoder.go b/etcd/wal/over_decoder.go new file mode 100644 index 00000000000..38ed3b15c0f --- /dev/null +++ b/etcd/wal/over_decoder.go @@ -0,0 +1,125 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not 
use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "bufio" + "bytes" + "hash" + "io" + "sync" + + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/pkg/crc" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +const minSectorSize = 512 + +type decoder struct { + mu sync.Mutex + brs []*bufio.Reader // 要读取的所有wal文件 + + // lastValidOff file offset following the last valid decoded record + lastValidOff int64 // 下一次decode的偏移量 + crc hash.Hash32 +} + +func newDecoder(r ...io.Reader) *decoder { + readers := make([]*bufio.Reader, len(r)) + for i := range r { + readers[i] = bufio.NewReader(r[i]) + } + return &decoder{ + brs: readers, + crc: crc.New(0, crcTable), + } +} + +func (d *decoder) decode(rec *walpb.Record) error { + rec.Reset() + d.mu.Lock() + defer d.mu.Unlock() + return d.decodeRecord(rec) +} + +// raft max message size is set to 1 MB in etcd etcd +// assume projects set reasonable message size limit, +// thus entry size should never exceed 10 MB + +func (d *decoder) decodeRecord(rec *walpb.Record) error { + if len(d.brs) == 0 { + return io.EOF + } + + line, _, err := bufio.NewReader(d.brs[0]).ReadLine() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + length := len(line) + a := make([]byte, length) + all := 0 + for _, item := range line { + if item == 0 { + all += 1 + } + } + if all == length { + return io.EOF + } + if bytes.Equal(a, line) { + return io.EOF + } + + if err := rec.Unmarshal(line); err != nil { + 
return err + } + + // skip crc checking if the record type is crcType + if rec.Type != crcType { + d.crc.Write(rec.Data) + if err := rec.Validate(d.crc.Sum32()); err != nil { + return err + } + } + d.lastValidOff += int64(len(line)) + 1 + return nil +} + +func (d *decoder) updateCRC(prevCrc uint32) { + d.crc = crc.New(prevCrc, crcTable) +} + +func (d *decoder) lastCRC() uint32 { + return d.crc.Sum32() +} + +func (d *decoder) lastOffset() int64 { return d.lastValidOff } + +func mustUnmarshalEntry(d []byte) raftpb.Entry { + var e raftpb.Entry + pbutil.MustUnmarshal(&e, d) + return e +} + +func mustUnmarshalState(d []byte) raftpb.HardState { + var s raftpb.HardState + pbutil.MustUnmarshal(&s, d) + return s +} diff --git a/etcd/wal/over_encoder.go b/etcd/wal/over_encoder.go new file mode 100644 index 00000000000..6c4fc0ae01a --- /dev/null +++ b/etcd/wal/over_encoder.go @@ -0,0 +1,83 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "hash" + "io" + "os" + "sync" + + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/pkg/crc" + "github.com/ls-2018/etcd_cn/pkg/ioutil" +) + +// walPageBytes +const walPageBytes = 8 * minSectorSize // 8字节对齐 +// encoder模块把会增量的计算crc和数据一起写入到wal文件中. 
下面为encoder数据结构undefined +type encoder struct { + mu sync.Mutex + bw *ioutil.PageWriter + crc hash.Hash32 + buf []byte // 缓存空间,默认为1M,降低数据分配的压力undefined,序列化时使用 + uint64buf []byte // 将数据变成特定格式的数据,大端、小端 +} + +func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { + return &encoder{ + bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset), + crc: crc.New(prevCrc, crcTable), + // 1MB buffer + buf: make([]byte, 1024*1024), + uint64buf: make([]byte, 8), + } +} + +// newFileEncoder 使用当前文件偏移,创建一个encoder用于写数据 +func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { + // prevCrc之前的crc码 + offset, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + return newEncoder(f, prevCrc, int(offset)), nil +} + +func (e *encoder) encode(rec *walpb.Record) error { + e.mu.Lock() + defer e.mu.Unlock() + e.crc.Write(rec.Data) + rec.Crc = e.crc.Sum32() + var ( + data []byte + err error + ) + + data, err = rec.Marshal() + if err != nil { + return err + } + data = append(data, '\n') + _, err = e.bw.Write(data) + return err +} + +func (e *encoder) flush() error { + e.mu.Lock() + _, err := e.bw.FlushN() + e.mu.Unlock() + return err +} diff --git a/etcd/wal/over_file_pipeline.go b/etcd/wal/over_file_pipeline.go new file mode 100644 index 00000000000..786ce3a2c12 --- /dev/null +++ b/etcd/wal/over_file_pipeline.go @@ -0,0 +1,100 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package wal + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + + "go.uber.org/zap" +) + +// filePipeline 分配磁盘空间的管道 +// wal新建新的文件时都是先新建一个tmp文件,当所有操作都完成后再重命名这个文件.wal使用file_pipeline这个模块在后台启动一个协程时刻准备一个临时文件以供使用,从而避免临时创建文件的开销 +type filePipeline struct { + lg *zap.Logger + dir string + size int64 + count int + filec chan *fileutil.LockedFile + errc chan error + donec chan struct{} +} + +func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline { + if lg == nil { + lg = zap.NewNop() + } + fp := &filePipeline{ + lg: lg, + dir: dir, + size: fileSize, // + filec: make(chan *fileutil.LockedFile), + errc: make(chan error, 1), + donec: make(chan struct{}), + } + go fp.run() // 建立一个tmp文件 + return fp +} + +// Open 返回一个新的文件供写入.在再次调用Open之前,请重命名该文件,否则会出现文件碰撞的情况. +func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) { + select { + case f = <-fp.filec: + case err = <-fp.errc: + } + return f, err +} + +func (fp *filePipeline) Close() error { + close(fp.donec) + return <-fp.errc +} + +func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) { + // count % 2,所以这个文件和上次发布的文件不一样. 
+ fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2)) + if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil { + return nil, err + } + if err = fileutil.Preallocate(f.File, fp.size, true); err != nil { + fp.lg.Error("在创建一个新的WAL时,未能预先分配空间", zap.Int64("size", fp.size), zap.Error(err)) + f.Close() + return nil, err + } + fp.count++ + return f, nil +} + +func (fp *filePipeline) run() { + defer close(fp.errc) + for { + f, err := fp.alloc() + if err != nil { + fp.errc <- err + return + } + select { + case fp.filec <- f: + case <-fp.donec: + os.Remove(f.Name()) + f.Close() + return + } + } +} diff --git a/etcd/wal/over_util.go b/etcd/wal/over_util.go new file mode 100644 index 00000000000..1d828388258 --- /dev/null +++ b/etcd/wal/over_util.go @@ -0,0 +1,108 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "errors" + "fmt" + "strings" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + + "go.uber.org/zap" +) + +var errBadWALName = errors.New("bad wal name") + +// Exist 如果在给定的目录中存在任何文件,则返回true. +func Exist(dir string) bool { + names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal")) + if err != nil { + return false + } + return len(names) != 0 +} + +// searchIndex 返回 raft 索引部分等于或小于给定索引的名字的最后一个数组索引. 
+func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) { + for i := len(names) - 1; i >= 0; i-- { + name := names[i] + _, curIndex, err := parseWALName(name) + if err != nil { + lg.Panic("解析wal文件名字失败", zap.String("path", name), zap.Error(err)) + } + if index >= curIndex { + return i, true + } + } + return -1, false +} + +// isValidSeq 检查seq是否连续增加. +func isValidSeq(lg *zap.Logger, names []string) bool { + var lastSeq uint64 + for _, name := range names { + curSeq, _, err := parseWALName(name) + if err != nil { + lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err)) + } + if lastSeq != 0 && lastSeq != curSeq-1 { + return false + } + lastSeq = curSeq + } + return true +} + +// 返回指定目录下的所有wal文件 +func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) { + names, err := fileutil.ReadDir(dirpath) // 返回指定目录下所有经过排序的文件 + if err != nil { + return nil, err + } + wnames := checkWalNames(lg, names) + if len(wnames) == 0 { + return nil, ErrFileNotFound + } + return wnames, nil +} + +// 获取后缀是.wal的文件 +func checkWalNames(lg *zap.Logger, names []string) []string { + wnames := make([]string, 0) + for _, name := range names { + if _, _, err := parseWALName(name); err != nil { + if !strings.HasSuffix(name, ".tmp") { + lg.Warn("wal目录 忽略文件:%s", zap.String("path", name)) + } + continue + } + wnames = append(wnames, name) + } + return wnames +} + +// 解析文件名 +func parseWALName(str string) (seq, index uint64, err error) { + if !strings.HasSuffix(str, ".wal") { + return 0, 0, errBadWALName + } + _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) + return seq, index, err +} + +func walName(seq, index uint64) string { + return fmt.Sprintf("%016x-%016x.wal", seq, index) +} diff --git a/etcd/wal/over_wal.go b/etcd/wal/over_wal.go new file mode 100644 index 00000000000..3c2b472ecc4 --- /dev/null +++ b/etcd/wal/over_wal.go @@ -0,0 +1,959 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "bytes" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "go.uber.org/zap" +) + +const ( + metadataType int64 = iota + 1 // 元数据类型,元数据会保存当前的node id和cluster id. + entryType // 日志条目 + stateType // 存放的是集群当前的状态HardState,如果集群的状态有变化,就会在WAL中存放一个新集群状态数据.里面包括当前Term,当前竞选者、当前已经commit的日志. + crcType // 存放crc校验字段.读取数据时,会根据这个记录里的crc字段对前面已经读出来的数据进行校验. + snapshotType // 存放snapshot的日志点.包括日志的Index和Term. + warnSyncDuration = time.Second // 是指在记录警告之前分配给fsync的时间量. +) + +var ( + // SegmentSizeBytes is the preallocated size of each wal segment file. + // The actual size might be larger than this. In general, the default + // value should be used, but this is defined as an exported variable + // so that tests can set a different segment size. 
+ SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB + + ErrMetadataConflict = errors.New("wal: conflicting metadata found") + ErrFileNotFound = errors.New("wal: file not found") + ErrCRCMismatch = errors.New("wal: crc mismatch") + ErrSnapshotMismatch = errors.New("wal: snapshot mismatch") + ErrSnapshotNotFound = errors.New("wal: snapshot not found") + ErrSliceOutOfRange = errors.New("wal: slice bounds out of range") + ErrMaxWALEntrySizeLimitExceeded = errors.New("wal: max entry size limit exceeded") + ErrDecoderNotFound = errors.New("wal: decoder not found") + crcTable = crc32.MakeTable(crc32.Castagnoli) +) + +// WAL is a logical representation of the stable storage. +// WAL is either in read mode or append mode but not both. +// A newly created WAL is in append mode, and ready for appending records. +// A just opened WAL is in read mode, and ready for reading records. +// The WAL will be ready for appending after reading out all the previous records. +// WAL是稳定存储的一个逻辑表示.WAL要么处于读取模式,要么处于追加模式,但不能同时进行. +// 一个新创建的WAL处于追加模式,并准备好追加记录.一个刚打开的WAL处于读模式,并准备好读取记录. +// 在读出所有以前的记录后,WAL将准备好进行追加. +type WAL struct { + lg *zap.Logger + dir string // wal文件的存储目录 + dirFile *os.File // 是一个用于重命名时同步的wal目录的fd. + metadata []byte // wal文件构建后会写的第一个metadata记录 + state raftpb.HardState // wal文件构建后会写的第一个state记录 + start walpb.Snapshot // wal开始的snapshot,代表读取wal时从这个snapshot的记录之后开始 + decoder *decoder // wal记录的反序列化器 + readClose func() error // 关闭反序列化器 + unsafeNoSync bool // 非安全存储 默认是 false + mu sync.Mutex + enti uint64 // 保存到wal的最新日志索引 + encoder *encoder // encoder to encode records + locks []*fileutil.LockedFile // 底层数据文件列表 + fp *filePipeline +} + +// Create 创建一个准备用于添加记录的WAL.给定的元数据被记录在每个WAL文件的头部,并且可以在文件打开后用ReadAll检索. +func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { + if Exist(dirpath) { + return nil, os.ErrExist + } + + if lg == nil { + lg = zap.NewNop() + } + + // 保持临时的WAL目录,这样WAL的初始化就会显得很原子化. 
+ tmpdirpath := filepath.Clean(dirpath) + ".tmp" + if fileutil.Exist(tmpdirpath) { + if err := os.RemoveAll(tmpdirpath); err != nil { + return nil, err + } + } + defer os.RemoveAll(tmpdirpath) + + if err := fileutil.CreateDirAll(tmpdirpath); err != nil { + lg.Warn( + "无法创建wal临时目录", + zap.String("tmp-dir-path", tmpdirpath), + zap.String("dir-path", dirpath), + zap.Error(err), + ) + return nil, err + } + + p := filepath.Join(tmpdirpath, walName(0, 0)) + f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode) // 阻塞 + if err != nil { + lg.Warn( + "未能存入一个初始WAL文件", + zap.String("path", p), + zap.Error(err), + ) + return nil, err + } + // 跳到末尾 + if _, err = f.Seek(0, io.SeekEnd); err != nil { + lg.Warn( + "未能寻找到一个初始的WAL文件", + zap.String("path", p), + zap.Error(err), + ) + return nil, err + } + // 预分配文件,大小为SegmentSizeBytes(64MB) + if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { + lg.Warn( + "未能预先分配一个初始的WAL文件", + zap.String("path", p), + zap.Int64("segment-bytes", SegmentSizeBytes), + zap.Error(err), + ) + return nil, err + } + + w := &WAL{ + lg: lg, + dir: dirpath, + metadata: metadata, + } + w.encoder, err = newFileEncoder(f.File, 0) + if err != nil { + return nil, err + } + w.locks = append(w.locks, f) + if err = w.saveCrc(0); err != nil { + return nil, err + } + // 将metadataType类型的record记录在wal的header处 + if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil { + return nil, err + } + // 保存空的snapshot + if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil { + return nil, err + } + logDirPath := w.dir + // 重命名,之前以.tmp结尾的文件,初始化完成之后重命名,类似原子操作 + if w, err = w.renameWAL(tmpdirpath); err != nil { + lg.Warn( + fmt.Sprintf("重命名失败 .%s.tmp --> %s", tmpdirpath, w.dir), + zap.String("tmp-dir-path", tmpdirpath), + zap.String("dir-path", logDirPath), + zap.Error(err), + ) + return nil, err + } + var perr error + defer func() { + if perr != nil { + w.cleanupWAL(lg) + } + }() + + // 
目录被重新命名;同步父目录以保持重命名. + pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir)) // ./raftexample/db + if perr != nil { + lg.Warn( + "未能打开父数据目录", + zap.String("parent-dir-path", filepath.Dir(w.dir)), + zap.String("dir-path", w.dir), + zap.Error(perr), + ) + return nil, perr + } + dirCloser := func() error { + if perr = pdir.Close(); perr != nil { + lg.Warn( + "failed to close the parent data directory file", + zap.String("parent-dir-path", filepath.Dir(w.dir)), + zap.String("dir-path", w.dir), + zap.Error(perr), + ) + return perr + } + return nil + } + if perr = fileutil.Fsync(pdir); perr != nil { + dirCloser() + lg.Warn( + "未能同步父数据目录文件", + zap.String("parent-dir-path", filepath.Dir(w.dir)), + zap.String("dir-path", w.dir), + zap.Error(perr), + ) + return nil, perr + } + // 关闭目录 + if err = dirCloser(); err != nil { + return nil, err + } + + return w, nil +} + +// SetUnsafeNoFsync ok +func (w *WAL) SetUnsafeNoFsync() { + w.unsafeNoSync = true // 非安全存储 默认是 false +} + +func (w *WAL) cleanupWAL(lg *zap.Logger) { + var err error + if err = w.Close(); err != nil { + lg.Panic("failed to close WAL during cleanup", zap.Error(err)) + } + brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999")) + if err = os.Rename(w.dir, brokenDirName); err != nil { + lg.Panic( + "failed to rename WAL during cleanup", + zap.Error(err), + zap.String("source-path", w.dir), + zap.String("rename-path", brokenDirName), + ) + } +} + +// raftexample/db/raftexample-1.tmp ---> raftexample/db/raftexample-1 +func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) { + if err := os.RemoveAll(w.dir); err != nil { // 删除 raftexample/db/raftexample-1 + return nil, err + } + // 在非Windows平台上,重命名时要按住锁.释放锁并试图快速重新获得它可能是不稳定的,因为在此过程中,进程可能会分叉产生一个进程. + // Go运行时将fds设置为执行时关闭,但在分叉和执行之间存在一个窗口,另一个进程持有锁. 
+ if err := os.Rename(tmpdirpath, w.dir); err != nil { // raftexample/db/raftexample-1.tmp ---> raftexample/db/raftexample-1 + if _, ok := err.(*os.LinkError); ok { + return w.renameWALUnlock(tmpdirpath) + } + return nil, err + } + w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes) + df, err := fileutil.OpenDir(w.dir) + w.dirFile = df + return w, err +} + +func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) { + // rename of directory with locked files doesn't work on windows/cifs; + // close the WAL to release the locks so the directory can be renamed. + w.lg.Info( + "closing WAL to release flock and retry directory renaming", + zap.String("from", tmpdirpath), + zap.String("to", w.dir), + ) + w.Close() + + if err := os.Rename(tmpdirpath, w.dir); err != nil { + return nil, err + } + + // reopen and relock + newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{}) + if oerr != nil { + return nil, oerr + } + if _, _, _, err := newWAL.ReadAll(); err != nil { + newWAL.Close() + return nil, err + } + return newWAL, nil +} + +// Open 在给定的快照处打开WAL.这个快照应该是先前保存在WAL中的,否则下面的ReadAll会失败. +// 返回的WAL已经准备好读取,第一条记录将是给定sap之后的那条.在读出所有之前的记录之前,不能对WAL进行追加. +func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) { + w, err := openAtIndex(lg, dirpath, snap, true) + if err != nil { + return nil, err + } + if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil { // ./raftexample/db/raftexample-1 + return nil, err + } + return w, nil +} + +// OpenForRead only opens the wal files for read. +// Write on a read only wal panics. 
+func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) { + return openAtIndex(lg, dirpath, snap, false) +} + +// 在指定位置打开wal +func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) { + if lg == nil { + lg = zap.NewNop() + } + names, nameIndex, err := selectWALFiles(lg, dirpath, snap) // 选择合适的wal文件 + if err != nil { + return nil, err + } + + rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write) // 打开所有wal文件 + if err != nil { + return nil, err + } + + // 创建一个WAL准备读取 + w := &WAL{ + lg: lg, + dir: dirpath, + start: snap, + decoder: newDecoder(rs...), + readClose: closer, + locks: ls, + } + + if write { // true + // 写入重用读出的文件描述符;不要关闭,以便 + w.readClose = nil + if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil { + closer() + return nil, err + } + w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes) + } + + return w, nil +} + +// 选择合适的wal文件 +func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) { + names, err := readWALNames(lg, dirpath) // 返回指定目录下的所有wal文件 + if err != nil { + return nil, -1, err + } + + nameIndex, ok := searchIndex(lg, names, snap.Index) // 查找小于快照的第一个wal日志 + if !ok || !isValidSeq(lg, names[nameIndex:]) { // 校验wal索引是否是增序 + err = ErrFileNotFound + return nil, -1, err + } + + return names, nameIndex, nil +} + +// ok +func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) { + rcs := make([]io.ReadCloser, 0) + rs := make([]io.Reader, 0) + ls := make([]*fileutil.LockedFile, 0) + for _, name := range names[nameIndex:] { + p := filepath.Join(dirpath, name) + if write { + l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode) + if err != nil { + closeAll(lg, rcs...) 
+ return nil, nil, nil, err + } + ls = append(ls, l) + rcs = append(rcs, l) + } else { + rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode) + if err != nil { + closeAll(lg, rcs...) + return nil, nil, nil, err + } + ls = append(ls, nil) + rcs = append(rcs, rf) + } + rs = append(rs, rcs[len(rcs)-1]) + } + + closer := func() error { return closeAll(lg, rcs...) } + + return rs, ls, closer, nil +} + +// ReadAll 读取所有的wal里的日志 +func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) { + w.mu.Lock() + defer w.mu.Unlock() + rec := &walpb.Record{} + if w.decoder == nil { + return nil, state, nil, ErrDecoderNotFound + } + decoder := w.decoder + var match bool + for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { + switch rec.Type { + case entryType: + e := mustUnmarshalEntry(rec.Data) + // 0 <= e.Index-w.start.Index - 1 < len(ents) + if e.Index > w.start.Index { + // 防止 "panic:运行时错误:切片边界超出范围[:13038096702221461992],容量为0" + up := e.Index - w.start.Index - 1 // + if up > uint64(len(ents)) { + // 在调用append前返回错误导致运行时恐慌 + return nil, state, nil, ErrSliceOutOfRange + } + // 下面这一行有可能覆盖一些 "未提交 "的条目. + // wal只关注写入日志,不会校验日志的index是否重复, + ents = append(ents[:up], e) + } + w.enti = e.Index // 保存到wal的最新日志索引 + + case stateType: // 集群状态变化 + state = mustUnmarshalState(rec.Data) + + case metadataType: + if metadata != nil && !bytes.Equal(metadata, rec.Data) { + state.Reset() + return nil, state, nil, ErrMetadataConflict + } + metadata = rec.Data + + case crcType: // 4 + crc := decoder.crc.Sum32() + // current crc of decoder must match the crc of the record. + // do no need to match 0 crc, since the decoder is a new one at this case. 
+ if crc != 0 && rec.Validate(crc) != nil { + state.Reset() + return nil, state, nil, ErrCRCMismatch + } + decoder.updateCRC(rec.Crc) + + case snapshotType: + var snap walpb.Snapshot + pbutil.MustUnmarshal(&snap, rec.Data) + if snap.Index == w.start.Index { + if snap.Term != w.start.Term { + state.Reset() + return nil, state, nil, ErrSnapshotMismatch + } + match = true + } + + default: + state.Reset() + return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type) + } + } + + switch w.tail() { + case nil: + if err != io.EOF && err != io.ErrUnexpectedEOF { + state.Reset() + return nil, state, nil, err + } + default: + // 如果WAL是以写模式打开的,我们必须读取所有的条目. + if err != io.EOF { + state.Reset() + return nil, state, nil, err + } + + if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil { // 跳到末尾 + return nil, state, nil, err + } + if err = fileutil.ZeroToEnd(w.tail().File); err != nil { // 清空wal文件当前之后的数据,并固定分配文件空间 + return nil, state, nil, err + } + } + + err = nil + if !match { // wal 中没有发现当前的快照记录 + err = ErrSnapshotNotFound + } + + // 关闭decoder,禁止读取 + if w.readClose != nil { + w.readClose() + w.readClose = nil + } + w.start = walpb.Snapshot{} + + w.metadata = metadata + + if w.tail() != nil { // wal文件 + // 创建编码器(与解码器连锁crc),启用追加功能 + w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC()) + if err != nil { + return + } + } + w.decoder = nil + + return metadata, state, ents, err +} + +// ValidSnapshotEntries 返回给定目录下wal日志中的所有有效快照条目.如果快照条目的索引小于或等于最近提交的hardstate,则为有效. +func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, error) { + var snaps []walpb.Snapshot + var state raftpb.HardState + var err error + + rec := &walpb.Record{} + names, err := readWALNames(lg, walDir) // 获取wal目录下的所有wal文件 + if err != nil { + return nil, err + } + + // 在读模式下打开WAL文件,这样,当在其他地方以写模式打开同样的WAL时,就不会有冲突. 
+ rs, _, closer, err := openWALFiles(lg, walDir, names, 0, false) + if err != nil { + return nil, err + } + defer func() { + if closer != nil { + closer() + } + }() + + // 从WAL文件的读者中创建一个新的解码器 + decoder := newDecoder(rs...) + + for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { + switch rec.Type { + case snapshotType: // 5 + var loadedSnap walpb.Snapshot + pbutil.MustUnmarshal(&loadedSnap, rec.Data) + snaps = append(snaps, loadedSnap) + case stateType: // 3 + state = mustUnmarshalState(rec.Data) + case crcType: // 4 + crc := decoder.crc.Sum32() + // 解码器的当前crc必须与记录的crc相匹配 + if crc != 0 && rec.Validate(crc) != nil { + return nil, ErrCRCMismatch + } + decoder.updateCRC(rec.Crc) + } + } + if err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + + // 过滤任何打快照的行为 + n := 0 + for _, s := range snaps { + if s.Index <= state.Commit { + snaps[n] = s + n++ + } + } + snaps = snaps[:n:n] + return snaps, nil +} + +// Verify reads through the given WAL and verifies that it is not corrupted. +// It creates a new decoder to read through the records of the given WAL. +// It does not conflict with any open WAL, but it is recommended not to +// call this function after opening the WAL for writing. +// If it cannot read out the expected snap, it will return ErrSnapshotNotFound. +// If the loaded snap doesn't match with the expected one, it will +// return error ErrSnapshotMismatch. 
+func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardState, error) { + var metadata []byte + var err error + var match bool + var state raftpb.HardState + + rec := &walpb.Record{} + + if lg == nil { + lg = zap.NewNop() + } + names, nameIndex, err := selectWALFiles(lg, walDir, snap) + if err != nil { + return nil, err + } + + // open wal files in read mode, so that there is no conflict + // when the same WAL is opened elsewhere in write mode + rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false) + if err != nil { + return nil, err + } + defer func() { + if closer != nil { + closer() + } + }() + + // create a new decoder from the readers on the WAL files + decoder := newDecoder(rs...) + + for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { + switch rec.Type { + case metadataType: + if metadata != nil && !bytes.Equal(metadata, rec.Data) { + return nil, ErrMetadataConflict + } + metadata = rec.Data + case crcType: + crc := decoder.crc.Sum32() + // Current crc of decoder must match the crc of the record. + // We need not match 0 crc, since the decoder is a new one at this point. + if crc != 0 && rec.Validate(crc) != nil { + return nil, ErrCRCMismatch + } + decoder.updateCRC(rec.Crc) + case snapshotType: + var loadedSnap walpb.Snapshot + pbutil.MustUnmarshal(&loadedSnap, rec.Data) + if loadedSnap.Index == snap.Index { + if loadedSnap.Term != snap.Term { + return nil, ErrSnapshotMismatch + } + match = true + } + // We ignore all entry and state type records as these + // are not necessary for validating the WAL contents + case entryType: + case stateType: + pbutil.MustUnmarshal(&state, rec.Data) + default: + return nil, fmt.Errorf("unexpected block type %d", rec.Type) + } + } + + // We do not have to read out all the WAL entries + // as the decoder is opened in read mode. 
+ if err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + + if !match { + return nil, ErrSnapshotNotFound + } + + return &state, nil +} + +// cut 当日志数据大于默认的64M时就会生成新的文件写入日志,新文件的第一条记录就是上一个wal文件最后的crc +func (w *WAL) cut() error { + // close old wal file; truncate to avoid wasting space if an early cut + off, serr := w.tail().Seek(0, io.SeekCurrent) + if serr != nil { + return serr + } + + if err := w.tail().Truncate(off); err != nil { + return err + } + + if err := w.sync(); err != nil { // 日志截断? + return err + } + + fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1)) + + // create a temp wal file with name sequence + 1, or truncate the existing one + newTail, err := w.fp.Open() + if err != nil { + return err + } + + // update writer and save the previous crc + w.locks = append(w.locks, newTail) + prevCrc := w.encoder.crc.Sum32() + w.encoder, err = newFileEncoder(w.tail().File, prevCrc) + if err != nil { + return err + } + + if err = w.saveCrc(prevCrc); err != nil { + return err + } + + if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil { + return err + } + + if err = w.saveState(&w.state); err != nil { + return err + } + + // atomically move temp wal file to wal file + if err = w.sync(); err != nil { // 移动临时wal文件到wal文件 + return err + } + + off, err = w.tail().Seek(0, io.SeekCurrent) + if err != nil { + return err + } + + if err = os.Rename(newTail.Name(), fpath); err != nil { + return err + } + + if err = fileutil.Fsync(w.dirFile); err != nil { + return err + } + + // reopen newTail with its new path so calls to Name() match the wal filename format + newTail.Close() + + if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil { + return err + } + if _, err = newTail.Seek(off, io.SeekStart); err != nil { + return err + } + + w.locks[len(w.locks)-1] = newTail + + prevCrc = w.encoder.crc.Sum32() + w.encoder, err = newFileEncoder(w.tail().File, prevCrc) + if err != nil 
{ + return err + } + + w.lg.Info("created a new WAL segment", zap.String("path", fpath)) + return nil +} + +// 强制wal日志刷盘 +func (w *WAL) sync() error { + if w.encoder != nil { + if err := w.encoder.flush(); err != nil { + return err + } + } + fmt.Println("wal flush") + + if w.unsafeNoSync { // 非安全存储 默认是 false + return nil + } + + start := time.Now() + // Fdatasync类似于fsync(),但不会刷新修改后的元数据,除非为了允许正确处理后续的数据检索而需要这些元数据. + err := fileutil.Fdatasync(w.tail().File) + + took := time.Since(start) + if took > warnSyncDuration { + w.lg.Warn("缓慢 fdatasync", zap.Duration("took", took), zap.Duration("expected-duration", warnSyncDuration)) + } + return err +} + +// Sync 强制wal日志刷盘 +func (w *WAL) Sync() error { + return w.sync() // 强制刷盘 +} + +// ReleaseLockTo 释放锁,这些锁的索引比给定的索引小,但其中最大的一个除外. +// 例如,如果WAL持有锁1,2,3,4,5,6,ReleaseLockTo(4)将释放 锁1,2,但保留3.ReleaseLockTo(5)将释放1,2,3,但保留4. +func (w *WAL) ReleaseLockTo(index uint64) error { + w.mu.Lock() + defer w.mu.Unlock() + + if len(w.locks) == 0 { + return nil + } + + var smaller int + found := false + for i, l := range w.locks { + _, lockIndex, err := parseWALName(filepath.Base(l.Name())) + if err != nil { + return err + } + if lockIndex >= index { + smaller = i - 1 + found = true + break + } + } + + // if no lock index is greater than the release index, we can + // release lock up to the last one(excluding). + if !found { + smaller = len(w.locks) - 1 + } + + if smaller <= 0 { + return nil + } + + for i := 0; i < smaller; i++ { + if w.locks[i] == nil { + continue + } + w.locks[i].Close() + } + w.locks = w.locks[smaller:] + + return nil +} + +// Close closes the current WAL file and directory. 
+func (w *WAL) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.fp != nil { + w.fp.Close() + w.fp = nil + } + + if w.tail() != nil { + if err := w.sync(); err != nil { // 文件关闭时 + return err + } + } + for _, l := range w.locks { + if l == nil { + continue + } + if err := l.Close(); err != nil { + w.lg.Error("failed to close WAL", zap.Error(err)) + } + } + + return w.dirFile.Close() +} + +// 将日志保存到wal,更新wal写入的最新索引 +func (w *WAL) saveEntry(e *raftpb.Entry) error { + b := pbutil.MustMarshal(e) + rec := &walpb.Record{Type: entryType, Data: b} + if err := w.encoder.encode(rec); err != nil { + return err + } + w.enti = e.Index + return nil +} + +// 写当前的存储状态 +func (w *WAL) saveState(s *raftpb.HardState) error { + if raft.IsEmptyHardState(*s) { + return nil + } + w.state = *s + b := pbutil.MustMarshal(s) + rec := &walpb.Record{Type: stateType, Data: b} + return w.encoder.encode(rec) +} + +// Save 日志发送给Follower的同时,Leader会将日志落盘,即写到WAL中, +// 将raft交给上层应用的一些commit信息保存到wal +func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { + // 获取wal的写锁 + w.mu.Lock() + defer w.mu.Unlock() + // HardState变化或者新的日志条目则需要写wal + if raft.IsEmptyHardState(st) && len(ents) == 0 { + return nil + } + // 是否需要同步刷新磁盘 + mustSync := raft.MustSync(st, w.state, len(ents)) + + // 将日志保存到wal,更新wal写入的最新索引 + for i := range ents { + fmt.Printf("待刷盘---> wal.Save %s\n", string(ents[i].Data)) + if err := w.saveEntry(&ents[i]); err != nil { + return err + } + } + // 持久化HardState, HardState表示服务器当前状态,定义在raft.pb.go,主要包含Term、Vote、Commit + if err := w.saveState(&st); err != nil { + return err + } + // 判断文件大小是否超过最大值 + // 获取最后一个LockedFile的大小(已经使用的) + curOff, err := w.tail().Seek(0, io.SeekCurrent) + if err != nil { + return err + } + if curOff < SegmentSizeBytes { + if mustSync { + return w.sync() // 写日志时,判断是否刷盘 + } + return nil + } + // 否则执行切割(也就是说明,WAL文件是可以超过64MB的) + return w.cut() +} + +// SaveSnapshot 保存一条生成快照的日志 +func (w *WAL) SaveSnapshot(e walpb.Snapshot) error { + b := pbutil.MustMarshal(&e) + 
+ w.mu.Lock() + defer w.mu.Unlock() + + rec := &walpb.Record{Type: snapshotType, Data: b} + if err := w.encoder.encode(rec); err != nil { + return err + } + // 只有当快照领先于最后的索引时才更新enti + if w.enti < e.Index { + w.enti = e.Index + } + return w.sync() // 保存快照时,刷盘 +} + +// 保存 +func (w *WAL) saveCrc(prevCrc uint32) error { + return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc}) +} + +// 返回最后一个锁文件 +func (w *WAL) tail() *fileutil.LockedFile { + if len(w.locks) > 0 { + return w.locks[len(w.locks)-1] // 返回最后一个锁文件 + } + return nil +} + +func (w *WAL) seq() uint64 { + t := w.tail() + if t == nil { + return 0 + } + seq, _, err := parseWALName(filepath.Base(t.Name())) + if err != nil { + w.lg.Fatal("解析WAL名称失败", zap.String("name", t.Name()), zap.Error(err)) + } + return seq +} + +func closeAll(lg *zap.Logger, rcs ...io.ReadCloser) error { + stringArr := make([]string, 0) + for _, f := range rcs { + if err := f.Close(); err != nil { + lg.Warn("failed to close: ", zap.Error(err)) + stringArr = append(stringArr, err.Error()) + } + } + if len(stringArr) == 0 { + return nil + } + return errors.New(strings.Join(stringArr, ", ")) +} diff --git a/server/storage/wal/repair.go b/etcd/wal/repair.go similarity index 76% rename from server/storage/wal/repair.go rename to etcd/wal/repair.go index 53734045167..7ab391fea95 100644 --- a/server/storage/wal/repair.go +++ b/etcd/wal/repair.go @@ -15,16 +15,13 @@ package wal import ( - "errors" "io" "os" "path/filepath" - "time" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" ) // Repair tries to repair ErrUnexpectedEOF in the @@ -42,34 +39,33 @@ func Repair(lg *zap.Logger, dirpath string) bool { lg.Info("repairing", zap.String("path", f.Name())) rec := &walpb.Record{} - decoder := NewDecoder(fileutil.NewFileReader(f.File)) + decoder := newDecoder(f) for { - lastOffset := 
decoder.LastOffset() - err := decoder.Decode(rec) - switch { - case err == nil: + lastOffset := decoder.lastOffset() + err := decoder.decode(rec) + switch err { + case nil: // update crc of the decoder when necessary switch rec.Type { - case CrcType: - crc := decoder.LastCRC() + case crcType: + crc := decoder.crc.Sum32() // current crc of decoder must match the crc of the record. // do no need to match 0 crc, since the decoder is a new one at this case. if crc != 0 && rec.Validate(crc) != nil { return false } - decoder.UpdateCRC(rec.Crc) + decoder.updateCRC(rec.Crc) } continue - case errors.Is(err, io.EOF): + case io.EOF: lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.EOF)) return true - case errors.Is(err, io.ErrUnexpectedEOF): - brokenName := f.Name() + ".broken" - bf, bferr := os.Create(brokenName) + case io.ErrUnexpectedEOF: + bf, bferr := os.Create(f.Name() + ".broken") if bferr != nil { - lg.Warn("failed to create backup file", zap.String("path", brokenName), zap.Error(bferr)) + lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr)) return false } defer bf.Close() @@ -80,7 +76,7 @@ func Repair(lg *zap.Logger, dirpath string) bool { } if _, err = io.Copy(bf, f); err != nil { - lg.Warn("failed to copy", zap.String("from", f.Name()), zap.String("to", brokenName), zap.Error(err)) + lg.Warn("failed to copy", zap.String("from", f.Name()+".broken"), zap.String("to", f.Name()), zap.Error(err)) return false } @@ -89,13 +85,10 @@ func Repair(lg *zap.Logger, dirpath string) bool { return false } - start := time.Now() if err = fileutil.Fsync(f.File); err != nil { lg.Warn("failed to fsync", zap.String("path", f.Name()), zap.Error(err)) return false } - walFsyncSec.Observe(time.Since(start).Seconds()) - lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.ErrUnexpectedEOF)) return true diff --git a/etcd/wal/walpb/over_self_serilize.go b/etcd/wal/walpb/over_self_serilize.go new file mode 100644 index 
00000000000..2ce5dbb938c --- /dev/null +++ b/etcd/wal/walpb/over_self_serilize.go @@ -0,0 +1,39 @@ +package walpb + +import ( + "encoding/json" +) + +type Temp struct { + Type int64 + Crc uint32 + Data string +} + +func (m *Record) Marshal() (dAtA []byte, err error) { + return json.Marshal(Temp{ + Type: m.Type, + Crc: m.Crc, + Data: string(m.Data), + }) +} + +func (m *Record) Unmarshal(dAtA []byte) error { + a := Temp{} + err := json.Unmarshal(dAtA, &a) + if err != nil { + return err + } + m.Type = a.Type + m.Crc = a.Crc + m.Data = []byte(a.Data) + return nil +} + +func (m *Snapshot) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} diff --git a/etcd/wal/walpb/record.go b/etcd/wal/walpb/record.go new file mode 100644 index 00000000000..47146a86601 --- /dev/null +++ b/etcd/wal/walpb/record.go @@ -0,0 +1,27 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package walpb + +import "errors" + +var ErrCRCMismatch = errors.New("walpb: crc mismatch") + +func (rec *Record) Validate(crc uint32) error { + if rec.Crc == crc { + return nil + } + rec.Reset() + return ErrCRCMismatch +} diff --git a/etcd/wal/walpb/record.pb.go b/etcd/wal/walpb/record.pb.go new file mode 100644 index 00000000000..e3eb7f0319c --- /dev/null +++ b/etcd/wal/walpb/record.pb.go @@ -0,0 +1,95 @@ +// Code generated by protoc-gen-gogo. 
+// source: record.proto + +package walpb + +import ( + "encoding/json" + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + raftpb "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Record struct { + Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` + Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` +} + +func (m *Record) Reset() { *m = Record{} } +func (m *Record) String() string { return proto.CompactTextString(m) } +func (*Record) ProtoMessage() {} +func (*Record) Descriptor() ([]byte, []int) { + return fileDescriptor_bf94fd919e302a1d, []int{0} +} + +// Keep in sync with raftpb.SnapshotMetadata. +type Snapshot struct { + Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` + // Field populated since >=etcd-3.5.0. 
+ ConfState *raftpb.ConfState `protobuf:"bytes,3,opt,name=conf_state,json=confState" json:"conf_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_bf94fd919e302a1d, []int{1} +} + +func init() { + proto.RegisterType((*Record)(nil), "walpb.Record") + proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") +} + +func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) } + +var fileDescriptor_bf94fd919e302a1d = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8e, 0x41, 0x4e, 0xc3, 0x30, + 0x10, 0x45, 0x63, 0xe2, 0x22, 0x18, 0xca, 0x02, 0xab, 0xaa, 0xa2, 0x2c, 0x4c, 0xd4, 0x55, 0x56, + 0x29, 0xe2, 0x08, 0x65, 0xcf, 0x22, 0x3d, 0x00, 0x72, 0x1d, 0xa7, 0x20, 0xd1, 0x8c, 0x35, 0xb5, + 0x04, 0xdc, 0x84, 0x23, 0x65, 0xc9, 0x09, 0x10, 0x84, 0x8b, 0xa0, 0x8c, 0x03, 0x1b, 0xfb, 0xeb, + 0x7d, 0xf9, 0x7d, 0xc3, 0x9c, 0x9c, 0x45, 0x6a, 0x2a, 0x4f, 0x18, 0x50, 0xcd, 0x5e, 0xcc, 0xb3, + 0xdf, 0xe5, 0x8b, 0x3d, 0xee, 0x91, 0xc9, 0x7a, 0x4c, 0xb1, 0xcc, 0x97, 0x64, 0xda, 0xb0, 0x1e, + 0x0f, 0xbf, 0xe3, 0x2b, 0xf2, 0xd5, 0x3d, 0x9c, 0xd6, 0x2c, 0x51, 0x19, 0xc8, 0xf0, 0xe6, 0x5d, + 0x26, 0x0a, 0x51, 0xa6, 0x1b, 0xd9, 0x7f, 0x5e, 0x27, 0x35, 0x13, 0xb5, 0x84, 0xd4, 0x92, 0xcd, + 0x4e, 0x0a, 0x51, 0x5e, 0x4e, 0xc5, 0x08, 0x94, 0x02, 0xd9, 0x98, 0x60, 0xb2, 0xb4, 0x10, 0xe5, + 0xbc, 0xe6, 0xbc, 0x22, 0x38, 0xdb, 0x76, 0xc6, 0x1f, 0x1f, 0x31, 0xa8, 0x1c, 0x66, 0x4f, 0x5d, + 0xe3, 0x5e, 0x59, 0x29, 0xa7, 0x97, 0x11, 0xf1, 0x9a, 0xa3, 0x03, 0x4b, 0xe5, 0xff, 0x9a, 0xa3, + 0x83, 0xba, 0x01, 0xb0, 0xd8, 0xb5, 0x0f, 0xc7, 0x60, 0x82, 0x63, 0xf7, 0xc5, 0xed, 0x55, 0x15, + 0x7f, 0x5e, 
0xdd, 0x61, 0xd7, 0x6e, 0xc7, 0xa2, 0x3e, 0xb7, 0x7f, 0x71, 0xb3, 0xe8, 0xbf, 0x75, + 0xd2, 0x0f, 0x5a, 0x7c, 0x0c, 0x5a, 0x7c, 0x0d, 0x5a, 0xbc, 0xff, 0xe8, 0xe4, 0x37, 0x00, 0x00, + 0xff, 0xff, 0xc3, 0x36, 0x0c, 0xad, 0x1d, 0x01, 0x00, 0x00, +} + +func (m *Record) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Snapshot) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} diff --git a/server/storage/wal/walpb/record.proto b/etcd/wal/walpb/record.proto similarity index 95% rename from server/storage/wal/walpb/record.proto rename to etcd/wal/walpb/record.proto index aed4351d315..536fa6c19c1 100644 --- a/server/storage/wal/walpb/record.proto +++ b/etcd/wal/walpb/record.proto @@ -2,7 +2,7 @@ syntax = "proto2"; package walpb; import "gogoproto/gogo.proto"; -import "raftpb/raft.proto"; +import "raft/raftpb/raft.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; diff --git a/etcd3-multinode-systemd.md b/etcd3-multinode-systemd.md new file mode 100644 index 00000000000..6e9f1de24cb --- /dev/null +++ b/etcd3-multinode-systemd.md @@ -0,0 +1,173 @@ +# etcd3 multi-node cluster + +Here's how to deploy etcd cluster with systemd. + +## Set up data directory + +etcd needs data directory on host machine. Configure the data directory accessible to systemd as: + +``` +sudo mkdir -p /var/lib/etcd +sudo chown -R root:$(whoami) /var/lib/etcd +sudo chmod -R a+rw /var/lib/etcd +``` + +## Write systemd service file + +In each machine, write etcd systemd service files: + +``` +cat > /tmp/my-etcd-1.service < /tmp/my-etcd-2.service < /tmp/my-etcd-3.service < \ -PUT assigns the specified value with the specified key. If key already holds a value, it is overwritten. - -RPC: Put - #### Options -- lease -- lease ID (in hexadecimal) to attach to the key. - -- prev-kv -- return the previous key-value pair before modification. - -- ignore-value -- updates the key using its current value. 
- -- ignore-lease -- updates the key using its current lease. - -#### Output - -`OK` +- lease -- 租约ID(十六进制),以附加到key上. +- prev-kv -- 返回修改前的键值对. +- ignore-value -- 使用其当前值更新该键. +- ignore-lease -- 使用其当前租约更新key. #### Examples ```bash -./etcdctl put foo bar --lease=1234abcd -# OK -./etcdctl get foo -# foo -# bar -./etcdctl put foo --ignore-value # to detache lease -# OK +leaseID=`echo $(etcdctl lease grant 5)|awk '{print $2}'` +echo $leaseID +etcdctl put foo bar --lease=$leaseID +etcdctl get foo +etcdctl put foo --ignore-value # 移除租期 +sleep 6 +etcdctl get foo ``` ```bash -./etcdctl put foo bar --lease=1234abcd +leaseID=`echo $(etcdctl lease grant 5)|awk '{print $2}'` +echo $leaseID +etcdctl put foo bar --lease=$$leaseID # OK -./etcdctl put foo bar1 --ignore-lease # to use existing lease 1234abcd +etcdctl put foo bar1 --ignore-lease # to use existing lease 1234abcd # OK -./etcdctl get foo +etcdctl get foo # foo # bar1 -``` - -```bash -./etcdctl put foo bar1 --prev-kv +etcdctl put foo bar1 --prev-kv # OK # foo # bar -./etcdctl get foo -# foo # bar1 ``` -#### Remarks - -If \ isn't given as command line argument, this command tries to read the value from standard input. - -When \ begins with '-', \ is interpreted as a flag. -Insert '--' for workaround: - -```bash -./etcdctl put -- -./etcdctl put -- ``` - -Providing \ in a new line after using `carriage return` is not supported and etcdctl may hang in that case. For example, following case is not supported: - -```bash -./etcdctl put \r - -``` - -A \ can have multiple lines or spaces but it must be provided with a double-quote as demonstrated below: - -```bash -./etcdctl put foo "bar1 2 3" +echo -e 'demo +test' |etcdctl put asd -- + +etcdctl put -- a b ``` ### GET [options] \ [range_end] -GET gets the key or a range of keys [key, range_end) if range_end is given. +GET 获取键或键的范围 [key, range_end) if range_end is given. 
RPC: Range @@ -134,20 +99,20 @@ RPC: Range First, populate etcd with some keys: ```bash -./etcdctl put foo bar +etcdctl put foo bar # OK -./etcdctl put foo1 bar1 +etcdctl put foo1 bar1 # OK -./etcdctl put foo2 bar2 +etcdctl put foo2 bar2 # OK -./etcdctl put foo3 bar3 +etcdctl put foo3 bar3 # OK ``` Get the key named `foo`: ```bash -./etcdctl get foo +etcdctl get foo # foo # bar ``` @@ -155,7 +120,7 @@ Get the key named `foo`: Get all keys: ```bash -./etcdctl get --from-key '' +etcdctl get --from-key '' # foo # bar # foo1 @@ -169,7 +134,7 @@ Get all keys: Get all keys with names greater than or equal to `foo1`: ```bash -./etcdctl get --from-key foo1 +etcdctl get --from-key foo1 # foo1 # bar1 # foo2 @@ -181,7 +146,7 @@ Get all keys with names greater than or equal to `foo1`: Get keys with names greater than or equal to `foo1` and less than `foo3`: ```bash -./etcdctl get foo1 foo3 +etcdctl get foo1 foo3 # foo1 # bar1 # foo2 @@ -190,21 +155,22 @@ Get keys with names greater than or equal to `foo1` and less than `foo3`: #### Remarks -If any key or value contains non-printable characters or control characters, simple formatted output can be ambiguous due to new lines. To resolve this issue, set `--hex` to hex encode all strings. +If any key or value contains non-printable characters or control characters, simple formatted output can backend ambiguous +due to new lines. To resolve this issue, set `--hex` to hex encode all strings. ### DEL [options] \ [range_end] -Removes the specified key or range of keys [key, range_end) if range_end is given. +移除指定的键或键的范围 [key, range_end) if range_end is given. RPC: DeleteRange #### Options -- prefix -- delete keys by matching prefix +- prefix -- 通过匹配前缀删除键 -- prev-kv -- return deleted key-value pairs +- prev-kv -- 返回删除的k,v 键值对 -- from-key -- delete keys that are greater than or equal to the given key using byte compare +- from-key -- 使用字节比较法删除大于或等于给定键的键. 
#### Output @@ -213,51 +179,52 @@ Prints the number of keys that were removed in decimal if DEL succeeded. #### Examples ```bash -./etcdctl put foo bar +etcdctl put foo bar # OK -./etcdctl del foo +etcdctl del foo # 1 -./etcdctl get foo +etcdctl get foo ``` ```bash -./etcdctl put key val +etcdctl put key val # OK -./etcdctl del --prev-kv key +etcdctl del --prev-kv key # 1 # key # val -./etcdctl get key +etcdctl get key ``` ```bash -./etcdctl put a 123 +etcdctl put a 123 # OK -./etcdctl put b 456 +etcdctl put b 456 # OK -./etcdctl put z 789 +etcdctl put z 789 # OK -./etcdctl del --from-key a +etcdctl del --from-key a # 3 -./etcdctl get --from-key a +etcdctl get --from-key a ``` ```bash -./etcdctl put zoo val +etcdctl put zoo val # OK -./etcdctl put zoo1 val1 +etcdctl put zoo1 val1 # OK -./etcdctl put zoo2 val2 +etcdctl put zoo2 val2 # OK -./etcdctl del --prefix zoo +etcdctl del --prefix zoo # 3 -./etcdctl get zoo2 +etcdctl get zoo2 ``` ### TXN [options] -TXN reads multiple etcd requests from standard input and applies them as a single atomic transaction. -A transaction consists of list of conditions, a list of requests to apply if all the conditions are true, and a list of requests to apply if any condition is false. +TXN reads multiple etcd requests from standard input and applies them as a single atomic transaction. A transaction +consists of list of conditions, a list of requests to apply if all the conditions are true, and a list of requests to +apply if any condition is false. RPC: Txn @@ -268,6 +235,7 @@ RPC: Txn - interactive -- input transaction with interactive prompting. 
#### Input Format + ```ebnf ::= * "\n" "\n" "\n" ::= (||||) "\n" @@ -275,7 +243,7 @@ RPC: Txn := ("c"|"create")"("")" ::= ("m"|"mod")"("")" ::= ("val"|"value")"("")" - ::= ("ver"|"version")"("")" + ::= ("versionCount"|"version")"("")" ::= "lease("")" ::= * ::= * @@ -289,13 +257,15 @@ RPC: Txn #### Output -`SUCCESS` if etcd processed the transaction success list, `FAILURE` if etcd processed the transaction failure list. Prints the output for each command in the executed request list, each separated by a blank line. +`SUCCESS` if etcd processed the transaction success list, `FAILURE` if etcd processed the transaction failure list. +Prints the output for each command in the executed request list, each separated by a blank line. #### Examples txn in interactive mode: + ```bash -./etcdctl txn -i +etcdctl txn -i # compares: mod("key1") > "0" @@ -314,8 +284,9 @@ put key2 "some extra key" ``` txn in non-interactive mode: + ```bash -./etcdctl txn <<<'mod("key1") > "0" +etcdctl txn <<<'mod("key1") > "0" put key1 "overwrote-key1" @@ -333,10 +304,12 @@ put key2 "some extra key" #### Remarks -When using multi-line values within a TXN command, newlines must be represented as `\n`. Literal newlines will cause parsing failures. This differs from other commands (such as PUT) where the shell will convert literal newlines for us. For example: +When using multi-line values within a TXN command, newlines必须是represented as `\n`. Literal newlines will cause +parsing failures. This differs from other commands (such as PUT) where the shell will convert literal newlines for us. +For example: ```bash -./etcdctl txn <<<'mod("key1") > "0" +etcdctl txn <<<'mod("key1") > "0" put key1 "overwrote-key1" @@ -356,27 +329,30 @@ put key2 "this is\na multi-line\nvalue" COMPACTION discards all etcd event history prior to a given revision. Since etcd uses a multiversion concurrency control model, it preserves all key updates as event history. 
When the event history up to some revision is no longer needed, -all superseded keys may be compacted away to reclaim storage space in the etcd backend database. +all superseded keys may backend compacted away to reclaim storage space in the etcd backend database. RPC: Compact #### Options -- physical -- 'true' to wait for compaction to physically remove all old revisions +- physical -- 'true' 等待压缩以实际删除所有旧修订 #### Output Prints the compacted revision. #### Example + ```bash -./etcdctl compaction 1234 +etcdctl compaction 1234 # compacted revision 1234 ``` ### WATCH [options] [key or prefix] [range_end] [--] [exec-command arg1 arg2 ...] -Watch watches events stream on keys or prefixes, [key or prefix, range_end) if range_end is given. The watch command runs until it encounters an error or is terminated by the user. If range_end is given, it must be lexicographically greater than key or "\x00". +Watch watches events stream on keys or prefixes, [key or prefix, range_end) if range_end is given. The watch command +runs until it encounters an error or is terminated by the user. If range_end is given, it必须是lexicographically +greater than key or "\x00". 
RPC: Watch @@ -409,14 +385,14 @@ watch [options] \n ##### Non-interactive ```bash -./etcdctl watch foo +etcdctl watch foo # PUT # foo # bar ``` ```bash -ETCDCTL_WATCH_KEY=foo ./etcdctl watch +ETCDCTL_WATCH_KEY=foo etcdctl watch # PUT # foo # bar @@ -425,7 +401,7 @@ ETCDCTL_WATCH_KEY=foo ./etcdctl watch Receive events and execute `echo watch event received`: ```bash -./etcdctl watch foo -- echo watch event received +etcdctl watch foo -- echo watch event received # PUT # foo # bar @@ -435,7 +411,7 @@ Receive events and execute `echo watch event received`: Watch response is set via `ETCD_WATCH_*` environmental variables: ```bash -./etcdctl watch foo -- sh -c "env | grep ETCD_WATCH_" +etcdctl watch foo -- sh -c "env | grep ETCD_WATCH_" # PUT # foo @@ -450,7 +426,7 @@ Watch with environmental variables and execute `echo watch event received`: ```bash export ETCDCTL_WATCH_KEY=foo -./etcdctl watch -- echo watch event received +etcdctl watch -- echo watch event received # PUT # foo # bar @@ -460,7 +436,7 @@ export ETCDCTL_WATCH_KEY=foo ```bash export ETCDCTL_WATCH_KEY=foo export ETCDCTL_WATCH_RANGE_END=foox -./etcdctl watch -- echo watch event received +etcdctl watch -- echo watch event received # PUT # fob # bar @@ -470,7 +446,7 @@ export ETCDCTL_WATCH_RANGE_END=foox ##### Interactive ```bash -./etcdctl watch -i +etcdctl watch -i watch foo watch foo # PUT @@ -484,7 +460,7 @@ watch foo Receive events and execute `echo watch event received`: ```bash -./etcdctl watch -i +etcdctl watch -i watch foo -- echo watch event received # PUT # foo @@ -496,7 +472,7 @@ Watch with environmental variables and execute `echo watch event received`: ```bash export ETCDCTL_WATCH_KEY=foo -./etcdctl watch -i +etcdctl watch -i watch -- echo watch event received # PUT # foo @@ -507,7 +483,7 @@ watch -- echo watch event received ```bash export ETCDCTL_WATCH_KEY=foo export ETCDCTL_WATCH_RANGE_END=foox -./etcdctl watch -i +etcdctl watch -i watch -- echo watch event received # PUT # fob @@ -521,8 
+497,8 @@ LEASE provides commands for key lease management. ### LEASE GRANT \ -LEASE GRANT creates a fresh lease with a server-selected time-to-live in seconds -greater than or equal to the requested TTL value. +LEASE GRANT creates a fresh lease with a server-selected time-to-live in seconds greater than or equal to the requested +TTL value. RPC: LeaseGrant @@ -533,7 +509,7 @@ Prints a message with the granted lease ID. #### Example ```bash -./etcdctl lease grant 60 +etcdctl lease grant 60 # lease 32695410dcc0ca06 granted with TTL(60s) ``` @@ -550,7 +526,7 @@ Prints a message indicating the lease is revoked. #### Example ```bash -./etcdctl lease revoke 32695410dcc0ca06 +etcdctl lease revoke 32695410dcc0ca06 # lease 32695410dcc0ca06 revoked ``` @@ -562,7 +538,7 @@ RPC: LeaseTimeToLive #### Options -- keys -- Get keys attached to this lease +- keys -- Get keys attached to this lease #### Output @@ -571,28 +547,28 @@ Prints lease information. #### Example ```bash -./etcdctl lease grant 500 +etcdctl lease grant 500 # lease 2d8257079fa1bc0c granted with TTL(500s) -./etcdctl put foo1 bar --lease=2d8257079fa1bc0c +etcdctl put foo1 bar --lease=2d8257079fa1bc0c # OK -./etcdctl put foo2 bar --lease=2d8257079fa1bc0c +etcdctl put foo2 bar --lease=2d8257079fa1bc0c # OK -./etcdctl lease timetolive 2d8257079fa1bc0c +etcdctl lease timetolive 2d8257079fa1bc0c # lease 2d8257079fa1bc0c granted with TTL(500s), remaining(481s) -./etcdctl lease timetolive 2d8257079fa1bc0c --keys +etcdctl lease timetolive 2d8257079fa1bc0c --keys # lease 2d8257079fa1bc0c granted with TTL(500s), remaining(472s), attached keys([foo2 foo1]) -./etcdctl lease timetolive 2d8257079fa1bc0c --write-out=json +etcdctl lease timetolive 2d8257079fa1bc0c --write-out=json # {"cluster_id":17186838941855831277,"member_id":4845372305070271874,"revision":3,"raft_term":2,"id":3279279168933706764,"ttl":465,"granted-ttl":500,"keys":null} -./etcdctl lease timetolive 2d8257079fa1bc0c --write-out=json --keys +etcdctl lease timetolive 
2d8257079fa1bc0c --write-out=json --keys # {"cluster_id":17186838941855831277,"member_id":4845372305070271874,"revision":3,"raft_term":2,"id":3279279168933706764,"ttl":459,"granted-ttl":500,"keys":["Zm9vMQ==","Zm9vMg=="]} -./etcdctl lease timetolive 2d8257079fa1bc0c +etcdctl lease timetolive 2d8257079fa1bc0c # lease 2d8257079fa1bc0c already expired ``` @@ -609,10 +585,10 @@ Prints a message with a list of active leases. #### Example ```bash -./etcdctl lease grant 60 +etcdctl lease grant 60 # lease 32695410dcc0ca06 granted with TTL(60s) -./etcdctl lease list +etcdctl lease list 32695410dcc0ca06 ``` @@ -627,8 +603,9 @@ RPC: LeaseKeepAlive Prints a message for every keep alive sent or prints a message indicating the lease is gone. #### Example + ```bash -./etcdctl lease keep-alive 32695410dcc0ca0 +etcdctl lease keep-alive 32695410dcc0ca0 # lease 32695410dcc0ca0 keepalived with TTL(100) # lease 32695410dcc0ca0 keepalived with TTL(100) # lease 32695410dcc0ca0 keepalived with TTL(100) @@ -658,7 +635,7 @@ Prints the member ID of the new member and the cluster ID. #### Example ```bash -./etcdctl member add newMember --peer-urls=https://127.0.0.1:12345 +etcdctl member add newMember --peer-urls=https://127.0.0.1:12345 Member ced000fda4d05edf added to cluster 8c4281cc65c7b112 @@ -684,7 +661,7 @@ Prints the member ID of the updated member and the cluster ID. #### Example ```bash -./etcdctl member update 2be1eb8f84b7f63e --peer-urls=https://127.0.0.1:11112 +etcdctl member update 2be1eb8f84b7f63e --peer-urls=https://127.0.0.1:11112 # Member 2be1eb8f84b7f63e updated in cluster ef37ad9dc622a7c4 ``` @@ -701,7 +678,7 @@ Prints the member ID of the removed member and the cluster ID. 
#### Example ```bash -./etcdctl member remove 2be1eb8f84b7f63e +etcdctl member remove 2be1eb8f84b7f63e # Member 2be1eb8f84b7f63e removed from cluster ef37ad9dc622a7c4 ``` @@ -718,19 +695,19 @@ Prints a humanized table of the member IDs, statuses, names, peer addresses, and #### Examples ```bash -./etcdctl member list +etcdctl member list # 8211f1d0f64f3269, started, infra1, http://127.0.0.1:12380, http://127.0.0.1:2379 # 91bc3c398fb3c146, started, infra2, http://127.0.0.1:22380, http://127.0.0.1:22379 # fd422379fda50e48, started, infra3, http://127.0.0.1:32380, http://127.0.0.1:32379 ``` ```bash -./etcdctl -w json member list +etcdctl -w json member list # {"header":{"cluster_id":17237436991929493444,"member_id":9372538179322589801,"raft_term":2},"members":[{"ID":9372538179322589801,"name":"infra1","peerURLs":["http://127.0.0.1:12380"],"clientURLs":["http://127.0.0.1:2379"]},{"ID":10501334649042878790,"name":"infra2","peerURLs":["http://127.0.0.1:22380"],"clientURLs":["http://127.0.0.1:22379"]},{"ID":18249187646912138824,"name":"infra3","peerURLs":["http://127.0.0.1:32380"],"clientURLs":["http://127.0.0.1:32379"]}]} ``` ```bash -./etcdctl -w table member list +etcdctl -w table member list +------------------+---------+--------+------------------------+------------------------+ | ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | +------------------+---------+--------+------------------------+------------------------+ @@ -750,26 +727,27 @@ ENDPOINT provides commands for querying individual endpoints. ### ENDPOINT HEALTH -ENDPOINT HEALTH checks the health of the list of endpoints with respect to cluster. An endpoint is unhealthy -when it cannot participate in consensus with the rest of the cluster. +ENDPOINT HEALTH checks the health of the list of endpoints with respect to cluster. An endpoint is unhealthy when it +cannot participate in consensus with the rest of the cluster. 
#### Output -If an endpoint can participate in consensus, prints a message indicating the endpoint is healthy. If an endpoint fails to participate in consensus, prints a message indicating the endpoint is unhealthy. +If an endpoint can participate in consensus, prints a message indicating the endpoint is healthy. If an endpoint fails +to participate in consensus, prints a message indicating the endpoint is unhealthy. #### Example Check the default endpoint's health: ```bash -./etcdctl endpoint health +etcdctl endpoint health # 127.0.0.1:2379 is healthy: successfully committed proposal: took = 2.095242ms ``` Check all endpoints for the cluster associated with the default endpoint: ```bash -./etcdctl endpoint --cluster health +etcdctl endpoint --cluster health # http://127.0.0.1:2379 is healthy: successfully committed proposal: took = 1.060091ms # http://127.0.0.1:22379 is healthy: successfully committed proposal: took = 903.138µs # http://127.0.0.1:32379 is healthy: successfully committed proposal: took = 1.113848ms @@ -783,39 +761,41 @@ ENDPOINT STATUS queries the status of each endpoint in the given endpoint list. ##### Simple format -Prints a humanized table of each endpoint URL, ID, version, database size, leadership status, raft term, and raft status. +Prints a humanized table of each endpoint URL, ID, version, database size, leadership status, raft term, and raft +status. ##### JSON format -Prints a line of JSON encoding each endpoint URL, ID, version, database size, leadership status, raft term, and raft status. +Prints a line of JSON encoding each endpoint URL, ID, version, database size, leadership status, raft term, and raft +status. 
#### Examples Get the status for the default endpoint: ```bash -./etcdctl endpoint status +etcdctl endpoint status # 127.0.0.1:2379, 8211f1d0f64f3269, 3.0.0, 25 kB, false, 2, 63 ``` Get the status for the default endpoint as JSON: ```bash -./etcdctl -w json endpoint status +etcdctl -w json endpoint status # [{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":17237436991929493444,"member_id":9372538179322589801,"revision":2,"raft_term":2},"version":"3.0.0","dbSize":24576,"leader":18249187646912138824,"raftIndex":32623,"raftTerm":2}}] ``` Get the status for all endpoints in the cluster associated with the default endpoint: ```bash -./etcdctl -w table endpoint --cluster status -+------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+ -| ENDPOINT | ID | VERSION | STORAGE VERSION | DB SIZE | DB SIZE IN USE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS | -+------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+ -| http://127.0.0.1:2379 | 8211f1d0f64f3269 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | false | false | 2 | 8 | 8 | | -| http://127.0.0.1:22379 | 91bc3c398fb3c146 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | true | false | 2 | 8 | 8 | | -| http://127.0.0.1:32379 | fd422379fda50e48 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | false | false | 2 | 8 | 8 | | -+------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+ +etcdctl -w table endpoint --cluster status ++------------------------+------------------+----------------+---------+-----------+-----------+------------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | 
++------------------------+------------------+----------------+---------+-----------+-----------+------------+ +| http://127.0.0.1:2379 | 8211f1d0f64f3269 | 3.2.0-rc.1+git | 25 kB | false | 2 | 8 | +| http://127.0.0.1:22379 | 91bc3c398fb3c146 | 3.2.0-rc.1+git | 25 kB | false | 2 | 8 | +| http://127.0.0.1:32379 | fd422379fda50e48 | 3.2.0-rc.1+git | 25 kB | true | 2 | 8 | ++------------------------+------------------+----------------+---------+-----------+-----------+------------+ ``` ### ENDPOINT HASHKV @@ -837,73 +817,28 @@ Prints a line of JSON encoding each endpoint URL and KV history hash. Get the hash for the default endpoint: ```bash -./etcdctl endpoint hashkv --cluster -http://127.0.0.1:2379, 2064120424, 13 -http://127.0.0.1:22379, 2064120424, 13 -http://127.0.0.1:32379, 2064120424, 13 +etcdctl endpoint hashkv +# 127.0.0.1:2379, 1084519789 ``` Get the status for the default endpoint as JSON: ```bash -./etcdctl endpoint hash --cluster -w json | jq -[ - { - "Endpoint": "http://127.0.0.1:2379", - "HashKV": { - "header": { - "cluster_id": 17237436991929494000, - "member_id": 9372538179322590000, - "revision": 13, - "raft_term": 2 - }, - "hash": 2064120424, - "compact_revision": -1, - "hash_revision": 13 - } - }, - { - "Endpoint": "http://127.0.0.1:22379", - "HashKV": { - "header": { - "cluster_id": 17237436991929494000, - "member_id": 10501334649042878000, - "revision": 13, - "raft_term": 2 - }, - "hash": 2064120424, - "compact_revision": -1, - "hash_revision": 13 - } - }, - { - "Endpoint": "http://127.0.0.1:32379", - "HashKV": { - "header": { - "cluster_id": 17237436991929494000, - "member_id": 18249187646912140000, - "revision": 13, - "raft_term": 2 - }, - "hash": 2064120424, - "compact_revision": -1, - "hash_revision": 13 - } - } -] +etcdctl -w json endpoint hashkv +# [{"Endpoint":"127.0.0.1:2379","Hash":{"header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":1,"raft_term":3},"hash":1084519789,"compact_revision":-1}}] ``` Get 
the status for all endpoints in the cluster associated with the default endpoint: ```bash -$ ./etcdctl endpoint hash --cluster -w table -+------------------------+-----------+---------------+ -| ENDPOINT | HASH | HASH REVISION | -+------------------------+-----------+---------------+ -| http://127.0.0.1:2379 | 784522900 | 16 | -| http://127.0.0.1:22379 | 784522900 | 16 | -| http://127.0.0.1:32379 | 784522900 | 16 | -+------------------------+-----------+---------------+ +etcdctl -w table endpoint --cluster hashkv ++------------------------+------------+ +| ENDPOINT | HASH | ++------------------------+------------+ +| http://127.0.0.1:2379 | 1084519789 | +| http://127.0.0.1:22379 | 1084519789 | +| http://127.0.0.1:32379 | 1084519789 | ++------------------------+------------+ ``` ### ALARM \ @@ -912,7 +847,7 @@ Provides alarm related commands ### ALARM DISARM -`alarm disarm` Disarms all alarms +`alarm disarm` Disarms all alarms RPC: Alarm @@ -923,19 +858,19 @@ RPC: Alarm #### Examples ```bash -./etcdctl alarm disarm +etcdctl alarm disarm ``` If NOSPACE alarm is present: ```bash -./etcdctl alarm disarm +etcdctl alarm disarm # alarm:NOSPACE ``` ### ALARM LIST -`alarm list` lists all alarms. +`alarm list` lists all alarms. RPC: Alarm @@ -946,26 +881,35 @@ RPC: Alarm #### Examples ```bash -./etcdctl alarm list +etcdctl alarm list ``` If NOSPACE alarm is present: ```bash -./etcdctl alarm list +etcdctl alarm list # alarm:NOSPACE ``` ### DEFRAG [options] -DEFRAG defragments the backend database file for a set of given endpoints while etcd is running. When an etcd member reclaims storage space from deleted and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the database, the etcd member releases this free space back to the file system. +DEFRAG defragments the backend database file for a set of given endpoints while etcd is running, ~~or directly +defragments an etcd data directory while etcd is not running~~. 
When an etcd member reclaims storage space from deleted +and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the +database, the etcd member releases this free space back to the file system. **Note: to defragment offline (`--data-dir` flag), use: `etcutl defrag` instead** -**Note that defragmentation to a live member blocks the system from reading and writing data while rebuilding its states.** +**Note that defragmentation to a live member blocks the system from reading and writing data while rebuilding its +states.** + +**Note that defragmentation request does not get replicated over cluster. That is, the request is only applied to the +local node. Specify all members in `--endpoints` flag or `--cluster` flag to automatically find all cluster members.** -**Note that defragmentation request does not get replicated over cluster. That is, the request is only applied to the local node. Specify all members in `--endpoints` flag or `--cluster` flag to automatically find all cluster members.** +#### Options +- data-dir -- Optional. **Deprecated**. If present, defragments a data directory not in use by etcd. To be removed in + v3.6. 
#### Output @@ -974,7 +918,7 @@ For each endpoints, prints a message indicating whether the endpoint was success #### Example ```bash -./etcdctl --endpoints=localhost:2379,badendpoint:2379 defrag +etcdctl --endpoints=localhost:2379,badendpoint:2379 defrag # Finished defragmenting etcd member[localhost:2379] # Failed to defragment etcd member[badendpoint:2379] (grpc: timed out trying to connect) ``` @@ -982,12 +926,22 @@ For each endpoints, prints a message indicating whether the endpoint was success Run defragment operations for all endpoints in the cluster associated with the default endpoint: ```bash -./etcdctl defrag --cluster +etcdctl defrag --cluster Finished defragmenting etcd member[http://127.0.0.1:2379] Finished defragmenting etcd member[http://127.0.0.1:22379] Finished defragmenting etcd member[http://127.0.0.1:32379] ``` +To defragment a data directory directly, use the `etcdutl` with `--data-dir` flag +(`etcdctl` will remove this flag in v3.6): + +``` bash +# Defragment while etcd is not running +etcdutl defrag --data-dir default.etcd +# success (exit status 0) +# Error: cannot open database at default.etcd/member/snap/db +``` + +#### Remarks DEFRAG returns a zero exit code only if it succeeded defragmenting all given endpoints. @@ -1007,110 +961,125 @@ The backend snapshot is written to the given file path. #### Example Save a snapshot to "snapshot.db": + ``` -./etcdctl snapshot save snapshot.db +etcdctl snapshot save snapshot.db ``` ### SNAPSHOT RESTORE [options] \ -Removed in v3.6. Use `etcdutl snapshot restore` instead. +Note: Deprecated. Use `etcdutl snapshot restore` instead. To be removed in v3.6. +SNAPSHOT RESTORE creates an etcd data directory for an etcd cluster member from a backend database snapshot and a new +cluster configuration. Restoring the snapshot into each member for a new cluster configuration will initialize a new +etcd cluster preloaded by the snapshot data. -### SNAPSHOT STATUS \ +#### Options -Removed in v3.6. 
Use `etcdutl snapshot status` instead. +The snapshot restore options closely resemble to those used in the `etcd` command for defining a cluster. -### MOVE-LEADER \ +- data-dir -- Path to the data directory. Uses \.etcd if none given. -MOVE-LEADER transfers leadership from the leader to another member in the cluster. +- wal-dir -- Path to the WAL directory. Uses data directory if none given. -#### Example +- initial-cluster -- The initial cluster configuration for the restored etcd cluster. -```bash -# to choose transferee -transferee_id=$(./etcdctl \ - --endpoints localhost:2379,localhost:22379,localhost:32379 \ - endpoint status | grep -m 1 "false" | awk -F', ' '{print $2}') -echo ${transferee_id} -# c89feb932daef420 +- initial-cluster-token -- Initial cluster token for the restored etcd cluster. -# endpoints should include leader node -./etcdctl --endpoints ${transferee_ep} move-leader ${transferee_id} -# Error: no leader endpoint given at [localhost:22379 localhost:32379] +- initial-advertise-peer-urls -- List of peer URLs for the member being restored. -# request to leader with target node ID -./etcdctl --endpoints ${leader_ep} move-leader ${transferee_id} -# Leadership transferred from 45ddc0e800e20b93 to c89feb932daef420 -``` +- name -- Human-readable name for the etcd cluster member being restored. -### DOWNGRADE \ +- skip-hash-check -- Ignore snapshot integrity hash value (required if copied from data directory) -NOTICE: Downgrades is an experimental feature in v3.6 and is not recommended for production clusters. +#### Output -Downgrade provides commands to downgrade cluster. -Normally etcd members cannot be downgraded due to cluster version mechanism. +A new etcd data directory initialized with the snapshot. -After initial bootstrap, cluster members agree on the cluster version. Every 5 seconds, leader checks versions of all members and picks lowers minor version. 
-New members will refuse joining cluster with cluster version newer than theirs, thus preventing cluster from downgrading. -Downgrade commands allow cluster administrator to force cluster version to be lowered to previous minor version, thus allowing to downgrade the cluster. +#### Example -Downgrade should be executed in stages: -1. Verify that cluster is ready to be downgraded by running `etcdctl downgrade validate ` -2. Start the downgrade process by running `etcdctl downgrade enable ` -3. For each cluster member: - 1. Ensure that member is ready for downgrade by confirming that it wrote `The server is ready to downgrade` log. - 2. Replace member binary with one with older version. - 3. Confirm that member has correctly started and joined the cluster. -4. Ensure that downgrade process has succeeded by checking leader log for `the cluster has been downgraded` +Save a snapshot, restore into a new 3 node cluster, and start the cluster: -Downgrade can be canceled by running `etcdctl downgrade cancel` command. +``` +etcdctl snapshot save snapshot.db -In case of downgrade being canceled, cluster version will return to its normal behavior (pick the lowest member minor version). -If no members were downgraded, cluster version will return to original value. -If at least one member was downgraded, cluster version will stay at the `` until downgraded members are upgraded back. 
+# restore members +bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' +bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' +bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' -### DOWNGRADE VALIDATE \ +# launch members +bin/etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 & +bin/etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 & +bin/etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 & +``` -DOWNGRADE VALIDATE validate downgrade capability before starting downgrade. ### SNAPSHOT STATUS \ -#### Example +Note: Deprecated. Use `etcdutl snapshot status` instead. To be removed in v3.6 -```bash -./etcdctl downgrade validate 3.5 -Downgrade validate success, cluster version 3.6 +SNAPSHOT STATUS lists information about a given backend database snapshot file. -./etcdctl downgrade validate 3.4 -Error: etcdserver: invalid downgrade target version +#### Output -``` ##### Simple format -### DOWNGRADE ENABLE \ +Prints a humanized table of the database hash, revision, total keys, and size. -DOWNGRADE ENABLE starts a downgrade action to cluster. 
+##### JSON format -#### Example +Prints a line of JSON encoding the database hash, revision, total keys, and size. + +#### Examples ```bash -./etcdctl downgrade enable 3.5 -Downgrade enable success, cluster version 3.6 +etcdctl snapshot status file.db +# cf1550fb, 3, 3, 25 kB ``` -### DOWNGRADE CANCEL \ +```bash +etcdctl --write-out=json snapshot status file.db +# {"hash":3474280699,"revision":3,"totalKey":3,"totalSize":24576} +``` + +```bash +etcdctl --write-out=table snapshot status file.db ++----------+----------+------------+------------+ +| HASH | REVISION | TOTAL KEYS | TOTAL SIZE | ++----------+----------+------------+------------+ +| cf1550fb | 3 | 3 | 25 kB | ++----------+----------+------------+------------+ +``` -DOWNGRADE CANCEL cancels the ongoing downgrade action to cluster. +### MOVE-LEADER \ + +MOVE-LEADER transfers leadership from the leader to another member in the cluster. #### Example ```bash -./etcdctl downgrade cancel -Downgrade cancel success, cluster version 3.5 +# to choose transferee +transferee_id=$(etcdctl \ + --endpoints localhost:2379,localhost:22379,localhost:32379 \ + endpoint status | grep -m 1 "false" | awk -F', ' '{print $2}') +echo ${transferee_id} +# c89feb932daef420 + +# endpoints should include leader node +etcdctl --endpoints ${transferee_ep} move-leader ${transferee_id} +# Error: no leader endpoint given at [localhost:22379 localhost:32379] + +# request to leader with target node ID +etcdctl --endpoints ${leader_ep} move-leader ${transferee_id} +# Leadership transferred from 45ddc0e800e20b93 to c89feb932daef420 ``` ## Concurrency commands ### LOCK [options] \ [command arg1 arg2 ...] -LOCK acquires a distributed mutex with a given name. Once the lock is acquired, it will be held until etcdctl is terminated. +LOCK acquires a distributed mutex with a given name. Once the lock is acquired, it will be held until etcdctl is +terminated. #### Options @@ -1120,27 +1089,29 @@ LOCK acquires a distributed mutex with a given name. 
Once the lock is acquired, Once the lock is acquired but no command is given, the result for the GET on the unique lock holder key is displayed. -If a command is given, it will be executed with environment variables `ETCD_LOCK_KEY` and `ETCD_LOCK_REV` set to the lock's holder key and revision. +If a command is given, it will be executed with environment variables `ETCD_LOCK_KEY` and `ETCD_LOCK_REV` set to the +lock's holder key and revision. #### Example Acquire lock with standard output display: ```bash -./etcdctl lock mylock +etcdctl lock mylock # mylock/1234534535445 ``` Acquire lock and execute `echo lock acquired`: ```bash -./etcdctl lock mylock echo lock acquired +etcdctl lock mylock echo lock acquired # lock acquired ``` Acquire lock and execute `etcdctl put` command + ```bash -./etcdctl lock mylock ./etcdctl put foo bar +etcdctl lock mylock etcdctl put foo bar # OK ``` @@ -1148,13 +1119,14 @@ Acquire lock and execute `etcdctl put` command LOCK returns a zero exit code only if it is terminated by a signal and releases the lock. -If LOCK is abnormally terminated or fails to contact the cluster to release the lock, the lock will remain held until the lease expires. Progress may be delayed by up to the default lease length of 60 seconds. +If LOCK is abnormally terminated or fails to contact the cluster to release the lock, the lock will remain held until +the lease expires. Progress may be delayed by up to the default lease length of 60 seconds. ### ELECT [options] \ [proposal] -ELECT participates on a named election. A node announces its candidacy in the election by providing -a proposal value. If a node wishes to observe the election, ELECT listens for new leaders values. -Whenever a leader is elected, its proposal is given as output. +ELECT participates on a named election. A node announces its candidacy in the election by providing a proposal value. If +a node wishes to observe the election, ELECT listens for new leaders values. 
Whenever a leader is elected, its proposal +is given as output. #### Options @@ -1169,7 +1141,7 @@ Whenever a leader is elected, its proposal is given as output. #### Example ```bash -./etcdctl elect myelection foo +etcdctl elect myelection foo # myelection/1456952310051373265 # foo ``` @@ -1178,13 +1150,15 @@ Whenever a leader is elected, its proposal is given as output. ELECT returns a zero exit code only if it is terminated by a signal and can revoke its candidacy or leadership, if any. -If a candidate is abnormally terminated, election progress may be delayed by up to the default lease length of 60 seconds. +If a candidate is abnormally terminated, election progress may be delayed by up to the default lease length of 60 +seconds. ## Authentication commands ### AUTH \ -`auth enable` activates authentication on an etcd cluster and `auth disable` deactivates. When authentication is enabled, etcd checks all requests for appropriate authorization. +`auth enable` activates authentication on an etcd cluster and `auth disable` deactivates. When authentication is +enabled, etcd checks all requests for appropriate authorization. RPC: AuthEnable/AuthDisable @@ -1195,28 +1169,28 @@ RPC: AuthEnable/AuthDisable #### Examples ```bash -./etcdctl user add root +etcdctl user add root # Password of root:#type password for root # Type password of root again for confirmation:#re-type password for root # User root created -./etcdctl user grant-role root root +etcdctl user grant-role root root # Role root is granted to user root -./etcdctl user get root +etcdctl user get root # User: root # Roles: root -./etcdctl role add root +etcdctl role add root # Role root created -./etcdctl role get root +etcdctl role get root # Role root # KV Read: # KV Write: -./etcdctl auth enable +etcdctl auth enable # Authentication Enabled ``` ### ROLE \ -ROLE is used to specify different roles which can be assigned to etcd user(s). 
+ROLE is used to specify different roles which can be assigned to etcd user(s). ### ROLE ADD \ @@ -1231,7 +1205,7 @@ RPC: RoleAdd #### Examples ```bash -./etcdctl --user=root:123 role add myrole +etcdctl --user=root:123 role add myrole # Role myrole created ``` @@ -1248,7 +1222,7 @@ Detailed role information. #### Examples ```bash -./etcdctl --user=root:123 role get myrole +etcdctl --user=root:123 role get myrole # Role myrole # KV Read: # foo @@ -1269,7 +1243,7 @@ RPC: RoleDelete #### Examples ```bash -./etcdctl --user=root:123 role delete myrole +etcdctl --user=root:123 role delete myrole # Role myrole deleted ``` @@ -1286,7 +1260,7 @@ A role per line. #### Examples ```bash -./etcdctl --user=root:123 role list +etcdctl --user=root:123 role list # roleA # roleB # myrole @@ -1313,14 +1287,14 @@ RPC: RoleGrantPermission Grant read and write permission on the key `foo` to role `myrole`: ```bash -./etcdctl --user=root:123 role grant-permission myrole readwrite foo +etcdctl --user=root:123 role grant-permission myrole readwrite foo # Role myrole updated ``` Grant read permission on the wildcard key pattern `foo/*` to role `myrole`: ```bash -./etcdctl --user=root:123 role grant-permission --prefix myrole readwrite foo/ +etcdctl --user=root:123 role grant-permission --prefix myrole readwrite foo/ # Role myrole updated ``` @@ -1338,12 +1312,13 @@ RPC: RoleRevokePermission #### Output -`Permission of key is revoked from role ` for single key. `Permission of range [, ) is revoked from role ` for a key range. Exit code is zero. +`Permission of key is revoked from role ` for single +key. `Permission of range [, ) is revoked from role ` for a key range. Exit code is zero. 
#### Examples ```bash -./etcdctl --user=root:123 role revoke-permission myrole foo +etcdctl --user=root:123 role revoke-permission myrole foo # Permission of key foo is revoked from role myrole ``` @@ -1368,7 +1343,7 @@ RPC: UserAdd #### Examples ```bash -./etcdctl --user=root:123 user add myuser +etcdctl --user=root:123 user add myuser # Password of myuser: #type password for my user # Type password of myuser again for confirmation:#re-type password for my user # User myuser created @@ -1391,7 +1366,7 @@ Detailed user information. #### Examples ```bash -./etcdctl --user=root:123 user get myuser +etcdctl --user=root:123 user get myuser # User: myuser # Roles: ``` @@ -1409,7 +1384,7 @@ RPC: UserDelete #### Examples ```bash -./etcdctl --user=root:123 user delete myuser +etcdctl --user=root:123 user delete myuser # User myuser deleted ``` @@ -1426,7 +1401,7 @@ RPC: UserList #### Examples ```bash -./etcdctl --user=root:123 user list +etcdctl --user=root:123 user list # user1 # user2 # myuser @@ -1449,7 +1424,7 @@ RPC: UserChangePassword #### Examples ```bash -./etcdctl --user=root:123 user passwd myuser +etcdctl --user=root:123 user passwd myuser # Password of myuser: #type new password for my user # Type password of myuser again for confirmation: #re-type the new password for my user # Password updated @@ -1468,7 +1443,7 @@ RPC: UserGrantRole #### Examples ```bash -./etcdctl --user=root:123 user grant-role userA roleA +etcdctl --user=root:123 user grant-role userA roleA # Role roleA is granted to user userA ``` @@ -1485,7 +1460,7 @@ RPC: UserRevokeRole #### Examples ```bash -./etcdctl --user=root:123 user revoke-role userA roleA +etcdctl --user=root:123 user revoke-role userA roleA # Role roleA is revoked from user userA ``` @@ -1511,8 +1486,6 @@ RPC: UserRevokeRole - dest-insecure-transport -- Disable transport security for client connections -- max-txn-ops -- Maximum number of operations permitted in a transaction during syncing updates - #### Output The approximate 
total number of keys transferred to the destination cluster, updated every 30 seconds. @@ -1520,13 +1493,12 @@ The approximate total number of keys transferred to the destination cluster, upd #### Examples ``` -./etcdctl make-mirror mirror.example.com:2379 +etcdctl make-mirror mirror.example.com:2379 # 10 # 18 ``` -[mirror]: ./doc/mirror_maker.md - +[mirror]: doc/mirror_maker.md ### VERSION @@ -1539,7 +1511,7 @@ Prints etcd version and API version. #### Examples ```bash -./etcdctl version +etcdctl version # etcdctl version: 3.1.0-alpha.0+git # API version: 3.1 ``` @@ -1550,27 +1522,9 @@ CHECK provides commands for checking properties of the etcd cluster. ### CHECK PERF [options] -CHECK PERF checks the performance of the etcd cluster for 60 seconds. Running the `check perf` often can create a large keyspace history which can be auto compacted and defragmented using the `--auto-compact` and `--auto-defrag` options as described below. - -Notice that different workload models use different configurations in terms of number of clients and throughtput. Here is the configuration for each load: - - -| Load | Number of clients | Number of put requests (requests/sec) | -|---------|------|---------| -| Small | 50 | 10000 | -| Medium | 200 | 100000 | -| Large | 500 | 1000000 | -| xLarge | 1000 | 3000000 | -The test checks for the following conditions: - -- The throughput should be at least 90% of the issued requets -- All the requests should be done in less than 500 ms -- The standard deviation of the requests should be less than 100 ms - - -Hence, a workload model may work while another one might fail. - +CHECK PERF checks the performance of the etcd cluster for 60 seconds. Running the `check perf` often can create a large +keyspace history which can be auto compacted and defragmented using the `--auto-compact` and `--auto-defrag` options as +described below. 
RPC: CheckPerf @@ -1586,20 +1540,22 @@ RPC: CheckPerf #### Output -Prints the result of performance check on different criteria like throughput. Also prints an overall status of the check as pass or fail. +Prints the result of performance check on different criteria like throughput. Also prints an overall status of the check +as pass or fail. #### Examples -Shows examples of both, pass and fail, status. The failure is due to the fact that a large workload was tried on a single node etcd cluster running on a laptop environment created for development and testing purpose. +Shows examples of both, pass and fail, status. The failure is due to the fact that a large workload was tried on a +single node etcd cluster running on a laptop environment created for development and testing purpose. ```bash -./etcdctl check perf --load="s" +etcdctl check perf --load="s" # 60 / 60 Booooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo! 100.00%1m0s # PASS: Throughput is 150 writes/s # PASS: Slowest request took 0.087509s # PASS: Stddev is 0.011084s # PASS -./etcdctl check perf --load="l" +etcdctl check perf --load="l" # 60 / 60 Booooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo! 100.00%1m0s # FAIL: Throughput too low: 6808 writes/s # PASS: Slowest request took 0.228191s @@ -1609,7 +1565,9 @@ Shows examples of both, pass and fail, status. The failure is due to the fact th ### CHECK DATASCALE [options] -CHECK DATASCALE checks the memory usage of holding data for different workloads on a given server endpoint. Running the `check datascale` often can create a large keyspace history which can be auto compacted and defragmented using the `--auto-compact` and `--auto-defrag` options as described below. +CHECK DATASCALE checks the memory usage of holding data for different workloads on a given server endpoint. 
Running +the `check datascale` often can create a large keyspace history which can be auto compacted and defragmented using +the `--auto-compact` and `--auto-defrag` options as described below. RPC: CheckDatascale @@ -1625,12 +1583,13 @@ RPC: CheckDatascale #### Output -Prints the system memory usage for a given workload. Also prints status of compact and defragment if related options are passed. +Prints the system memory usage for a given workload. Also prints status of compact and defragment if related options are +passed. #### Examples ```bash -./etcdctl check datascale --load="s" --auto-compact=true --auto-defrag=true +etcdctl check datascale --load="s" --auto-compact=true --auto-defrag=true # Start data scale check for work load [10000 key-value pairs, 1024 bytes per key-value, 50 concurrent clients]. # Compacting with revision 18346204 # Compacted with revision 18346204 @@ -1645,47 +1604,50 @@ For all commands, a successful execution return a zero exit code. All failures w ## Output formats -All commands accept an output format by setting `-w` or `--write-out`. All commands default to the "simple" output format, which is meant to be human-readable. The simple format is listed in each command's `Output` description since it is customized for each command. If a command has a corresponding RPC, it will respect all output formats. +All commands accept an output format by setting `-w` or `--write-out`. All commands default to the "simple" output +format, which is meant to be human-readable. The simple format is listed in each command's `Output` description since it +is customized for each command. If a command has a corresponding RPC, it will respect all output formats. -If a command fails, returning a non-zero exit code, an error string will be written to standard error regardless of output format. +If a command fails, returning a non-zero exit code, an error string will be written to standard error regardless of +output format. 
### Simple -A format meant to be easy to parse and human-readable. Specific to each command. +A format meant to be easy to parse and human-readable. Specific to each command. ### JSON -The JSON encoding of the command's [RPC response][etcdrpc]. Since etcd's RPCs use byte strings, the JSON output will encode keys and values in base64. +The JSON encoding of the command's [RPC response][etcdrpc]. Since etcd's RPCs use byte strings, the JSON output will +encode keys and values in base64. Some commands without an RPC also support JSON; see the command's `Output` description. ### Protobuf -The protobuf encoding of the command's [RPC response][etcdrpc]. If an RPC is streaming, the stream messages will be concetenated. If an RPC is not given for a command, the protobuf output is not defined. +The protobuf encoding of the command's [RPC response][etcdrpc]. If an RPC is streaming, the stream messages will be +concatenated. If an RPC is not given for a command, the protobuf output is not defined. ### Fields -An output format similar to JSON but meant to parse with coreutils. For an integer field named `Field`, it writes a line in the format `"Field" : %d` where `%d` is go's integer formatting. For byte array fields, it writes `"Field" : %q` where `%q` is go's quoted string formatting (e.g., `[]byte{'a', '\n'}` is written as `"a\n"`). +An output format similar to JSON but meant to parse with coreutils. For an integer field named `Field`, it writes a line +in the format `"Field" : %d` where `%d` is go's integer formatting. For byte array fields, it writes `"Field" : %q` +where `%q` is go's quoted string formatting (e.g., `[]byte{'a', '\n'}` is written as `"a\n"`). ## Compatibility Support -etcdctl is still in its early stage. We try out best to ensure fully compatible releases, however we might break compatibility to fix bugs or improve commands. 
If we intend to release a version of etcdctl with backward incompatibilities, we will provide notice prior to release and have instructions on how to upgrade. +etcdctl is still in its early stage. We try our best to ensure fully compatible releases, however we might break +compatibility to fix bugs or improve commands. If we intend to release a version of etcdctl with backward +incompatibilities, we will provide notice prior to release and have instructions on how to upgrade. ### Input Compatibility -Input includes the command name, its flags, and its arguments. We ensure backward compatibility of the input of normal commands in non-interactive mode. +Input includes the command name, its flags, and its arguments. We ensure backward compatibility of the input of normal +commands in non-interactive mode. ### Output Compatibility -Output includes output from etcdctl and its exit code. etcdctl provides `simple` output format by default. -We ensure compatibility for the `simple` output format of normal commands in non-interactive mode. Currently, we do not ensure -backward compatibility for `JSON` format and the format in non-interactive mode. Currently, we do not ensure backward compatibility of utility commands. - -### TODO: compatibility with etcd server +Output includes output from etcdctl and its exit code. etcdctl provides `simple` output format by default. We ensure +compatibility for the `simple` output format of normal commands in non-interactive mode. Currently, we do not ensure +backward compatibility for `JSON` format and the format in non-interactive mode. Currently, we do not ensure backward +compatibility of utility commands. 
-[etcd]: https://github.com/coreos/etcd -[READMEv2]: READMEv2.md -[v2key]: ../store/node_extern.go#L28-L37 -[v3key]: ../api/mvccpb/kv.proto#L12-L29 -[etcdrpc]: ../api/etcdserverpb/rpc.proto -[storagerpc]: ../api/mvccpb/kv.proto diff --git a/etcdctl/READMEv2.md b/etcdctl/READMEv2.md deleted file mode 100644 index 8c7fc1e564b..00000000000 --- a/etcdctl/READMEv2.md +++ /dev/null @@ -1,336 +0,0 @@ -etcdctl -======== - -`etcdctl` is a command line client for [etcd][etcd]. -It can be used in scripts or for administrators to explore an etcd cluster. - -## Getting etcdctl - -The latest release is available as a binary at [Github][github-release] along with etcd. - -etcdctl can also be built from source using the build script found in the parent directory. - -## Configuration -### --debug -+ output cURL commands which can be used to reproduce the request - -### --no-sync -+ don't synchronize cluster information before sending request -+ Use this to access non-published client endpoints -+ Without this flag, values from `--endpoint` flag will be overwritten by etcd cluster when it does internal sync. - -### --output, -o -+ output response in the given format (`simple`, `extended` or `json`) -+ default: `"simple"` - -### --discovery-srv, -D -+ domain name to query for SRV records describing cluster endpoints -+ default: none -+ env variable: ETCDCTL_DISCOVERY_SRV - -### --peers -+ a comma-delimited list of machine addresses in the cluster -+ default: `"http://127.0.0.1:2379"` -+ env variable: ETCDCTL_PEERS - -### --endpoint -+ a comma-delimited list of machine addresses in the cluster -+ default: `"http://127.0.0.1:2379"` -+ env variable: ETCDCTL_ENDPOINT -+ Without `--no-sync` flag, this will be overwritten by etcd cluster when it does internal sync. 
- -### --cert-file -+ identify HTTPS client using this SSL certificate file -+ default: none -+ env variable: ETCDCTL_CERT_FILE - -### --key-file -+ identify HTTPS client using this SSL key file -+ default: none -+ env variable: ETCDCTL_KEY_FILE - -### --ca-file -+ verify certificates of HTTPS-enabled servers using this CA bundle -+ default: none -+ env variable: ETCDCTL_CA_FILE - -### --username, -u -+ provide username[:password] and prompt if password is not supplied -+ default: none -+ env variable: ETCDCTL_USERNAME - -### --timeout -+ connection timeout per request -+ default: `"1s"` - -### --total-timeout -+ timeout for the command execution (except watch) -+ default: `"5s"` - -## Usage - -### Setting Key Values - -Set a value on the `/foo/bar` key: - -```sh -$ etcdctl set /foo/bar "Hello world" -Hello world -``` - -Set a value on the `/foo/bar` key with a value that expires in 60 seconds: - -```sh -$ etcdctl set /foo/bar "Hello world" --ttl 60 -Hello world -``` - -Conditionally set a value on `/foo/bar` if the previous value was "Hello world": - -```sh -$ etcdctl set /foo/bar "Goodbye world" --swap-with-value "Hello world" -Goodbye world -``` - -Conditionally set a value on `/foo/bar` if the previous etcd index was 12: - -```sh -$ etcdctl set /foo/bar "Goodbye world" --swap-with-index 12 -Goodbye world -``` - -Create a new key `/foo/bar`, only if the key did not previously exist: - -```sh -$ etcdctl mk /foo/new_bar "Hello world" -Hello world -``` - -Create a new in-order key under dir `/fooDir`: - -```sh -$ etcdctl mk --in-order /fooDir "Hello world" -``` - -Create a new dir `/fooDir`, only if the key did not previously exist: - -```sh -$ etcdctl mkdir /fooDir -``` - -Update an existing key `/foo/bar`, only if the key already existed: - -```sh -$ etcdctl update /foo/bar "Hola mundo" -Hola mundo -``` - -Create or update a directory called `/mydir`: - -```sh -$ etcdctl setdir /mydir -``` - - -### Retrieving a key value - -Get the current value for a single key 
in the local etcd node: - -```sh -$ etcdctl get /foo/bar -Hello world -``` - -Get the value of a key with additional metadata in a parseable format: - -```sh -$ etcdctl -o extended get /foo/bar -Key: /foo/bar -Modified-Index: 72 -TTL: 0 -Etcd-Index: 72 -Raft-Index: 5611 -Raft-Term: 1 - -Hello World -``` - -### Listing a directory - -Explore the keyspace using the `ls` command - -```sh -$ etcdctl ls -/akey -/adir -$ etcdctl ls /adir -/adir/key1 -/adir/key2 -``` - -Add `--recursive` to recursively list subdirectories encountered. - -```sh -$ etcdctl ls --recursive -/akey -/adir -/adir/key1 -/adir/key2 -``` - -Directories can also have a trailing `/` added to output using `-p`. - -```sh -$ etcdctl ls -p -/akey -/adir/ -``` - -### Deleting a key - -Delete a key: - -```sh -$ etcdctl rm /foo/bar -``` - -Delete an empty directory or a key-value pair - -```sh -$ etcdctl rmdir /path/to/dir -``` - -or - -```sh -$ etcdctl rm /path/to/dir --dir -``` - -Recursively delete a key and all child keys: - -```sh -$ etcdctl rm /path/to/dir --recursive -``` - -Conditionally delete `/foo/bar` if the previous value was "Hello world": - -```sh -$ etcdctl rm /foo/bar --with-value "Hello world" -``` - -Conditionally delete `/foo/bar` if the previous etcd index was 12: - -```sh -$ etcdctl rm /foo/bar --with-index 12 -``` - -### Watching for changes - -Watch for only the next change on a key: - -```sh -$ etcdctl watch /foo/bar -Hello world -``` - -Continuously watch a key: - -```sh -$ etcdctl watch /foo/bar --forever -Hello world -.... client hangs forever until ctrl+C printing values as key change -``` - -Continuously watch a key, starting with a given etcd index: - -```sh -$ etcdctl watch /foo/bar --forever --index 12 -Hello world -.... 
client hangs forever until ctrl+C printing values as key change -``` - -Continuously watch a key and exec a program: - -```sh -$ etcdctl exec-watch /foo/bar -- sh -c "env | grep ETCD" -ETCD_WATCH_ACTION=set -ETCD_WATCH_VALUE=My configuration stuff -ETCD_WATCH_MODIFIED_INDEX=1999 -ETCD_WATCH_KEY=/foo/bar -ETCD_WATCH_ACTION=set -ETCD_WATCH_VALUE=My new configuration stuff -ETCD_WATCH_MODIFIED_INDEX=2000 -ETCD_WATCH_KEY=/foo/bar -``` - -Continuously and recursively watch a key and exec a program: -```sh -$ etcdctl exec-watch --recursive /foo -- sh -c "env | grep ETCD" -ETCD_WATCH_ACTION=set -ETCD_WATCH_VALUE=My configuration stuff -ETCD_WATCH_MODIFIED_INDEX=1999 -ETCD_WATCH_KEY=/foo/bar -ETCD_WATCH_ACTION=set -ETCD_WATCH_VALUE=My new configuration stuff -ETCD_WATCH_MODIFIED_INDEX=2000 -ETCD_WATCH_KEY=/foo/barbar -``` - -## Return Codes - -The following exit codes can be returned from etcdctl: - -``` -0 Success -1 Malformed etcdctl arguments -2 Failed to connect to host -3 Failed to auth (client cert rejected, ca validation failure, etc) -4 400 error from etcd -5 500 error from etcd -``` - -## Endpoint - -If the etcd cluster isn't available on `http://127.0.0.1:2379`, specify a `--endpoint` flag or `ETCDCTL_ENDPOINT` environment variable. One endpoint or a comma-separated list of endpoints can be listed. This option is ignored if the `--discovery-srv` option is provided. - -```sh -ETCDCTL_ENDPOINT="http://10.0.28.1:4002" etcdctl set my-key to-a-value -ETCDCTL_ENDPOINT="http://10.0.28.1:4002,http://10.0.28.2:4002,http://10.0.28.3:4002" etcdctl set my-key to-a-value -etcdctl --endpoint http://10.0.28.1:4002 my-key to-a-value -etcdctl --endpoint http://10.0.28.1:4002,http://10.0.28.2:4002,http://10.0.28.3:4002 etcdctl set my-key to-a-value -``` - -## Username and Password - -If the etcd cluster is protected by [authentication][authentication], specify username and password using the [`--username`][username-flag] or `ETCDCTL_USERNAME` environment variable. 
When `--username` flag or `ETCDCTL_USERNAME` environment variable doesn't contain password, etcdctl will prompt password in interactive mode. - -```sh -ETCDCTL_USERNAME="root:password" etcdctl set my-key to-a-value -``` - -## DNS Discovery - -To discover the etcd cluster through domain SRV records, specify a `--discovery-srv` flag or `ETCDCTL_DISCOVERY_SRV` environment variable. This option takes precedence over the `--endpoint` flag. - -```sh -ETCDCTL_DISCOVERY_SRV="some-domain" etcdctl set my-key to-a-value -etcdctl --discovery-srv some-domain set my-key to-a-value -``` - -## Project Details - -### Versioning - -etcdctl uses [semantic versioning][semver]. -Releases will follow lockstep with the etcd release cycle. - -### License - -etcdctl is under the Apache 2.0 license. See the [LICENSE][license] file for details. - -[authentication]: https://github.com/etcd-io/website/blob/main/content/docs/v2/authentication.md -[etcd]: https://github.com/coreos/etcd -[github-release]: https://github.com/coreos/etcd/releases/ -[license]: ../LICENSE -[semver]: http://semver.org/ -[username-flag]: #--username--u diff --git a/etcdctl/ctlv3/command/alarm_command.go b/etcdctl/ctlv3/command/alarm_command.go deleted file mode 100644 index 679f9d98f27..00000000000 --- a/etcdctl/ctlv3/command/alarm_command.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package command - -import ( - "fmt" - - "github.com/spf13/cobra" - - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -// NewAlarmCommand returns the cobra command for "alarm". -func NewAlarmCommand() *cobra.Command { - ac := &cobra.Command{ - Use: "alarm ", - Short: "Alarm related commands", - } - - ac.AddCommand(NewAlarmDisarmCommand()) - ac.AddCommand(NewAlarmListCommand()) - - return ac -} - -func NewAlarmDisarmCommand() *cobra.Command { - cmd := cobra.Command{ - Use: "disarm", - Short: "Disarms all alarms", - Run: alarmDisarmCommandFunc, - } - return &cmd -} - -// alarmDisarmCommandFunc executes the "alarm disarm" command. -func alarmDisarmCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm disarm command accepts no arguments")) - } - ctx, cancel := commandCtx(cmd) - resp, err := mustClientFromCmd(cmd).AlarmDisarm(ctx, &v3.AlarmMember{}) - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - display.Alarm(*resp) -} - -func NewAlarmListCommand() *cobra.Command { - cmd := cobra.Command{ - Use: "list", - Short: "Lists all alarms", - Run: alarmListCommandFunc, - } - return &cmd -} - -// alarmListCommandFunc executes the "alarm list" command. 
-func alarmListCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm list command accepts no arguments")) - } - ctx, cancel := commandCtx(cmd) - resp, err := mustClientFromCmd(cmd).AlarmList(ctx) - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - display.Alarm(*resp) -} diff --git a/etcdctl/ctlv3/command/auth_command.go b/etcdctl/ctlv3/command/auth_command.go index 0e443450013..ef61ea4b4ee 100644 --- a/etcdctl/ctlv3/command/auth_command.go +++ b/etcdctl/ctlv3/command/auth_command.go @@ -17,17 +17,16 @@ package command import ( "fmt" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/pkg/v3/cobrautl" ) // NewAuthCommand returns the cobra command for "auth". func NewAuthCommand() *cobra.Command { ac := &cobra.Command{ Use: "auth ", - Short: "Enable or disable authentication", + Short: "启用或禁用身份验证", } ac.AddCommand(newAuthEnableCommand()) @@ -40,7 +39,7 @@ func NewAuthCommand() *cobra.Command { func newAuthStatusCommand() *cobra.Command { return &cobra.Command{ Use: "status", - Short: "Returns authentication status", + Short: "返回验证状态", Run: authStatusCommandFunc, } } @@ -48,7 +47,7 @@ func newAuthStatusCommand() *cobra.Command { // authStatusCommandFunc executes the "auth status" command. 
func authStatusCommandFunc(cmd *cobra.Command, args []string) { if len(args) != 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth status command does not accept any arguments")) + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth status命令不接受任何参数")) } ctx, cancel := commandCtx(cmd) @@ -64,7 +63,7 @@ func authStatusCommandFunc(cmd *cobra.Command, args []string) { func newAuthEnableCommand() *cobra.Command { return &cobra.Command{ Use: "enable", - Short: "Enables authentication", + Short: "启用身份验证", Run: authEnableCommandFunc, } } @@ -72,7 +71,7 @@ func newAuthEnableCommand() *cobra.Command { // authEnableCommandFunc executes the "auth enable" command. func authEnableCommandFunc(cmd *cobra.Command, args []string) { if len(args) != 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth enable command does not accept any arguments")) + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth enable命令不接受任何参数")) } ctx, cancel := commandCtx(cmd) @@ -96,13 +95,13 @@ func authEnableCommandFunc(cmd *cobra.Command, args []string) { cobrautl.ExitWithError(cobrautl.ExitError, err) } - fmt.Println("Authentication Enabled") + fmt.Println("身份验证启用") } func newAuthDisableCommand() *cobra.Command { return &cobra.Command{ Use: "disable", - Short: "Disables authentication", + Short: "禁用身份验证", Run: authDisableCommandFunc, } } @@ -110,7 +109,7 @@ func newAuthDisableCommand() *cobra.Command { // authDisableCommandFunc executes the "auth disable" command. 
func authDisableCommandFunc(cmd *cobra.Command, args []string) { if len(args) != 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth disable command does not accept any arguments")) + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth disable命令不接受任何参数")) } ctx, cancel := commandCtx(cmd) @@ -120,5 +119,5 @@ func authDisableCommandFunc(cmd *cobra.Command, args []string) { cobrautl.ExitWithError(cobrautl.ExitError, err) } - fmt.Println("Authentication Disabled") + fmt.Println("身份验证禁用") } diff --git a/etcdctl/ctlv3/command/check.go b/etcdctl/ctlv3/command/check.go index 354e78aa31b..3daf832a2bf 100644 --- a/etcdctl/ctlv3/command/check.go +++ b/etcdctl/ctlv3/command/check.go @@ -26,18 +26,21 @@ import ( "sync" "time" - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" - "go.etcd.io/etcd/pkg/v3/report" + "github.com/ls-2018/etcd_cn/code_debug/conf" + + "gopkg.in/cheggaaa/pb.v1" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/ls-2018/etcd_cn/pkg/report" - "github.com/cheggaaa/pb/v3" "github.com/spf13/cobra" "golang.org/x/time/rate" ) var ( checkPerfLoad string - checkPerfPrefix string + checkPerfPrefix string // 写入的数据的key前缀 checkDatascaleLoad string checkDatascalePrefix string autoCompact bool @@ -45,8 +48,8 @@ var ( ) type checkPerfCfg struct { - limit int - clients int + limit int // 每秒并发数 + clients int // 客户端 duration int } @@ -59,7 +62,7 @@ var checkPerfCfgMap = map[string]checkPerfCfg{ }, "m": { limit: 1000, - clients: 200, + clients: 200, // duration: 60, }, "l": { @@ -108,7 +111,7 @@ var checkDatascaleCfgMap = map[string]checkDatascaleCfg{ func NewCheckCommand() *cobra.Command { cc := &cobra.Command{ Use: "check ", - Short: "commands for checking properties of the etcd cluster", + Short: "etcd集群属性检查命令", } cc.AddCommand(NewCheckPerfCommand()) @@ -121,25 +124,22 @@ func NewCheckCommand() *cobra.Command { func NewCheckPerfCommand() *cobra.Command { cmd := 
&cobra.Command{ Use: "perf [options]", - Short: "Check the performance of the etcd cluster", + Short: "查看etcd集群的性能", Run: newCheckPerfCommand, } - // TODO: support customized configuration - cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "The performance check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge). Different workload models use different configurations in terms of number of clients and expected throughtput.") - cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "The prefix for writing the performance check's keys.") - cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.") - cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.") - cmd.RegisterFlagCompletionFunc("load", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - return []string{"small", "medium", "large", "xLarge"}, cobra.ShellCompDirectiveDefault - }) + cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "性能检查的工作负载模型.接受工作负载: s(small), m(medium), l(large), xl(xLarge)") + cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "写性能检查键的前缀.") + cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "测试完成后,压缩修订版本") + cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "测试完成后 碎片整理") return cmd } // newCheckPerfCommand executes the "check perf" command. 
func newCheckPerfCommand(cmd *cobra.Command, args []string) { - var checkPerfAlias = map[string]string{ + conf.Perf = true + checkPerfAlias := map[string]string{ "s": "s", "small": "s", "m": "m", "medium": "m", "l": "l", "large": "l", @@ -152,40 +152,46 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) { } cfg := checkPerfCfgMap[model] - requests := make(chan v3.Op, cfg.clients) + requests := make(chan v3.Op, cfg.clients) // 并发数 limit := rate.NewLimiter(rate.Limit(cfg.limit), 1) cc := clientConfigFromCmd(cmd) clients := make([]*v3.Client, cfg.clients) for i := 0; i < cfg.clients; i++ { - clients[i] = mustClient(cc) + clients[i] = cc.mustClient() } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second) defer cancel() - ctx, icancel := interruptableContext(ctx, func() { attemptCleanup(clients[0], false) }) + ctx, icancel := interruptableContext(ctx, func() { + attemptCleanup(clients[0], false) // 压缩修订版本 + }) defer icancel() gctx, gcancel := context.WithCancel(ctx) + // 判断前缀有没有值 resp, err := clients[0].Get(gctx, checkPerfPrefix, v3.WithPrefix(), v3.WithLimit(1)) gcancel() if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) } if len(resp.Kvs) > 0 { - cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("prefix %q has keys. Delete with 'etcdctl del --prefix %s' first", checkPerfPrefix, checkPerfPrefix)) + cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("前缀 %q 有值了. 
Delete with 'etcdctl del --prefix %s' first", checkPerfPrefix, checkPerfPrefix)) } ksize, vsize := 256, 1024 k, v := make([]byte, ksize), string(make([]byte, vsize)) + // display bar := pb.New(cfg.duration) + bar.Format("Bom !") bar.Start() r := report.NewReport("%4.4f") var wg sync.WaitGroup wg.Add(len(clients)) + for i := range clients { go func(c *v3.Client) { defer wg.Done() @@ -206,7 +212,7 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) { } close(requests) }() - + // 倒计时 go func() { for i := 0; i < cfg.duration; i++ { time.Sleep(time.Second) @@ -231,7 +237,7 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) { ok = true if len(s.ErrorDist) != 0 { - fmt.Println("FAIL: too many errors") + fmt.Println("FAIL: 错误太多") for k, v := range s.ErrorDist { fmt.Printf("FAIL: ERROR(%v) -> %d\n", k, v) } @@ -239,19 +245,19 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) { } if s.RPS/float64(cfg.limit) <= 0.9 { - fmt.Printf("FAIL: Throughput too low: %d writes/s\n", int(s.RPS)+1) + fmt.Printf("FAIL: 吞吐量太慢: %d writes/s\n", int(s.RPS)+1) ok = false } else { - fmt.Printf("PASS: Throughput is %d writes/s\n", int(s.RPS)+1) + fmt.Printf("PASS: 吞吐量 is %d writes/s\n", int(s.RPS)+1) } if s.Slowest > 0.5 { // slowest request > 500ms - fmt.Printf("Slowest request took too long: %fs\n", s.Slowest) + fmt.Printf("最慢的请求耗时太长: %fs\n", s.Slowest) ok = false } else { fmt.Printf("PASS: Slowest request took %fs\n", s.Slowest) } if s.Stddev > 0.1 { // stddev > 100ms - fmt.Printf("Stddev too high: %fs\n", s.Stddev) + fmt.Printf("Stddev太高: %fs\n", s.Stddev) ok = false } else { fmt.Printf("PASS: Stddev is %fs\n", s.Stddev) @@ -265,16 +271,17 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) { } } +// 尝试清理 func attemptCleanup(client *v3.Client, autoCompact bool) { dctx, dcancel := context.WithTimeout(context.Background(), 30*time.Second) defer dcancel() dresp, err := client.Delete(dctx, checkPerfPrefix, v3.WithPrefix()) if err != nil { - 
fmt.Printf("FAIL: Cleanup failed during key deletion: ERROR(%v)\n", err) + fmt.Printf("FAIL:删除键时清除失败 : ERROR(%v)\n", err) return } if autoCompact { - compact(client, dresp.Header.Revision) + compact(client, dresp.Header.Revision) // 压缩修订版本 } } @@ -297,22 +304,22 @@ func interruptableContext(ctx context.Context, attemptCleanup func()) (context.C func NewCheckDatascaleCommand() *cobra.Command { cmd := &cobra.Command{ Use: "datascale [options]", - Short: "Check the memory usage of holding data for different workloads on a given server endpoint.", - Long: "If no endpoint is provided, localhost will be used. If multiple endpoints are provided, first endpoint will be used.", + Short: "检查给定etcd端点上保存不同工作负载的数据的内存使用情况.", + Long: "如果没有提供端点,则将使用localhost.如果提供了多个端点,则将使用第一个端点.", Run: newCheckDatascaleCommand, } - cmd.Flags().StringVar(&checkDatascaleLoad, "load", "s", "The datascale check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge)") - cmd.Flags().StringVar(&checkDatascalePrefix, "prefix", "/etcdctl-check-datascale/", "The prefix for writing the datascale check's keys.") - cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.") - cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.") + cmd.Flags().StringVar(&checkDatascaleLoad, "load", "s", "数据刻度检查的工作负载模型.接受工作负载: s(small), m(medium), l(large), xl(xLarge)") + cmd.Flags().StringVar(&checkDatascalePrefix, "prefix", "/etcdctl-check-datascale/", "用于写入数据刻度校验键的前缀.") + cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "测试完成后压缩修订版本") + cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "测试完成后碎片整理") return cmd } // newCheckDatascaleCommand executes the "check datascale" command. 
func newCheckDatascaleCommand(cmd *cobra.Command, args []string) { - var checkDatascaleAlias = map[string]string{ + checkDatascaleAlias := map[string]string{ "s": "s", "small": "s", "m": "m", "medium": "m", "l": "l", "large": "l", @@ -330,7 +337,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) { cc := clientConfigFromCmd(cmd) clients := make([]*v3.Client, cfg.clients) for i := 0; i < cfg.clients; i++ { - clients[i] = mustClient(cc) + clients[i] = cc.mustClient() } // get endpoints @@ -361,12 +368,13 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) { // get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations bytesBefore := endpointMemoryMetrics(eps[0], sec) if bytesBefore == 0 { - fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.") + fmt.Println("FAIL: 在put操作之前无法读取process_resident_memory_bytes.") os.Exit(cobrautl.ExitError) } - fmt.Println(fmt.Sprintf("Start data scale check for work load [%v key-value pairs, %v bytes per key-value, %v concurrent clients].", cfg.limit, cfg.kvSize, cfg.clients)) + fmt.Println(fmt.Sprintf("启动工作负载的数据规模检查[%v key-value pairs, %v bytes per key-value, %v concurrent clients].", cfg.limit, cfg.kvSize, cfg.clients)) bar := pb.New(cfg.limit) + bar.Format("Bom !") bar.Start() for i := range clients { diff --git a/etcdctl/ctlv3/command/compaction_command.go b/etcdctl/ctlv3/command/compaction_command.go deleted file mode 100644 index 5c0bb1019a9..00000000000 --- a/etcdctl/ctlv3/command/compaction_command.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "fmt" - "strconv" - - "github.com/spf13/cobra" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -var compactPhysical bool - -// NewCompactionCommand returns the cobra command for "compaction". -func NewCompactionCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "compaction [options] ", - Short: "Compacts the event history in etcd", - Run: compactionCommandFunc, - } - cmd.Flags().BoolVar(&compactPhysical, "physical", false, "'true' to wait for compaction to physically remove all old revisions") - return cmd -} - -// compactionCommandFunc executes the "compaction" command. -func compactionCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("compaction command needs 1 argument")) - } - - rev, err := strconv.ParseInt(args[0], 10, 64) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - var opts []clientv3.CompactOption - if compactPhysical { - opts = append(opts, clientv3.WithCompactPhysical()) - } - - c := mustClientFromCmd(cmd) - ctx, cancel := commandCtx(cmd) - _, cerr := c.Compact(ctx, rev, opts...) 
- cancel() - if cerr != nil { - cobrautl.ExitWithError(cobrautl.ExitError, cerr) - } - fmt.Println("compacted revision", rev) -} diff --git a/etcdctl/ctlv3/command/completion_command.go b/etcdctl/ctlv3/command/completion_command.go deleted file mode 100644 index 66a213cd3a6..00000000000 --- a/etcdctl/ctlv3/command/completion_command.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "os" - - "github.com/spf13/cobra" -) - -func NewCompletionCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: `To load completions: - -Bash: - - $ source <(etcdctl completion bash) - - # To load completions for each session, execute once: - # Linux: - $ etcdctl completion bash > /etc/bash_completion.d/etcdctl - # macOS: - $ etcdctl completion bash > /usr/local/etc/bash_completion.d/etcdctl - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ etcdctl completion zsh > "${fpath[1]}/_etcdctl" - - # You will need to start a new shell for this setup to take effect. 
- -fish: - - $ etcdctl completion fish | source - - # To load completions for each session, execute once: - $ etcdctl completion fish > ~/.config/fish/completions/etcdctl.fish - -PowerShell: - - PS> etcdctl completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> etcdctl completion powershell > etcdctl.ps1 - # and source this file from your PowerShell profile. -`, - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, - } - - return cmd -} diff --git a/etcdctl/ctlv3/command/defrag_command.go b/etcdctl/ctlv3/command/defrag_command.go index 253847746a8..196a54e3df2 100644 --- a/etcdctl/ctlv3/command/defrag_command.go +++ b/etcdctl/ctlv3/command/defrag_command.go @@ -17,42 +17,46 @@ package command import ( "fmt" "os" - "time" + "github.com/ls-2018/etcd_cn/etcdutl/etcdutl" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" - - "go.etcd.io/etcd/pkg/v3/cobrautl" ) -// NewDefragCommand returns the cobra command for "Defrag". 
+var defragDataDir string + func NewDefragCommand() *cobra.Command { cmd := &cobra.Command{ Use: "defrag", - Short: "Defragments the storage of the etcd members with given endpoints", + Short: "对给定端点的etcd成员的存储进行碎片整理", Run: defragCommandFunc, } - cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list") + cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "使用集群成员列表中的所有端点") + cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "可选的.如果存在,对etcd不使用的数据目录进行碎片整理.") return cmd } func defragCommandFunc(cmd *cobra.Command, args []string) { + if len(defragDataDir) > 0 { + fmt.Fprintf(os.Stderr, "Use `etcdutl defrag` instead. The --data-dir is going to be decomissioned in v3.6.\n\n") + err := etcdutl.DefragData(defragDataDir) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + } + failures := 0 - cfg := clientConfigFromCmd(cmd) + c := mustClientFromCmd(cmd) for _, ep := range endpointsFromCluster(cmd) { - cfg.Endpoints = []string{ep} - c := mustClient(cfg) ctx, cancel := commandCtx(cmd) - start := time.Now() _, err := c.Defragment(ctx, ep) - d := time.Now().Sub(start) cancel() if err != nil { - fmt.Fprintf(os.Stderr, "Failed to defragment etcd member[%s]. took %s. (%v)\n", ep, d.String(), err) + fmt.Fprintf(os.Stderr, "整理etcd成员失败 [%s] (%v)\n", ep, err) failures++ } else { - fmt.Printf("Finished defragmenting etcd member[%s]. took %s\n", ep, d.String()) + fmt.Printf("整理etcd成员完成[%s]\n", ep) } - c.Close() } if failures != 0 { diff --git a/etcdctl/ctlv3/command/del_command.go b/etcdctl/ctlv3/command/del_command.go deleted file mode 100644 index 51b7abb3edf..00000000000 --- a/etcdctl/ctlv3/command/del_command.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "fmt" - "os" - "time" - - "github.com/spf13/cobra" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -var ( - delPrefix bool - delPrevKV bool - delFromKey bool - delRange bool -) - -// NewDelCommand returns the cobra command for "del". -func NewDelCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "del [options] [range_end]", - Short: "Removes the specified key or range of keys [key, range_end)", - Run: delCommandFunc, - } - - cmd.Flags().BoolVar(&delPrefix, "prefix", false, "delete keys with matching prefix") - cmd.Flags().BoolVar(&delPrevKV, "prev-kv", false, "return deleted key-value pairs") - cmd.Flags().BoolVar(&delFromKey, "from-key", false, "delete keys that are greater than or equal to the given key using byte compare") - cmd.Flags().BoolVar(&delRange, "range", false, "delete range of keys") - return cmd -} - -// delCommandFunc executes the "del" command. -func delCommandFunc(cmd *cobra.Command, args []string) { - key, opts := getDelOp(args) - ctx, cancel := commandCtx(cmd) - resp, err := mustClientFromCmd(cmd).Delete(ctx, key, opts...) 
- cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - display.Del(*resp) -} - -func getDelOp(args []string) (string, []clientv3.OpOption) { - if len(args) == 0 || len(args) > 2 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("del command needs one argument as key and an optional argument as range_end")) - } - - if delPrefix && delFromKey { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one")) - } - - var opts []clientv3.OpOption - key := args[0] - if len(args) > 1 { - if delPrefix || delFromKey { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set")) - } - opts = append(opts, clientv3.WithRange(args[1])) - if !delRange { - fmt.Fprintf(os.Stderr, "Warning: Keys between %q and %q will be deleted. Please interrupt the command within next 2 seconds to cancel. "+ - "You can provide `--range` flag to avoid the delay.\n", args[0], args[1]) - time.Sleep(2 * time.Second) - } - } - - if delPrefix { - if len(key) == 0 { - key = "\x00" - opts = append(opts, clientv3.WithFromKey()) - } else { - opts = append(opts, clientv3.WithPrefix()) - } - } - if delPrevKV { - opts = append(opts, clientv3.WithPrevKV()) - } - - if delFromKey { - if len(key) == 0 { - key = "\x00" - } - opts = append(opts, clientv3.WithFromKey()) - } - - return key, opts -} diff --git a/etcdctl/ctlv3/command/downgrade_command.go b/etcdctl/ctlv3/command/downgrade_command.go deleted file mode 100644 index 8b6ab9cd19e..00000000000 --- a/etcdctl/ctlv3/command/downgrade_command.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "errors" - - "github.com/spf13/cobra" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -// NewDowngradeCommand returns the cobra command for "downgrade". -func NewDowngradeCommand() *cobra.Command { - dc := &cobra.Command{ - Use: "downgrade ", - Short: "Downgrade related commands", - } - - dc.AddCommand(NewDowngradeValidateCommand()) - dc.AddCommand(NewDowngradeEnableCommand()) - dc.AddCommand(NewDowngradeCancelCommand()) - - return dc -} - -// NewDowngradeValidateCommand returns the cobra command for "downgrade validate". -func NewDowngradeValidateCommand() *cobra.Command { - cc := &cobra.Command{ - Use: "validate ", - Short: "Validate downgrade capability before starting downgrade", - - Run: downgradeValidateCommandFunc, - } - return cc -} - -// NewDowngradeEnableCommand returns the cobra command for "downgrade enable". -func NewDowngradeEnableCommand() *cobra.Command { - cc := &cobra.Command{ - Use: "enable ", - Short: "Start a downgrade action to cluster", - - Run: downgradeEnableCommandFunc, - } - return cc -} - -// NewDowngradeCancelCommand returns the cobra command for "downgrade cancel". -func NewDowngradeCancelCommand() *cobra.Command { - cc := &cobra.Command{ - Use: "cancel", - Short: "Cancel the ongoing downgrade action to cluster", - - Run: downgradeCancelCommandFunc, - } - return cc -} - -// downgradeValidateCommandFunc executes the "downgrade validate" command. 
-func downgradeValidateCommandFunc(cmd *cobra.Command, args []string) { - if len(args) < 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided")) - } - if len(args) > 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments")) - } - targetVersion := args[0] - - if len(targetVersion) == 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided")) - } - - ctx, cancel := commandCtx(cmd) - cli := mustClientFromCmd(cmd) - - resp, err := cli.Downgrade(ctx, clientv3.DowngradeValidate, targetVersion) - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.DowngradeValidate(*resp) -} - -// downgradeEnableCommandFunc executes the "downgrade enable" command. -func downgradeEnableCommandFunc(cmd *cobra.Command, args []string) { - if len(args) < 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided")) - } - if len(args) > 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments")) - } - targetVersion := args[0] - - if len(targetVersion) == 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided")) - } - - ctx, cancel := commandCtx(cmd) - cli := mustClientFromCmd(cmd) - - resp, err := cli.Downgrade(ctx, clientv3.DowngradeEnable, targetVersion) - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.DowngradeEnable(*resp) -} - -// downgradeCancelCommandFunc executes the "downgrade cancel" command. 
-func downgradeCancelCommandFunc(cmd *cobra.Command, args []string) { - ctx, cancel := commandCtx(cmd) - cli := mustClientFromCmd(cmd) - - resp, err := cli.Downgrade(ctx, clientv3.DowngradeCancel, "") - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.DowngradeCancel(*resp) -} diff --git a/etcdctl/ctlv3/command/elect_command.go b/etcdctl/ctlv3/command/elect_command.go index 14feb13f5bf..15be42e8fe9 100644 --- a/etcdctl/ctlv3/command/elect_command.go +++ b/etcdctl/ctlv3/command/elect_command.go @@ -17,47 +17,47 @@ package command import ( "context" "errors" + "fmt" "os" "os/signal" "syscall" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/pkg/v3/cobrautl" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" ) -var ( - electListen bool -) +var electListen bool // NewElectCommand returns the cobra command for "elect". 
func NewElectCommand() *cobra.Command { cmd := &cobra.Command{ Use: "elect [proposal]", - Short: "Observes and participates in leader election", + Short: "观察并参与leader选举", Run: electCommandFunc, } - cmd.Flags().BoolVarP(&electListen, "listen", "l", false, "observation mode") + cmd.Flags().BoolVarP(&electListen, "listen", "l", false, "观察模式") return cmd } func electCommandFunc(cmd *cobra.Command, args []string) { if len(args) != 1 && len(args) != 2 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("elect takes one election name argument and an optional proposal argument")) + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("elect -l id")) } c := mustClientFromCmd(cmd) var err error if len(args) == 1 { if !electListen { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("no proposal argument but -l not set")) + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("没有proposal参数,并且-l没有设置")) } err = observe(c, args[0]) } else { if electListen { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("proposal given but -l is set")) + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("有proposal参数,但是-l设置了")) } err = campaign(c, args[0], args[1]) } @@ -66,11 +66,13 @@ func electCommandFunc(cmd *cobra.Command, args []string) { } } +// 观察 func observe(c *clientv3.Client, election string) error { s, err := concurrency.NewSession(c) if err != nil { return err } + fmt.Println("election:----->", election) e := concurrency.NewElection(s, election) ctx, cancel := context.WithCancel(context.TODO()) @@ -94,12 +96,13 @@ func observe(c *clientv3.Client, election string) error { select { case <-ctx.Done(): default: - return errors.New("elect: observer lost") + return errors.New("elect: 观察者丢失") } return nil } +// 运动 func campaign(c *clientv3.Client, election string, prop string) error { s, err := concurrency.NewSession(c) if err != nil { @@ -121,7 +124,6 @@ func campaign(c *clientv3.Client, election string, prop string) error { return err } - // print 
key since elected resp, err := c.Get(ctx, e.Key()) if err != nil { return err @@ -131,7 +133,7 @@ func campaign(c *clientv3.Client, election string, prop string) error { select { case <-donec: case <-s.Done(): - return errors.New("elect: session expired") + return errors.New("elect: 会话过期") } return e.Resign(context.TODO()) diff --git a/etcdctl/ctlv3/command/ep_command.go b/etcdctl/ctlv3/command/ep_command.go deleted file mode 100644 index 0964f564c69..00000000000 --- a/etcdctl/ctlv3/command/ep_command.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "fmt" - "os" - "sync" - "time" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/logutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" - "go.etcd.io/etcd/pkg/v3/flags" - - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var epClusterEndpoints bool -var epHashKVRev int64 - -// NewEndpointCommand returns the cobra command for "endpoint". 
-func NewEndpointCommand() *cobra.Command { - ec := &cobra.Command{ - Use: "endpoint ", - Short: "Endpoint related commands", - } - - ec.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list") - ec.AddCommand(newEpHealthCommand()) - ec.AddCommand(newEpStatusCommand()) - ec.AddCommand(newEpHashKVCommand()) - - return ec -} - -func newEpHealthCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "health", - Short: "Checks the healthiness of endpoints specified in `--endpoints` flag", - Run: epHealthCommandFunc, - } - - return cmd -} - -func newEpStatusCommand() *cobra.Command { - return &cobra.Command{ - Use: "status", - Short: "Prints out the status of endpoints specified in `--endpoints` flag", - Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint. -The items in the lists are endpoint, ID, version, db size, is leader, is learner, raft term, raft index, raft applied index, errors. -`, - Run: epStatusCommandFunc, - } -} - -func newEpHashKVCommand() *cobra.Command { - hc := &cobra.Command{ - Use: "hashkv", - Short: "Prints the KV history hash for each endpoint in --endpoints", - Run: epHashKVCommandFunc, - } - hc.PersistentFlags().Int64Var(&epHashKVRev, "rev", 0, "maximum revision to hash (default: all revisions)") - return hc -} - -type epHealth struct { - Ep string `json:"endpoint"` - Health bool `json:"health"` - Took string `json:"took"` - Error string `json:"error,omitempty"` -} - -// epHealthCommandFunc executes the "endpoint-health" command. 
-func epHealthCommandFunc(cmd *cobra.Command, args []string) { - lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - flags.SetPflagsFromEnv(lg, "ETCDCTL", cmd.InheritedFlags()) - initDisplayFromCmd(cmd) - - sec := secureCfgFromCmd(cmd) - dt := dialTimeoutFromCmd(cmd) - ka := keepAliveTimeFromCmd(cmd) - kat := keepAliveTimeoutFromCmd(cmd) - auth := authCfgFromCmd(cmd) - var cfgs []*clientv3.Config - for _, ep := range endpointsFromCluster(cmd) { - cfg, err := clientv3.NewClientConfig(&clientv3.ConfigSpec{ - Endpoints: []string{ep}, - DialTimeout: dt, - KeepAliveTime: ka, - KeepAliveTimeout: kat, - Secure: sec, - Auth: auth, - }, lg) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) - } - cfgs = append(cfgs, cfg) - } - - var wg sync.WaitGroup - hch := make(chan epHealth, len(cfgs)) - for _, cfg := range cfgs { - wg.Add(1) - go func(cfg *clientv3.Config) { - defer wg.Done() - ep := cfg.Endpoints[0] - cfg.Logger = lg.Named("client") - cli, err := clientv3.New(*cfg) - if err != nil { - hch <- epHealth{Ep: ep, Health: false, Error: err.Error()} - return - } - st := time.Now() - // get a random key. As long as we can get the response without an error, the - // endpoint is health. 
- ctx, cancel := commandCtx(cmd) - _, err = cli.Get(ctx, "health") - eh := epHealth{Ep: ep, Health: false, Took: time.Since(st).String()} - // permission denied is OK since proposal goes through consensus to get it - if err == nil || err == rpctypes.ErrPermissionDenied { - eh.Health = true - } else { - eh.Error = err.Error() - } - - if eh.Health { - resp, err := cli.AlarmList(ctx) - if err == nil && len(resp.Alarms) > 0 { - eh.Health = false - eh.Error = "Active Alarm(s): " - for _, v := range resp.Alarms { - switch v.Alarm { - case etcdserverpb.AlarmType_NOSPACE: - eh.Error = eh.Error + "NOSPACE " - case etcdserverpb.AlarmType_CORRUPT: - eh.Error = eh.Error + "CORRUPT " - default: - eh.Error = eh.Error + "UNKNOWN " - } - } - } else if err != nil { - eh.Health = false - eh.Error = "Unable to fetch the alarm list" - } - } - cancel() - hch <- eh - }(cfg) - } - - wg.Wait() - close(hch) - - errs := false - var healthList []epHealth - for h := range hch { - healthList = append(healthList, h) - if h.Error != "" { - errs = true - } - } - display.EndpointHealth(healthList) - if errs { - cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("unhealthy cluster")) - } -} - -type epStatus struct { - Ep string `json:"Endpoint"` - Resp *clientv3.StatusResponse `json:"Status"` -} - -func epStatusCommandFunc(cmd *cobra.Command, args []string) { - cfg := clientConfigFromCmd(cmd) - - var statusList []epStatus - var err error - for _, ep := range endpointsFromCluster(cmd) { - cfg.Endpoints = []string{ep} - c := mustClient(cfg) - ctx, cancel := commandCtx(cmd) - resp, serr := c.Status(ctx, ep) - cancel() - c.Close() - if serr != nil { - err = serr - fmt.Fprintf(os.Stderr, "Failed to get the status of endpoint %s (%v)\n", ep, serr) - continue - } - statusList = append(statusList, epStatus{Ep: ep, Resp: resp}) - } - - display.EndpointStatus(statusList) - - if err != nil { - os.Exit(cobrautl.ExitError) - } -} - -type epHashKV struct { - Ep string `json:"Endpoint"` - Resp 
*clientv3.HashKVResponse `json:"HashKV"` -} - -func epHashKVCommandFunc(cmd *cobra.Command, args []string) { - cfg := clientConfigFromCmd(cmd) - - var hashList []epHashKV - var err error - for _, ep := range endpointsFromCluster(cmd) { - cfg.Endpoints = []string{ep} - c := mustClient(cfg) - ctx, cancel := commandCtx(cmd) - resp, serr := c.HashKV(ctx, ep, epHashKVRev) - cancel() - c.Close() - if serr != nil { - err = serr - fmt.Fprintf(os.Stderr, "Failed to get the hash of endpoint %s (%v)\n", ep, serr) - continue - } - hashList = append(hashList, epHashKV{Ep: ep, Resp: resp}) - } - - display.EndpointHashKV(hashList) - - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } -} - -func endpointsFromCluster(cmd *cobra.Command) []string { - if !epClusterEndpoints { - endpoints, err := cmd.Flags().GetStringSlice("endpoints") - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - return endpoints - } - - sec := secureCfgFromCmd(cmd) - dt := dialTimeoutFromCmd(cmd) - ka := keepAliveTimeFromCmd(cmd) - kat := keepAliveTimeoutFromCmd(cmd) - eps, err := endpointsFromCmd(cmd) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - // exclude auth for not asking needless password (MemberList() doesn't need authentication) - lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel) - cfg, err := clientv3.NewClientConfig(&clientv3.ConfigSpec{ - Endpoints: eps, - DialTimeout: dt, - KeepAliveTime: ka, - KeepAliveTimeout: kat, - Secure: sec, - }, lg) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - c, err := clientv3.New(*cfg) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - ctx, cancel := commandCtx(cmd) - defer func() { - c.Close() - cancel() - }() - membs, err := c.MemberList(ctx) - if err != nil { - err = fmt.Errorf("failed to fetch endpoints from etcd cluster member list: %v", err) - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - var ret []string - for _, m := range 
membs.Members { - ret = append(ret, m.ClientURLs...) - } - return ret -} diff --git a/etcdctl/ctlv3/command/get_command.go b/etcdctl/ctlv3/command/get_command.go deleted file mode 100644 index a18cc32b97c..00000000000 --- a/etcdctl/ctlv3/command/get_command.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -var ( - getConsistency string - getLimit int64 - getSortOrder string - getSortTarget string - getPrefix bool - getFromKey bool - getRev int64 - getKeysOnly bool - getCountOnly bool - printValueOnly bool -) - -// NewGetCommand returns the cobra command for "get". 
-func NewGetCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "get [options] [range_end]", - Short: "Gets the key or a range of keys", - Run: getCommandFunc, - } - - cmd.Flags().StringVar(&getConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)") - cmd.Flags().StringVar(&getSortOrder, "order", "", "Order of results; ASCEND or DESCEND (ASCEND by default)") - cmd.Flags().StringVar(&getSortTarget, "sort-by", "", "Sort target; CREATE, KEY, MODIFY, VALUE, or VERSION") - cmd.Flags().Int64Var(&getLimit, "limit", 0, "Maximum number of results") - cmd.Flags().BoolVar(&getPrefix, "prefix", false, "Get keys with matching prefix") - cmd.Flags().BoolVar(&getFromKey, "from-key", false, "Get keys that are greater than or equal to the given key using byte compare") - cmd.Flags().Int64Var(&getRev, "rev", 0, "Specify the kv revision") - cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys") - cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "Get only the count") - cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`) - - cmd.RegisterFlagCompletionFunc("consistency", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - return []string{"l", "s"}, cobra.ShellCompDirectiveDefault - }) - cmd.RegisterFlagCompletionFunc("order", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - return []string{"ASCEND", "DESCEND"}, cobra.ShellCompDirectiveDefault - }) - cmd.RegisterFlagCompletionFunc("sort-by", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - return []string{"CREATE", "KEY", "MODIFY", "VALUE", "VERSION"}, cobra.ShellCompDirectiveDefault - }) - - return cmd -} - -// getCommandFunc executes the "get" command. 
-func getCommandFunc(cmd *cobra.Command, args []string) { - key, opts := getGetOp(args) - ctx, cancel := commandCtx(cmd) - resp, err := mustClientFromCmd(cmd).Get(ctx, key, opts...) - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - if getCountOnly { - if _, fields := display.(*fieldsPrinter); !fields { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--count-only is only for `--write-out=fields`")) - } - } - - if printValueOnly { - dp, simple := (display).(*simplePrinter) - if !simple { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("print-value-only is only for `--write-out=simple`")) - } - dp.valueOnly = true - } - display.Get(*resp) -} - -func getGetOp(args []string) (string, []clientv3.OpOption) { - if len(args) == 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("get command needs one argument as key and an optional argument as range_end")) - } - - if getPrefix && getFromKey { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one")) - } - - if getKeysOnly && getCountOnly { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one")) - } - - var opts []clientv3.OpOption - switch getConsistency { - case "s": - opts = append(opts, clientv3.WithSerializable()) - case "l": - default: - cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("unknown consistency flag %q", getConsistency)) - } - - key := args[0] - if len(args) > 1 { - if getPrefix || getFromKey { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set")) - } - opts = append(opts, clientv3.WithRange(args[1])) - } - - opts = append(opts, clientv3.WithLimit(getLimit)) - if getRev > 0 { - opts = append(opts, clientv3.WithRev(getRev)) - } - - sortByOrder := clientv3.SortNone - sortOrder := 
strings.ToUpper(getSortOrder) - switch { - case sortOrder == "ASCEND": - sortByOrder = clientv3.SortAscend - case sortOrder == "DESCEND": - sortByOrder = clientv3.SortDescend - case sortOrder == "": - // nothing - default: - cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort order %v", getSortOrder)) - } - - sortByTarget := clientv3.SortByKey - sortTarget := strings.ToUpper(getSortTarget) - switch { - case sortTarget == "CREATE": - sortByTarget = clientv3.SortByCreateRevision - case sortTarget == "KEY": - sortByTarget = clientv3.SortByKey - case sortTarget == "MODIFY": - sortByTarget = clientv3.SortByModRevision - case sortTarget == "VALUE": - sortByTarget = clientv3.SortByValue - case sortTarget == "VERSION": - sortByTarget = clientv3.SortByVersion - case sortTarget == "": - // nothing - default: - cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort target %v", getSortTarget)) - } - - opts = append(opts, clientv3.WithSort(sortByTarget, sortByOrder)) - - if getPrefix { - if len(key) == 0 { - key = "\x00" - opts = append(opts, clientv3.WithFromKey()) - } else { - opts = append(opts, clientv3.WithPrefix()) - } - } - - if getFromKey { - if len(key) == 0 { - key = "\x00" - } - opts = append(opts, clientv3.WithFromKey()) - } - - if getKeysOnly { - opts = append(opts, clientv3.WithKeysOnly()) - } - - if getCountOnly { - opts = append(opts, clientv3.WithCountOnly()) - } - - return key, opts -} diff --git a/etcdctl/ctlv3/command/global.go b/etcdctl/ctlv3/command/global.go index 6997b94bdb6..a177ed535cb 100644 --- a/etcdctl/ctlv3/command/global.go +++ b/etcdctl/ctlv3/command/global.go @@ -15,22 +15,21 @@ package command import ( + "crypto/tls" "errors" "fmt" "io" + "io/ioutil" "os" "strings" "time" "github.com/bgentry/speakeasy" - - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/srv" - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" - 
"go.etcd.io/etcd/pkg/v3/flags" - + "github.com/ls-2018/etcd_cn/client_sdk/pkg/srv" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/ls-2018/etcd_cn/pkg/flags" "github.com/spf13/cobra" "github.com/spf13/pflag" "go.uber.org/zap" @@ -61,6 +60,21 @@ type GlobalFlags struct { Debug bool } +type secureCfg struct { + cert string + key string + cacert string + serverName string + + insecureTransport bool + insecureSkipVerify bool +} + +type authCfg struct { + username string + password string +} + type discoveryCfg struct { domain string insecure bool @@ -83,14 +97,23 @@ func initDisplayFromCmd(cmd *cobra.Command) { } } +type clientConfig struct { + endpoints []string + dialTimeout time.Duration + keepAliveTime time.Duration + keepAliveTimeout time.Duration + scfg *secureCfg + acfg *authCfg +} + type discardValue struct{} func (*discardValue) String() string { return "" } func (*discardValue) Set(string) error { return nil } func (*discardValue) Type() string { return "" } -func clientConfigFromCmd(cmd *cobra.Command) *clientv3.ConfigSpec { - lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel) +func clientConfigFromCmd(cmd *cobra.Command) *clientConfig { + lg, err := zap.NewProduction() if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) } @@ -117,21 +140,21 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientv3.ConfigSpec { // too many routine connection disconnects to turn on by default. 
// // See https://github.com/etcd-io/etcd/pull/9623 for background - grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr)) + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr)) } - cfg := &clientv3.ConfigSpec{} - cfg.Endpoints, err = endpointsFromCmd(cmd) + cfg := &clientConfig{} + cfg.endpoints, err = endpointsFromCmd(cmd) if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) } - cfg.DialTimeout = dialTimeoutFromCmd(cmd) - cfg.KeepAliveTime = keepAliveTimeFromCmd(cmd) - cfg.KeepAliveTimeout = keepAliveTimeoutFromCmd(cmd) + cfg.dialTimeout = dialTimeoutFromCmd(cmd) + cfg.keepAliveTime = keepAliveTimeFromCmd(cmd) + cfg.keepAliveTimeout = keepAliveTimeoutFromCmd(cmd) - cfg.Secure = secureCfgFromCmd(cmd) - cfg.Auth = authCfgFromCmd(cmd) + cfg.scfg = secureCfgFromCmd(cmd) + cfg.acfg = authCfgFromCmd(cmd) initDisplayFromCmd(cmd) return cfg @@ -139,8 +162,7 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientv3.ConfigSpec { func mustClientCfgFromCmd(cmd *cobra.Command) *clientv3.Config { cc := clientConfigFromCmd(cmd) - lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel) - cfg, err := clientv3.NewClientConfig(cc, lg) + cfg, err := newClientCfg(cc.endpoints, cc.dialTimeout, cc.keepAliveTime, cc.keepAliveTimeout, cc.scfg, cc.acfg) if err != nil { cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) } @@ -148,13 +170,12 @@ func mustClientCfgFromCmd(cmd *cobra.Command) *clientv3.Config { } func mustClientFromCmd(cmd *cobra.Command) *clientv3.Client { - cfg := clientConfigFromCmd(cmd) - return mustClient(cfg) + cfg := clientConfigFromCmd(cmd) // ok + return cfg.mustClient() } -func mustClient(cc *clientv3.ConfigSpec) *clientv3.Client { - lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel) - cfg, err := clientv3.NewClientConfig(cc, lg) +func (cc *clientConfig) mustClient() *clientv3.Client { + cfg, err := newClientCfg(cc.endpoints, cc.dialTimeout, cc.keepAliveTime, cc.keepAliveTimeout, cc.scfg, cc.acfg) if 
err != nil { cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) } @@ -167,11 +188,71 @@ func mustClient(cc *clientv3.ConfigSpec) *clientv3.Client { return client } +func newClientCfg(endpoints []string, dialTimeout, keepAliveTime, keepAliveTimeout time.Duration, scfg *secureCfg, acfg *authCfg) (*clientv3.Config, error) { + var cfgtls *transport.TLSInfo + tlsinfo := transport.TLSInfo{} + tlsinfo.Logger, _ = zap.NewProduction() + if scfg.cert != "" { + tlsinfo.CertFile = scfg.cert + cfgtls = &tlsinfo + } + + if scfg.key != "" { + tlsinfo.KeyFile = scfg.key + cfgtls = &tlsinfo + } + + if scfg.cacert != "" { + tlsinfo.TrustedCAFile = scfg.cacert + cfgtls = &tlsinfo + } + + if scfg.serverName != "" { + tlsinfo.ServerName = scfg.serverName + cfgtls = &tlsinfo + } + + cfg := &clientv3.Config{ + Endpoints: endpoints, + DialTimeout: dialTimeout, + DialKeepAliveTime: keepAliveTime, + DialKeepAliveTimeout: keepAliveTimeout, + } + + if cfgtls != nil { + clientTLS, err := cfgtls.ClientConfig() + if err != nil { + return nil, err + } + cfg.TLS = clientTLS + } + + // if key/cert is not given but user wants secure connection, we + // should still setup an empty tls configuration for gRPC to setup + // secure connection. + if cfg.TLS == nil && !scfg.insecureTransport { + cfg.TLS = &tls.Config{} + } + + // If the user wants to skip TLS verification then we should set + // the InsecureSkipVerify flag in tls configuration. 
+ if scfg.insecureSkipVerify && cfg.TLS != nil { + cfg.TLS.InsecureSkipVerify = true + } + + if acfg != nil { + cfg.Username = acfg.username + cfg.Password = acfg.password + } + + return cfg, nil +} + func argOrStdin(args []string, stdin io.Reader, i int) (string, error) { if i < len(args) { return args[i], nil } - bytes, err := io.ReadAll(stdin) + bytes, err := ioutil.ReadAll(stdin) if string(bytes) == "" || err != nil { return "", errors.New("no available argument and stdin") } @@ -202,7 +283,7 @@ func keepAliveTimeoutFromCmd(cmd *cobra.Command) time.Duration { return keepAliveTimeout } -func secureCfgFromCmd(cmd *cobra.Command) *clientv3.SecureConfig { +func secureCfgFromCmd(cmd *cobra.Command) *secureCfg { cert, key, cacert := keyAndCertFromCmd(cmd) insecureTr := insecureTransportFromCmd(cmd) skipVerify := insecureSkipVerifyFromCmd(cmd) @@ -212,14 +293,14 @@ func secureCfgFromCmd(cmd *cobra.Command) *clientv3.SecureConfig { discoveryCfg.domain = "" } - return &clientv3.SecureConfig{ - Cert: cert, - Key: key, - Cacert: cacert, - ServerName: discoveryCfg.domain, + return &secureCfg{ + cert: cert, + key: key, + cacert: cacert, + serverName: discoveryCfg.domain, - InsecureTransport: insecureTr, - InsecureSkipVerify: skipVerify, + insecureTransport: insecureTr, + insecureSkipVerify: skipVerify, } } @@ -262,7 +343,7 @@ func keyAndCertFromCmd(cmd *cobra.Command) (cert, key, cacert string) { return cert, key, cacert } -func authCfgFromCmd(cmd *cobra.Command) *clientv3.AuthConfig { +func authCfgFromCmd(cmd *cobra.Command) *authCfg { userFlag, err := cmd.Flags().GetString("user") if err != nil { cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) @@ -276,62 +357,58 @@ func authCfgFromCmd(cmd *cobra.Command) *clientv3.AuthConfig { return nil } - var cfg clientv3.AuthConfig + var cfg authCfg if passwordFlag == "" { splitted := strings.SplitN(userFlag, ":", 2) if len(splitted) < 2 { - cfg.Username = userFlag - cfg.Password, err = speakeasy.Ask("Password: ") + cfg.username = 
userFlag + cfg.password, err = speakeasy.Ask("Password: ") if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) } } else { - cfg.Username = splitted[0] - cfg.Password = splitted[1] + cfg.username = splitted[0] + cfg.password = splitted[1] } } else { - cfg.Username = userFlag - cfg.Password = passwordFlag + cfg.username = userFlag + cfg.password = passwordFlag } return &cfg } -func insecureDiscoveryFromCmd(cmd *cobra.Command) bool { - discovery, err := cmd.Flags().GetBool("insecure-discovery") - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - return discovery -} +func endpointsFromFlagValue(cmd *cobra.Command) ([]string, error) { + discoveryCfg := discoveryCfgFromCmd(cmd) -func discoverySrvFromCmd(cmd *cobra.Command) string { - domainStr, err := cmd.Flags().GetString("discovery-srv") - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) + // If we still don't have domain discovery, return nothing + if discoveryCfg.domain == "" { + return []string{}, nil } - return domainStr -} -func discoveryDNSClusterServiceNameFromCmd(cmd *cobra.Command) string { - serviceNameStr, err := cmd.Flags().GetString("discovery-srv-name") + srvs, err := srv.GetClient("etcd-client", discoveryCfg.domain, discoveryCfg.serviceName) if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) + return nil, err } - return serviceNameStr -} - -func discoveryCfgFromCmd(cmd *cobra.Command) *discoveryCfg { - return &discoveryCfg{ - domain: discoverySrvFromCmd(cmd), - insecure: insecureDiscoveryFromCmd(cmd), - serviceName: discoveryDNSClusterServiceNameFromCmd(cmd), + eps := srvs.Endpoints + if discoveryCfg.insecure { + return eps, err + } + // strip insecure connections + ret := []string{} + for _, ep := range eps { + if strings.HasPrefix(ep, "http://") { + fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep) + continue + } + ret = append(ret, ep) } + return ret, err } func endpointsFromCmd(cmd *cobra.Command) ([]string, 
error) { - eps, err := endpointsFromFlagValue(cmd) + eps, err := endpointsFromFlagValue(cmd) // 获取endpoints if err != nil { return nil, err } @@ -347,30 +424,34 @@ func endpointsFromCmd(cmd *cobra.Command) ([]string, error) { return eps, err } -func endpointsFromFlagValue(cmd *cobra.Command) ([]string, error) { - discoveryCfg := discoveryCfgFromCmd(cmd) - - // If we still don't have domain discovery, return nothing - if discoveryCfg.domain == "" { - return []string{}, nil +func insecureDiscoveryFromCmd(cmd *cobra.Command) bool { + discovery, err := cmd.Flags().GetBool("insecure-discovery") + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) } + return discovery +} - srvs, err := srv.GetClient("etcd-client", discoveryCfg.domain, discoveryCfg.serviceName) +func discoverySrvFromCmd(cmd *cobra.Command) string { + domainStr, err := cmd.Flags().GetString("discovery-srv") if err != nil { - return nil, err + cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) } - eps := srvs.Endpoints - if discoveryCfg.insecure { - return eps, err + return domainStr +} + +func discoveryDNSClusterServiceNameFromCmd(cmd *cobra.Command) string { + serviceNameStr, err := cmd.Flags().GetString("discovery-srv-name") + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) } - // strip insecure connections - var ret []string - for _, ep := range eps { - if strings.HasPrefix(ep, "http://") { - fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep) - continue - } - ret = append(ret, ep) + return serviceNameStr +} + +func discoveryCfgFromCmd(cmd *cobra.Command) *discoveryCfg { + return &discoveryCfg{ + domain: discoverySrvFromCmd(cmd), // string + insecure: insecureDiscoveryFromCmd(cmd), // bool + serviceName: discoveryDNSClusterServiceNameFromCmd(cmd), // string } - return ret, err } diff --git a/etcdctl/ctlv3/command/lease_command.go b/etcdctl/ctlv3/command/lease_command.go deleted file mode 100644 index 97cacdfaf30..00000000000 --- 
a/etcdctl/ctlv3/command/lease_command.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "context" - "fmt" - "strconv" - - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" - - "github.com/spf13/cobra" -) - -// NewLeaseCommand returns the cobra command for "lease". -func NewLeaseCommand() *cobra.Command { - lc := &cobra.Command{ - Use: "lease ", - Short: "Lease related commands", - } - - lc.AddCommand(NewLeaseGrantCommand()) - lc.AddCommand(NewLeaseRevokeCommand()) - lc.AddCommand(NewLeaseTimeToLiveCommand()) - lc.AddCommand(NewLeaseListCommand()) - lc.AddCommand(NewLeaseKeepAliveCommand()) - - return lc -} - -// NewLeaseGrantCommand returns the cobra command for "lease grant". -func NewLeaseGrantCommand() *cobra.Command { - lc := &cobra.Command{ - Use: "grant ", - Short: "Creates leases", - - Run: leaseGrantCommandFunc, - } - - return lc -} - -// leaseGrantCommandFunc executes the "lease grant" command. 
-func leaseGrantCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease grant command needs TTL argument")) - } - - ttl, err := strconv.ParseInt(args[0], 10, 64) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad TTL (%v)", err)) - } - - ctx, cancel := commandCtx(cmd) - resp, err := mustClientFromCmd(cmd).Grant(ctx, ttl) - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to grant lease (%v)", err)) - } - display.Grant(*resp) -} - -// NewLeaseRevokeCommand returns the cobra command for "lease revoke". -func NewLeaseRevokeCommand() *cobra.Command { - lc := &cobra.Command{ - Use: "revoke ", - Short: "Revokes leases", - - Run: leaseRevokeCommandFunc, - } - - return lc -} - -// leaseRevokeCommandFunc executes the "lease grant" command. -func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease revoke command needs 1 argument")) - } - - id := leaseFromArgs(args[0]) - ctx, cancel := commandCtx(cmd) - resp, err := mustClientFromCmd(cmd).Revoke(ctx, id) - cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to revoke lease (%v)", err)) - } - display.Revoke(id, *resp) -} - -var timeToLiveKeys bool - -// NewLeaseTimeToLiveCommand returns the cobra command for "lease timetolive". -func NewLeaseTimeToLiveCommand() *cobra.Command { - lc := &cobra.Command{ - Use: "timetolive [options]", - Short: "Get lease information", - - Run: leaseTimeToLiveCommandFunc, - } - lc.Flags().BoolVar(&timeToLiveKeys, "keys", false, "Get keys attached to this lease") - - return lc -} - -// leaseTimeToLiveCommandFunc executes the "lease timetolive" command. 
-func leaseTimeToLiveCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease timetolive command needs lease ID as argument")) - } - var opts []v3.LeaseOption - if timeToLiveKeys { - opts = append(opts, v3.WithAttachedKeys()) - } - resp, rerr := mustClientFromCmd(cmd).TimeToLive(context.TODO(), leaseFromArgs(args[0]), opts...) - if rerr != nil { - cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr) - } - display.TimeToLive(*resp, timeToLiveKeys) -} - -// NewLeaseListCommand returns the cobra command for "lease list". -func NewLeaseListCommand() *cobra.Command { - lc := &cobra.Command{ - Use: "list", - Short: "List all active leases", - Run: leaseListCommandFunc, - } - return lc -} - -// leaseListCommandFunc executes the "lease list" command. -func leaseListCommandFunc(cmd *cobra.Command, args []string) { - resp, rerr := mustClientFromCmd(cmd).Leases(context.TODO()) - if rerr != nil { - cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr) - } - display.Leases(*resp) -} - -var ( - leaseKeepAliveOnce bool -) - -// NewLeaseKeepAliveCommand returns the cobra command for "lease keep-alive". -func NewLeaseKeepAliveCommand() *cobra.Command { - lc := &cobra.Command{ - Use: "keep-alive [options] ", - Short: "Keeps leases alive (renew)", - - Run: leaseKeepAliveCommandFunc, - } - - lc.Flags().BoolVar(&leaseKeepAliveOnce, "once", false, "Resets the keep-alive time to its original value and cobrautl.Exits immediately") - - return lc -} - -// leaseKeepAliveCommandFunc executes the "lease keep-alive" command. 
-func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease keep-alive command needs lease ID as argument")) - } - - id := leaseFromArgs(args[0]) - - if leaseKeepAliveOnce { - respc, kerr := mustClientFromCmd(cmd).KeepAliveOnce(context.TODO(), id) - if kerr != nil { - cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr) - } - display.KeepAlive(*respc) - return - } - - respc, kerr := mustClientFromCmd(cmd).KeepAlive(context.TODO(), id) - if kerr != nil { - cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr) - } - for resp := range respc { - display.KeepAlive(*resp) - } - - if _, ok := (display).(*simplePrinter); ok { - fmt.Printf("lease %016x expired or revoked.\n", id) - } -} - -func leaseFromArgs(arg string) v3.LeaseID { - id, err := strconv.ParseInt(arg, 16, 64) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err)) - } - return v3.LeaseID(id) -} diff --git a/etcdctl/ctlv3/command/lock_command.go b/etcdctl/ctlv3/command/lock_command.go index 0a3d866cdbc..064f9dfaaa9 100644 --- a/etcdctl/ctlv3/command/lock_command.go +++ b/etcdctl/ctlv3/command/lock_command.go @@ -23,9 +23,10 @@ import ( "os/signal" "syscall" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/pkg/v3/cobrautl" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" ) @@ -36,7 +37,7 @@ var lockTTL = 10 func NewLockCommand() *cobra.Command { c := &cobra.Command{ Use: "lock [exec-command arg1 arg2 ...]", - Short: "Acquires a named lock", + Short: "获取命名锁", Run: lockCommandFunc, } c.Flags().IntVarP(&lockTTL, "ttl", "", lockTTL, "timeout for session") diff --git a/etcdctl/ctlv3/command/make_mirror_command.go b/etcdctl/ctlv3/command/make_mirror_command.go 
index 1665330e835..aaf30fe793e 100644 --- a/etcdctl/ctlv3/command/make_mirror_command.go +++ b/etcdctl/ctlv3/command/make_mirror_command.go @@ -23,21 +23,16 @@ import ( "time" "github.com/bgentry/speakeasy" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" - "go.etcd.io/etcd/pkg/v3/cobrautl" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/mirror" + "github.com/ls-2018/etcd_cn/client_sdk/v3/mirror" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" "github.com/spf13/cobra" ) -const ( - defaultMaxTxnOps = uint(128) -) - var ( mminsecureTr bool mmcert string @@ -48,57 +43,52 @@ var ( mmuser string mmpassword string mmnodestprefix bool - mmrev int64 - mmmaxTxnOps uint ) // NewMakeMirrorCommand returns the cobra command for "makeMirror". func NewMakeMirrorCommand() *cobra.Command { c := &cobra.Command{ Use: "make-mirror [options] ", - Short: "Makes a mirror at the destination etcd cluster", + Short: "在目标etcd集群上创建镜像", Run: makeMirrorCommandFunc, } - c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror") - c.Flags().Int64Var(&mmrev, "rev", 0, "Specify the kv revision to start to mirror") - c.Flags().UintVar(&mmmaxTxnOps, "max-txn-ops", defaultMaxTxnOps, "Maximum number of operations permitted in a transaction during syncing updates.") - c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster") - c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster") - c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster") - c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file") - 
c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle") - // TODO: secure by default when etcd enables secure gRPC by default. - c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections") - c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)") - c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)") + c.Flags().StringVar(&mmprefix, "prefix", "", "为那个前缀打快照") + c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "将一个source前缀 镜像到 目标集群中的另一个前缀") + c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "kv镜像到另一个集群的根目录下") + c.Flags().StringVar(&mmcert, "dest-cert", "", "使用此TLS证书文件为目标集群识别安全客户端") + c.Flags().StringVar(&mmkey, "dest-key", "", "使用此TLS私钥文件为目标集群识别安全客户端") + c.Flags().StringVar(&mmcacert, "dest-cacert", "", "使用此CA包验证启用TLS的安全服务器的证书") + c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "为客户端连接禁用传输安全性") + c.Flags().StringVar(&mmuser, "dest-user", "", "目标集群的 username[:password]") + c.Flags().StringVar(&mmpassword, "dest-password", "", "目标集群的密码") return c } -func authDestCfg() *clientv3.AuthConfig { +func authDestCfg() *authCfg { if mmuser == "" { return nil } - var cfg clientv3.AuthConfig + var cfg authCfg if mmpassword == "" { splitted := strings.SplitN(mmuser, ":", 2) if len(splitted) < 2 { var err error - cfg.Username = mmuser - cfg.Password, err = speakeasy.Ask("Destination Password: ") + cfg.username = mmuser + cfg.password, err = speakeasy.Ask("Destination Password: ") if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) } } else { - cfg.Username = splitted[0] - cfg.Password = splitted[1] + cfg.username = splitted[0] + cfg.password = splitted[1] } } else { - cfg.Username = mmuser - cfg.Password = mmpassword + 
cfg.username = mmuser + cfg.password = mmpassword } return &cfg @@ -112,24 +102,24 @@ func makeMirrorCommandFunc(cmd *cobra.Command, args []string) { dialTimeout := dialTimeoutFromCmd(cmd) keepAliveTime := keepAliveTimeFromCmd(cmd) keepAliveTimeout := keepAliveTimeoutFromCmd(cmd) - sec := &clientv3.SecureConfig{ - Cert: mmcert, - Key: mmkey, - Cacert: mmcacert, - InsecureTransport: mminsecureTr, + sec := &secureCfg{ + cert: mmcert, + key: mmkey, + cacert: mmcacert, + insecureTransport: mminsecureTr, } auth := authDestCfg() - cc := &clientv3.ConfigSpec{ - Endpoints: []string{args[0]}, - DialTimeout: dialTimeout, - KeepAliveTime: keepAliveTime, - KeepAliveTimeout: keepAliveTimeout, - Secure: sec, - Auth: auth, + cc := &clientConfig{ + endpoints: []string{args[0]}, + dialTimeout: dialTimeout, + keepAliveTime: keepAliveTime, + keepAliveTimeout: keepAliveTimeout, + scfg: sec, + acfg: auth, } - dc := mustClient(cc) + dc := cc.mustClient() // 目标集群 c := mustClientFromCmd(cmd) err := makeMirror(context.TODO(), c, dc) @@ -139,49 +129,40 @@ func makeMirrorCommandFunc(cmd *cobra.Command, args []string) { func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error { total := int64(0) - // if destination prefix is specified and remove destination prefix is true return error - if mmnodestprefix && len(mmdestprefix) > 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one")) - } - go func() { for { time.Sleep(30 * time.Second) - fmt.Println(atomic.LoadInt64(&total)) + fmt.Println("total--->:", atomic.LoadInt64(&total)) } }() - startRev := mmrev - 1 - if startRev < 0 { - startRev = 0 - } + s := mirror.NewSyncer(c, mmprefix, 0) - s := mirror.NewSyncer(c, mmprefix, startRev) + rc, errc := s.SyncBase(ctx) - // If a rev is provided, then do not sync the whole key space. 
- // Instead, just start watching the key space starting from the rev - if startRev == 0 { - rc, errc := s.SyncBase(ctx) + // 如果指定并删除目的前缀,则返回错误 + if mmnodestprefix && len(mmdestprefix) > 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one")) + } - // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix - if !mmnodestprefix && len(mmdestprefix) == 0 { - mmdestprefix = mmprefix - } + // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix + if !mmnodestprefix && len(mmdestprefix) == 0 { + mmdestprefix = mmprefix + } - for r := range rc { - for _, kv := range r.Kvs { - _, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value)) - if err != nil { - return err - } - atomic.AddInt64(&total, 1) + for r := range rc { + for _, kv := range r.Kvs { + _, err := dc.Put(ctx, modifyPrefix(kv.Key), kv.Value) + if err != nil { + return err } + atomic.AddInt64(&total, 1) } + } - err := <-errc - if err != nil { - return err - } + err := <-errc + if err != nil { + return err } wc := s.SyncUpdates(ctx) @@ -204,21 +185,12 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er ops = []clientv3.Op{} } lastRev = nextRev - - if len(ops) == int(mmmaxTxnOps) { - _, err := dc.Txn(ctx).Then(ops...).Commit() - if err != nil { - return err - } - ops = []clientv3.Op{} - } - switch ev.Type { case mvccpb.PUT: - ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value))) + ops = append(ops, clientv3.OpPut(modifyPrefix(ev.Kv.Key), ev.Kv.Value)) atomic.AddInt64(&total, 1) case mvccpb.DELETE: - ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key)))) + ops = append(ops, clientv3.OpDelete(modifyPrefix(ev.Kv.Key))) atomic.AddInt64(&total, 1) default: panic("unexpected event type") diff --git 
a/etcdctl/ctlv3/command/member_command.go b/etcdctl/ctlv3/command/member_command.go index 53b624b9881..4756d637c3f 100644 --- a/etcdctl/ctlv3/command/member_command.go +++ b/etcdctl/ctlv3/command/member_command.go @@ -20,10 +20,10 @@ import ( "strconv" "strings" - "github.com/spf13/cobra" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" ) var ( @@ -35,7 +35,7 @@ var ( func NewMemberCommand() *cobra.Command { mc := &cobra.Command{ Use: "member ", - Short: "Membership related commands", + Short: "节点相关的命令", } mc.AddCommand(NewMemberAddCommand()) @@ -51,13 +51,13 @@ func NewMemberCommand() *cobra.Command { func NewMemberAddCommand() *cobra.Command { cc := &cobra.Command{ Use: "add [options]", - Short: "Adds a member into the cluster", + Short: "添加一个节点", Run: memberAddCommandFunc, } - cc.Flags().StringVar(&memberPeerURLs, "peer-urls", "", "comma separated peer URLs for the new member.") - cc.Flags().BoolVar(&isLearner, "learner", false, "indicates if the new member is raft learner") + cc.Flags().StringVar(&memberPeerURLs, "peer-urls", "", "用逗号分隔新成员的对等url.") + cc.Flags().BoolVar(&isLearner, "learner", false, "表示新成员是否为learner") return cc } @@ -66,7 +66,7 @@ func NewMemberAddCommand() *cobra.Command { func NewMemberRemoveCommand() *cobra.Command { cc := &cobra.Command{ Use: "remove ", - Short: "Removes a member from the cluster", + Short: "从集群中移除成员", Run: memberRemoveCommandFunc, } @@ -78,7 +78,7 @@ func NewMemberRemoveCommand() *cobra.Command { func NewMemberUpdateCommand() *cobra.Command { cc := &cobra.Command{ Use: "update [options]", - Short: "Updates a member in the cluster", + Short: "更新节点通信地址", Run: memberUpdateCommandFunc, } @@ -92,12 +92,8 @@ func NewMemberUpdateCommand() *cobra.Command { func NewMemberListCommand() *cobra.Command { cc := &cobra.Command{ Use: "list", - Short: "Lists all members in the cluster", - 
Long: `When --write-out is set to simple, this command prints out comma-separated member lists for each endpoint. -The items in the lists are ID, Status, Name, Peer Addrs, Client Addrs, Is Learner. -`, - - Run: memberListCommandFunc, + Short: "显示集群所有成员", + Run: memberListCommandFunc, } return cc @@ -107,11 +103,8 @@ The items in the lists are ID, Status, Name, Peer Addrs, Client Addrs, Is Learne func NewMemberPromoteCommand() *cobra.Command { cc := &cobra.Command{ Use: "promote ", - Short: "Promotes a non-voting member in the cluster", - Long: `Promotes a non-voting learner member to a voting one in the cluster. -`, - - Run: memberPromoteCommandFunc, + Short: "提升一个learner节点", + Run: memberPromoteCommandFunc, } return cc @@ -158,7 +151,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) { display.MemberAdd(*resp) if _, ok := (display).(*simplePrinter); ok { - var conf []string + conf := []string{} for _, memb := range resp.Members { for _, u := range memb.PeerURLs { n := memb.Name @@ -173,7 +166,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) { fmt.Printf("ETCD_NAME=%q\n", newMemberName) fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ",")) fmt.Printf("ETCD_INITIAL_ADVERTISE_PEER_URLS=%q\n", memberPeerURLs) - fmt.Print("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n") + fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n") } } diff --git a/etcdctl/ctlv3/command/move_leader_command.go b/etcdctl/ctlv3/command/move_leader_command.go index a7b4f397b1c..f1cc3cebb08 100644 --- a/etcdctl/ctlv3/command/move_leader_command.go +++ b/etcdctl/ctlv3/command/move_leader_command.go @@ -18,17 +18,17 @@ import ( "fmt" "strconv" - "github.com/spf13/cobra" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" ) // NewMoveLeaderCommand returns the cobra command for "move-leader". 
func NewMoveLeaderCommand() *cobra.Command { cmd := &cobra.Command{ Use: "move-leader ", - Short: "Transfers leadership to another etcd cluster member.", + Short: "触发leader转移", Run: transferLeadershipCommandFunc, } return cmd @@ -44,20 +44,20 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) { cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) } - cfg := clientConfigFromCmd(cmd) - cli := mustClient(cfg) - eps := cli.Endpoints() - cli.Close() + c := mustClientFromCmd(cmd) + eps := c.Endpoints() + c.Close() ctx, cancel := commandCtx(cmd) - // find current leader var leaderCli *clientv3.Client var leaderID uint64 + // 找到当前的leader for _, ep := range eps { - cfg.Endpoints = []string{ep} - cli := mustClient(cfg) - resp, serr := cli.Status(ctx, ep) + cfg := clientConfigFromCmd(cmd) + cfg.endpoints = []string{ep} + cli := cfg.mustClient() + resp, serr := cli.Status(ctx, ep) // 获取单个节点状态 if serr != nil { cobrautl.ExitWithError(cobrautl.ExitError, serr) } @@ -69,6 +69,7 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) { } cli.Close() } + if leaderCli == nil { cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("no leader endpoint given at %v", eps)) } diff --git a/etcdctl/ctlv3/command/over_alarm_command.go b/etcdctl/ctlv3/command/over_alarm_command.go new file mode 100644 index 00000000000..a825bbe79d4 --- /dev/null +++ b/etcdctl/ctlv3/command/over_alarm_command.go @@ -0,0 +1,82 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "fmt" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" +) + +// NewAlarmCommand returns the cobra command for "alarm". +func NewAlarmCommand() *cobra.Command { + ac := &cobra.Command{ + Use: "alarm ", + Short: "Alarm related commands", + } + + ac.AddCommand(NewAlarmDisarmCommand()) + ac.AddCommand(NewAlarmListCommand()) + + return ac +} + +func NewAlarmDisarmCommand() *cobra.Command { + cmd := cobra.Command{ + Use: "disarm", + Short: "解除所有警报", + Run: alarmDisarmCommandFunc, + } + return &cmd +} + +// alarmDisarmCommandFunc executes the "alarm disarm" command. +func alarmDisarmCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm disarm command accepts no arguments")) + } + ctx, cancel := commandCtx(cmd) + resp, err := mustClientFromCmd(cmd).AlarmDisarm(ctx, &v3.AlarmMember{}) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + display.Alarm(*resp) +} + +func NewAlarmListCommand() *cobra.Command { + cmd := cobra.Command{ + Use: "list", + Short: "列出所有警报", + Run: alarmListCommandFunc, + } + return &cmd +} + +// alarmListCommandFunc executes the "alarm list" command. 
+func alarmListCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm list command accepts no arguments")) + } + ctx, cancel := commandCtx(cmd) + resp, err := mustClientFromCmd(cmd).AlarmList(ctx) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + display.Alarm(*resp) +} diff --git a/etcdctl/ctlv3/command/over_compaction_command.go b/etcdctl/ctlv3/command/over_compaction_command.go new file mode 100644 index 00000000000..37c9d314f1d --- /dev/null +++ b/etcdctl/ctlv3/command/over_compaction_command.go @@ -0,0 +1,64 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +package command + +import ( + "fmt" + "strconv" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" +) + +var compactPhysical bool + +// NewCompactionCommand returns the cobra command for "compaction". +func NewCompactionCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "compaction [options] ", + Short: "压缩etcd中的事件历史记录", + Run: compactionCommandFunc, + } + cmd.Flags().BoolVar(&compactPhysical, "physical", false, "'true' 用于等待压缩从物理上删除所有旧修订") + return cmd +} + +// compactionCommandFunc executes the "compaction" command. 
+func compactionCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("compaction command needs 1 argument")) + } + + rev, err := strconv.ParseInt(args[0], 10, 64) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + var opts []clientv3.CompactOption + if compactPhysical { + opts = append(opts, clientv3.WithCompactPhysical()) + } + + c := mustClientFromCmd(cmd) + ctx, cancel := commandCtx(cmd) + _, cerr := c.Compact(ctx, rev, opts...) + cancel() + if cerr != nil { + cobrautl.ExitWithError(cobrautl.ExitError, cerr) + } + fmt.Println("已压缩了修订版本:", rev) +} diff --git a/etcdctl/ctlv3/command/over_del_command.go b/etcdctl/ctlv3/command/over_del_command.go new file mode 100644 index 00000000000..a4fcb3be2ea --- /dev/null +++ b/etcdctl/ctlv3/command/over_del_command.go @@ -0,0 +1,95 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "fmt" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" +) + +var ( + delPrefix bool + delPrevKV bool + delFromKey bool +) + +// NewDelCommand returns the cobra command for "del". 
+func NewDelCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "del [options] [range_end]", + Short: "移除指定的键或键的范围 [key, range_end)", + Run: delCommandFunc, + } + + cmd.Flags().BoolVar(&delPrefix, "prefix", false, "通过匹配前缀删除键") + cmd.Flags().BoolVar(&delPrevKV, "prev-kv", false, "返回删除的k,v 键值对") + cmd.Flags().BoolVar(&delFromKey, "from-key", false, "使用字节比较法删除大于或等于给定键的键.") + return cmd +} + +// delCommandFunc executes the "del" command. +func delCommandFunc(cmd *cobra.Command, args []string) { + key, opts := getDelOp(args) + ctx, cancel := commandCtx(cmd) + resp, err := mustClientFromCmd(cmd).Delete(ctx, key, opts...) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + display.Del(*resp) +} + +func getDelOp(args []string) (string, []clientv3.OpOption) { + if len(args) == 0 || len(args) > 2 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("del command needs one argument as key and an optional argument as range_end")) + } + + if delPrefix && delFromKey { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one")) + } + + opts := []clientv3.OpOption{} + key := args[0] + if len(args) > 1 { + if delPrefix || delFromKey { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set")) + } + opts = append(opts, clientv3.WithRange(args[1])) + } + + if delPrefix { + if len(key) == 0 { + key = "\x00" + opts = append(opts, clientv3.WithFromKey()) + } else { + opts = append(opts, clientv3.WithPrefix()) + } + } + if delPrevKV { + opts = append(opts, clientv3.WithPrevKV()) + } + + if delFromKey { + if len(key) == 0 { + key = "\x00" + } + opts = append(opts, clientv3.WithFromKey()) + } + + return key, opts +} diff --git a/etcdctl/ctlv3/command/over_ep_command.go b/etcdctl/ctlv3/command/over_ep_command.go new file mode 100644 index 00000000000..7465bc2e938 --- /dev/null +++ 
b/etcdctl/ctlv3/command/over_ep_command.go @@ -0,0 +1,280 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "fmt" + "os" + "sync" + "time" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/ls-2018/etcd_cn/pkg/flags" + + "github.com/spf13/cobra" + "go.uber.org/zap" +) + +var ( + epClusterEndpoints bool + epHashKVRev int64 +) + +// NewEndpointCommand returns the cobra command for "endpoint". 
+func NewEndpointCommand() *cobra.Command { + ec := &cobra.Command{ + Use: "endpoint ", + Short: "Endpoint related commands", + } + + ec.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list") + ec.AddCommand(newEpHealthCommand()) + ec.AddCommand(newEpStatusCommand()) + ec.AddCommand(newEpHashKVCommand()) + + return ec +} + +func newEpHealthCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "health", + Short: "检查端点的健康程度", + Run: epHealthCommandFunc, + } + + return cmd +} + +func newEpStatusCommand() *cobra.Command { + return &cobra.Command{ + Use: "status", + Short: "打印出指定端点的状态", + Long: ``, + Run: epStatusCommandFunc, + } +} + +func newEpHashKVCommand() *cobra.Command { + hc := &cobra.Command{ + Use: "hashkv", + Short: "输出每个端点的KV历史哈希值", + Run: epHashKVCommandFunc, + } + hc.PersistentFlags().Int64Var(&epHashKVRev, "rev", 0, "maximum revision to hash (default: all revisions)") + return hc +} + +type epHealth struct { + Ep string `json:"endpoint"` + Health bool `json:"health"` + Took string `json:"took"` + Error string `json:"error,omitempty"` +} + +func epHealthCommandFunc(cmd *cobra.Command, args []string) { + lg, err := zap.NewProduction() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + flags.SetPflagsFromEnv(lg, "ETCDCTL", cmd.InheritedFlags()) + initDisplayFromCmd(cmd) + + sec := secureCfgFromCmd(cmd) + dt := dialTimeoutFromCmd(cmd) + ka := keepAliveTimeFromCmd(cmd) + kat := keepAliveTimeoutFromCmd(cmd) + auth := authCfgFromCmd(cmd) + var cfgs []*v3.Config + for _, ep := range endpointsFromCluster(cmd) { + cfg, err := newClientCfg([]string{ep}, dt, ka, kat, sec, auth) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) + } + cfgs = append(cfgs, cfg) + } + + var wg sync.WaitGroup + hch := make(chan epHealth, len(cfgs)) + for _, cfg := range cfgs { + wg.Add(1) + go func(cfg *v3.Config) { + defer wg.Done() + ep := cfg.Endpoints[0] + cfg.Logger = 
lg.Named("client") + cli, err := v3.New(*cfg) + if err != nil { + hch <- epHealth{Ep: ep, Health: false, Error: err.Error()} + return + } + st := time.Now() + // 得到一个随机的key.只要我们能够获得响应而没有错误,端点就是健康状态. + ctx, cancel := commandCtx(cmd) + _, err = cli.Get(ctx, "health") + eh := epHealth{Ep: ep, Health: false, Took: time.Since(st).String()} + // 权限拒绝是可以的,因为提案通过协商一致得到它 + if err == nil || err == rpctypes.ErrPermissionDenied { + eh.Health = true + } else { + eh.Error = err.Error() + } + + if eh.Health { + resp, err := cli.AlarmList(ctx) + if err == nil && len(resp.Alarms) > 0 { + eh.Health = false + eh.Error = "存在警报(s): " + for _, v := range resp.Alarms { + switch v.Alarm { + case etcdserverpb.AlarmType_NOSPACE: + eh.Error = eh.Error + "NOSPACE " + case etcdserverpb.AlarmType_CORRUPT: + eh.Error = eh.Error + "CORRUPT " + default: + eh.Error = eh.Error + "UNKNOWN " + } + } + } else if err != nil { + eh.Health = false + eh.Error = "无法获取alarm信息" + } + } + cancel() + hch <- eh + }(cfg) + } + + wg.Wait() + close(hch) + + errs := false + var healthList []epHealth + for h := range hch { + healthList = append(healthList, h) + if h.Error != "" { + errs = true + } + } + display.EndpointHealth(healthList) + if errs { + cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("unhealthy cluster")) + } +} + +type epStatus struct { + Ep string `json:"Endpoint"` + Resp *v3.StatusResponse `json:"Status"` +} + +func epStatusCommandFunc(cmd *cobra.Command, args []string) { + c := mustClientFromCmd(cmd) + + var statusList []epStatus + var err error + for _, ep := range endpointsFromCluster(cmd) { + ctx, cancel := commandCtx(cmd) + resp, serr := c.Status(ctx, ep) + cancel() + if serr != nil { + err = serr + fmt.Fprintf(os.Stderr, "获取端点状态失败%s (%v)\n", ep, serr) + continue + } + statusList = append(statusList, epStatus{Ep: ep, Resp: resp}) + } + + display.EndpointStatus(statusList) + + if err != nil { + os.Exit(cobrautl.ExitError) + } +} + +type epHashKV struct { + Ep string `json:"Endpoint"` + 
Resp *v3.HashKVResponse `json:"HashKV"` +} + +func epHashKVCommandFunc(cmd *cobra.Command, args []string) { + c := mustClientFromCmd(cmd) + + hashList := []epHashKV{} + var err error + for _, ep := range endpointsFromCluster(cmd) { + ctx, cancel := commandCtx(cmd) + resp, serr := c.HashKV(ctx, ep, epHashKVRev) + cancel() + if serr != nil { + err = serr + fmt.Fprintf(os.Stderr, "Failed to get the hash of endpoint %s (%v)\n", ep, serr) + continue + } + hashList = append(hashList, epHashKV{Ep: ep, Resp: resp}) + } + + display.EndpointHashKV(hashList) + + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } +} + +func endpointsFromCluster(cmd *cobra.Command) []string { + if !epClusterEndpoints { + endpoints, err := cmd.Flags().GetStringSlice("endpoints") + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + return endpoints + } + + sec := secureCfgFromCmd(cmd) + dt := dialTimeoutFromCmd(cmd) + ka := keepAliveTimeFromCmd(cmd) + kat := keepAliveTimeoutFromCmd(cmd) + eps, err := endpointsFromCmd(cmd) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + // exclude auth for not asking needless password (MemberList() doesn't need authentication) + + cfg, err := newClientCfg(eps, dt, ka, kat, sec, nil) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + c, err := v3.New(*cfg) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + ctx, cancel := commandCtx(cmd) + defer func() { + c.Close() + cancel() + }() + membs, err := c.MemberList(ctx) + if err != nil { + err = fmt.Errorf("failed to fetch endpoints from etcd cluster member list: %v", err) + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + var ret []string + for _, m := range membs.Members { + ret = append(ret, m.ClientURLs...) 
+ } + return ret +} diff --git a/etcdctl/ctlv3/command/over_get_command.go b/etcdctl/ctlv3/command/over_get_command.go new file mode 100644 index 00000000000..fa1d3917cd9 --- /dev/null +++ b/etcdctl/ctlv3/command/over_get_command.go @@ -0,0 +1,181 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "fmt" + "strings" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" +) + +var ( + getConsistency string + getLimit int64 + getSortOrder string + getSortTarget string + getPrefix bool + getFromKey bool + getRev int64 + getKeysOnly bool + getCountOnly bool + printValueOnly bool +) + +func NewGetCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "get [options] [range_end]", + Short: "获取键或键的范围", + Run: getCommandFunc, + } + + cmd.Flags().StringVar(&getConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)") + cmd.Flags().StringVar(&getSortOrder, "order", "", "对结果排序; ASCEND or DESCEND (ASCEND by default)") + cmd.Flags().StringVar(&getSortTarget, "sort-by", "", "使用那个字段排序; CREATE, KEY, MODIFY, VALUE, or VERSION") + cmd.Flags().Int64Var(&getLimit, "limit", 0, "结果的最大数量") + cmd.Flags().BoolVar(&getPrefix, "prefix", false, "返回前缀匹配的keys") + cmd.Flags().BoolVar(&getFromKey, "from-key", false, "使用byte compare获取 >= 给定键的键") + cmd.Flags().Int64Var(&getRev, "rev", 0, "指定修订版本") + cmd.Flags().BoolVar(&getKeysOnly, 
"keys-only", false, "只获取keys") + cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "只获取匹配的数量") + cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `仅在使用“simple"输出格式时写入值`) + return cmd +} + +func getCommandFunc(cmd *cobra.Command, args []string) { + key, opts := getGetOp(args) + ctx, cancel := commandCtx(cmd) + resp, err := mustClientFromCmd(cmd).Get(ctx, key, opts...) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + if getCountOnly { + if _, fields := display.(*fieldsPrinter); !fields { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--count-only is only for `--write-out=fields`")) + } + } + + if printValueOnly { + dp, simple := (display).(*simplePrinter) + if !simple { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("print-value-only is only for `--write-out=simple`")) + } + dp.valueOnly = true + } + display.Get(*resp) +} + +func getGetOp(args []string) (string, []clientv3.OpOption) { + if len(args) == 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("get command needs one argument as key and an optional argument as range_end")) + } + + if getPrefix && getFromKey { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one")) + } + + if getKeysOnly && getCountOnly { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one")) + } + + var opts []clientv3.OpOption + fmt.Println("getConsistency", getConsistency) + switch getConsistency { + case "s": + opts = append(opts, clientv3.WithSerializable()) + case "l": + // 默认就是串行化读 + default: + cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("未知的 consistency 标志 %q", getConsistency)) + } + + key := args[0] + if len(args) > 1 { + if getPrefix || getFromKey { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or 
`--from-key` is set")) + } + opts = append(opts, clientv3.WithRange(args[1])) + } + + opts = append(opts, clientv3.WithLimit(getLimit)) + if getRev > 0 { + opts = append(opts, clientv3.WithRev(getRev)) + } + + sortByOrder := clientv3.SortNone + sortOrder := strings.ToUpper(getSortOrder) + switch { + case sortOrder == "ASCEND": + sortByOrder = clientv3.SortAscend + case sortOrder == "DESCEND": + sortByOrder = clientv3.SortDescend + case sortOrder == "": + // nothing + default: + cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort order %v", getSortOrder)) + } + + sortByTarget := clientv3.SortByKey + sortTarget := strings.ToUpper(getSortTarget) + switch { + case sortTarget == "CREATE": + sortByTarget = clientv3.SortByCreateRevision + case sortTarget == "KEY": + sortByTarget = clientv3.SortByKey + case sortTarget == "MODIFY": + sortByTarget = clientv3.SortByModRevision + case sortTarget == "VALUE": + sortByTarget = clientv3.SortByValue + case sortTarget == "VERSION": + sortByTarget = clientv3.SortByVersion + case sortTarget == "": + // nothing + default: + cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort target %v", getSortTarget)) + } + + opts = append(opts, clientv3.WithSort(sortByTarget, sortByOrder)) + + if getPrefix { + if len(key) == 0 { + key = "\x00" + opts = append(opts, clientv3.WithFromKey()) + } else { + opts = append(opts, clientv3.WithPrefix()) + } + } + + if getFromKey { + if len(key) == 0 { + key = "\x00" + } + opts = append(opts, clientv3.WithFromKey()) + } + + if getKeysOnly { + opts = append(opts, clientv3.WithKeysOnly()) + } + + if getCountOnly { + opts = append(opts, clientv3.WithCountOnly()) + } + + return key, opts +} diff --git a/etcdctl/ctlv3/command/over_lease_command.go b/etcdctl/ctlv3/command/over_lease_command.go new file mode 100644 index 00000000000..26fad9361e0 --- /dev/null +++ b/etcdctl/ctlv3/command/over_lease_command.go @@ -0,0 +1,205 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under 
the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "fmt" + "strconv" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + + "github.com/spf13/cobra" +) + +// NewLeaseCommand returns the cobra command for "lease". +func NewLeaseCommand() *cobra.Command { + lc := &cobra.Command{ + Use: "lease ", + Short: "租约相关命令", + } + + lc.AddCommand(NewLeaseGrantCommand()) + lc.AddCommand(NewLeaseRevokeCommand()) + lc.AddCommand(NewLeaseTimeToLiveCommand()) + lc.AddCommand(NewLeaseListCommand()) + lc.AddCommand(NewLeaseKeepAliveCommand()) + + return lc +} + +// NewLeaseGrantCommand returns the cobra command for "lease grant". +func NewLeaseGrantCommand() *cobra.Command { + lc := &cobra.Command{ + Use: "grant ", + Short: "创建租约", + + Run: leaseGrantCommandFunc, + } + + return lc +} + +func leaseGrantCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease grant命令需要TTL参数")) + } + + ttl, err := strconv.ParseInt(args[0], 10, 64) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("错误的ttl (%v)", err)) + } + + ctx, cancel := commandCtx(cmd) + resp, err := mustClientFromCmd(cmd).Grant(ctx, ttl) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("创建租约失败 (%v)", err)) + } + display.Grant(*resp) +} + +// NewLeaseRevokeCommand returns the cobra command for "lease revoke". 
+func NewLeaseRevokeCommand() *cobra.Command { + lc := &cobra.Command{ + Use: "revoke ", + Short: "移除租约", + + Run: leaseRevokeCommandFunc, + } + + return lc +} + +// leaseRevokeCommandFunc executes the "lease grant" command. +func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease revoke command needs 1 argument")) + } + + id := leaseFromArgs(args[0]) + ctx, cancel := commandCtx(cmd) + resp, err := mustClientFromCmd(cmd).Revoke(ctx, id) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to revoke lease (%v)", err)) + } + display.Revoke(id, *resp) +} + +var timeToLiveKeys bool + +// NewLeaseTimeToLiveCommand returns the cobra command for "lease timetolive". +func NewLeaseTimeToLiveCommand() *cobra.Command { + lc := &cobra.Command{ + Use: "timetolive [options]", + Short: "获取租约信息", + + Run: leaseTimeToLiveCommandFunc, + } + lc.Flags().BoolVar(&timeToLiveKeys, "keys", false, "获取租约附加到了哪些key上") + + return lc +} + +// leaseTimeToLiveCommandFunc executes the "lease timetolive" command. +func leaseTimeToLiveCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease timetolive command needs lease ID as argument")) + } + var opts []v3.LeaseOption + if timeToLiveKeys { + opts = append(opts, v3.WithAttachedKeys()) + } + resp, rerr := mustClientFromCmd(cmd).TimeToLive(context.TODO(), leaseFromArgs(args[0]), opts...) + if rerr != nil { + cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr) + } + display.TimeToLive(*resp, timeToLiveKeys) +} + +// NewLeaseListCommand returns the cobra command for "lease list". +func NewLeaseListCommand() *cobra.Command { + lc := &cobra.Command{ + Use: "list", + Short: "显示所有租约", + Run: leaseListCommandFunc, + } + return lc +} + +// leaseListCommandFunc executes the "lease list" command. 
+func leaseListCommandFunc(cmd *cobra.Command, args []string) { + resp, rerr := mustClientFromCmd(cmd).Leases(context.TODO()) + if rerr != nil { + cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr) + } + display.Leases(*resp) +} + +var leaseKeepAliveOnce bool + +// NewLeaseKeepAliveCommand returns the cobra command for "lease keep-alive". +func NewLeaseKeepAliveCommand() *cobra.Command { + lc := &cobra.Command{ + Use: "keep-alive [options] ", + Short: "重续租约 [renew]", + + Run: leaseKeepAliveCommandFunc, + } + + lc.Flags().BoolVar(&leaseKeepAliveOnce, "once", false, "Resets the keep-alive time to its original value and cobrautl.Exits immediately") + + return lc +} + +// leaseKeepAliveCommandFunc executes the "lease keep-alive" command. +func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease keep-alive命令需要lease ID作为参数")) + } + + id := leaseFromArgs(args[0]) + + if leaseKeepAliveOnce { + respc, kerr := mustClientFromCmd(cmd).KeepAliveOnce(context.TODO(), id) + if kerr != nil { + cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr) + } + display.KeepAlive(*respc) + return + } + + respc, kerr := mustClientFromCmd(cmd).KeepAlive(context.TODO(), id) + if kerr != nil { + cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr) + } + for resp := range respc { + display.KeepAlive(*resp) + } + + if _, ok := (display).(*simplePrinter); ok { + fmt.Printf("租约 %016x 过期或移除.\n", id) + } +} + +func leaseFromArgs(arg string) v3.LeaseID { + id, err := strconv.ParseInt(arg, 16, 64) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err)) + } + return v3.LeaseID(id) +} diff --git a/etcdctl/ctlv3/command/over_put_command.go b/etcdctl/ctlv3/command/over_put_command.go new file mode 100644 index 00000000000..be259de5fb7 --- /dev/null +++ b/etcdctl/ctlv3/command/over_put_command.go @@ -0,0 +1,101 @@ +// Copyright 
2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "fmt" + "os" + "strconv" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" +) + +var ( + leaseStr string + putPrevKV bool + putIgnoreVal bool + putIgnoreLease bool +) + +// NewPutCommand returns the cobra command for "put". +func NewPutCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "put", + Short: "将给定的键放入存储中", + Long: `将给定的键放入存储中`, + Run: putCommandFunc, + } + cmd.Flags().StringVar(&leaseStr, "lease", "0", "将租约附加到key (in hexadecimal) ") + cmd.Flags().BoolVar(&putPrevKV, "prev-kv", false, "返回键值对之前的版本") + cmd.Flags().BoolVar(&putIgnoreVal, "ignore-value", false, "更新当前的值") + cmd.Flags().BoolVar(&putIgnoreLease, "ignore-lease", false, "更新租约") + return cmd +} + +func putCommandFunc(cmd *cobra.Command, args []string) { + key, value, opts := getPutOp(args) + + ctx, cancel := commandCtx(cmd) + resp, err := mustClientFromCmd(cmd).Put(ctx, key, value, opts...) 
+ cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + display.Put(*resp) +} + +func getPutOp(args []string) (string, string, []clientv3.OpOption) { + if len(args) == 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments")) + } + + key := args[0] + if putIgnoreVal && len(args) > 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs only 1 argument when 'ignore-value' is set")) + } + + var value string + var err error + if !putIgnoreVal { + value, err = argOrStdin(args, os.Stdin, 1) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments")) + } + } + + id, err := strconv.ParseInt(leaseStr, 16, 64) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err)) + } + + var opts []clientv3.OpOption + if id != 0 { + opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id))) + } + if putPrevKV { + opts = append(opts, clientv3.WithPrevKV()) + } + if putIgnoreVal { + opts = append(opts, clientv3.WithIgnoreValue()) + } + if putIgnoreLease { + opts = append(opts, clientv3.WithIgnoreLease()) + } + + return key, value, opts +} diff --git a/etcdctl/ctlv3/command/over_role_command.go b/etcdctl/ctlv3/command/over_role_command.go new file mode 100644 index 00000000000..0b908822ce4 --- /dev/null +++ b/etcdctl/ctlv3/command/over_role_command.go @@ -0,0 +1,241 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "context" + "fmt" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" +) + +var ( + rolePermPrefix bool + rolePermFromKey bool +) + +// NewRoleCommand returns the cobra command for "role". +func NewRoleCommand() *cobra.Command { + ac := &cobra.Command{ + Use: "role ", + Short: "Role related commands", + } + + ac.AddCommand(newRoleAddCommand()) + ac.AddCommand(newRoleDeleteCommand()) + ac.AddCommand(newRoleGetCommand()) + ac.AddCommand(newRoleListCommand()) + ac.AddCommand(newRoleGrantPermissionCommand()) + ac.AddCommand(newRoleRevokePermissionCommand()) + + return ac +} + +func newRoleAddCommand() *cobra.Command { + return &cobra.Command{ + Use: "add ", + Short: "添加一个角色", + Run: roleAddCommandFunc, + } +} + +func newRoleDeleteCommand() *cobra.Command { + return &cobra.Command{ + Use: "delete ", + Short: "删除一个角色", + Run: roleDeleteCommandFunc, + } +} + +func newRoleGetCommand() *cobra.Command { + return &cobra.Command{ + Use: "get ", + Short: "获取一个角色的详细信息", + Run: roleGetCommandFunc, + } +} + +func newRoleListCommand() *cobra.Command { + return &cobra.Command{ + Use: "list", + Short: "显示所有角色", + Run: roleListCommandFunc, + } +} + +func newRoleGrantPermissionCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "grant-permission [options] [endkey]", + Short: "给角色授予一个权限", + Run: roleGrantPermissionCommandFunc, + } + + cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "授予前缀权限") + cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, 
"使用byte compare授予大于或等于给定键的权限") + + return cmd +} + +func newRoleRevokePermissionCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "revoke-permission [endkey]", + Short: "移除角色权限里的一个key", + Run: roleRevokePermissionCommandFunc, + } + + cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "取消前缀权限") + cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "使用byte compare撤销大于或等于给定键的权限") + + return cmd +} + +func roleAddCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role add命令需要角色名作为参数")) + } + + resp, err := mustClientFromCmd(cmd).Auth.RoleAdd(context.TODO(), args[0]) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.RoleAdd(args[0], *resp) +} + +func roleDeleteCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role delete command requires role name as its argument")) + } + + resp, err := mustClientFromCmd(cmd).Auth.RoleDelete(context.TODO(), args[0]) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.RoleDelete(args[0], *resp) +} + +func roleGetCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role get命令需要角色名作为参数")) + } + + name := args[0] + resp, err := mustClientFromCmd(cmd).Auth.RoleGet(context.TODO(), name) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.RoleGet(name, *resp) +} + +// roleListCommandFunc executes the "role list" command. 
+func roleListCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role list command requires no arguments")) + } + + resp, err := mustClientFromCmd(cmd).Auth.RoleList(context.TODO()) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.RoleList(*resp) +} + +func roleGrantPermissionCommandFunc(cmd *cobra.Command, args []string) { + if len(args) < 3 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role grant命令需要角色名、权限类型和关键字[endkey]作为参数")) + } + + perm, err := clientv3.StrToPermissionType(args[1]) // read write readwrite + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) + } + + key, rangeEnd := permRange(args[2:]) + resp, err := mustClientFromCmd(cmd).Auth.RoleGrantPermission(context.TODO(), args[0], key, rangeEnd, perm) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.RoleGrantPermission(args[0], *resp) +} + +func roleRevokePermissionCommandFunc(cmd *cobra.Command, args []string) { + if len(args) < 2 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role revoke-permission命令需要角色名和关键字[endkey]作为参数")) + } + + key, rangeEnd := permRange(args[1:]) + resp, err := mustClientFromCmd(cmd).Auth.RoleRevokePermission(context.TODO(), args[0], key, rangeEnd) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + display.RoleRevokePermission(args[0], args[1], rangeEnd, *resp) +} + +func permRange(args []string) (string, string) { + key := args[0] + var rangeEnd string + if len(key) == 0 { + if rolePermPrefix && rolePermFromKey { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--from-key and --prefix flags 是互相排斥的 ")) + } + + // Range permission is expressed as adt.BytesAffineInterval, + // so the empty prefix which should be matched with every key must be like this ["\x00", ). 
+ key = "\x00" + if rolePermPrefix || rolePermFromKey { + // For the both cases of prefix and from-key, a permission with an empty key + // should allow access to the entire key space. + // 0x00 will be treated as open ended in etcd side. + rangeEnd = "\x00" + } + } else { + var err error + rangeEnd, err = rangeEndFromPermFlags(args[0:]) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) + } + } + return key, rangeEnd +} + +func rangeEndFromPermFlags(args []string) (string, error) { + if len(args) == 1 { + if rolePermPrefix { + if rolePermFromKey { + return "", fmt.Errorf("--from-key and --prefix flags are mutually exclusive") + } + return clientv3.GetPrefixRangeEnd(args[0]), nil + } + if rolePermFromKey { + return "\x00", nil + } + // single key case + return "", nil + } + if rolePermPrefix { + return "", fmt.Errorf("unexpected endkey argument with --prefix flag") + } + if rolePermFromKey { + return "", fmt.Errorf("unexpected endkey argument with --from-key flag") + } + return args[1], nil +} diff --git a/etcdctl/ctlv3/command/over_user_command.go b/etcdctl/ctlv3/command/over_user_command.go new file mode 100644 index 00000000000..23a06bdfa91 --- /dev/null +++ b/etcdctl/ctlv3/command/over_user_command.go @@ -0,0 +1,298 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "context" + "fmt" + "strings" + + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/bgentry/speakeasy" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" + "github.com/spf13/cobra" +) + +var userShowDetail bool + +// NewUserCommand returns the cobra command for "user". +func NewUserCommand() *cobra.Command { + ac := &cobra.Command{ + Use: "user ", + Short: "User related commands", + } + + ac.AddCommand(newUserAddCommand()) + ac.AddCommand(newUserDeleteCommand()) + ac.AddCommand(newUserGetCommand()) + ac.AddCommand(newUserListCommand()) + ac.AddCommand(newUserChangePasswordCommand()) + ac.AddCommand(newUserGrantRoleCommand()) + ac.AddCommand(newUserRevokeRoleCommand()) + + return ac +} + +var ( + passwordInteractive bool + passwordFromFlag string + noPassword bool +) + +func newUserAddCommand() *cobra.Command { + cmd := cobra.Command{ + Use: "add [options]", + Short: "添加新用户", + Run: userAddCommandFunc, + } + + cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, "从stdin读取密码,而不是交互终端") + cmd.Flags().StringVar(&passwordFromFlag, "new-user-password", "", "从命令行标志提供密码") + cmd.Flags().BoolVar(&noPassword, "no-password", false, "创建一个没有密码的用户(仅基于CN的身份验证)") + + return &cmd +} + +func newUserDeleteCommand() *cobra.Command { + return &cobra.Command{ + Use: "delete ", + Short: "删除一个用户", + Run: userDeleteCommandFunc, + } +} + +func newUserGetCommand() *cobra.Command { + cmd := cobra.Command{ + Use: "get [options]", + Short: "获取用户详情", + Run: userGetCommandFunc, + } + + cmd.Flags().BoolVar(&userShowDetail, "detail", false, "显示授予用户的角色的权限") + + return &cmd +} + +func newUserListCommand() *cobra.Command { + return &cobra.Command{ + Use: "list", + Short: "显示所有用户", + Run: userListCommandFunc, + } +} + +func newUserChangePasswordCommand() *cobra.Command { + cmd := cobra.Command{ + Use: "passwd [options]", + Short: "更改用户密码", + Run: userChangePasswordCommandFunc, + } + + cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, 
"如果为true,从stdin读取密码,而不是交互终端") + + return &cmd +} + +func newUserGrantRoleCommand() *cobra.Command { + return &cobra.Command{ + Use: "grant-role ", + Short: "授予用户权限", + Run: userGrantRoleCommandFunc, + } +} + +func newUserRevokeRoleCommand() *cobra.Command { + return &cobra.Command{ + Use: "revoke-role ", + Short: "移除用户权限", + Run: userRevokeRoleCommandFunc, + } +} + +func userAddCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户add命令需要用户名作为参数")) + } + + var password string + var user string + + options := &clientv3.UserAddOptions{ + NoPassword: false, + } + + if !noPassword { // 创建一个没有密码的用户(仅基于CN的身份验证) + if passwordFromFlag != "" { + user = args[0] + password = passwordFromFlag + } else { + splitted := strings.SplitN(args[0], ":", 2) + if len(splitted) < 2 { + user = args[0] + if !passwordInteractive { + fmt.Scanf("%s", &password) + } else { + password = readPasswordInteractive(args[0]) + } + } else { + user = splitted[0] + password = splitted[1] + if len(user) == 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户名不允许为空")) + } + } + } + } else { + user = args[0] + options.NoPassword = true + } + + resp, err := mustClientFromCmd(cmd).Auth.UserAddWithOptions(context.TODO(), user, password, options) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.UserAdd(user, *resp) +} + +// userDeleteCommandFunc executes the "user delete" command. +func userDeleteCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户删除命令需要用户名作为参数")) + } + + resp, err := mustClientFromCmd(cmd).Auth.UserDelete(context.TODO(), args[0]) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + display.UserDelete(args[0], *resp) +} + +// userGetCommandFunc executes the "user get" command. 
+func userGetCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户get命令需要用户名作为参数")) + } + + name := args[0] + client := mustClientFromCmd(cmd) + resp, err := client.Auth.UserGet(context.TODO(), name) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + if userShowDetail { + fmt.Printf("User: %s\n", name) + for _, role := range resp.Roles { + fmt.Printf("\n") + roleResp, err := client.Auth.RoleGet(context.TODO(), role) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + display.RoleGet(role, *roleResp) + } + } else { + display.UserGet(name, *resp) + } +} + +// userListCommandFunc executes the "user list" command. +func userListCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user list命令不需要参数")) + } + + resp, err := mustClientFromCmd(cmd).Auth.UserList(context.TODO()) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.UserList(*resp) +} + +// userChangePasswordCommandFunc executes the "user passwd" command. +func userChangePasswordCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户passwd命令需要用户名作为参数")) + } + + var password string + + if !passwordInteractive { + fmt.Scanf("%s", &password) + } else { + password = readPasswordInteractive(args[0]) + } + + resp, err := mustClientFromCmd(cmd).Auth.UserChangePassword(context.TODO(), args[0], password) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.UserChangePassword(*resp) +} + +// userGrantRoleCommandFunc executes the "user grant-role" command. 
+func userGrantRoleCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 2 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user grant命令需要用户名和角色名作为参数")) + } + + resp, err := mustClientFromCmd(cmd).Auth.UserGrantRole(context.TODO(), args[0], args[1]) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.UserGrantRole(args[0], args[1], *resp) +} + +// userRevokeRoleCommandFunc executes the "user revoke-role" command. +func userRevokeRoleCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 2 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户revoke-role需要用户名和角色名作为参数")) + } + + resp, err := mustClientFromCmd(cmd).Auth.UserRevokeRole(context.TODO(), args[0], args[1]) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.UserRevokeRole(args[0], args[1], *resp) +} + +func readPasswordInteractive(name string) string { + prompt1 := fmt.Sprintf("%s密码: ", name) + password1, err1 := speakeasy.Ask(prompt1) + if err1 != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("确认密码失败: %s", err1)) + } + + if len(password1) == 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("空密码")) + } + + prompt2 := fmt.Sprintf("再次输入密码确认%s:", name) + password2, err2 := speakeasy.Ask(prompt2) + if err2 != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("确认密码失败 %s", err2)) + } + + if password1 != password2 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("提供的密码不一致")) + } + + return password1 +} diff --git a/etcdctl/ctlv3/command/over_version_command.go b/etcdctl/ctlv3/command/over_version_command.go new file mode 100644 index 00000000000..1c8b28b6eed --- /dev/null +++ b/etcdctl/ctlv3/command/over_version_command.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "fmt" + + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + + "github.com/spf13/cobra" +) + +// NewVersionCommand prints out the version of etcd. +func NewVersionCommand() *cobra.Command { + return &cobra.Command{ + Use: "version", + Short: "打印编译时的版本", + Run: versionCommandFunc, + } +} + +func versionCommandFunc(cmd *cobra.Command, args []string) { + fmt.Println("etcdctl version:", version.Version) + fmt.Println("API version:", version.APIVersion) +} diff --git a/etcdctl/ctlv3/command/printer.go b/etcdctl/ctlv3/command/printer.go index 7cc1b887b48..096c25bd456 100644 --- a/etcdctl/ctlv3/command/printer.go +++ b/etcdctl/ctlv3/command/printer.go @@ -19,11 +19,11 @@ import ( "fmt" "strings" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" - "github.com/dustin/go-humanize" + + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" ) type printer interface { @@ -32,37 +32,27 @@ type printer interface { Put(v3.PutResponse) Txn(v3.TxnResponse) Watch(v3.WatchResponse) - Grant(r v3.LeaseGrantResponse) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) KeepAlive(r v3.LeaseKeepAliveResponse) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) Leases(r v3.LeaseLeasesResponse) - MemberAdd(v3.MemberAddResponse) MemberRemove(id uint64, r v3.MemberRemoveResponse) MemberUpdate(id uint64, r v3.MemberUpdateResponse) MemberPromote(id uint64, r v3.MemberPromoteResponse) 
MemberList(v3.MemberListResponse) - EndpointHealth([]epHealth) EndpointStatus([]epStatus) EndpointHashKV([]epHashKV) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) - - DowngradeValidate(r v3.DowngradeResponse) - DowngradeEnable(r v3.DowngradeResponse) - DowngradeCancel(r v3.DowngradeResponse) - Alarm(v3.AlarmResponse) - RoleAdd(role string, r v3.AuthRoleAddResponse) RoleGet(role string, r v3.AuthRoleGetResponse) RoleDelete(role string, r v3.AuthRoleDeleteResponse) RoleList(v3.AuthRoleListResponse) RoleGrantPermission(role string, r v3.AuthRoleGrantPermissionResponse) RoleRevokePermission(role string, key string, end string, r v3.AuthRoleRevokePermissionResponse) - UserAdd(user string, r v3.AuthUserAddResponse) UserGet(user string, r v3.AuthUserGetResponse) UserList(r v3.AuthUserListResponse) @@ -70,7 +60,6 @@ type printer interface { UserGrantRole(user string, role string, r v3.AuthUserGrantRoleResponse) UserRevokeRole(user string, role string, r v3.AuthUserRevokeRoleResponse) UserDelete(user string, r v3.AuthUserDeleteResponse) - AuthStatus(r v3.AuthStatusResponse) } @@ -79,7 +68,7 @@ func NewPrinter(printerType string, isHex bool) printer { case "simple": return &simplePrinter{isHex: isHex} case "fields": - return &fieldsPrinter{printer: newPrinterUnsupported("fields"), isHex: isHex} + return &fieldsPrinter{newPrinterUnsupported("fields")} case "json": return newJSONPrinter(isHex) case "protobuf": @@ -111,22 +100,20 @@ func (p *printerRPC) MemberAdd(r v3.MemberAddResponse) { p.p((*pb.MemberAddRespo func (p *printerRPC) MemberRemove(id uint64, r v3.MemberRemoveResponse) { p.p((*pb.MemberRemoveResponse)(&r)) } + func (p *printerRPC) MemberUpdate(id uint64, r v3.MemberUpdateResponse) { p.p((*pb.MemberUpdateResponse)(&r)) } -func (p *printerRPC) MemberPromote(id uint64, r v3.MemberPromoteResponse) { - p.p((*pb.MemberPromoteResponse)(&r)) -} func (p *printerRPC) MemberList(r v3.MemberListResponse) { p.p((*pb.MemberListResponse)(&r)) } func (p *printerRPC) 
Alarm(r v3.AlarmResponse) { p.p((*pb.AlarmResponse)(&r)) } func (p *printerRPC) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { p.p((*pb.MoveLeaderResponse)(&r)) } -func (p *printerRPC) DowngradeValidate(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) } -func (p *printerRPC) DowngradeEnable(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) } -func (p *printerRPC) DowngradeCancel(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) } func (p *printerRPC) RoleAdd(_ string, r v3.AuthRoleAddResponse) { p.p((*pb.AuthRoleAddResponse)(&r)) } -func (p *printerRPC) RoleGet(_ string, r v3.AuthRoleGetResponse) { p.p((*pb.AuthRoleGetResponse)(&r)) } +func (p *printerRPC) RoleGet(_ string, r v3.AuthRoleGetResponse) { + p.p((*pb.AuthRoleGetResponse)(&r)) +} + func (p *printerRPC) RoleDelete(_ string, r v3.AuthRoleDeleteResponse) { p.p((*pb.AuthRoleDeleteResponse)(&r)) } @@ -134,6 +121,7 @@ func (p *printerRPC) RoleList(r v3.AuthRoleListResponse) { p.p((*pb.AuthRoleList func (p *printerRPC) RoleGrantPermission(_ string, r v3.AuthRoleGrantPermissionResponse) { p.p((*pb.AuthRoleGrantPermissionResponse)(&r)) } + func (p *printerRPC) RoleRevokePermission(_ string, _ string, _ string, r v3.AuthRoleRevokePermissionResponse) { p.p((*pb.AuthRoleRevokePermissionResponse)(&r)) } @@ -143,15 +131,19 @@ func (p *printerRPC) UserList(r v3.AuthUserListResponse) { p.p((*pb.Auth func (p *printerRPC) UserChangePassword(r v3.AuthUserChangePasswordResponse) { p.p((*pb.AuthUserChangePasswordResponse)(&r)) } + func (p *printerRPC) UserGrantRole(_ string, _ string, r v3.AuthUserGrantRoleResponse) { p.p((*pb.AuthUserGrantRoleResponse)(&r)) } + func (p *printerRPC) UserRevokeRole(_ string, _ string, r v3.AuthUserRevokeRoleResponse) { p.p((*pb.AuthUserRevokeRoleResponse)(&r)) } + func (p *printerRPC) UserDelete(_ string, r v3.AuthUserDeleteResponse) { p.p((*pb.AuthUserDeleteResponse)(&r)) } + func (p *printerRPC) AuthStatus(r v3.AuthStatusResponse) { 
p.p((*pb.AuthStatusResponse)(&r)) } @@ -170,9 +162,6 @@ func (p *printerUnsupported) EndpointStatus([]epStatus) { p.p(nil) } func (p *printerUnsupported) EndpointHashKV([]epHashKV) { p.p(nil) } func (p *printerUnsupported) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { p.p(nil) } -func (p *printerUnsupported) DowngradeValidate(r v3.DowngradeResponse) { p.p(nil) } -func (p *printerUnsupported) DowngradeEnable(r v3.DowngradeResponse) { p.p(nil) } -func (p *printerUnsupported) DowngradeCancel(r v3.DowngradeResponse) { p.p(nil) } func makeMemberListTable(r v3.MemberListResponse) (hdr []string, rows [][]string) { hdr = []string{"ID", "Status", "Name", "Peer Addrs", "Client Addrs", "Is Learner"} @@ -211,16 +200,16 @@ func makeEndpointHealthTable(healthList []epHealth) (hdr []string, rows [][]stri } func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]string) { - hdr = []string{"endpoint", "ID", "version", "storage version", "db size", "db size in use", "is leader", "is learner", "raft term", - "raft index", "raft applied index", "errors"} + hdr = []string{ + "endpoint", "ID", "version", "db size", "is leader", "is learner", "raft term", + "raft index", "raft applied index", "errors", + } for _, status := range statusList { rows = append(rows, []string{ status.Ep, fmt.Sprintf("%x", status.Resp.Header.MemberId), status.Resp.Version, - status.Resp.StorageVersion, humanize.Bytes(uint64(status.Resp.DbSize)), - humanize.Bytes(uint64(status.Resp.DbSizeInUse)), fmt.Sprint(status.Resp.Leader == status.Resp.Header.MemberId), fmt.Sprint(status.Resp.IsLearner), fmt.Sprint(status.Resp.RaftTerm), @@ -233,12 +222,11 @@ func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]stri } func makeEndpointHashKVTable(hashList []epHashKV) (hdr []string, rows [][]string) { - hdr = []string{"endpoint", "hash", "hash_revision"} + hdr = []string{"endpoint", "hash"} for _, h := range hashList { rows = append(rows, []string{ h.Ep, 
fmt.Sprint(h.Resp.Hash), - fmt.Sprint(h.Resp.HashRevision), }) } return hdr, rows diff --git a/etcdctl/ctlv3/command/printer_fields.go b/etcdctl/ctlv3/command/printer_fields.go index 5e7d9258425..095ebaa740e 100644 --- a/etcdctl/ctlv3/command/printer_fields.go +++ b/etcdctl/ctlv3/command/printer_fields.go @@ -17,16 +17,12 @@ package command import ( "fmt" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - spb "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/pkg/v3/types" - v3 "go.etcd.io/etcd/client/v3" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + spb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" ) -type fieldsPrinter struct { - printer - isHex bool -} +type fieldsPrinter struct{ printer } func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) { fmt.Printf("\"%sKey\" : %q\n", pfx, string(kv.Key)) @@ -34,27 +30,13 @@ func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) { fmt.Printf("\"%sModRevision\" : %d\n", pfx, kv.ModRevision) fmt.Printf("\"%sVersion\" : %d\n", pfx, kv.Version) fmt.Printf("\"%sValue\" : %q\n", pfx, string(kv.Value)) - if p.isHex { - fmt.Printf("\"%sLease\" : %016x\n", pfx, kv.Lease) - } else { - fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease) - } + fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease) } func (p *fieldsPrinter) hdr(h *pb.ResponseHeader) { - if p.isHex { - fmt.Println(`"ClusterID" :`, types.ID(h.ClusterId)) - fmt.Println(`"MemberID" :`, types.ID(h.MemberId)) - } else { - fmt.Println(`"ClusterID" :`, h.ClusterId) - fmt.Println(`"MemberID" :`, h.MemberId) - } - // Revision only makes sense for k/v responses. For other kinds of - // responses, i.e. MemberList, usually the revision isn't populated - // at all; so it would be better to hide this field in these cases. 
- if h.Revision > 0 { - fmt.Println(`"Revision" :`, h.Revision) - } + fmt.Println(`"ClusterID" :`, h.ClusterId) + fmt.Println(`"MemberID" :`, h.MemberId) + fmt.Println(`"Revision" :`, h.Revision) fmt.Println(`"RaftTerm" :`, h.RaftTerm) } @@ -85,16 +67,18 @@ func (p *fieldsPrinter) Put(r v3.PutResponse) { func (p *fieldsPrinter) Txn(r v3.TxnResponse) { p.hdr(r.Header) fmt.Println(`"Succeeded" :`, r.Succeeded) - for _, resp := range r.Responses { - switch v := resp.Response.(type) { - case *pb.ResponseOp_ResponseDeleteRange: + for _, r := range r.Responses { + if r.ResponseOp_ResponseDeleteRange != nil { + v := r.ResponseOp_ResponseDeleteRange p.Del((v3.DeleteResponse)(*v.ResponseDeleteRange)) - case *pb.ResponseOp_ResponsePut: + } else if r.ResponseOp_ResponsePut != nil { + v := r.ResponseOp_ResponsePut p.Put((v3.PutResponse)(*v.ResponsePut)) - case *pb.ResponseOp_ResponseRange: + } else if r.ResponseOp_ResponseRange != nil { + v := r.ResponseOp_ResponseRange p.Get((v3.GetResponse)(*v.ResponseRange)) - default: - fmt.Printf("\"Unknown\" : %q\n", fmt.Sprintf("%+v", v)) + } else { + fmt.Printf("unexpected response %+v\n", r) } } } @@ -112,11 +96,7 @@ func (p *fieldsPrinter) Watch(resp v3.WatchResponse) { func (p *fieldsPrinter) Grant(r v3.LeaseGrantResponse) { p.hdr(r.ResponseHeader) - if p.isHex { - fmt.Printf("\"ID\" : %016x\n", r.ID) - } else { - fmt.Println(`"ID" :`, r.ID) - } + fmt.Println(`"ID" :`, r.ID) fmt.Println(`"TTL" :`, r.TTL) } @@ -126,21 +106,13 @@ func (p *fieldsPrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) { func (p *fieldsPrinter) KeepAlive(r v3.LeaseKeepAliveResponse) { p.hdr(r.ResponseHeader) - if p.isHex { - fmt.Printf("\"ID\" : %016x\n", r.ID) - } else { - fmt.Println(`"ID" :`, r.ID) - } + fmt.Println(`"ID" :`, r.ID) fmt.Println(`"TTL" :`, r.TTL) } func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) { p.hdr(r.ResponseHeader) - if p.isHex { - fmt.Printf("\"ID\" : %016x\n", r.ID) - } else { - fmt.Println(`"ID" :`, 
r.ID) - } + fmt.Println(`"ID" :`, r.ID) fmt.Println(`"TTL" :`, r.TTL) fmt.Println(`"GrantedTTL" :`, r.GrantedTTL) for _, k := range r.Keys { @@ -151,22 +123,14 @@ func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) { func (p *fieldsPrinter) Leases(r v3.LeaseLeasesResponse) { p.hdr(r.ResponseHeader) for _, item := range r.Leases { - if p.isHex { - fmt.Printf("\"ID\" : %016x\n", item.ID) - } else { - fmt.Println(`"ID" :`, item.ID) - } + fmt.Println(`"ID" :`, item.ID) } } func (p *fieldsPrinter) MemberList(r v3.MemberListResponse) { p.hdr(r.Header) for _, m := range r.Members { - if p.isHex { - fmt.Println(`"ID" :`, types.ID(m.ID)) - } else { - fmt.Println(`"ID" :`, m.ID) - } + fmt.Println(`"ID" :`, m.ID) fmt.Printf("\"Name\" : %q\n", m.Name) for _, u := range m.PeerURLs { fmt.Printf("\"PeerURL\" : %q\n", u) @@ -193,9 +157,7 @@ func (p *fieldsPrinter) EndpointStatus(eps []epStatus) { for _, ep := range eps { p.hdr(ep.Resp.Header) fmt.Printf("\"Version\" : %q\n", ep.Resp.Version) - fmt.Printf("\"StorageVersion\" : %q\n", ep.Resp.StorageVersion) fmt.Println(`"DBSize" :`, ep.Resp.DbSize) - fmt.Println(`"DBSizeInUse" :`, ep.Resp.DbSizeInUse) fmt.Println(`"Leader" :`, ep.Resp.Leader) fmt.Println(`"IsLearner" :`, ep.Resp.IsLearner) fmt.Println(`"RaftIndex" :`, ep.Resp.RaftIndex) @@ -212,7 +174,6 @@ func (p *fieldsPrinter) EndpointHashKV(hs []epHashKV) { p.hdr(h.Resp.Header) fmt.Printf("\"Endpoint\" : %q\n", h.Ep) fmt.Println(`"Hash" :`, h.Resp.Hash) - fmt.Println(`"HashRevision" :`, h.Resp.HashRevision) fmt.Println() } } @@ -220,11 +181,7 @@ func (p *fieldsPrinter) EndpointHashKV(hs []epHashKV) { func (p *fieldsPrinter) Alarm(r v3.AlarmResponse) { p.hdr(r.Header) for _, a := range r.Alarms { - if p.isHex { - fmt.Println(`"MemberID" :`, types.ID(a.MemberID)) - } else { - fmt.Println(`"MemberID" :`, a.MemberID) - } + fmt.Println(`"MemberID" :`, a.MemberID) fmt.Println(`"AlarmType" :`, a.Alarm) fmt.Println() } @@ -242,15 +199,17 @@ func (p 
*fieldsPrinter) RoleGet(role string, r v3.AuthRoleGetResponse) { func (p *fieldsPrinter) RoleDelete(role string, r v3.AuthRoleDeleteResponse) { p.hdr(r.Header) } func (p *fieldsPrinter) RoleList(r v3.AuthRoleListResponse) { p.hdr(r.Header) - fmt.Print(`"Roles" :`) + fmt.Printf(`"Roles" :`) for _, r := range r.Roles { fmt.Printf(" %q", r) } fmt.Println() } + func (p *fieldsPrinter) RoleGrantPermission(role string, r v3.AuthRoleGrantPermissionResponse) { p.hdr(r.Header) } + func (p *fieldsPrinter) RoleRevokePermission(role string, key string, end string, r v3.AuthRoleRevokePermissionResponse) { p.hdr(r.Header) } @@ -259,6 +218,7 @@ func (p *fieldsPrinter) UserChangePassword(r v3.AuthUserChangePasswordResponse) func (p *fieldsPrinter) UserGrantRole(user string, role string, r v3.AuthUserGrantRoleResponse) { p.hdr(r.Header) } + func (p *fieldsPrinter) UserRevokeRole(user string, role string, r v3.AuthUserRevokeRoleResponse) { p.hdr(r.Header) } diff --git a/etcdctl/ctlv3/command/printer_json.go b/etcdctl/ctlv3/command/printer_json.go index c97fc69876a..896435549a3 100644 --- a/etcdctl/ctlv3/command/printer_json.go +++ b/etcdctl/ctlv3/command/printer_json.go @@ -21,7 +21,7 @@ import ( "os" "strconv" - clientv3 "go.etcd.io/etcd/client/v3" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" ) type jsonPrinter struct { @@ -37,7 +37,9 @@ func newJSONPrinter(isHex bool) printer { } func (p *jsonPrinter) EndpointHealth(r []epHealth) { printJSON(r) } -func (p *jsonPrinter) EndpointStatus(r []epStatus) { printJSON(r) } +func (p *jsonPrinter) EndpointStatus(r []epStatus) { + printJSON(r) +} func (p *jsonPrinter) EndpointHashKV(r []epHashKV) { printJSON(r) } func (p *jsonPrinter) MemberList(r clientv3.MemberListResponse) { @@ -67,7 +69,7 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) { b = strconv.AppendUint(nil, r.Header.MemberId, 16) buffer.Write(b) buffer.WriteString("\",\"raft_term\":") - b = strconv.AppendUint(nil, r.Header.RaftTerm, 10) + b = 
strconv.AppendUint(nil, r.Header.RaftTerm, 16) buffer.Write(b) buffer.WriteByte('}') for i := 0; i < len(r.Members); i++ { @@ -84,7 +86,7 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) { return } buffer.Write(b) - buffer.WriteString(",\"clientURLs\":") + buffer.WriteString(",\"clientURLS\":") b, err = json.Marshal(r.Members[i].ClientURLs) if err != nil { return @@ -97,5 +99,4 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) { } buffer.WriteString("}") fmt.Println(buffer.String()) - } diff --git a/etcdctl/ctlv3/command/printer_protobuf.go b/etcdctl/ctlv3/command/printer_protobuf.go index da1da9f3441..25fa0ea186f 100644 --- a/etcdctl/ctlv3/command/printer_protobuf.go +++ b/etcdctl/ctlv3/command/printer_protobuf.go @@ -18,10 +18,10 @@ import ( "fmt" "os" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - mvccpb "go.etcd.io/etcd/api/v3/mvccpb" - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + mvccpb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" ) type pbPrinter struct{ printer } diff --git a/etcdctl/ctlv3/command/printer_simple.go b/etcdctl/ctlv3/command/printer_simple.go index 80f3bc9b92a..c992807c6ae 100644 --- a/etcdctl/ctlv3/command/printer_simple.go +++ b/etcdctl/ctlv3/command/printer_simple.go @@ -19,13 +19,10 @@ import ( "os" "strings" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - v3 "go.etcd.io/etcd/client/v3" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" ) -const rootRole = "root" - type simplePrinter struct { isHex bool valueOnly bool @@ -60,14 +57,16 @@ func (s *simplePrinter) Txn(resp v3.TxnResponse) { for _, r := range resp.Responses { fmt.Println("") - switch v := r.Response.(type) { - case *pb.ResponseOp_ResponseDeleteRange: + if r.ResponseOp_ResponseDeleteRange 
!= nil { + v := r.ResponseOp_ResponseDeleteRange s.Del((v3.DeleteResponse)(*v.ResponseDeleteRange)) - case *pb.ResponseOp_ResponsePut: + } else if r.ResponseOp_ResponsePut != nil { + v := r.ResponseOp_ResponsePut s.Put((v3.PutResponse)(*v.ResponsePut)) - case *pb.ResponseOp_ResponseRange: - s.Get(((v3.GetResponse)(*v.ResponseRange))) - default: + } else if r.ResponseOp_ResponseRange != nil { + v := r.ResponseOp_ResponseRange + s.Get((v3.GetResponse)(*v.ResponseRange)) + } else { fmt.Printf("unexpected response %+v\n", r) } } @@ -109,7 +108,7 @@ func (s *simplePrinter) TimeToLive(resp v3.LeaseTimeToLiveResponse, keys bool) { } txt += fmt.Sprintf(", attached keys(%v)", ks) } - fmt.Println(txt) + fmt.Println("TimeToLive--->", txt) } func (s *simplePrinter) Leases(resp v3.LeaseLeasesResponse) { @@ -126,11 +125,7 @@ func (s *simplePrinter) Alarm(resp v3.AlarmResponse) { } func (s *simplePrinter) MemberAdd(r v3.MemberAddResponse) { - asLearner := " " - if r.Member.IsLearner { - asLearner = " as learner " - } - fmt.Printf("Member %16x added%sto cluster %16x\n", r.Member.ID, asLearner, r.Header.ClusterId) + fmt.Printf("Member %16x added to cluster %16x\n", r.Member.ID, r.Header.ClusterId) } func (s *simplePrinter) MemberRemove(id uint64, r v3.MemberRemoveResponse) { @@ -155,9 +150,9 @@ func (s *simplePrinter) MemberList(resp v3.MemberListResponse) { func (s *simplePrinter) EndpointHealth(hs []epHealth) { for _, h := range hs { if h.Error == "" { - fmt.Printf("%s is healthy: successfully committed proposal: took = %v\n", h.Ep, h.Took) + fmt.Printf("%s 健康:propose 成功: took = %v\n", h.Ep, h.Took) } else { - fmt.Fprintf(os.Stderr, "%s is unhealthy: failed to commit proposal: %v\n", h.Ep, h.Error) + fmt.Fprintf(os.Stderr, "%s 不健康:propose 失败: %v\n", h.Ep, h.Error) } } } @@ -180,60 +175,42 @@ func (s *simplePrinter) MoveLeader(leader, target uint64, r v3.MoveLeaderRespons fmt.Printf("Leadership transferred from %s to %s\n", types.ID(leader), types.ID(target)) } -func (s 
*simplePrinter) DowngradeValidate(r v3.DowngradeResponse) { - fmt.Printf("Downgrade validate success, cluster version %s\n", r.Version) -} -func (s *simplePrinter) DowngradeEnable(r v3.DowngradeResponse) { - fmt.Printf("Downgrade enable success, cluster version %s\n", r.Version) -} -func (s *simplePrinter) DowngradeCancel(r v3.DowngradeResponse) { - fmt.Printf("Downgrade cancel success, cluster version %s\n", r.Version) -} - func (s *simplePrinter) RoleAdd(role string, r v3.AuthRoleAddResponse) { - fmt.Printf("Role %s created\n", role) + fmt.Printf("角色 %s 已创建\n", role) } func (s *simplePrinter) RoleGet(role string, r v3.AuthRoleGetResponse) { fmt.Printf("Role %s\n", role) - if rootRole == role && r.Perm == nil { - fmt.Println("KV Read:") - fmt.Println("\t[, ") - fmt.Println("KV Write:") - fmt.Println("\t[, ") - return - } - - fmt.Println("KV Read:") + fmt.Println("---->KV Read:") printRange := func(perm *v3.Permission) { - sKey := string(perm.Key) - sRangeEnd := string(perm.RangeEnd) + sKey := perm.Key + sRangeEnd := perm.RangeEnd if sRangeEnd != "\x00" { fmt.Printf("\t[%s, %s)", sKey, sRangeEnd) } else { fmt.Printf("\t[%s, ", sKey) } - if v3.GetPrefixRangeEnd(sKey) == sRangeEnd && len(sKey) > 0 { + if v3.GetPrefixRangeEnd(sKey) == sRangeEnd { fmt.Printf(" (prefix %s)", sKey) } - fmt.Print("\n") + fmt.Printf("\n") } for _, perm := range r.Perm { if perm.PermType == v3.PermRead || perm.PermType == v3.PermReadWrite { if len(perm.RangeEnd) == 0 { - fmt.Printf("\t%s\n", string(perm.Key)) + fmt.Printf("\t%s\n", perm.Key) } else { printRange((*v3.Permission)(perm)) } } } - fmt.Println("KV Write:") + fmt.Println("---->KV Write:") for _, perm := range r.Perm { if perm.PermType == v3.PermWrite || perm.PermType == v3.PermReadWrite { if len(perm.RangeEnd) == 0 { - fmt.Printf("\t%s\n", string(perm.Key)) + fmt.Printf("\t%s\n", perm.Key) } else { printRange((*v3.Permission)(perm)) } @@ -248,11 +225,11 @@ func (s *simplePrinter) RoleList(r v3.AuthRoleListResponse) { } func (s 
*simplePrinter) RoleDelete(role string, r v3.AuthRoleDeleteResponse) { - fmt.Printf("Role %s deleted\n", role) + fmt.Printf("角色 %s 删除了\n", role) } func (s *simplePrinter) RoleGrantPermission(role string, r v3.AuthRoleGrantPermissionResponse) { - fmt.Printf("Role %s updated\n", role) + fmt.Printf("角色 %s 已更新\n", role) } func (s *simplePrinter) RoleRevokePermission(role string, key string, end string, r v3.AuthRoleRevokePermissionResponse) { @@ -273,27 +250,27 @@ func (s *simplePrinter) UserAdd(name string, r v3.AuthUserAddResponse) { func (s *simplePrinter) UserGet(name string, r v3.AuthUserGetResponse) { fmt.Printf("User: %s\n", name) - fmt.Print("Roles:") + fmt.Printf("Roles:") for _, role := range r.Roles { fmt.Printf(" %s", role) } - fmt.Print("\n") + fmt.Printf("\n") } func (s *simplePrinter) UserChangePassword(v3.AuthUserChangePasswordResponse) { - fmt.Println("Password updated") + fmt.Println("密码已更新") } func (s *simplePrinter) UserGrantRole(user string, role string, r v3.AuthUserGrantRoleResponse) { - fmt.Printf("Role %s is granted to user %s\n", role, user) + fmt.Printf("角色 %s 授予了用户 %s\n", role, user) } func (s *simplePrinter) UserRevokeRole(user string, role string, r v3.AuthUserRevokeRoleResponse) { - fmt.Printf("Role %s is revoked from user %s\n", role, user) + fmt.Printf("用户%s移除了角色 %s \n", user, role) } func (s *simplePrinter) UserDelete(user string, r v3.AuthUserDeleteResponse) { - fmt.Printf("User %s deleted\n", user) + fmt.Printf("用户 %s 已删除\n", user) } func (s *simplePrinter) UserList(r v3.AuthUserListResponse) { @@ -303,6 +280,6 @@ func (s *simplePrinter) UserList(r v3.AuthUserListResponse) { } func (s *simplePrinter) AuthStatus(r v3.AuthStatusResponse) { - fmt.Println("Authentication Status:", r.Enabled) - fmt.Println("AuthRevision:", r.AuthRevision) + fmt.Println("身份认证是否开启:", r.Enabled) + fmt.Println("验证版本:", r.AuthRevision) } diff --git a/etcdctl/ctlv3/command/printer_table.go b/etcdctl/ctlv3/command/printer_table.go index 2bc6cfcf603..80fee9f3d79 
100644 --- a/etcdctl/ctlv3/command/printer_table.go +++ b/etcdctl/ctlv3/command/printer_table.go @@ -17,7 +17,7 @@ package command import ( "os" - v3 "go.etcd.io/etcd/client/v3" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" "github.com/olekukonko/tablewriter" ) @@ -34,6 +34,7 @@ func (tp *tablePrinter) MemberList(r v3.MemberListResponse) { table.SetAlignment(tablewriter.ALIGN_RIGHT) table.Render() } + func (tp *tablePrinter) EndpointHealth(r []epHealth) { hdr, rows := makeEndpointHealthTable(r) table := tablewriter.NewWriter(os.Stdout) @@ -44,6 +45,7 @@ func (tp *tablePrinter) EndpointHealth(r []epHealth) { table.SetAlignment(tablewriter.ALIGN_RIGHT) table.Render() } + func (tp *tablePrinter) EndpointStatus(r []epStatus) { hdr, rows := makeEndpointStatusTable(r) table := tablewriter.NewWriter(os.Stdout) @@ -54,6 +56,7 @@ func (tp *tablePrinter) EndpointStatus(r []epStatus) { table.SetAlignment(tablewriter.ALIGN_RIGHT) table.Render() } + func (tp *tablePrinter) EndpointHashKV(r []epHashKV) { hdr, rows := makeEndpointHashKVTable(r) table := tablewriter.NewWriter(os.Stdout) diff --git a/etcdctl/ctlv3/command/put_command.go b/etcdctl/ctlv3/command/put_command.go deleted file mode 100644 index 1d207bb8388..00000000000 --- a/etcdctl/ctlv3/command/put_command.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package command - -import ( - "fmt" - "os" - "strconv" - - "github.com/spf13/cobra" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -var ( - leaseStr string - putPrevKV bool - putIgnoreVal bool - putIgnoreLease bool -) - -// NewPutCommand returns the cobra command for "put". -func NewPutCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "put [options] ( can also be given from stdin)", - Short: "Puts the given key into the store", - Long: ` -Puts the given key into the store. - -When begins with '-', is interpreted as a flag. -Insert '--' for workaround: - -$ put -- -$ put -- - -If isn't given as a command line argument and '--ignore-value' is not specified, -this command tries to read the value from standard input. - -If isn't given as a command line argument and '--ignore-lease' is not specified, -this command tries to read the value from standard input. - -For example, -$ cat file | put -will store the content of the file to . -`, - Run: putCommandFunc, - } - cmd.Flags().StringVar(&leaseStr, "lease", "0", "lease ID (in hexadecimal) to attach to the key") - cmd.Flags().BoolVar(&putPrevKV, "prev-kv", false, "return the previous key-value pair before modification") - cmd.Flags().BoolVar(&putIgnoreVal, "ignore-value", false, "updates the key using its current value") - cmd.Flags().BoolVar(&putIgnoreLease, "ignore-lease", false, "updates the key using its current lease") - return cmd -} - -// putCommandFunc executes the "put" command. -func putCommandFunc(cmd *cobra.Command, args []string) { - key, value, opts := getPutOp(args) - - ctx, cancel := commandCtx(cmd) - resp, err := mustClientFromCmd(cmd).Put(ctx, key, value, opts...) 
- cancel() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - display.Put(*resp) -} - -func getPutOp(args []string) (string, string, []clientv3.OpOption) { - if len(args) == 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments")) - } - - key := args[0] - if putIgnoreVal && len(args) > 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs only 1 argument when 'ignore-value' is set")) - } - - var value string - var err error - if !putIgnoreVal { - value, err = argOrStdin(args, os.Stdin, 1) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments")) - } - } - - id, err := strconv.ParseInt(leaseStr, 16, 64) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err)) - } - - var opts []clientv3.OpOption - if id != 0 { - opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id))) - } - if putPrevKV { - opts = append(opts, clientv3.WithPrevKV()) - } - if putIgnoreVal { - opts = append(opts, clientv3.WithIgnoreValue()) - } - if putIgnoreLease { - opts = append(opts, clientv3.WithIgnoreLease()) - } - - return key, value, opts -} diff --git a/etcdctl/ctlv3/command/role_command.go b/etcdctl/ctlv3/command/role_command.go deleted file mode 100644 index 705d88d886f..00000000000 --- a/etcdctl/ctlv3/command/role_command.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "context" - "fmt" - - "github.com/spf13/cobra" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -var ( - rolePermPrefix bool - rolePermFromKey bool -) - -// NewRoleCommand returns the cobra command for "role". -func NewRoleCommand() *cobra.Command { - ac := &cobra.Command{ - Use: "role ", - Short: "Role related commands", - } - - ac.AddCommand(newRoleAddCommand()) - ac.AddCommand(newRoleDeleteCommand()) - ac.AddCommand(newRoleGetCommand()) - ac.AddCommand(newRoleListCommand()) - ac.AddCommand(newRoleGrantPermissionCommand()) - ac.AddCommand(newRoleRevokePermissionCommand()) - - return ac -} - -func newRoleAddCommand() *cobra.Command { - return &cobra.Command{ - Use: "add ", - Short: "Adds a new role", - Run: roleAddCommandFunc, - } -} - -func newRoleDeleteCommand() *cobra.Command { - return &cobra.Command{ - Use: "delete ", - Short: "Deletes a role", - Run: roleDeleteCommandFunc, - } -} - -func newRoleGetCommand() *cobra.Command { - return &cobra.Command{ - Use: "get ", - Short: "Gets detailed information of a role", - Run: roleGetCommandFunc, - } -} - -func newRoleListCommand() *cobra.Command { - return &cobra.Command{ - Use: "list", - Short: "Lists all roles", - Run: roleListCommandFunc, - } -} - -func newRoleGrantPermissionCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "grant-permission [options] [endkey]", - Short: "Grants a key to a role", - Run: roleGrantPermissionCommandFunc, - } - - cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "grant a prefix permission") 
- cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "grant a permission of keys that are greater than or equal to the given key using byte compare") - - return cmd -} - -func newRoleRevokePermissionCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "revoke-permission [endkey]", - Short: "Revokes a key from a role", - Run: roleRevokePermissionCommandFunc, - } - - cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "revoke a prefix permission") - cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "revoke a permission of keys that are greater than or equal to the given key using byte compare") - - return cmd -} - -// roleAddCommandFunc executes the "role add" command. -func roleAddCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role add command requires role name as its argument")) - } - - resp, err := mustClientFromCmd(cmd).Auth.RoleAdd(context.TODO(), args[0]) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.RoleAdd(args[0], *resp) -} - -// roleDeleteCommandFunc executes the "role delete" command. -func roleDeleteCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role delete command requires role name as its argument")) - } - - resp, err := mustClientFromCmd(cmd).Auth.RoleDelete(context.TODO(), args[0]) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.RoleDelete(args[0], *resp) -} - -// roleGetCommandFunc executes the "role get" command. 
-func roleGetCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role get command requires role name as its argument")) - } - - name := args[0] - resp, err := mustClientFromCmd(cmd).Auth.RoleGet(context.TODO(), name) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.RoleGet(name, *resp) -} - -// roleListCommandFunc executes the "role list" command. -func roleListCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role list command requires no arguments")) - } - - resp, err := mustClientFromCmd(cmd).Auth.RoleList(context.TODO()) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.RoleList(*resp) -} - -// roleGrantPermissionCommandFunc executes the "role grant-permission" command. -func roleGrantPermissionCommandFunc(cmd *cobra.Command, args []string) { - if len(args) < 3 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role grant command requires role name, permission type, and key [endkey] as its argument")) - } - - perm, err := clientv3.StrToPermissionType(args[1]) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) - } - - key, rangeEnd := permRange(args[2:]) - resp, err := mustClientFromCmd(cmd).Auth.RoleGrantPermission(context.TODO(), args[0], key, rangeEnd, perm) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.RoleGrantPermission(args[0], *resp) -} - -// roleRevokePermissionCommandFunc executes the "role revoke-permission" command. 
-func roleRevokePermissionCommandFunc(cmd *cobra.Command, args []string) { - if len(args) < 2 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role revoke-permission command requires role name and key [endkey] as its argument")) - } - - key, rangeEnd := permRange(args[1:]) - resp, err := mustClientFromCmd(cmd).Auth.RoleRevokePermission(context.TODO(), args[0], key, rangeEnd) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - display.RoleRevokePermission(args[0], args[1], rangeEnd, *resp) -} - -func permRange(args []string) (string, string) { - key := args[0] - var rangeEnd string - if len(key) == 0 { - if rolePermPrefix && rolePermFromKey { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--from-key and --prefix flags are mutually exclusive")) - } - - // Range permission is expressed as adt.BytesAffineInterval, - // so the empty prefix which should be matched with every key must be like this ["\x00", ). - key = "\x00" - if rolePermPrefix || rolePermFromKey { - // For the both cases of prefix and from-key, a permission with an empty key - // should allow access to the entire key space. - // 0x00 will be treated as open ended in server side. 
- rangeEnd = "\x00" - } - } else { - var err error - rangeEnd, err = rangeEndFromPermFlags(args[0:]) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) - } - } - return key, rangeEnd -} - -func rangeEndFromPermFlags(args []string) (string, error) { - if len(args) == 1 { - if rolePermPrefix { - if rolePermFromKey { - return "", fmt.Errorf("--from-key and --prefix flags are mutually exclusive") - } - return clientv3.GetPrefixRangeEnd(args[0]), nil - } - if rolePermFromKey { - return "\x00", nil - } - // single key case - return "", nil - } - if rolePermPrefix { - return "", fmt.Errorf("unexpected endkey argument with --prefix flag") - } - if rolePermFromKey { - return "", fmt.Errorf("unexpected endkey argument with --from-key flag") - } - return args[1], nil -} diff --git a/etcdctl/ctlv3/command/snapshot_command.go b/etcdctl/ctlv3/command/snapshot_command.go index df317e23cc7..5d9cc98d26e 100644 --- a/etcdctl/ctlv3/command/snapshot_command.go +++ b/etcdctl/ctlv3/command/snapshot_command.go @@ -17,13 +17,28 @@ package command import ( "context" "fmt" + "os" + snapshot "github.com/ls-2018/etcd_cn/client_sdk/v3/snapshot" + "github.com/ls-2018/etcd_cn/etcdutl/etcdutl" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" "go.uber.org/zap" +) + +const ( + defaultName = "default" + defaultInitialAdvertisePeerURLs = "http://localhost:2380" +) - "go.etcd.io/etcd/client/pkg/v3/logutil" - snapshot "go.etcd.io/etcd/client/v3/snapshot" - "go.etcd.io/etcd/pkg/v3/cobrautl" +var ( + restoreCluster string + restoreClusterToken string + restoreDataDir string + restoreWalDir string + restorePeerURLs string + restoreName string + skipHashCheck bool ) // NewSnapshotCommand returns the cobra command for "snapshot". 
@@ -33,24 +48,56 @@ func NewSnapshotCommand() *cobra.Command { Short: "Manages etcd node snapshots", } cmd.AddCommand(NewSnapshotSaveCommand()) + cmd.AddCommand(NewSnapshotRestoreCommand()) + cmd.AddCommand(newSnapshotStatusCommand()) return cmd } func NewSnapshotSaveCommand() *cobra.Command { return &cobra.Command{ Use: "save ", - Short: "Stores an etcd node backend snapshot to a given file", + Short: "将etcd节点后端快照存储到给定的文件", Run: snapshotSaveCommandFunc, } } +func newSnapshotStatusCommand() *cobra.Command { + return &cobra.Command{ + Use: "status ", + Short: "[deprecated] 从给定的文件获取快照状态", + Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint. +The items in the lists are hash, revision, total keys, total size. + +Moved to 'etcdctl snapshot status ...' +`, + Run: snapshotStatusCommandFunc, + } +} + +func NewSnapshotRestoreCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "restore [options]", + Short: "将etcd成员快照恢复到etcd目录", + Run: snapshotRestoreCommandFunc, + } + cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "数据目录") + cmd.Flags().StringVar(&restoreWalDir, "wal-dir", "", "wal目录 (use --data-dir if none given)") + cmd.Flags().StringVar(&restoreCluster, "initial-cluster", initialClusterFromName(defaultName), "初始集群配置") + cmd.Flags().StringVar(&restoreClusterToken, "initial-cluster-token", "etcd-cluster", "在恢复引导过程中etcd集群的初始群集令牌") + cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "要通告给集群其他部分的该成员的对等url列表") + cmd.Flags().StringVar(&restoreName, "name", defaultName, "此成员的人类可读的名称") + cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "忽略快照完整性哈希值(从数据目录复制时需要)") + + return cmd +} + func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) { if len(args) != 1 { err := fmt.Errorf("snapshot save expects one argument") cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) } - lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel) + lg, err 
:= zap.NewProduction() if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) } @@ -64,12 +111,26 @@ func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) { defer cancel() path := args[0] - version, err := snapshot.SaveWithVersion(ctx, lg, *cfg, path) - if err != nil { + if err := snapshot.Save(ctx, lg, *cfg, path); err != nil { cobrautl.ExitWithError(cobrautl.ExitInterrupted, err) } fmt.Printf("Snapshot saved at %s\n", path) - if version != "" { - fmt.Printf("Server version %s\n", version) +} + +func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) { + fmt.Fprintf(os.Stderr, "Deprecated: Use `etcdutl snapshot status` instead.\n\n") + etcdutl.SnapshotStatusCommandFunc(cmd, args) +} + +func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) { + fmt.Fprintf(os.Stderr, "弃用: 使用 `etcdutl snapshot restore` \n\n") + etcdutl.SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir, restorePeerURLs, restoreName, skipHashCheck, args) +} + +func initialClusterFromName(name string) string { + n := name + if name == "" { + n = defaultName } + return fmt.Sprintf("%s=http://localhost:2380", n) } diff --git a/etcdctl/ctlv3/command/txn_command.go b/etcdctl/ctlv3/command/txn_command.go index b05a7ea6614..2483ec6e9cb 100644 --- a/etcdctl/ctlv3/command/txn_command.go +++ b/etcdctl/ctlv3/command/txn_command.go @@ -22,9 +22,10 @@ import ( "strconv" "strings" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" ) @@ -34,8 +35,8 @@ var txnInteractive bool // NewTxnCommand returns the cobra command for "txn". 
func NewTxnCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "txn [options]", - Short: "Txn processes all the requests in one transaction", + Use: "txn [options] ", + Short: "在一个事务里处理所有请求 c(\"a\") = \"22222\"", Run: txnCommandFunc, } cmd.Flags().BoolVarP(&txnInteractive, "interactive", "i", false, "Input transaction in interactive mode") @@ -68,7 +69,7 @@ func txnCommandFunc(cmd *cobra.Command, args []string) { func promptInteractive(s string) { if txnInteractive { - fmt.Println(s) + fmt.Println("promptInteractive--->", s) } } @@ -85,7 +86,7 @@ func readCompares(r *bufio.Reader) (cmps []clientv3.Cmp) { break } - cmp, err := ParseCompare(line) + cmp, err := parseCompare(line) if err != nil { cobrautl.ExitWithError(cobrautl.ExitInvalidInput, err) } @@ -119,7 +120,7 @@ func readOps(r *bufio.Reader) (ops []clientv3.Op) { } func parseRequestUnion(line string) (*clientv3.Op, error) { - args := Argify(line) + args := argify(line) if len(args) < 2 { return nil, fmt.Errorf("invalid txn compare request: %s", line) } @@ -153,7 +154,7 @@ func parseRequestUnion(line string) (*clientv3.Op, error) { return &op, nil } -func ParseCompare(line string) (*clientv3.Cmp, error) { +func parseCompare(line string) (*clientv3.Cmp, error) { var ( key string op string @@ -164,7 +165,7 @@ func ParseCompare(line string) (*clientv3.Cmp, error) { if len(lparenSplit) != 2 { return nil, fmt.Errorf("malformed comparison: %s", line) } - + // c("a") = "22222" target := lparenSplit[0] n, serr := fmt.Sscanf(lparenSplit[1], "%q) %s %q", &key, &op, &val) if n != 3 { diff --git a/etcdctl/ctlv3/command/user_command.go b/etcdctl/ctlv3/command/user_command.go deleted file mode 100644 index ee3f55a2a47..00000000000 --- a/etcdctl/ctlv3/command/user_command.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "context" - "fmt" - "strings" - - "github.com/bgentry/speakeasy" - "github.com/spf13/cobra" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" -) - -var ( - userShowDetail bool -) - -// NewUserCommand returns the cobra command for "user". -func NewUserCommand() *cobra.Command { - ac := &cobra.Command{ - Use: "user ", - Short: "User related commands", - } - - ac.AddCommand(newUserAddCommand()) - ac.AddCommand(newUserDeleteCommand()) - ac.AddCommand(newUserGetCommand()) - ac.AddCommand(newUserListCommand()) - ac.AddCommand(newUserChangePasswordCommand()) - ac.AddCommand(newUserGrantRoleCommand()) - ac.AddCommand(newUserRevokeRoleCommand()) - - return ac -} - -var ( - passwordInteractive bool - passwordFromFlag string - noPassword bool -) - -func newUserAddCommand() *cobra.Command { - cmd := cobra.Command{ - Use: "add [options]", - Short: "Adds a new user", - Run: userAddCommandFunc, - } - - cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, "Read password from stdin instead of interactive terminal") - cmd.Flags().StringVar(&passwordFromFlag, "new-user-password", "", "Supply password from the command line flag") - cmd.Flags().BoolVar(&noPassword, "no-password", false, "Create a user without password (CN based auth only)") - - return &cmd -} - -func newUserDeleteCommand() *cobra.Command { - return &cobra.Command{ - Use: "delete ", - Short: "Deletes a user", - Run: userDeleteCommandFunc, - } -} - -func newUserGetCommand() *cobra.Command { - cmd := cobra.Command{ - Use: "get 
[options]", - Short: "Gets detailed information of a user", - Run: userGetCommandFunc, - } - - cmd.Flags().BoolVar(&userShowDetail, "detail", false, "Show permissions of roles granted to the user") - - return &cmd -} - -func newUserListCommand() *cobra.Command { - return &cobra.Command{ - Use: "list", - Short: "Lists all users", - Run: userListCommandFunc, - } -} - -func newUserChangePasswordCommand() *cobra.Command { - cmd := cobra.Command{ - Use: "passwd [options]", - Short: "Changes password of user", - Run: userChangePasswordCommandFunc, - } - - cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, "If true, read password from stdin instead of interactive terminal") - - return &cmd -} - -func newUserGrantRoleCommand() *cobra.Command { - return &cobra.Command{ - Use: "grant-role ", - Short: "Grants a role to a user", - Run: userGrantRoleCommandFunc, - } -} - -func newUserRevokeRoleCommand() *cobra.Command { - return &cobra.Command{ - Use: "revoke-role ", - Short: "Revokes a role from a user", - Run: userRevokeRoleCommandFunc, - } -} - -// userAddCommandFunc executes the "user add" command. 
-func userAddCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user add command requires user name as its argument")) - } - - var password string - var user string - - options := &clientv3.UserAddOptions{ - NoPassword: false, - } - - if !noPassword { - if passwordFromFlag != "" { - user = args[0] - password = passwordFromFlag - } else { - splitted := strings.SplitN(args[0], ":", 2) - if len(splitted) < 2 { - user = args[0] - if !passwordInteractive { - fmt.Scanf("%s", &password) - } else { - password = readPasswordInteractive(args[0]) - } - } else { - user = splitted[0] - password = splitted[1] - if len(user) == 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("empty user name is not allowed")) - } - } - } - } else { - user = args[0] - options.NoPassword = true - } - - resp, err := mustClientFromCmd(cmd).Auth.UserAddWithOptions(context.TODO(), user, password, options) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.UserAdd(user, *resp) -} - -// userDeleteCommandFunc executes the "user delete" command. -func userDeleteCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user delete command requires user name as its argument")) - } - - resp, err := mustClientFromCmd(cmd).Auth.UserDelete(context.TODO(), args[0]) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - display.UserDelete(args[0], *resp) -} - -// userGetCommandFunc executes the "user get" command. 
-func userGetCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user get command requires user name as its argument")) - } - - name := args[0] - client := mustClientFromCmd(cmd) - resp, err := client.Auth.UserGet(context.TODO(), name) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - if userShowDetail { - fmt.Printf("User: %s\n", name) - for _, role := range resp.Roles { - fmt.Print("\n") - roleResp, err := client.Auth.RoleGet(context.TODO(), role) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - display.RoleGet(role, *roleResp) - } - } else { - display.UserGet(name, *resp) - } -} - -// userListCommandFunc executes the "user list" command. -func userListCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user list command requires no arguments")) - } - - resp, err := mustClientFromCmd(cmd).Auth.UserList(context.TODO()) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.UserList(*resp) -} - -// userChangePasswordCommandFunc executes the "user passwd" command. -func userChangePasswordCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user passwd command requires user name as its argument")) - } - - var password string - - if !passwordInteractive { - fmt.Scanf("%s", &password) - } else { - password = readPasswordInteractive(args[0]) - } - - resp, err := mustClientFromCmd(cmd).Auth.UserChangePassword(context.TODO(), args[0], password) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.UserChangePassword(*resp) -} - -// userGrantRoleCommandFunc executes the "user grant-role" command. 
-func userGrantRoleCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 2 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user grant command requires user name and role name as its argument")) - } - - resp, err := mustClientFromCmd(cmd).Auth.UserGrantRole(context.TODO(), args[0], args[1]) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.UserGrantRole(args[0], args[1], *resp) -} - -// userRevokeRoleCommandFunc executes the "user revoke-role" command. -func userRevokeRoleCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 2 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user revoke-role requires user name and role name as its argument")) - } - - resp, err := mustClientFromCmd(cmd).Auth.UserRevokeRole(context.TODO(), args[0], args[1]) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - - display.UserRevokeRole(args[0], args[1], *resp) -} - -func readPasswordInteractive(name string) string { - prompt1 := fmt.Sprintf("Password of %s: ", name) - password1, err1 := speakeasy.Ask(prompt1) - if err1 != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %s", err1)) - } - - if len(password1) == 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("empty password")) - } - - prompt2 := fmt.Sprintf("Type password of %s again for confirmation: ", name) - password2, err2 := speakeasy.Ask(prompt2) - if err2 != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %s", err2)) - } - - if password1 != password2 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("given passwords are different")) - } - - return password1 -} diff --git a/etcdctl/ctlv3/command/util.go b/etcdctl/ctlv3/command/util.go index 8338ef33dcd..c24b9d11ec0 100644 --- a/etcdctl/ctlv3/command/util.go +++ b/etcdctl/ctlv3/command/util.go @@ -19,16 +19,16 @@ import ( "crypto/tls" "encoding/hex" "fmt" - "io" + 
"io/ioutil" "net/http" "regexp" "strconv" "strings" "time" - pb "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + pb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" ) @@ -36,13 +36,13 @@ import ( func printKV(isHex bool, valueOnly bool, kv *pb.KeyValue) { k, v := string(kv.Key), string(kv.Value) if isHex { - k = addHexPrefix(hex.EncodeToString(kv.Key)) - v = addHexPrefix(hex.EncodeToString(kv.Value)) + k = addHexPrefix(hex.EncodeToString([]byte(kv.Key))) + v = addHexPrefix(hex.EncodeToString([]byte(kv.Value))) } if !valueOnly { - fmt.Println(k) + fmt.Println("printKV--->", k) } - fmt.Println(v) + fmt.Println("printKV--->", v) } func addHexPrefix(s string) string { @@ -56,7 +56,7 @@ func addHexPrefix(s string) string { return string(ns) } -func Argify(s string) []string { +func argify(s string) []string { r := regexp.MustCompile(`"(?:[^"\\]|\\.)*"|'[^']*'|[^'"\s]\S*[^'"\s]?`) args := r.FindAllString(s, -1) for i := range args { @@ -76,14 +76,6 @@ func Argify(s string) []string { return args } -func commandCtx(cmd *cobra.Command) (context.Context, context.CancelFunc) { - timeOut, err := cmd.Flags().GetDuration("command-timeout") - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - return context.WithTimeout(context.Background(), timeOut) -} - func isCommandTimeoutFlagSet(cmd *cobra.Command) bool { commandTimeoutFlag := cmd.Flags().Lookup("command-timeout") if commandTimeoutFlag == nil { @@ -92,8 +84,8 @@ func isCommandTimeoutFlagSet(cmd *cobra.Command) bool { return commandTimeoutFlag.Changed } -// get the process_resident_memory_bytes from /metrics -func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 { +// get the process_resident_memory_bytes from /metrics +func endpointMemoryMetrics(host string, scfg *secureCfg) float64 { residentMemoryKey := 
"process_resident_memory_bytes" var residentMemoryValue string if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") { @@ -102,14 +94,14 @@ func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 { url := host + "/metrics" if strings.HasPrefix(host, "https://") { // load client certificate - cert, err := tls.LoadX509KeyPair(scfg.Cert, scfg.Key) + cert, err := tls.LoadX509KeyPair(scfg.cert, scfg.key) if err != nil { fmt.Println(fmt.Sprintf("client certificate error: %v", err)) return 0.0 } http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, - InsecureSkipVerify: scfg.InsecureSkipVerify, + InsecureSkipVerify: scfg.insecureSkipVerify, } } resp, err := http.Get(url) @@ -117,7 +109,7 @@ func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 { fmt.Println(fmt.Sprintf("fetch error: %v", err)) return 0.0 } - byts, readerr := io.ReadAll(resp.Body) + byts, readerr := ioutil.ReadAll(resp.Body) resp.Body.Close() if readerr != nil { fmt.Println(fmt.Sprintf("fetch error: reading %s: %v", url, readerr)) @@ -144,10 +136,10 @@ func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 { } // compact keyspace history to a provided revision -func compact(c *clientv3.Client, rev int64) { +func compact(c *v3.Client, rev int64) { fmt.Printf("Compacting with revision %d\n", rev) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - _, err := c.Compact(ctx, rev, clientv3.WithCompactPhysical()) + _, err := c.Compact(ctx, rev, v3.WithCompactPhysical()) cancel() if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) @@ -155,14 +147,22 @@ func compact(c *clientv3.Client, rev int64) { fmt.Printf("Compacted with revision %d\n", rev) } -// defrag a given endpoint -func defrag(c *clientv3.Client, ep string) { - fmt.Printf("Defragmenting %q\n", ep) +func defrag(c *v3.Client, ep string) { + fmt.Printf("开始内存碎片整理 %q\n", ep) ctx, 
cancel := context.WithTimeout(context.Background(), 30*time.Second) _, err := c.Defragment(ctx, ep) cancel() if err != nil { cobrautl.ExitWithError(cobrautl.ExitError, err) } - fmt.Printf("Defragmented %q\n", ep) + fmt.Printf("内存碎片整理 %q\n", ep) +} + +// 超时上下文,默认5s +func commandCtx(cmd *cobra.Command) (context.Context, context.CancelFunc) { + timeOut, err := cmd.Flags().GetDuration("command-timeout") + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + return context.WithTimeout(context.Background(), timeOut) } diff --git a/etcdctl/ctlv3/command/version_command.go b/etcdctl/ctlv3/command/version_command.go deleted file mode 100644 index b65c299048b..00000000000 --- a/etcdctl/ctlv3/command/version_command.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "fmt" - - "go.etcd.io/etcd/api/v3/version" - - "github.com/spf13/cobra" -) - -// NewVersionCommand prints out the version of etcd. 
-func NewVersionCommand() *cobra.Command { - return &cobra.Command{ - Use: "version", - Short: "Prints the version of etcdctl", - Run: versionCommandFunc, - } -} - -func versionCommandFunc(cmd *cobra.Command, args []string) { - fmt.Println("etcdctl version:", version.Version) - fmt.Println("API version:", version.APIVersion) -} diff --git a/etcdctl/ctlv3/command/watch_command.go b/etcdctl/ctlv3/command/watch_command.go index d8592cb4e77..2c2b84ea23d 100644 --- a/etcdctl/ctlv3/command/watch_command.go +++ b/etcdctl/ctlv3/command/watch_command.go @@ -23,8 +23,9 @@ import ( "os/exec" "strings" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/cobrautl" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" ) @@ -33,7 +34,7 @@ var ( errBadArgsNum = errors.New("bad number of arguments") errBadArgsNumConflictEnv = errors.New("bad number of arguments (found conflicting environment key)") errBadArgsNumSeparator = errors.New("bad number of arguments (found separator --, but no commands)") - errBadArgsInteractiveWatch = errors.New("args[0] must be 'watch' for interactive calls") + errBadArgsInteractiveWatch = errors.New("args[0]必须是'watch' for interactive calls") ) var ( @@ -44,24 +45,21 @@ var ( progressNotify bool ) -// NewWatchCommand returns the cobra command for "watch". 
func NewWatchCommand() *cobra.Command { cmd := &cobra.Command{ Use: "watch [options] [key or prefix] [range_end] [--] [exec-command arg1 arg2 ...]", - Short: "Watches events stream on keys or prefixes", + Short: "监听键或前缀上的事件流", Run: watchCommandFunc, } - cmd.Flags().BoolVarP(&watchInteractive, "interactive", "i", false, "Interactive mode") - cmd.Flags().BoolVar(&watchPrefix, "prefix", false, "Watch on a prefix if prefix is set") - cmd.Flags().Int64Var(&watchRev, "rev", 0, "Revision to start watching") - cmd.Flags().BoolVar(&watchPrevKey, "prev-kv", false, "get the previous key-value pair before the event happens") - cmd.Flags().BoolVar(&progressNotify, "progress-notify", false, "get periodic watch progress notification from server") - + cmd.Flags().BoolVarP(&watchInteractive, "interactive", "i", false, "交互模式") + cmd.Flags().BoolVar(&watchPrefix, "prefix", false, "是否监听前缀") + cmd.Flags().Int64Var(&watchRev, "rev", 0, "从那个修订版本开始监听") + cmd.Flags().BoolVar(&watchPrevKey, "prev-kv", false, "获取事件发生之前的键值对") + cmd.Flags().BoolVar(&progressNotify, "progress-notify", false, "从etcd获取定期的监听进度通知") return cmd } -// watchCommandFunc executes the "watch" command. 
func watchCommandFunc(cmd *cobra.Command, args []string) { envKey, envRange := os.Getenv("ETCDCTL_WATCH_KEY"), os.Getenv("ETCDCTL_WATCH_RANGE_END") if envKey == "" && envRange != "" { @@ -88,7 +86,7 @@ func watchCommandFunc(cmd *cobra.Command, args []string) { if err = c.Close(); err != nil { cobrautl.ExitWithError(cobrautl.ExitBadConnection, err) } - cobrautl.ExitWithError(cobrautl.ExitInterrupted, fmt.Errorf("watch is canceled by the server")) + cobrautl.ExitWithError(cobrautl.ExitInterrupted, fmt.Errorf("etcd取消了监听")) } func watchInteractiveFunc(cmd *cobra.Command, osArgs []string, envKey, envRange string) { @@ -103,7 +101,7 @@ func watchInteractiveFunc(cmd *cobra.Command, osArgs []string, envKey, envRange } l = strings.TrimSuffix(l, "\n") - args := Argify(l) + args := argify(l) if len(args) < 1 { fmt.Fprintf(os.Stderr, "Invalid command: %s (watch and progress supported)\n", l) continue @@ -166,10 +164,10 @@ func getWatchChan(c *clientv3.Client, args []string) (clientv3.WatchChan, error) func printWatchCh(c *clientv3.Client, ch clientv3.WatchChan, execArgs []string) { for resp := range ch { if resp.Canceled { - fmt.Fprintf(os.Stderr, "watch was canceled (%v)\n", resp.Err()) + fmt.Fprintf(os.Stderr, "监听取消了 (%v)\n", resp.Err()) } if resp.IsProgressNotify() { - fmt.Fprintf(os.Stdout, "progress notify: %d\n", resp.Header.Revision) + fmt.Fprintf(os.Stdout, "进程通知: %d\n", resp.Header.Revision) } display.Watch(resp) diff --git a/etcdctl/ctlv3/command/watch_command_test.go b/etcdctl/ctlv3/command/watch_command_test.go deleted file mode 100644 index 2292deadcbb..00000000000 --- a/etcdctl/ctlv3/command/watch_command_test.go +++ /dev/null @@ -1,558 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package command - -import ( - "reflect" - "testing" -) - -func Test_parseWatchArgs(t *testing.T) { - tt := []struct { - osArgs []string // raw arguments to "watch" command - commandArgs []string // arguments after "spf13/cobra" preprocessing - envKey, envRange string - interactive bool - - interactiveWatchPrefix bool - interactiveWatchRev int64 - interactiveWatchPrevKey bool - - watchArgs []string - execArgs []string - err error - }{ - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar"}, - commandArgs: []string{"foo", "bar"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--"}, - commandArgs: []string{"foo", "bar"}, - interactive: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNumSeparator, - }, - { - osArgs: []string{"./bin/etcdctl", "watch"}, - commandArgs: nil, - envKey: "foo", - envRange: "bar", - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo"}, - commandArgs: []string{"foo"}, - envKey: "foo", - envRange: "", - interactive: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNumConflictEnv, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar"}, - commandArgs: []string{"foo", "bar"}, - envKey: "foo", - envRange: "", - interactive: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNumConflictEnv, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar"}, - commandArgs: 
[]string{"foo", "bar"}, - envKey: "foo", - envRange: "bar", - interactive: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNumConflictEnv, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo"}, - commandArgs: []string{"foo"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch"}, - commandArgs: nil, - envKey: "foo", - interactive: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo"}, - commandArgs: []string{"foo"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo"}, - commandArgs: []string{"foo"}, - envKey: "foo", - interactive: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNumConflictEnv, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1"}, - commandArgs: nil, - envKey: "foo", - interactive: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1"}, - commandArgs: []string{"foo"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "echo", "Hello", "World"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "--", "echo", "watch", "event", "received"}, - commandArgs: []string{"foo", "echo", "watch", "event", "received"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "watch", "event", "received"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "echo", "Hello", 
"World"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "--", "echo", "watch", "event", "received"}, - commandArgs: []string{"foo", "echo", "watch", "event", "received"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "watch", "event", "received"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo", "--", "echo", "watch", "event", "received"}, - commandArgs: []string{"foo", "echo", "watch", "event", "received"}, - interactive: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "watch", "event", "received"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "bar", "echo", "Hello", "World"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo", "bar", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "bar", "echo", "Hello", "World"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "bar", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "bar", "echo", "Hello", "World"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--rev", "1", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "bar", "echo", "Hello", "World"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", 
"--rev", "1", "--", "echo", "watch", "event", "received"}, - commandArgs: []string{"foo", "bar", "echo", "watch", "event", "received"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "watch", "event", "received"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "bar", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "bar", "echo", "Hello", "World"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo", "bar", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "bar", "echo", "Hello", "World"}, - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "--", "echo", "Hello", "World"}, - commandArgs: []string{"echo", "Hello", "World"}, - envKey: "foo", - envRange: "", - interactive: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "--", "echo", "Hello", "World"}, - commandArgs: []string{"echo", "Hello", "World"}, - envKey: "foo", - envRange: "bar", - interactive: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--rev", "1", "--", "echo", "Hello", "World"}, - commandArgs: []string{"foo", "bar", "echo", "Hello", "World"}, - envKey: "foo", - interactive: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNumConflictEnv, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"foo", "bar", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: 
false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsInteractiveWatch, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "bar"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch"}, - envKey: "foo", - envRange: "bar", - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch"}, - envKey: "hello world!", - envRange: "bar", - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: []string{"hello world!", "bar"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "--rev", "1"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "--rev", "1", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: 
[]string{"watch", "--rev", "1", "foo", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "5", "--prev-kv", "foo", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 5, - interactiveWatchPrevKey: true, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "1"}, - envKey: "foo", - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "1"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNum, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "1", "--prefix"}, - envKey: "foo", - interactive: true, - interactiveWatchPrefix: true, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "100", "--prefix", "--prev-kv"}, - envKey: "foo", - interactive: true, - interactiveWatchPrefix: true, - interactiveWatchRev: 100, - interactiveWatchPrevKey: true, - watchArgs: []string{"foo"}, - execArgs: nil, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "1", "--prefix"}, - interactive: true, - 
interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: nil, - execArgs: nil, - err: errBadArgsNum, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--", "echo", "Hello", "World"}, - envKey: "foo", - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--", "echo", "Hello", "World"}, - envKey: "foo", - envRange: "bar", - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "bar", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 0, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "1", "foo", "bar", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "--rev", "1", "--", "echo", "Hello", "World"}, - envKey: "foo", - envRange: "bar", - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: 
[]string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "--rev", "1", "bar", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "bar", "--rev", "1", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: false, - interactiveWatchRev: 1, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "bar", "--rev", "7", "--prefix", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: true, - interactiveWatchRev: 7, - interactiveWatchPrevKey: false, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - { - osArgs: []string{"./bin/etcdctl", "watch", "-i"}, - commandArgs: []string{"watch", "foo", "bar", "--rev", "7", "--prefix", "--prev-kv", "--", "echo", "Hello", "World"}, - interactive: true, - interactiveWatchPrefix: true, - interactiveWatchRev: 7, - interactiveWatchPrevKey: true, - watchArgs: []string{"foo", "bar"}, - execArgs: []string{"echo", "Hello", "World"}, - err: nil, - }, - } - for i, ts := range tt { - watchArgs, execArgs, err := parseWatchArgs(ts.osArgs, ts.commandArgs, ts.envKey, ts.envRange, ts.interactive) - if err != ts.err { - t.Fatalf("#%d: error expected %v, got %v", i, ts.err, err) - } - if !reflect.DeepEqual(watchArgs, ts.watchArgs) { - t.Fatalf("#%d: watchArgs expected %q, got %v", i, ts.watchArgs, watchArgs) - } - if !reflect.DeepEqual(execArgs, ts.execArgs) { - t.Fatalf("#%d: execArgs expected %q, got %v", i, ts.execArgs, execArgs) - } - if ts.interactive { - if 
ts.interactiveWatchPrefix != watchPrefix { - t.Fatalf("#%d: interactive watchPrefix expected %v, got %v", i, ts.interactiveWatchPrefix, watchPrefix) - } - if ts.interactiveWatchRev != watchRev { - t.Fatalf("#%d: interactive watchRev expected %d, got %d", i, ts.interactiveWatchRev, watchRev) - } - if ts.interactiveWatchPrevKey != watchPrevKey { - t.Fatalf("#%d: interactive watchPrevKey expected %v, got %v", i, ts.interactiveWatchPrevKey, watchPrevKey) - } - } - } -} diff --git a/etcdctl/ctlv3/ctl.go b/etcdctl/ctlv3/ctl.go index 6d3abfb3d94..a4761798d93 100644 --- a/etcdctl/ctlv3/ctl.go +++ b/etcdctl/ctlv3/ctl.go @@ -12,23 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package ctlv3 contains the main entry point for the etcdctl for v3 API. +// ctlv3 包含用于v3 API的etcdctl的主入口点. package ctlv3 import ( - "os" "time" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/etcdctl/v3/ctlv3/command" - "go.etcd.io/etcd/pkg/v3/cobrautl" + "github.com/ls-2018/etcd_cn/etcdctl/ctlv3/command" + "github.com/ls-2018/etcd_cn/offical/api/v3/version" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" ) const ( cliName = "etcdctl" - cliDescription = "A simple command line client for etcd3." + cliDescription = "etcd3的一个简单的命令行客户机." 
defaultDialTimeout = 2 * time.Second defaultCommandTimeOut = 5 * time.Second @@ -36,48 +35,41 @@ const ( defaultKeepAliveTimeOut = 6 * time.Second ) -var ( - globalFlags = command.GlobalFlags{} -) +var globalFlags = command.GlobalFlags{} -var ( - rootCmd = &cobra.Command{ - Use: cliName, - Short: cliDescription, - SuggestFor: []string{"etcdctl"}, - } -) +var rootCmd = &cobra.Command{ + Use: cliName, + Short: cliDescription, + SuggestFor: []string{"etcdctl"}, +} func init() { - rootCmd.PersistentFlags().StringSliceVar(&globalFlags.Endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC endpoints") - rootCmd.PersistentFlags().BoolVar(&globalFlags.Debug, "debug", false, "enable client-side debug logging") + rootCmd.PersistentFlags().StringSliceVar(&globalFlags.Endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC端点") + rootCmd.PersistentFlags().BoolVar(&globalFlags.Debug, "debug", false, "启用客户端调试日志记录") - rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)") - rootCmd.PersistentFlags().BoolVar(&globalFlags.IsHex, "hex", false, "print byte strings as hex encoded strings") - rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault - }) + rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, "write-out", "w", "simple", "设置输出格式 (fields, json, protobuf, simple, table)") + rootCmd.PersistentFlags().BoolVar(&globalFlags.IsHex, "hex", false, "以十六进制编码的字符串输出字节串") - rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections") - rootCmd.PersistentFlags().DurationVar(&globalFlags.CommandTimeOut, "command-timeout", defaultCommandTimeOut, "timeout for short running command (excluding dial timeout)") - 
rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTime, "keepalive-time", defaultKeepAliveTime, "keepalive time for client connections") - rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTimeout, "keepalive-timeout", defaultKeepAliveTimeOut, "keepalive timeout for client connections") + rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "拨号客户端连接超时") + rootCmd.PersistentFlags().DurationVar(&globalFlags.CommandTimeOut, "command-timeout", defaultCommandTimeOut, "运行命令的超时(不包括拨号超时).") + rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTime, "keepalive-time", defaultKeepAliveTime, "客户端连接的存活时间") + rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTimeout, "keepalive-timeout", defaultKeepAliveTimeOut, "客户端连接的Keepalive超时") // TODO: secure by default when etcd enables secure gRPC by default. - rootCmd.PersistentFlags().BoolVar(&globalFlags.Insecure, "insecure-transport", true, "disable transport security for client connections") - rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureDiscovery, "insecure-discovery", true, "accept insecure SRV records describing cluster endpoints") - rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureSkipVerify, "insecure-skip-tls-verify", false, "skip server certificate verification (CAUTION: this option should be enabled only for testing purposes)") - rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.CertFile, "cert", "", "identify secure client using this TLS certificate file") - rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.KeyFile, "key", "", "identify secure client using this TLS key file") - rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.TrustedCAFile, "cacert", "", "verify certificates of TLS-enabled secure servers using this CA bundle") - rootCmd.PersistentFlags().StringVar(&globalFlags.User, "user", "", "username[:password] for authentication (prompt if password is not supplied)") - 
rootCmd.PersistentFlags().StringVar(&globalFlags.Password, "password", "", "password for authentication (if this option is used, --user option shouldn't include password)") - rootCmd.PersistentFlags().StringVarP(&globalFlags.TLS.ServerName, "discovery-srv", "d", "", "domain name to query for SRV records describing cluster endpoints") - rootCmd.PersistentFlags().StringVarP(&globalFlags.DNSClusterServiceName, "discovery-srv-name", "", "", "service name to query when using DNS discovery") + rootCmd.PersistentFlags().BoolVar(&globalFlags.Insecure, "insecure-transport", true, "为客户端连接禁用传输安全性") + rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureDiscovery, "insecure-discovery", true, "接受描述集群端点的不安全的SRV记录") + rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureSkipVerify, "insecure-skip-tls-verify", false, "跳过 etcd 证书验证 (注意:该选项仅用于测试目的.)") + rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.CertFile, "cert", "", "识别使用该TLS证书文件的安全客户端") + rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.KeyFile, "key", "", "识别使用该TLS密钥文件的安全客户端") + rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.TrustedCAFile, "cacert", "", "使用此CA包验证启用tls的安全服务器的证书") + rootCmd.PersistentFlags().StringVar(&globalFlags.User, "user", "", "username[:password] (如果没有提供密码,则提示)") + rootCmd.PersistentFlags().StringVar(&globalFlags.Password, "password", "", "身份验证的密码(如果使用了这个选项,——user选项不应该包含密码)") + rootCmd.PersistentFlags().StringVarP(&globalFlags.TLS.ServerName, "discovery-srv", "d", "", "查询描述集群端点的SRV记录的域名") + rootCmd.PersistentFlags().StringVarP(&globalFlags.DNSClusterServiceName, "discovery-srv-name", "", "", "使用DNS发现时需要查询的服务名称") rootCmd.AddCommand( command.NewGetCommand(), - command.NewPutCommand(), + command.NewPutCommand(), // ✅ command.NewDelCommand(), command.NewTxnCommand(), command.NewCompactionCommand(), @@ -97,8 +89,6 @@ func init() { command.NewUserCommand(), command.NewRoleCommand(), command.NewCheckCommand(), - command.NewCompletionCommand(), - command.NewDowngradeCommand(), ) } @@ -115,11 
+105,7 @@ func Start() error { func MustStart() { if err := Start(); err != nil { - if rootCmd.SilenceErrors { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } else { - os.Exit(cobrautl.ExitError) - } + cobrautl.ExitWithError(cobrautl.ExitError, err) } } diff --git a/etcdctl/doc/mirror_maker.md b/etcdctl/doc/mirror_maker.md deleted file mode 100644 index 3da15dca88c..00000000000 --- a/etcdctl/doc/mirror_maker.md +++ /dev/null @@ -1,29 +0,0 @@ -## Mirror Maker - -Mirror maker mirrors a prefix in the key-value space of an etcd cluster into another prefix in another cluster. Mirroring is designed for copying configuration to various clusters distributed around the world. Mirroring usually has very low latency once it completes synchronizing with the initial state. Mirror maker utilizes the etcd watcher facility to immediately inform the mirror of any key modifications. Based on our experiments, the network latency between the mirror maker and the two clusters accounts for most of the latency. If the network is healthy, copying configuration held in etcd to the mirror should take under one second even for a world-wide deployment. - -If the mirror maker fails to connect to one of the clusters, the mirroring will pause. Mirroring can be resumed automatically once connectivity is reestablished. - -The mirroring mechanism is unidirectional. Changing the value on the mirrored cluster won't reflect the value back to the origin cluster. The mirror maker only mirrors key-value pairs; metadata, such as version number or modification revision, is discarded. However, mirror maker still attempts to preserve update ordering during normal operation, but there is no ordering guarantee during initial sync nor during failure recovery following network interruption. As a rule of thumb, the ordering of the updates on the mirror should not be considered reliable. 
- -``` -+-------------+ -| | -| source | +-----------+ -| cluster +----> | mirror | -| | | maker | -+-------------+ +---+-------+ - | - v - +-------------+ - | | - | mirror | - | cluster | - | | - +-------------+ - -``` - -Mirror-maker is a built-in feature of [etcdctl][etcdctl]. - -[etcdctl]: ../README.md diff --git a/etcdctl/go.mod b/etcdctl/go.mod deleted file mode 100644 index 8c4ab38a3dc..00000000000 --- a/etcdctl/go.mod +++ /dev/null @@ -1,56 +0,0 @@ -module go.etcd.io/etcd/etcdctl/v3 - -go 1.19 - -require ( - github.com/bgentry/speakeasy v0.1.0 - github.com/cheggaaa/pb/v3 v3.1.0 - github.com/dustin/go-humanize v1.0.1 - github.com/olekukonko/tablewriter v0.0.5 - github.com/spf13/cobra v1.6.1 - github.com/spf13/pflag v1.0.5 - go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0 - go.uber.org/zap v1.24.0 - golang.org/x/time v0.0.0-20220609170525-579cf78fd858 - google.golang.org/grpc v1.51.0 -) - -require ( - github.com/VividCortex/ewma v1.1.1 // indirect - github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mattn/go-runewidth v0.0.12 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/rivo/uniseg v0.2.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/protobuf v1.28.1 // indirect -) - -replace ( - go.etcd.io/etcd/api/v3 => 
../api - go.etcd.io/etcd/client/pkg/v3 => ../client/pkg - go.etcd.io/etcd/client/v3 => ../client/v3 - go.etcd.io/etcd/pkg/v3 => ../pkg -) - -// Bad imports are sometimes causing attempts to pull that code. -// This makes the error more explicit. -replace ( - go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY -) diff --git a/etcdctl/go.sum b/etcdctl/go.sum deleted file mode 100644 index 070aa8aea05..00000000000 --- a/etcdctl/go.sum +++ /dev/null @@ -1,224 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= -github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04= -github.com/cheggaaa/pb/v3 
v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= 
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= 
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/etcdctl/main.go b/etcdctl/main.go index 95b3416dea5..b04acef4fe8 100644 --- a/etcdctl/main.go +++ b/etcdctl/main.go @@ -12,24 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// etcdctl is a command line application that controls etcd. +// Etcdctl是一个控制etcd的命令行应用程序. package main import ( - "go.etcd.io/etcd/etcdctl/v3/ctlv3" + "github.com/ls-2018/etcd_cn/etcdctl/ctlv3" ) -/* -* -mainWithError is fully analogous to main, but instead of signaling errors -by os.Exit, it exposes the error explicitly, such that test-logic can intercept -control to e.g. dump coverage data (even for test-for-failure scenarios). -*/ -func mainWithError() error { - return ctlv3.Start() -} - func main() { ctlv3.MustStart() - return } diff --git a/etcdctl/main_test.go b/etcdctl/main_test.go deleted file mode 100644 index 8edbb0ae9b8..00000000000 --- a/etcdctl/main_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "log" - "os" - "strings" - "testing" -) - -func SplitTestArgs(args []string) (testArgs, appArgs []string) { - for i, arg := range args { - switch { - case strings.HasPrefix(arg, "-test."): - testArgs = append(testArgs, arg) - case i == 0: - appArgs = append(appArgs, arg) - testArgs = append(testArgs, arg) - default: - appArgs = append(appArgs, arg) - } - } - return -} - -// TestEmpty is an empty test to avoid no-tests warning. -func TestEmpty(t *testing.T) {} - -/** - * The purpose of this "test" is to run etcdctl with code-coverage - * collection turned on. The technique is documented here: - * - * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage - */ -func TestMain(m *testing.M) { - // don't launch etcdctl when invoked via go test - if strings.HasSuffix(os.Args[0], "etcdctl.test") { - return - } - - testArgs, appArgs := SplitTestArgs(os.Args) - - os.Args = appArgs - - err := mainWithError() - if err != nil { - log.Fatalf("etcdctl failed with: %v", err) - } - - // This will generate coverage files: - os.Args = testArgs - m.Run() -} diff --git a/etcdutl/LICENSE b/etcdutl/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/etcdutl/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/etcdutl/README.md b/etcdutl/README.md index 6943a047858..a5dac609fdf 100644 --- a/etcdutl/README.md +++ b/etcdutl/README.md @@ -1,29 +1,10 @@ etcdutl ======== -`etcdutl` is a command line administration utility for [etcd][etcd]. - -It's designed to operate directly on etcd data files. -For operations over a network, please use `etcdctl`. - +它被设计为直接对etcd数据文件进行操作.对于网络上的操作,请使用`etcdctl`. ### DEFRAG [options] - -DEFRAG directly defragments an etcd data directory while etcd is not running. -When an etcd member reclaims storage space from deleted and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the database, the etcd member releases this free space back to the file system. - -In order to defrag a live etcd instances over the network, please use `etcdctl defrag` instead. - -#### Options - -- data-dir -- Optional. If present, defragments a data directory not in use by etcd. - -#### Output - -Exit status '0' when the process was successful. - -#### Example - -To defragment a data directory directly, use the `--data-dir` flag: +defrag 在etcd不运行时直接对etcd数据目录进行碎片整理.当一个etcd成员从被删除和压缩的键中回收存储空间时 ,该空间被保留在空闲列表中,数据库文件的大小保持不变. 通过 +碎片整理数据库,etcd成员将这些空闲空间释放到文件系统中. 
``` bash # Defragment while etcd is not running @@ -32,151 +13,33 @@ To defragment a data directory directly, use the `--data-dir` flag: # Error: cannot open database at default.etcd/member/snap/db ``` -#### Remarks - -DEFRAG returns a zero exit code only if it succeeded in defragmenting all given endpoints. - - -### SNAPSHOT RESTORE [options] \ - -SNAPSHOT RESTORE creates an etcd data directory for an etcd cluster member from a backend database snapshot and a new cluster configuration. Restoring the snapshot into each member for a new cluster configuration will initialize a new etcd cluster preloaded by the snapshot data. - -#### Options - -The snapshot restore options closely resemble to those used in the `etcd` command for defining a cluster. - -- data-dir -- Path to the data directory. Uses \.etcd if none given. - -- wal-dir -- Path to the WAL directory. Uses data directory if none given. - -- initial-cluster -- The initial cluster configuration for the restored etcd cluster. - -- initial-cluster-token -- Initial cluster token for the restored etcd cluster. - -- initial-advertise-peer-urls -- List of peer URLs for the member being restored. - -- name -- Human-readable name for the etcd cluster member being restored. - -- skip-hash-check -- Ignore snapshot integrity hash value (required if copied from data directory) - -#### Output - -A new etcd data directory initialized with the snapshot. 
- #### Example Save a snapshot, restore into a new 3 node cluster, and start the cluster: + ``` -# save snapshot -./etcdctl snapshot save snapshot.db +./etcdutl snapshot save snapshot.db # restore members -./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' -./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' -./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' +bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' +bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' +bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' # launch members -./etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 & -./etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 
--advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 & -./etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 & +bin/etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 & +bin/etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 & +bin/etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 & ``` -### SNAPSHOT STATUS \ - -SNAPSHOT STATUS lists information about a given backend database snapshot file. - -#### Output - -##### Simple format -Prints a humanized table of the database hash, revision, total keys, and size. +snapshot restore ../default.etcd/member/snap/bolt.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' --data-dir=123 -##### JSON format - -Prints a line of JSON encoding the database hash, revision, total keys, and size. 
- -#### Examples -```bash -./etcdutl snapshot status file.db -# cf1550fb, 3, 3, 25 kB -``` ```bash -./etcdutl --write-out=json snapshot status file.db -# {"hash":3474280699,"revision":3,"totalKey":3,"totalSize":24576} -``` - -```bash -./etcdutl --write-out=table snapshot status file.db +myetcdctl snapshot status ../default.etcd/member/snap/bolt.db --write-out=table +----------+----------+------------+------------+ | HASH | REVISION | TOTAL KEYS | TOTAL SIZE | +----------+----------+------------+------------+ -| cf1550fb | 3 | 3 | 25 kB | +| d1ed6c2f | 0 | 6 | 25 kB | +----------+----------+------------+------------+ ``` - -### VERSION - -Prints the version of etcdutl. - -#### Output - -Prints etcd version and API version. - -#### Examples - - -```bash -./etcdutl version -# etcdutl version: 3.5.0 -# API version: 3.1 -``` - - -## Exit codes - -For all commands, a successful execution returns a zero exit code. All failures will return non-zero exit codes. - -## Output formats - -All commands accept an output format by setting `-w` or `--write-out`. All commands default to the "simple" output format, which is meant to be human-readable. The simple format is listed in each command's `Output` description since it is customized for each command. If a command has a corresponding RPC, it will respect all output formats. - -If a command fails, returning a non-zero exit code, an error string will be written to standard error regardless of output format. - -### Simple - -A format meant to be easy to parse and human-readable. Specific to each command. - -### JSON - -The JSON encoding of the command's [RPC response][etcdrpc]. Since etcd's RPCs use byte strings, the JSON output will encode keys and values in base64. - -Some commands without an RPC also support JSON; see the command's `Output` description. - -### Protobuf - -The protobuf encoding of the command's [RPC response][etcdrpc]. If an RPC is streaming, the stream messages will be concatenated. 
If an RPC is not given for a command, the protobuf output is not defined. - -### Fields - -An output format similar to JSON but meant to parse with coreutils. For an integer field named `Field`, it writes a line in the format `"Field" : %d` where `%d` is go's integer formatting. For byte array fields, it writes `"Field" : %q` where `%q` is go's quoted string formatting (e.g., `[]byte{'a', '\n'}` is written as `"a\n"`). - -## Compatibility Support - -etcdutl is still in its early stage. We try out best to ensure fully compatible releases, however we might break compatibility to fix bugs or improve commands. If we intend to release a version of etcdutl with backward incompatibilities, we will provide notice prior to release and have instructions on how to upgrade. - -### Input Compatibility - -Input includes the command name, its flags, and its arguments. We ensure backward compatibility of the input of normal commands in non-interactive mode. - -### Output Compatibility -Currently, we do not ensure backward compatibility of utility commands. - -### TODO: compatibility with etcd server - -[etcd]: https://github.com/coreos/etcd -[READMEv2]: READMEv2.md -[v2key]: ../store/node_extern.go#L28-L37 -[v3key]: ../api/mvccpb/kv.proto#L12-L29 -[etcdrpc]: ../api/etcdserverpb/rpc.proto -[storagerpc]: ../api/mvccpb/kv.proto diff --git a/etcdutl/ctl.go b/etcdutl/ctl.go deleted file mode 100644 index 5bafe3399e3..00000000000 --- a/etcdutl/ctl.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package etcdutl contains the main entry point for the etcdutl. -package main - -import ( - "github.com/spf13/cobra" - - "go.etcd.io/etcd/etcdutl/v3/etcdutl" -) - -const ( - cliName = "etcdutl" - cliDescription = "An administrative command line tool for etcd3." -) - -var ( - rootCmd = &cobra.Command{ - Use: cliName, - Short: cliDescription, - SuggestFor: []string{"etcdutl"}, - } -) - -func init() { - rootCmd.PersistentFlags().StringVarP(&etcdutl.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)") - rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { - return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault - }) - - rootCmd.AddCommand( - etcdutl.NewBackupCommand(), - etcdutl.NewDefragCommand(), - etcdutl.NewSnapshotCommand(), - etcdutl.NewVersionCommand(), - etcdutl.NewCompletionCommand(), - etcdutl.NewMigrateCommand(), - ) -} - -func Start() error { - // Make help just show the usage - rootCmd.SetHelpTemplate(`{{.UsageString}}`) - return rootCmd.Execute() -} - -func init() { - cobra.EnablePrefixMatching = true -} diff --git a/etcdutl/etcdutl/backup_command.go b/etcdutl/etcdutl/backup_command.go index 89121a37e95..d1cb0823867 100644 --- a/etcdutl/etcdutl/backup_command.go +++ b/etcdutl/etcdutl/backup_command.go @@ -20,26 +20,25 @@ import ( "regexp" "time" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/datadir" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + 
"github.com/ls-2018/etcd_cn/etcd/verify" + "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/idutil" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" "github.com/spf13/cobra" - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/idutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/datadir" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/etcd/server/v3/verify" - "go.etcd.io/raft/v3/raftpb" bolt "go.etcd.io/bbolt" + "go.uber.org/zap" ) var ( @@ -62,13 +61,9 @@ func NewBackupCommand() *cobra.Command { cmd.Flags().StringVar(&walDir, "wal-dir", "", "Path to the etcd wal dir") cmd.Flags().StringVar(&backupDir, "backup-dir", "", "Path to the backup dir") cmd.Flags().StringVar(&backupWalDir, "backup-wal-dir", "", "Path to the backup wal dir") - cmd.Flags().BoolVar(&withV3, "with-v3", true, "Backup v3 backend data. Note -with-v3=false is not supported since etcd v3.6. 
Please use v3.5.x client as the last supporting this deprecated functionality.") + cmd.Flags().BoolVar(&withV3, "with-v3", true, "Backup v3 backend data") cmd.MarkFlagRequired("data-dir") cmd.MarkFlagRequired("backup-dir") - cmd.MarkFlagDirname("data-dir") - cmd.MarkFlagDirname("wal-dir") - cmd.MarkFlagDirname("backup-dir") - cmd.MarkFlagDirname("backup-wal-dir") return cmd } @@ -99,7 +94,9 @@ func newDesiredCluster() desiredCluster { }, RaftAttributes: membership.RaftAttributes{ PeerURLs: []string{"http://use-flag--force-new-cluster:2080"}, - }}}, + }, + }, + }, confState: raftpb.ConfState{Voters: []uint64{nodeID}}, } } @@ -108,11 +105,6 @@ func newDesiredCluster() desiredCluster { func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, destWAL string) error { lg := GetLogger() - if !withV3 { - lg.Warn("-with-v3=false is not supported since etcd v3.6. Please use v3.5.x client as the last supporting this deprecated functionality.") - return nil - } - srcSnap := datadir.ToSnapDir(srcDir) destSnap := datadir.ToSnapDir(destDir) @@ -124,7 +116,7 @@ func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, des destWAL = datadir.ToWalDir(destDir) } - if err := fileutil.CreateDirAll(lg, destSnap); err != nil { + if err := fileutil.CreateDirAll(destSnap); err != nil { lg.Fatal("failed creating backup snapshot dir", zap.String("dest-snap", destSnap), zap.Error(err)) } @@ -133,8 +125,8 @@ func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, des desired := newDesiredCluster() walsnap := saveSnap(lg, destSnap, srcSnap, &desired) - metadata, state, ents := translateWAL(lg, srcWAL, walsnap) - saveDB(lg, destDbPath, srcDbPath, state.Commit, state.Term, &desired) + metadata, state, ents := translateWAL(lg, srcWAL, walsnap, withV3) + saveDB(lg, destDbPath, srcDbPath, state.Commit, state.Term, &desired, withV3) neww, err := wal.Create(lg, destWAL, pbutil.MustMarshal(&metadata)) if err != nil { @@ -195,7 +187,7 @@ func 
mustTranslateV2store(lg *zap.Logger, storeData []byte, desired *desiredClus return outputData } -func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot) (etcdserverpb.Metadata, raftpb.HardState, []raftpb.Entry) { +func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot, v3 bool) (etcdserverpb.Metadata, raftpb.HardState, []raftpb.Entry) { w, err := wal.OpenForRead(lg, srcWAL, walsnap) if err != nil { lg.Fatal("wal.OpenForRead failed", zap.Error(err)) @@ -223,7 +215,7 @@ func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot) (etcdse // TERM changes (so there are superflous entries from previous term). if ents[i].Type == raftpb.EntryConfChange { - lg.Info("ignoring EntryConfChange raft entry") + lg.Info("忽略 EntryConfChange 日志项") raftEntryToNoOp(&ents[i]) continue } @@ -238,20 +230,27 @@ func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot) (etcdse } if v2Req != nil && v2Req.Method == "PUT" && memberAttrRE.MatchString(v2Req.Path) { - lg.Info("ignoring member attribute update on", - zap.Stringer("entry", &ents[i]), - zap.String("v2Req.Path", v2Req.Path)) + lg.Info("忽略成员更新", zap.Stringer("entry", &ents[i]), zap.String("v2Req.Path", v2Req.Path)) raftEntryToNoOp(&ents[i]) continue } + if v2Req != nil { + lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i])) + } + if raftReq.ClusterMemberAttrSet != nil { lg.Info("ignoring cluster_member_attr_set") raftEntryToNoOp(&ents[i]) continue } - lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i])) + if v3 || raftReq.Header == nil { + lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i])) + continue + } + lg.Info("ignoring v3 raft entry") + raftEntryToNoOp(&ents[i]) } var metadata etcdserverpb.Metadata pbutil.MustUnmarshal(&metadata, wmetadata) @@ -266,52 +265,65 @@ func raftEntryToNoOp(entry *raftpb.Entry) { } // saveDB copies the v3 backend and strips cluster information. 
-func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desired *desiredCluster) { +func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desired *desiredCluster, v3 bool) { // open src db to safely copy db state - var src *bolt.DB - ch := make(chan *bolt.DB, 1) - go func() { - db, err := bolt.Open(srcDB, 0444, &bolt.Options{ReadOnly: true}) - if err != nil { - lg.Fatal("bolt.Open FAILED", zap.Error(err)) + if v3 { + var src *bolt.DB + ch := make(chan *bolt.DB, 1) + go func() { + db, err := bolt.Open(srcDB, 0o444, &bolt.Options{ReadOnly: true}) + if err != nil { + lg.Fatal("bolt.Open FAILED", zap.Error(err)) + } + ch <- db + }() + select { + case src = <-ch: + case <-time.After(time.Second): + lg.Fatal("timed out waiting to acquire lock on", zap.String("srcDB", srcDB)) + src = <-ch } - ch <- db - }() - select { - case src = <-ch: - case <-time.After(time.Second): - lg.Fatal("timed out waiting to acquire lock on", zap.String("srcDB", srcDB)) - } - defer src.Close() + defer src.Close() - tx, err := src.Begin(false) - if err != nil { - lg.Fatal("bbolt.BeginTx failed", zap.Error(err)) - } + tx, err := src.Begin(false) + if err != nil { + lg.Fatal("bbolt.BeginTx failed", zap.Error(err)) + } - // copy srcDB to destDB - dest, err := os.Create(destDB) - if err != nil { - lg.Fatal("creation of destination file failed", zap.String("dest", destDB), zap.Error(err)) - } - if _, err := tx.WriteTo(dest); err != nil { - lg.Fatal("bbolt write to destination file failed", zap.String("dest", destDB), zap.Error(err)) - } - dest.Close() - if err := tx.Rollback(); err != nil { - lg.Fatal("bbolt tx.Rollback failed", zap.String("dest", destDB), zap.Error(err)) + // copy srcDB to destDB + dest, err := os.Create(destDB) + if err != nil { + lg.Fatal("creation of destination file failed", zap.String("dest", destDB), zap.Error(err)) + } + if _, err := tx.WriteTo(dest); err != nil { + lg.Fatal("bbolt write to destination file failed", zap.String("dest", 
destDB), zap.Error(err)) + } + dest.Close() + if err := tx.Rollback(); err != nil { + lg.Fatal("bbolt tx.Rollback failed", zap.String("dest", destDB), zap.Error(err)) + } } - // trim membership info - be := backend.NewDefaultBackend(lg, destDB) + be := backend.NewDefaultBackend(destDB) defer be.Close() - ms := schema.NewMembershipBackend(lg, be) - if err := ms.TrimClusterFromBackend(); err != nil { + + if err := membership.TrimClusterFromBackend(be); err != nil { lg.Fatal("bbolt tx.Membership failed", zap.Error(err)) } raftCluster := membership.NewClusterFromMembers(lg, desired.clusterId, desired.members) raftCluster.SetID(desired.nodeId, desired.clusterId) - raftCluster.SetBackend(ms) + raftCluster.SetBackend(be) raftCluster.PushMembershipToStorage() + + if !v3 { + tx := be.BatchTx() + tx.Lock() + defer tx.Unlock() + cindex.UnsafeCreateMetaBucket(tx) + cindex.UnsafeUpdateConsistentIndex(tx, idx, term, false) + } else { + // Thanks to translateWAL not moving entries, but just replacing them with + // 'empty', there is no need to update the consistency index. 
+ } } diff --git a/etcdutl/etcdutl/common.go b/etcdutl/etcdutl/common.go index d54827d0457..305bf20a23c 100644 --- a/etcdutl/etcdutl/common.go +++ b/etcdutl/etcdutl/common.go @@ -15,15 +15,13 @@ package etcdutl import ( + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "go.uber.org/zap" "go.uber.org/zap/zapcore" - - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/pkg/v3/cobrautl" ) func GetLogger() *zap.Logger { - config := logutil.DefaultZapLoggerConfig + config := zap.NewProductionConfig() config.Encoding = "console" config.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder lg, err := config.Build() diff --git a/etcdutl/etcdutl/completion_commmand.go b/etcdutl/etcdutl/completion_commmand.go deleted file mode 100644 index 792799b15b0..00000000000 --- a/etcdutl/etcdutl/completion_commmand.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdutl - -import ( - "os" - - "github.com/spf13/cobra" -) - -func NewCompletionCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: `To load completions: - -Bash: - - $ source <(etcdutl completion bash) - - # To load completions for each session, execute once: - # Linux: - $ etcdutl completion bash > /etc/bash_completion.d/etcdutl - # macOS: - $ etcdutl completion bash > /usr/local/etc/bash_completion.d/etcdutl - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ etcdutl completion zsh > "${fpath[1]}/_etcdutl" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ etcdutl completion fish | source - - # To load completions for each session, execute once: - $ etcdutl completion fish > ~/.config/fish/completions/etcdutl.fish - -PowerShell: - - PS> etcdutl completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> etcdutl completion powershell > etcdutl.ps1 - # and source this file from your PowerShell profile. 
-`, - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, - } - - return cmd -} diff --git a/etcdutl/etcdutl/defrag_command.go b/etcdutl/etcdutl/defrag_command.go index d8077ae8bfb..d3387ffe0c5 100644 --- a/etcdutl/etcdutl/defrag_command.go +++ b/etcdutl/etcdutl/defrag_command.go @@ -19,35 +19,29 @@ import ( "os" "time" + "github.com/ls-2018/etcd_cn/etcd/datadir" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" - - "go.etcd.io/etcd/pkg/v3/cobrautl" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/datadir" ) -var ( - defragDataDir string -) +var defragDataDir string -// NewDefragCommand returns the cobra command for "Defrag". func NewDefragCommand() *cobra.Command { cmd := &cobra.Command{ Use: "defrag", - Short: "Defragments the storage of the etcd", + Short: "清理etcd内存碎片", Run: defragCommandFunc, } - cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Required. 
Defragments a data directory not in use by etcd.") + cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "") cmd.MarkFlagRequired("data-dir") - cmd.MarkFlagDirname("data-dir") return cmd } func defragCommandFunc(cmd *cobra.Command, args []string) { err := DefragData(defragDataDir) if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, - fmt.Errorf("Failed to defragment etcd data[%s] (%v)", defragDataDir, err)) + cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("对etcd数据进行碎片整理失败[%s] (%v)", defragDataDir, err)) } } @@ -58,7 +52,7 @@ func DefragData(dataDir string) error { dbDir := datadir.ToBackendFileName(dataDir) go func() { defer close(bch) - cfg := backend.DefaultBackendConfig(lg) + cfg := backend.DefaultBackendConfig() cfg.Logger = lg cfg.Path = dbDir be = backend.New(cfg) @@ -66,8 +60,7 @@ func DefragData(dataDir string) error { select { case <-bch: case <-time.After(time.Second): - fmt.Fprintf(os.Stderr, "waiting for etcd to close and release its lock on %q. "+ - "To defrag a running etcd instance, use `etcdctl defrag` instead.\n", dbDir) + fmt.Fprintf(os.Stderr, "等待etcd关闭并释放其对%q的锁定.要对正在运行的etcd实例进行碎片整理请省略-data-dir. \n", dbDir) <-bch } return be.Defrag() diff --git a/etcdutl/etcdutl/migrate_command.go b/etcdutl/etcdutl/migrate_command.go deleted file mode 100644 index 521cf8ba80c..00000000000 --- a/etcdutl/etcdutl/migrate_command.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdutl - -import ( - "fmt" - "strings" - - "github.com/coreos/go-semver/semver" - "github.com/spf13/cobra" - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/pkg/v3/cobrautl" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/datadir" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" -) - -// NewMigrateCommand prints out the version of etcd. -func NewMigrateCommand() *cobra.Command { - o := newMigrateOptions() - cmd := &cobra.Command{ - Use: "migrate", - Short: "Migrates schema of etcd data dir files to make them compatible with different etcd version", - Run: func(cmd *cobra.Command, args []string) { - cfg, err := o.Config() - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) - } - err = migrateCommandFunc(cfg) - if err != nil { - cobrautl.ExitWithError(cobrautl.ExitError, err) - } - }, - } - o.AddFlags(cmd) - return cmd -} - -type migrateOptions struct { - dataDir string - targetVersion string - force bool -} - -func newMigrateOptions() *migrateOptions { - return &migrateOptions{} -} - -func (o *migrateOptions) AddFlags(cmd *cobra.Command) { - cmd.Flags().StringVar(&o.dataDir, "data-dir", o.dataDir, "Path to the etcd data dir") - cmd.MarkFlagRequired("data-dir") - cmd.MarkFlagDirname("data-dir") - - cmd.Flags().StringVar(&o.targetVersion, "target-version", o.targetVersion, `Target etcd version to migrate contents of data dir. Minimal value 3.5. Format "X.Y" for example 3.6.`) - cmd.MarkFlagRequired("target-version") - - cmd.Flags().BoolVar(&o.force, "force", o.force, "Ignore migration failure and forcefully override storage version. 
Not recommended.") -} - -func (o *migrateOptions) Config() (*migrateConfig, error) { - c := &migrateConfig{ - force: o.force, - lg: GetLogger(), - } - var err error - dotCount := strings.Count(o.targetVersion, ".") - if dotCount != 1 { - return nil, fmt.Errorf(`wrong target version format, expected "X.Y", got %q`, o.targetVersion) - } - c.targetVersion, err = semver.NewVersion(o.targetVersion + ".0") - if err != nil { - return nil, fmt.Errorf("failed to parse target version: %v", err) - } - if c.targetVersion.LessThan(version.V3_5) { - return nil, fmt.Errorf(`target version %q not supported. Minimal "3.5"`, storageVersionToString(c.targetVersion)) - } - - dbPath := datadir.ToBackendFileName(o.dataDir) - c.be = backend.NewDefaultBackend(GetLogger(), dbPath) - - walPath := datadir.ToWalDir(o.dataDir) - w, err := wal.OpenForRead(c.lg, walPath, walpb.Snapshot{}) - if err != nil { - return nil, fmt.Errorf(`failed to open wal: %v`, err) - } - defer w.Close() - c.walVersion, err = wal.ReadWALVersion(w) - if err != nil { - return nil, fmt.Errorf(`failed to read wal: %v`, err) - } - - return c, nil -} - -type migrateConfig struct { - lg *zap.Logger - be backend.Backend - targetVersion *semver.Version - walVersion schema.WALVersion - force bool -} - -func migrateCommandFunc(c *migrateConfig) error { - defer c.be.Close() - tx := c.be.BatchTx() - current, err := schema.DetectSchemaVersion(c.lg, c.be.ReadTx()) - if err != nil { - c.lg.Error("failed to detect storage version. 
Please make sure you are using data dir from etcd v3.5 and older") - return err - } - if current == *c.targetVersion { - c.lg.Info("storage version up-to-date", zap.String("storage-version", storageVersionToString(¤t))) - return nil - } - err = schema.Migrate(c.lg, tx, c.walVersion, *c.targetVersion) - if err != nil { - if !c.force { - return err - } - c.lg.Info("normal migrate failed, trying with force", zap.Error(err)) - migrateForce(c.lg, tx, c.targetVersion) - } - c.be.ForceCommit() - return nil -} - -func migrateForce(lg *zap.Logger, tx backend.BatchTx, target *semver.Version) { - tx.LockOutsideApply() - defer tx.Unlock() - // Storage version is only supported since v3.6 - if target.LessThan(version.V3_6) { - schema.UnsafeClearStorageVersion(tx) - lg.Warn("forcefully cleared storage version") - } else { - schema.UnsafeSetStorageVersion(tx, target) - lg.Warn("forcefully set storage version", zap.String("storage-version", storageVersionToString(target))) - } -} - -func storageVersionToString(ver *semver.Version) string { - return fmt.Sprintf("%d.%d", ver.Major, ver.Minor) -} diff --git a/etcdutl/etcdutl/printer.go b/etcdutl/etcdutl/printer.go index 7d65366065f..72d1170508f 100644 --- a/etcdutl/etcdutl/printer.go +++ b/etcdutl/etcdutl/printer.go @@ -18,17 +18,14 @@ import ( "errors" "fmt" + "github.com/ls-2018/etcd_cn/etcdutl/snapshot" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" - "go.etcd.io/etcd/etcdutl/v3/snapshot" - "go.etcd.io/etcd/pkg/v3/cobrautl" - "github.com/dustin/go-humanize" ) -var ( - OutputFormat string -) +var OutputFormat string type printer interface { DBStatus(snapshot.Status) @@ -67,13 +64,12 @@ func newPrinterUnsupported(n string) printer { func (p *printerUnsupported) DBStatus(snapshot.Status) { p.p(nil) } func makeDBStatusTable(ds snapshot.Status) (hdr []string, rows [][]string) { - hdr = []string{"hash", "revision", "total keys", "total size", "version"} + hdr = []string{"hash", "revision", "total keys", "total 
size"} rows = append(rows, []string{ fmt.Sprintf("%x", ds.Hash), fmt.Sprint(ds.Revision), fmt.Sprint(ds.TotalKey), humanize.Bytes(uint64(ds.TotalSize)), - ds.Version, }) return hdr, rows } diff --git a/etcdutl/etcdutl/printer_fields.go b/etcdutl/etcdutl/printer_fields.go index d534e396ffe..cddbf92869e 100644 --- a/etcdutl/etcdutl/printer_fields.go +++ b/etcdutl/etcdutl/printer_fields.go @@ -17,7 +17,7 @@ package etcdutl import ( "fmt" - "go.etcd.io/etcd/etcdutl/v3/snapshot" + "github.com/ls-2018/etcd_cn/etcdutl/snapshot" ) type fieldsPrinter struct{ printer } @@ -27,5 +27,4 @@ func (p *fieldsPrinter) DBStatus(r snapshot.Status) { fmt.Println(`"Revision" :`, r.Revision) fmt.Println(`"Keys" :`, r.TotalKey) fmt.Println(`"Size" :`, r.TotalSize) - fmt.Println(`"Version" :`, r.Version) } diff --git a/etcdutl/etcdutl/printer_json.go b/etcdutl/etcdutl/printer_json.go index 38fe3e4548e..92b106f42d5 100644 --- a/etcdutl/etcdutl/printer_json.go +++ b/etcdutl/etcdutl/printer_json.go @@ -19,7 +19,7 @@ import ( "fmt" "os" - "go.etcd.io/etcd/etcdutl/v3/snapshot" + "github.com/ls-2018/etcd_cn/etcdutl/snapshot" ) type jsonPrinter struct { diff --git a/etcdutl/etcdutl/printer_protobuf.go b/etcdutl/etcdutl/printer_protobuf.go index 0a9003b475d..469a01c64ce 100644 --- a/etcdutl/etcdutl/printer_protobuf.go +++ b/etcdutl/etcdutl/printer_protobuf.go @@ -18,7 +18,7 @@ import ( "fmt" "os" - "go.etcd.io/etcd/pkg/v3/cobrautl" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" ) type pbPrinter struct{ printer } diff --git a/etcdutl/etcdutl/printer_simple.go b/etcdutl/etcdutl/printer_simple.go index 306ebf0c7f3..a87e8c9a084 100644 --- a/etcdutl/etcdutl/printer_simple.go +++ b/etcdutl/etcdutl/printer_simple.go @@ -18,11 +18,10 @@ import ( "fmt" "strings" - "go.etcd.io/etcd/etcdutl/v3/snapshot" + "github.com/ls-2018/etcd_cn/etcdutl/snapshot" ) -type simplePrinter struct { -} +type simplePrinter struct{} func (s *simplePrinter) DBStatus(ds snapshot.Status) { _, rows := makeDBStatusTable(ds) diff --git 
a/etcdutl/etcdutl/printer_table.go b/etcdutl/etcdutl/printer_table.go index 2f8f81d4e6a..02dccaac715 100644 --- a/etcdutl/etcdutl/printer_table.go +++ b/etcdutl/etcdutl/printer_table.go @@ -17,7 +17,7 @@ package etcdutl import ( "os" - "go.etcd.io/etcd/etcdutl/v3/snapshot" + "github.com/ls-2018/etcd_cn/etcdutl/snapshot" "github.com/olekukonko/tablewriter" ) diff --git a/etcdutl/etcdutl/snapshot_command.go b/etcdutl/etcdutl/snapshot_command.go index 28df31f8dd0..425719e89f9 100644 --- a/etcdutl/etcdutl/snapshot_command.go +++ b/etcdutl/etcdutl/snapshot_command.go @@ -18,9 +18,9 @@ import ( "fmt" "strings" - "go.etcd.io/etcd/etcdutl/v3/snapshot" - "go.etcd.io/etcd/pkg/v3/cobrautl" - "go.etcd.io/etcd/server/v3/storage/datadir" + "github.com/ls-2018/etcd_cn/etcd/datadir" + "github.com/ls-2018/etcd_cn/etcdutl/snapshot" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" "github.com/spf13/cobra" ) @@ -46,26 +46,24 @@ func NewSnapshotCommand() *cobra.Command { Use: "snapshot ", Short: "Manages etcd node snapshots", } - cmd.AddCommand(NewSnapshotRestoreCommand()) - cmd.AddCommand(newSnapshotStatusCommand()) + cmd.AddCommand(NewSnapshotRestoreCommand()) // restore + cmd.AddCommand(newSnapshotStatusCommand()) // status return cmd } func newSnapshotStatusCommand() *cobra.Command { return &cobra.Command{ Use: "status ", - Short: "Gets backend snapshot status of a given file", - Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint. -The items in the lists are hash, revision, total keys, total size. 
-`, - Run: SnapshotStatusCommandFunc, + Short: "从给定的文件获取快照状态", + Long: ``, + Run: SnapshotStatusCommandFunc, } } func NewSnapshotRestoreCommand() *cobra.Command { cmd := &cobra.Command{ Use: "restore --data-dir {output dir} [options]", - Short: "Restores an etcd member snapshot to an etcd directory", + Short: "将etcd成员快照恢复到etcd目录", Run: snapshotRestoreCommandFunc, } cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "Path to the output data directory") @@ -76,8 +74,7 @@ func NewSnapshotRestoreCommand() *cobra.Command { cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member") cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)") - cmd.MarkFlagDirname("data-dir") - cmd.MarkFlagDirname("wal-dir") + cmd.MarkFlagRequired("data-dir") return cmd } @@ -99,8 +96,7 @@ func SnapshotStatusCommandFunc(cmd *cobra.Command, args []string) { } func snapshotRestoreCommandFunc(_ *cobra.Command, args []string) { - SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir, - restorePeerURLs, restoreName, skipHashCheck, args) + SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir, restorePeerURLs, restoreName, skipHashCheck, args) } func SnapshotRestoreCommandFunc(restoreCluster string, @@ -110,7 +106,8 @@ func SnapshotRestoreCommandFunc(restoreCluster string, restorePeerURLs string, restoreName string, skipHashCheck bool, - args []string) { + args []string, +) { if len(args) != 1 { err := fmt.Errorf("snapshot restore requires exactly one argument") cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) diff --git a/etcdutl/etcdutl/version_command.go b/etcdutl/etcdutl/version_command.go deleted file mode 100644 index 1cb1a146b4b..00000000000 --- a/etcdutl/etcdutl/version_command.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache 
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdutl - -import ( - "fmt" - - "go.etcd.io/etcd/api/v3/version" - - "github.com/spf13/cobra" -) - -// NewVersionCommand prints out the version of etcd. -func NewVersionCommand() *cobra.Command { - return &cobra.Command{ - Use: "version", - Short: "Prints the version of etcdutl", - Run: versionCommandFunc, - } -} - -func versionCommandFunc(cmd *cobra.Command, args []string) { - fmt.Println("etcdutl version:", version.Version) - fmt.Println("API version:", version.APIVersion) -} diff --git a/etcdutl/go.mod b/etcdutl/go.mod deleted file mode 100644 index 02254541fe9..00000000000 --- a/etcdutl/go.mod +++ /dev/null @@ -1,75 +0,0 @@ -module go.etcd.io/etcd/etcdutl/v3 - -go 1.19 - -replace ( - go.etcd.io/etcd/api/v3 => ../api - go.etcd.io/etcd/client/pkg/v3 => ../client/pkg - go.etcd.io/etcd/client/v2 => ../client/v2 - go.etcd.io/etcd/client/v3 => ../client/v3 - go.etcd.io/etcd/pkg/v3 => ../pkg - go.etcd.io/etcd/server/v3 => ../server -) - -// Bad imports are sometimes causing attempts to pull that code. -// This makes the error more explicit. 
-replace ( - go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY -) - -require ( - github.com/coreos/go-semver v0.3.1 - github.com/dustin/go-humanize v1.0.1 - github.com/olekukonko/tablewriter v0.0.5 - github.com/spf13/cobra v1.6.1 - go.etcd.io/bbolt v1.3.7 - go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/server/v3 v3.6.0-alpha.0 - go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a - go.uber.org/zap v1.24.0 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.4.3 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/jonboulle/clockwork v0.3.0 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect - go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 // indirect - go.opentelemetry.io/otel v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.opentelemetry.io/otel/trace v1.11.2 // indirect - go.uber.org/atomic v1.7.0 // indirect - 
go.uber.org/goleak v1.1.12 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect - golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/grpc v1.51.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect -) diff --git a/etcdutl/go.sum b/etcdutl/go.sum deleted file mode 100644 index 0ffc93d9e20..00000000000 --- a/etcdutl/go.sum +++ /dev/null @@ -1,599 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0 
h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr 
v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= -github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/raft/v3 
v3.0.0-20221201111702-eaa6808e1f7a h1:Znv2XJyAf/fsJsFNt9toO8uyXwwHQ44wxqsvdSxipj4= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a/go.mod h1:eMshmuwXLWZrjHXN8ZgYrOMQRSbHqi5M84DEZWhG+o4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap 
v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= 
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod 
h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api 
v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto 
v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc 
v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= 
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= diff --git a/etcdutl/main.go b/etcdutl/main.go index bff0b1d869b..3f94e441b4b 100644 --- a/etcdutl/main.go +++ b/etcdutl/main.go @@ -16,7 +16,7 @@ package main import ( - "go.etcd.io/etcd/pkg/v3/cobrautl" + "github.com/ls-2018/etcd_cn/pkg/cobrautl" ) func main() { @@ -24,3 +24,7 @@ func main() { cobrautl.ExitWithError(cobrautl.ExitError, err) } } + +// snapshot status ../default.etcd/member/snap/bolt.db --write-out=table +// snapshot restore ./123 +// defrag --data-dir ../default.etcd diff --git a/etcdutl/main_test.go b/etcdutl/main_test.go deleted file mode 100644 index 1fe58afb837..00000000000 --- a/etcdutl/main_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "log" - "os" - "strings" - "testing" -) - -func SplitTestArgs(args []string) (testArgs, appArgs []string) { - for i, arg := range args { - switch { - case strings.HasPrefix(arg, "-test."): - testArgs = append(testArgs, arg) - case i == 0: - appArgs = append(appArgs, arg) - testArgs = append(testArgs, arg) - default: - appArgs = append(appArgs, arg) - } - } - return -} - -// TestEmpty is empty test to avoid no-tests warning. -func TestEmpty(t *testing.T) {} - -/** - * The purpose of this "test" is to run etcdctl with code-coverage - * collection turned on. The technique is documented here: - * - * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage - */ -func TestMain(m *testing.M) { - // don't launch etcdutl when invoked via go test - if strings.HasSuffix(os.Args[0], "etcdutl.test") { - return - } - - testArgs, appArgs := SplitTestArgs(os.Args) - - os.Args = appArgs - - err := Start() - if err != nil { - log.Fatalf("etcdctl failed with: %v", err) - } - - // This will generate coverage files: - os.Args = testArgs - m.Run() -} diff --git a/etcdutl/over_ctl.go b/etcdutl/over_ctl.go new file mode 100644 index 00000000000..8463b8fa3dc --- /dev/null +++ b/etcdutl/over_ctl.go @@ -0,0 +1,51 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package etcdutl contains the main entry point for the etcdutl. +package main + +import ( + "github.com/ls-2018/etcd_cn/etcdutl/etcdutl" + "github.com/spf13/cobra" +) + +const ( + cliName = "etcdutl" + cliDescription = "An administrative command line tool for etcd3." +) + +var rootCmd = &cobra.Command{ + Use: cliName, + Short: cliDescription, + SuggestFor: []string{"etcdutl"}, +} + +func init() { + rootCmd.PersistentFlags().StringVarP(&etcdutl.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)") + + rootCmd.AddCommand( + etcdutl.NewBackupCommand(), // 备份 + etcdutl.NewDefragCommand(), // 清理内存碎片 + etcdutl.NewSnapshotCommand(), // 快照 + ) +} + +func Start() error { + rootCmd.SetHelpTemplate(`{{.UsageString}}`) + return rootCmd.Execute() +} + +func init() { + cobra.EnablePrefixMatching = true +} diff --git a/etcdutl/snapshot/doc.go b/etcdutl/snapshot/doc.go deleted file mode 100644 index 1c761be70d1..00000000000 --- a/etcdutl/snapshot/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package snapshot implements utilities around etcd snapshot. -package snapshot diff --git a/etcdutl/snapshot/over_v3_snapshot.go b/etcdutl/snapshot/over_v3_snapshot.go new file mode 100644 index 00000000000..bebcb4c259b --- /dev/null +++ b/etcdutl/snapshot/over_v3_snapshot.go @@ -0,0 +1,460 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package snapshot + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "reflect" + "strings" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/config" + "github.com/ls-2018/etcd_cn/etcd/etcdserver" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/etcd/verify" + "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/raft/raftpb" + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +type Manager interface { + Status(dbPath string) (Status, error) // 快照信息 + Restore(cfg RestoreConfig) error +} + +// NewV3 v3版本的快照管理 +func NewV3(lg *zap.Logger) Manager { + if lg == nil { + lg = zap.NewExample() + } + return &v3Manager{lg: lg} +} + +type v3Manager struct { + lg *zap.Logger + + name string + srcDbPath string + walDir string + snapDir string + cl *membership.RaftCluster + + skipHashCheck bool +} + +func hasChecksum(n int64) bool { + return (n % 512) == sha256.Size +} + +// RestoreConfig configures snapshot restore operation. +type RestoreConfig struct { + // SnapshotPath is the path of snapshot file to restore from. + SnapshotPath string + + // Name is the human-readable name of this member. + Name string + + // OutputDataDir is the target data directory to save restored data. + // OutputDataDir should not conflict with existing etcd data directory. + // If OutputDataDir already exists, it will return an error to prevent + // unintended data directory overwrites. + // If empty, defaults to "[Name].etcd" if not given. 
+ OutputDataDir string + // OutputWALDir is the target WAL data directory. + // If empty, defaults to "[OutputDataDir]/member/wal" if not given. + OutputWALDir string + + // PeerURLs is a list of member's peer URLs to advertise to the rest of the cluster. + PeerURLs []string + + // InitialCluster is the initial cluster configuration for restore bootstrap. + InitialCluster string + // InitialClusterToken is the initial cluster token for etcd cluster during restore bootstrap. + InitialClusterToken string + + // SkipHashCheck is "true" to ignore snapshot integrity hash value + // (required if copied from data directory). + SkipHashCheck bool +} + +// Restore restores a new etcd data directory from given snapshot file. +func (s *v3Manager) Restore(cfg RestoreConfig) error { + pURLs, err := types.NewURLs(cfg.PeerURLs) + if err != nil { + return err + } + var ics types.URLsMap + ics, err = types.NewURLsMap(cfg.InitialCluster) + if err != nil { + return err + } + + srv := config.ServerConfig{ + Logger: s.lg, + Name: cfg.Name, + PeerURLs: pURLs, + InitialPeerURLsMap: ics, + InitialClusterToken: cfg.InitialClusterToken, + } + if err = srv.VerifyBootstrap(); err != nil { + return err + } + + s.cl, err = membership.NewClusterFromURLsMap(s.lg, cfg.InitialClusterToken, ics) + if err != nil { + return err + } + + dataDir := cfg.OutputDataDir + if dataDir == "" { + dataDir = cfg.Name + ".etcd" + } + if fileutil.Exist(dataDir) && !fileutil.DirEmpty(dataDir) { + return fmt.Errorf("data-dir %q not empty or could not be read", dataDir) + } + + walDir := cfg.OutputWALDir + if walDir == "" { + walDir = filepath.Join(dataDir, "member", "wal") + } else if fileutil.Exist(walDir) { + return fmt.Errorf("wal-dir %q exists", walDir) + } + + s.name = cfg.Name + s.srcDbPath = cfg.SnapshotPath + s.walDir = walDir + s.snapDir = filepath.Join(dataDir, "member", "snap") + s.skipHashCheck = cfg.SkipHashCheck + + s.lg.Info( + "restoring snapshot", + zap.String("path", s.srcDbPath), + 
zap.String("wal-dir", s.walDir), + zap.String("data-dir", dataDir), + zap.String("snap-dir", s.snapDir), + zap.Stack("stack"), + ) + + if err = s.saveDB(); err != nil { + return err + } + hardstate, err := s.saveWALAndSnap() + if err != nil { + return err + } + + if err := s.updateCIndex(hardstate.Commit, hardstate.Term); err != nil { + return err + } + + s.lg.Info( + "restored snapshot", + zap.String("path", s.srcDbPath), + zap.String("wal-dir", s.walDir), + zap.String("data-dir", dataDir), + zap.String("snap-dir", s.snapDir), + ) + + return verify.VerifyIfEnabled(verify.Config{ + ExactIndex: true, + Logger: s.lg, + DataDir: dataDir, + }) +} + +func (s *v3Manager) outDbPath() string { + return filepath.Join(s.snapDir, "db") +} + +// saveDB 将数据库快照复制到快照目录中. +func (s *v3Manager) saveDB() error { + err := s.copyAndVerifyDB() + if err != nil { + return err + } + + be := backend.NewDefaultBackend(s.outDbPath()) + defer be.Close() + + err = membership.TrimMembershipFromBackend(s.lg, be) + if err != nil { + return err + } + + return nil +} + +func (s *v3Manager) copyAndVerifyDB() error { + srcf, ferr := os.Open(s.srcDbPath) + if ferr != nil { + return ferr + } + defer srcf.Close() + + // 获取快照完整性哈希值 + if _, err := srcf.Seek(-sha256.Size, io.SeekEnd); err != nil { + return err + } + sha := make([]byte, sha256.Size) + if _, err := srcf.Read(sha); err != nil { + return err + } + if _, err := srcf.Seek(0, io.SeekStart); err != nil { + return err + } + + if err := fileutil.CreateDirAll(s.snapDir); err != nil { + return err + } + + outDbPath := s.outDbPath() + + db, dberr := os.OpenFile(outDbPath, os.O_RDWR|os.O_CREATE, 0o600) + if dberr != nil { + return dberr + } + dbClosed := false + defer func() { + if !dbClosed { + db.Close() + dbClosed = true + } + }() + if _, err := io.Copy(db, srcf); err != nil { + return err + } + + // truncate away integrity hash, if any. 
+ off, serr := db.Seek(0, io.SeekEnd) + if serr != nil { + return serr + } + hasHash := hasChecksum(off) + if hasHash { + if err := db.Truncate(off - sha256.Size); err != nil { + return err + } + } + + if !hasHash && !s.skipHashCheck { + return fmt.Errorf("snapshot missing hash but --skip-hash-check=false") + } + + if hasHash && !s.skipHashCheck { + // check for match + if _, err := db.Seek(0, io.SeekStart); err != nil { + return err + } + h := sha256.New() + if _, err := io.Copy(h, db); err != nil { + return err + } + dbsha := h.Sum(nil) + if !reflect.DeepEqual(sha, dbsha) { + return fmt.Errorf("expected sha256 %v, got %v", sha, dbsha) + } + } + + // db hash is OK, can now modify DB so it can be part of a new cluster + db.Close() + return nil +} + +// saveWALAndSnap creates a WAL for the initial cluster +// +// TODO: This code ignores learners !!! +func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) { + if err := fileutil.CreateDirAll(s.walDir); err != nil { + return nil, err + } + + // add members again to persist them to the store we create. 
+ st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix) + s.cl.SetStore(st) + be := backend.NewDefaultBackend(s.outDbPath()) + defer be.Close() + s.cl.SetBackend(be) + for _, m := range s.cl.Members() { + s.cl.AddMember(m, true) + } + + m := s.cl.MemberByName(s.name) + md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(s.cl.ID())} + metadata, merr := md.Marshal() + if merr != nil { + return nil, merr + } + w, walerr := wal.Create(s.lg, s.walDir, metadata) + if walerr != nil { + return nil, walerr + } + defer w.Close() + + peers := make([]raft.Peer, len(s.cl.MemberIDs())) + for i, id := range s.cl.MemberIDs() { + ctx, err := json.Marshal((*s.cl).Member(id)) + if err != nil { + return nil, err + } + peers[i] = raft.Peer{ID: uint64(id), Context: ctx} + } + + ents := make([]raftpb.Entry, len(peers)) + nodeIDs := make([]uint64, len(peers)) + for i, p := range peers { + nodeIDs[i] = p.ID + cc := raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeAddNode, + NodeID: p.ID, + Context: string(p.Context), + } + d, err := cc.Marshal() + if err != nil { + return nil, err + } + ents[i] = raftpb.Entry{ + Type: raftpb.EntryConfChange, + Term: 1, + Index: uint64(i + 1), + Data: d, // ok + } + } + + commit, term := uint64(len(ents)), uint64(1) + hardState := raftpb.HardState{ + Term: term, + Vote: peers[0].ID, + Commit: commit, + } + if err := w.Save(hardState, ents); err != nil { + return nil, err + } + + b, berr := st.Save() + if berr != nil { + return nil, berr + } + confState := raftpb.ConfState{ + Voters: nodeIDs, + } + raftSnap := raftpb.Snapshot{ + Data: b, + Metadata: raftpb.SnapshotMetadata{ + Index: commit, + Term: term, + ConfState: confState, + }, + } + sn := snap.New(s.lg, s.snapDir) + if err := sn.SaveSnap(raftSnap); err != nil { + return nil, err + } + snapshot := walpb.Snapshot{Index: commit, Term: term, ConfState: &confState} + return &hardState, w.SaveSnapshot(snapshot) +} + +func (s *v3Manager) updateCIndex(commit uint64, term 
uint64) error { + be := backend.NewDefaultBackend(s.outDbPath()) + defer be.Close() + + cindex.UpdateConsistentIndex(be.BatchTx(), commit, term, false) + return nil +} + +// ---------------------------------------- OVER ----------------------------------------v---------- + +type Status struct { + Hash uint32 `json:"hash"` // bolt.db哈希值 + Revision int64 `json:"revision"` // 修订版本 + TotalKey int `json:"totalKey"` // 总key数 + TotalSize int64 `json:"totalSize"` // 实际存储大小 +} + +// Status 返回blot.db信息 +func (s *v3Manager) Status(dbPath string) (ds Status, err error) { + if _, err = os.Stat(dbPath); err != nil { + return ds, err + } + + db, err := bolt.Open(dbPath, 0o400, &bolt.Options{ReadOnly: true}) + if err != nil { + return ds, err + } + defer db.Close() + + h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + // 只读事务 + // Bolt 将其key以字节排序的顺序存储在存储桶中.这使得对这些键的顺序迭代非常快.要遍历键我们将使用光标 + if err = db.View(func(tx *bolt.Tx) error { + // 首先检查快照文件的完整性 + var dbErrStrings []string + for dbErr := range tx.Check() { + dbErrStrings = append(dbErrStrings, dbErr.Error()) + } + if len(dbErrStrings) > 0 { + return fmt.Errorf("快照文件完整性检查失败.发现%d错误.\n"+strings.Join(dbErrStrings, "\n"), len(dbErrStrings)) + } + ds.TotalSize = tx.Size() + c := tx.Cursor() + for next, _ := c.First(); next != nil; next, _ = c.Next() { + b := tx.Bucket(next) + if b == nil { + return fmt.Errorf("无法获得桶的哈希值 %s", string(next)) + } + if _, err := h.Write(next); err != nil { + return fmt.Errorf("不能写入bucket %s : %v", string(next), err) + } + iskeyb := string(next) == "key" + if err := b.ForEach(func(k, v []byte) error { + if _, err := h.Write(k); err != nil { + return fmt.Errorf("cannot write to bucket %s", err.Error()) + } + if _, err := h.Write(v); err != nil { + return fmt.Errorf("cannot write to bucket %s", err.Error()) + } + if iskeyb { + rev := bytesToRev(k) + ds.Revision = rev.main + } + ds.TotalKey++ + return nil + }); err != nil { + return fmt.Errorf("不能写入bucket %s : %v", string(next), err) + } + } + return nil + }); 
err != nil { + return ds, err + } + + ds.Hash = h.Sum32() + return ds, nil +} diff --git a/etcdutl/snapshot/util.go b/etcdutl/snapshot/util.go index 2c1fae21fa1..8722c418077 100644 --- a/etcdutl/snapshot/util.go +++ b/etcdutl/snapshot/util.go @@ -19,8 +19,8 @@ import ( ) type revision struct { - main int64 - sub int64 + main int64 // 一个全局递增的主版本号,随put/txn/delete事务递增,一个事务内的key main版本号是一致的 + sub int64 // 一个事务内的子版本号,从0开始随事务内put/delete操作递增 } func bytesToRev(bytes []byte) revision { diff --git a/etcdutl/snapshot/v3_snapshot.go b/etcdutl/snapshot/v3_snapshot.go deleted file mode 100644 index 8958ba80da1..00000000000 --- a/etcdutl/snapshot/v3_snapshot.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package snapshot - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "reflect" - "strings" - - "go.uber.org/zap" - - bolt "go.etcd.io/bbolt" - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/types" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/snapshot" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/etcd/server/v3/verify" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -// Manager defines snapshot methods. -type Manager interface { - // Save fetches snapshot from remote etcd server, saves data - // to target path and returns server version. If the context "ctx" is canceled or timed out, - // snapshot save stream will error out (e.g. context.Canceled, - // context.DeadlineExceeded). Make sure to specify only one endpoint - // in client configuration. Snapshot API must be requested to a - // selected node, and saved snapshot is the point-in-time state of - // the selected node. - Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error) - - // Status returns the snapshot file information. - Status(dbPath string) (Status, error) - - // Restore restores a new etcd data directory from given snapshot - // file. It returns an error if specified data directory already - // exists, to prevent unintended data directory overwrites. - Restore(cfg RestoreConfig) error -} - -// NewV3 returns a new snapshot Manager for v3.x snapshot. 
-func NewV3(lg *zap.Logger) Manager { - return &v3Manager{lg: lg} -} - -type v3Manager struct { - lg *zap.Logger - - name string - srcDbPath string - walDir string - snapDir string - cl *membership.RaftCluster - - skipHashCheck bool -} - -// hasChecksum returns "true" if the file size "n" -// has appended sha256 hash digest. -func hasChecksum(n int64) bool { - // 512 is chosen because it's a minimum disk sector size - // smaller than (and multiplies to) OS page size in most systems - return (n % 512) == sha256.Size -} - -// Save fetches snapshot from remote etcd server and saves data to target path. -func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error) { - return snapshot.SaveWithVersion(ctx, s.lg, cfg, dbPath) -} - -// Status is the snapshot file status. -type Status struct { - Hash uint32 `json:"hash"` - Revision int64 `json:"revision"` - TotalKey int `json:"totalKey"` - TotalSize int64 `json:"totalSize"` - // Version is equal to storageVersion of the snapshot - // Empty if server does not supports versioned snapshots ( 0 { - return fmt.Errorf("snapshot file integrity check failed. 
%d errors found.\n"+strings.Join(dbErrStrings, "\n"), len(dbErrStrings)) - } - ds.TotalSize = tx.Size() - v := schema.ReadStorageVersionFromSnapshot(tx) - if v != nil { - ds.Version = v.String() - } - c := tx.Cursor() - for next, _ := c.First(); next != nil; next, _ = c.Next() { - b := tx.Bucket(next) - if b == nil { - return fmt.Errorf("cannot get hash of bucket %s", string(next)) - } - if _, err := h.Write(next); err != nil { - return fmt.Errorf("cannot write bucket %s : %v", string(next), err) - } - iskeyb := (string(next) == "key") - if err := b.ForEach(func(k, v []byte) error { - if _, err := h.Write(k); err != nil { - return fmt.Errorf("cannot write to bucket %s", err.Error()) - } - if _, err := h.Write(v); err != nil { - return fmt.Errorf("cannot write to bucket %s", err.Error()) - } - if iskeyb { - rev := bytesToRev(k) - ds.Revision = rev.main - } - ds.TotalKey++ - return nil - }); err != nil { - return fmt.Errorf("cannot write bucket %s : %v", string(next), err) - } - } - return nil - }); err != nil { - return ds, err - } - - ds.Hash = h.Sum32() - return ds, nil -} - -// RestoreConfig configures snapshot restore operation. -type RestoreConfig struct { - // SnapshotPath is the path of snapshot file to restore from. - SnapshotPath string - - // Name is the human-readable name of this member. - Name string - - // OutputDataDir is the target data directory to save restored data. - // OutputDataDir should not conflict with existing etcd data directory. - // If OutputDataDir already exists, it will return an error to prevent - // unintended data directory overwrites. - // If empty, defaults to "[Name].etcd" if not given. - OutputDataDir string - // OutputWALDir is the target WAL data directory. - // If empty, defaults to "[OutputDataDir]/member/wal" if not given. - OutputWALDir string - - // PeerURLs is a list of member's peer URLs to advertise to the rest of the cluster. 
- PeerURLs []string - - // InitialCluster is the initial cluster configuration for restore bootstrap. - InitialCluster string - // InitialClusterToken is the initial cluster token for etcd cluster during restore bootstrap. - InitialClusterToken string - - // SkipHashCheck is "true" to ignore snapshot integrity hash value - // (required if copied from data directory). - SkipHashCheck bool -} - -// Restore restores a new etcd data directory from given snapshot file. -func (s *v3Manager) Restore(cfg RestoreConfig) error { - pURLs, err := types.NewURLs(cfg.PeerURLs) - if err != nil { - return err - } - var ics types.URLsMap - ics, err = types.NewURLsMap(cfg.InitialCluster) - if err != nil { - return err - } - - srv := config.ServerConfig{ - Logger: s.lg, - Name: cfg.Name, - PeerURLs: pURLs, - InitialPeerURLsMap: ics, - InitialClusterToken: cfg.InitialClusterToken, - } - if err = srv.VerifyBootstrap(); err != nil { - return err - } - - s.cl, err = membership.NewClusterFromURLsMap(s.lg, cfg.InitialClusterToken, ics) - if err != nil { - return err - } - - dataDir := cfg.OutputDataDir - if dataDir == "" { - dataDir = cfg.Name + ".etcd" - } - if fileutil.Exist(dataDir) && !fileutil.DirEmpty(dataDir) { - return fmt.Errorf("data-dir %q not empty or could not be read", dataDir) - } - - walDir := cfg.OutputWALDir - if walDir == "" { - walDir = filepath.Join(dataDir, "member", "wal") - } else if fileutil.Exist(walDir) { - return fmt.Errorf("wal-dir %q exists", walDir) - } - - s.name = cfg.Name - s.srcDbPath = cfg.SnapshotPath - s.walDir = walDir - s.snapDir = filepath.Join(dataDir, "member", "snap") - s.skipHashCheck = cfg.SkipHashCheck - - s.lg.Info( - "restoring snapshot", - zap.String("path", s.srcDbPath), - zap.String("wal-dir", s.walDir), - zap.String("data-dir", dataDir), - zap.String("snap-dir", s.snapDir), - ) - - if err = s.saveDB(); err != nil { - return err - } - hardstate, err := s.saveWALAndSnap() - if err != nil { - return err - } - - if err := 
s.updateCIndex(hardstate.Commit, hardstate.Term); err != nil { - return err - } - - s.lg.Info( - "restored snapshot", - zap.String("path", s.srcDbPath), - zap.String("wal-dir", s.walDir), - zap.String("data-dir", dataDir), - zap.String("snap-dir", s.snapDir), - ) - - return verify.VerifyIfEnabled(verify.Config{ - ExactIndex: true, - Logger: s.lg, - DataDir: dataDir, - }) -} - -func (s *v3Manager) outDbPath() string { - return filepath.Join(s.snapDir, "db") -} - -// saveDB copies the database snapshot to the snapshot directory -func (s *v3Manager) saveDB() error { - err := s.copyAndVerifyDB() - if err != nil { - return err - } - - be := backend.NewDefaultBackend(s.lg, s.outDbPath()) - defer be.Close() - - err = schema.NewMembershipBackend(s.lg, be).TrimMembershipFromBackend() - if err != nil { - return err - } - - return nil -} - -func (s *v3Manager) copyAndVerifyDB() error { - srcf, ferr := os.Open(s.srcDbPath) - if ferr != nil { - return ferr - } - defer srcf.Close() - - // get snapshot integrity hash - if _, err := srcf.Seek(-sha256.Size, io.SeekEnd); err != nil { - return err - } - sha := make([]byte, sha256.Size) - if _, err := srcf.Read(sha); err != nil { - return err - } - if _, err := srcf.Seek(0, io.SeekStart); err != nil { - return err - } - - if err := fileutil.CreateDirAll(s.lg, s.snapDir); err != nil { - return err - } - - outDbPath := s.outDbPath() - - db, dberr := os.OpenFile(outDbPath, os.O_RDWR|os.O_CREATE, 0600) - if dberr != nil { - return dberr - } - dbClosed := false - defer func() { - if !dbClosed { - db.Close() - dbClosed = true - } - }() - if _, err := io.Copy(db, srcf); err != nil { - return err - } - - // truncate away integrity hash, if any. 
- off, serr := db.Seek(0, io.SeekEnd) - if serr != nil { - return serr - } - hasHash := hasChecksum(off) - if hasHash { - if err := db.Truncate(off - sha256.Size); err != nil { - return err - } - } - - if !hasHash && !s.skipHashCheck { - return fmt.Errorf("snapshot missing hash but --skip-hash-check=false") - } - - if hasHash && !s.skipHashCheck { - // check for match - if _, err := db.Seek(0, io.SeekStart); err != nil { - return err - } - h := sha256.New() - if _, err := io.Copy(h, db); err != nil { - return err - } - dbsha := h.Sum(nil) - if !reflect.DeepEqual(sha, dbsha) { - return fmt.Errorf("expected sha256 %v, got %v", sha, dbsha) - } - } - - // db hash is OK, can now modify DB so it can be part of a new cluster - db.Close() - return nil -} - -// saveWALAndSnap creates a WAL for the initial cluster -// -// TODO: This code ignores learners !!! -func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) { - if err := fileutil.CreateDirAll(s.lg, s.walDir); err != nil { - return nil, err - } - - // add members again to persist them to the store we create. 
- st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix) - s.cl.SetStore(st) - be := backend.NewDefaultBackend(s.lg, s.outDbPath()) - defer be.Close() - s.cl.SetBackend(schema.NewMembershipBackend(s.lg, be)) - for _, m := range s.cl.Members() { - s.cl.AddMember(m, true) - } - - m := s.cl.MemberByName(s.name) - md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(s.cl.ID())} - metadata, merr := md.Marshal() - if merr != nil { - return nil, merr - } - w, walerr := wal.Create(s.lg, s.walDir, metadata) - if walerr != nil { - return nil, walerr - } - defer w.Close() - - peers := make([]raft.Peer, len(s.cl.MemberIDs())) - for i, id := range s.cl.MemberIDs() { - ctx, err := json.Marshal((*s.cl).Member(id)) - if err != nil { - return nil, err - } - peers[i] = raft.Peer{ID: uint64(id), Context: ctx} - } - - ents := make([]raftpb.Entry, len(peers)) - nodeIDs := make([]uint64, len(peers)) - for i, p := range peers { - nodeIDs[i] = p.ID - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: p.ID, - Context: p.Context, - } - d, err := cc.Marshal() - if err != nil { - return nil, err - } - ents[i] = raftpb.Entry{ - Type: raftpb.EntryConfChange, - Term: 1, - Index: uint64(i + 1), - Data: d, - } - } - - commit, term := uint64(len(ents)), uint64(1) - hardState := raftpb.HardState{ - Term: term, - Vote: peers[0].ID, - Commit: commit, - } - if err := w.Save(hardState, ents); err != nil { - return nil, err - } - - b, berr := st.Save() - if berr != nil { - return nil, berr - } - confState := raftpb.ConfState{ - Voters: nodeIDs, - } - raftSnap := raftpb.Snapshot{ - Data: b, - Metadata: raftpb.SnapshotMetadata{ - Index: commit, - Term: term, - ConfState: confState, - }, - } - sn := snap.New(s.lg, s.snapDir) - if err := sn.SaveSnap(raftSnap); err != nil { - return nil, err - } - snapshot := walpb.Snapshot{Index: commit, Term: term, ConfState: &confState} - return &hardState, w.SaveSnapshot(snapshot) -} - -func (s *v3Manager) 
updateCIndex(commit uint64, term uint64) error { - be := backend.NewDefaultBackend(s.lg, s.outDbPath()) - defer be.Close() - - cindex.UpdateConsistentIndexForce(be.BatchTx(), commit, term) - return nil -} diff --git a/go.mod b/go.mod index f37399e5b32..317971ae6a4 100644 --- a/go.mod +++ b/go.mod @@ -1,104 +1,76 @@ -module go.etcd.io/etcd/v3 +module github.com/ls-2018/etcd_cn -go 1.19 +go 1.16 -replace ( - go.etcd.io/etcd/api/v3 => ./api - go.etcd.io/etcd/client/pkg/v3 => ./client/pkg - go.etcd.io/etcd/client/v2 => ./client/v2 - go.etcd.io/etcd/client/v3 => ./client/v3 - go.etcd.io/etcd/etcdctl/v3 => ./etcdctl - go.etcd.io/etcd/etcdutl/v3 => ./etcdutl - go.etcd.io/etcd/pkg/v3 => ./pkg - go.etcd.io/etcd/server/v3 => ./server - go.etcd.io/etcd/tests/v3 => ./tests -) +replace go.etcd.io/etcd/api/v3 v3.5.2 => github.com/etcd-io/etcd/api/v3 v3.5.2 -require ( - github.com/bgentry/speakeasy v0.1.0 - github.com/cheggaaa/pb/v3 v3.1.0 - github.com/coreos/go-semver v0.3.1 - github.com/dustin/go-humanize v1.0.1 - github.com/spf13/cobra v1.6.1 - github.com/stretchr/testify v1.8.1 - go.etcd.io/bbolt v1.3.7 - go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 - go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/server/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/tests/v3 v3.6.0-alpha.0 - go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a - go.uber.org/zap v1.24.0 - golang.org/x/time v0.0.0-20220609170525-579cf78fd858 - google.golang.org/grpc v1.51.0 - google.golang.org/protobuf v1.28.1 -) +replace github.com/etcd-io/etcd/api/v3 v3.5.2 => ./offical/api/v3 + +replace go.etcd.io/etcd/client/v3 v3.5.2 => github.com/etcd-io/etcd/client/v3 v3.5.2 +replace github.com/etcd-io/client/api/v3 v3.5.2 => ./offical/client/v3 +replace github.com/ls-2018/etcd_cn/official => ./offical 
require ( - cloud.google.com/go v0.81.0 // indirect - github.com/VividCortex/ewma v1.1.1 // indirect - github.com/benbjohnson/clock v1.1.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.4.3 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/gorilla/websocket v1.4.2 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/jonboulle/clockwork v0.3.0 // indirect + github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 // indirect + github.com/alexkohler/nakedret v1.0.0 + github.com/bgentry/speakeasy v0.1.0 + github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03 + github.com/coreos/go-semver v0.3.0 + github.com/coreos/go-systemd/v22 v22.3.2 + github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e + github.com/creack/pty v1.1.11 + github.com/dustin/go-humanize v1.0.0 + github.com/fatih/color v1.10.0 // indirect + github.com/form3tech-oss/jwt-go v3.2.3+incompatible + github.com/go-openapi/loads v0.19.5 // indirect + github.com/go-openapi/spec v0.19.9 // indirect + github.com/gogo/protobuf v1.3.2 + github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da + github.com/golang/protobuf v1.5.2 + github.com/google/btree v1.0.1 + github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78 + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535 + github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa + github.com/jonboulle/clockwork v0.2.2 + github.com/json-iterator/go v1.1.11 + github.com/kr/pretty v0.3.0 // indirect github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mattn/go-runewidth v0.0.12 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/rivo/uniseg v0.2.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect - github.com/soheilhy/cmux v0.1.5 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect - github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 // indirect - go.opentelemetry.io/otel v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.11.2 // indirect - go.opentelemetry.io/otel/trace v1.11.2 // 
indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f + github.com/mgechev/revive v1.0.2 + github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735 + github.com/modern-go/reflect2 v1.0.1 + github.com/olekukonko/tablewriter v0.0.5 + github.com/prometheus/client_golang v1.11.0 + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/sirupsen/logrus v1.7.0 // indirect + github.com/soheilhy/cmux v0.1.5 + github.com/spf13/cobra v1.1.3 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.7.0 + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 + github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f // indirect + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 + go.etcd.io/bbolt v1.3.6 + go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 + go.opentelemetry.io/otel v0.20.0 + go.opentelemetry.io/otel/exporters/otlp v0.20.0 + go.opentelemetry.io/otel/sdk v0.20.0 + go.uber.org/multierr v1.6.0 + go.uber.org/zap v1.17.0 + golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 + golang.org/x/net v0.0.0-20211008194852-3b03d305991f + golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7 + golang.org/x/text v0.3.7 // indirect + 
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba + google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 + google.golang.org/grpc v1.38.0 + gopkg.in/cheggaaa/pb.v1 v1.0.28 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + honnef.co/go/tools v0.0.1-2019.2.3 + mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 + sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index 620de23812b..b7439902a78 100644 --- a/go.sum +++ b/go.sum @@ -4,152 +4,188 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 
h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= -github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 h1:bYOD6QJnBJY79MJQR1i9cyQePG5oNDZXDKL2bhN/uvE= +github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19/go.mod h1:HcqyLXmWoESd/vPSbCPqvgw5l5cMM5PtoqFOnXLjSeM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret v1.0.0 h1:S/bzOFhZHYUJp6qPmdXdFHS5nlWGFmLmoc8QOydvotE= +github.com/alexkohler/nakedret v1.0.0/go.mod h1:tfDQbtPt67HhBK/6P0yNktIX7peCxfOp0jO9007DrLE= 
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.2.0 
h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04= -github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03 h1:0wUHjDfbCAROEAZ96zAJGwcNMkPIheFaIjtQyv3QqfM= +github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03/go.mod h1:uFE9hX+zXEwvyUThZ4gDb9vkAwc5DoHUnRSEpH0VrOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go 
v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 
h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e h1:vHRufSa2k8tfkcDdia1vJFa+oiBvvPxW94mg76PPAoA= +github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e/go.mod h1:4xMOusJ7xxc84WclVxKT8+lNfGYDwojOUC2OQNCwcj4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dustin/go-humanize v1.0.0 
h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= 
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr 
v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3 h1:7MGZI1ibQDLasvAz8HuhvYk9eNJbJkCOXWsSjjMS+Zc= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg= +github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/spec 
v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.9 h1:9z9cbFuZJ7AcvOHKIY+f6Aevb4vObNDkTEyoMfO7rAc= +github.com/go-openapi/spec v0.19.9/go.mod h1:vqK/dIdLGCosfvYsQV3WfC7N3TiZSnGY2RZKoFK7X28= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.4 h1:eRvaqAhpL0IL6Trh5fDsGnGhiXndzHFuA05w6sXH6/g= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7 h1:VRuXN2EnMSsZdauzdss6JBC29YotDqG59BZ+tdlIL1s= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod 
h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/goccy/go-yaml v1.8.1 h1:JuZRFlqLM5cWF6A+waL8AKVuCcqvKOuhJtUQI+L3ez0= +github.com/goccy/go-yaml v1.8.1/go.mod 
h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod 
h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -159,235 +195,328 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78 h1:U/zHjaVG/sECz5xhnh7kPH+Fv/maPbhZPcaTquo5sPg= +github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535 h1:BGeD3v3lyKZy+ocGtprXiDXjIiXvZDfuyII7Lym7GbQ= +github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535/go.mod 
h1:xV7b0Cn2irnP1jU+mMYvqPAPuFPNjtgB+rvKu/dLIz4= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jonboulle/clockwork v0.3.0 
h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= -github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa h1:oDcxzjIf33MTX7b8Eu7eO3a/z8mlTT+blyEoVxBmUUg= +github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa/go.mod h1:wSgrm+n3LvHOVxUJo2ha5ffLqRmt6+oGoD6J/suB66c= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod 
h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 
h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f h1:Kc3s6QFyh9DLgInXpWKuG+8I7R7lXbnP7mcoOVIt6KY= +github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f/go.mod h1:AmCV4WB3cDMZqgPk+OUQKumliiQS4ZYsBt3AXekyuAU= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg= +github.com/mgechev/revive 
v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735 h1:Qn41fatPrqv5qVpDFx+4ABF14LNj9jiNLm/BsrDb01U= +github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid 
v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f 
h1:92ZQJRegaqnKjz9HY9an696Sw5EmAqRv0eie/U2IE6k= +github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f/go.mod h1:wxUiQ1klFJmwnM41kQI7IT2g8jjOKbtuL54LdjkxAI0= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a h1:Znv2XJyAf/fsJsFNt9toO8uyXwwHQ44wxqsvdSxipj4= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a/go.mod h1:eMshmuwXLWZrjHXN8ZgYrOMQRSbHqi5M84DEZWhG+o4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116 h1:QQiUXlqz+d96jyNG71NE+IGTgOK6Xlhdx+PzvfbLHlQ= 
+go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116/go.mod h1:F9kog+iVAuvPJucb1dkYcDcbV0g4uyGEHllTP5NrXiw= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0 h1:ew6uUIeJOo+qdUUv7LxFCUhtWmVv7ZV/Xuy4FAUsw2E= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= -go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU= -go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= +go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= +go.opentelemetry.io/otel/sdk 
v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.24.0 
h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -397,133 +526,91 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= 
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f h1:1scJEYZBaF48BaG6tYbtxmLcXqwYGSfGcMoStTqkkIw= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= 
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -531,110 +618,69 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7 h1:8IVLkfbr2cLhv0a/vKq4UFUcJym8RmDoDboxCFWEjYE= +golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text 
v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
+golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= 
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -644,63 +690,24 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= 
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -709,43 +716,50 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= +gopkg.in/resty.v1 v1.12.0/go.mod 
h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 h1:kAREL6MPwpsk1/PQPFD3Eg7WAQR5mPTWZJaBiG5LDbY= +mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/benchmark/README.md b/hack/benchmark/README.md index 6a09c530299..4ece14ca1da 100644 --- a/hack/benchmark/README.md +++ b/hack/benchmark/README.md @@ -10,5 +10,7 @@ Benchmark 3-member etcd cluster to get its read and write performance. ## Caveat -1. Set environment variable `GOMAXPROCS` as the number of available cores to maximize CPU resources for both etcd member and bench process. -2. Set the number of open files per process as 10000 for amounts of client connections for both etcd member and benchmark process. +1. Set environment variable `GOMAXPROCS` as the number of available cores to maximize CPU resources for both etcd member + and bench process. +2. 
Set the number of open files per process as 10000 for amounts of client connections for both etcd member and + benchmark process. diff --git a/hack/kubernetes-deploy/README.md b/hack/kubernetes-deploy/README.md index bd34115d719..22df221d4aa 100644 --- a/hack/kubernetes-deploy/README.md +++ b/hack/kubernetes-deploy/README.md @@ -18,4 +18,4 @@ $ kubectl create -f vulcand.yml TODO: - create a replication controller like service that knows how to add and remove nodes from the cluster correctly -- use kubernetes secrets API to configure TLS for etcd clients and peers +- use kubernetes secrets API to configureAndSendRaft TLS for etcd clients and peers diff --git a/hack/kubernetes-deploy/etcd.yml b/hack/kubernetes-deploy/etcd.yml index 84bf6be95ad..416f5729117 100644 --- a/hack/kubernetes-deploy/etcd.yml +++ b/hack/kubernetes-deploy/etcd.yml @@ -4,10 +4,10 @@ metadata: name: etcd-client spec: ports: - - name: etcd-client-port - port: 2379 - protocol: TCP - targetPort: 2379 + - name: etcd-client-port + port: 2379 + protocol: TCP + targetPort: 2379 selector: app: etcd @@ -22,31 +22,31 @@ metadata: name: etcd0 spec: containers: - - command: - - /usr/local/bin/etcd - - --name - - etcd0 - - --initial-advertise-peer-urls - - http://etcd0:2380 - - --listen-peer-urls - - http://0.0.0.0:2380 - - --listen-client-urls - - http://0.0.0.0:2379 - - --advertise-client-urls - - http://etcd0:2379 - - --initial-cluster - - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 - - --initial-cluster-state - - new - image: quay.io/coreos/etcd:latest - name: etcd0 - ports: - - containerPort: 2379 - name: client - protocol: TCP - - containerPort: 2380 - name: server - protocol: TCP + - command: + - /usr/local/bin/etcd + - --name + - etcd0 + - --initial-advertise-peer-urls + - http://etcd0:2380 + - --listen-peer-urls + - http://0.0.0.0:2380 + - --listen-client-urls + - http://0.0.0.0:2379 + - --advertise-client-urls + - http://etcd0:2379 + - --initial-cluster + - 
etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 + - --initial-cluster-state + - new + image: quay.io/coreos/etcd:latest + name: etcd0 + ports: + - containerPort: 2379 + name: client + protocol: TCP + - containerPort: 2380 + name: etcd + protocol: TCP restartPolicy: Always --- @@ -59,14 +59,14 @@ metadata: name: etcd0 spec: ports: - - name: client - port: 2379 - protocol: TCP - targetPort: 2379 - - name: server - port: 2380 - protocol: TCP - targetPort: 2380 + - name: client + port: 2379 + protocol: TCP + targetPort: 2379 + - name: etcd + port: 2380 + protocol: TCP + targetPort: 2380 selector: etcd_node: etcd0 @@ -81,31 +81,31 @@ metadata: name: etcd1 spec: containers: - - command: - - /usr/local/bin/etcd - - --name - - etcd1 - - --initial-advertise-peer-urls - - http://etcd1:2380 - - --listen-peer-urls - - http://0.0.0.0:2380 - - --listen-client-urls - - http://0.0.0.0:2379 - - --advertise-client-urls - - http://etcd1:2379 - - --initial-cluster - - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 - - --initial-cluster-state - - new - image: quay.io/coreos/etcd:latest - name: etcd1 - ports: - - containerPort: 2379 - name: client - protocol: TCP - - containerPort: 2380 - name: server - protocol: TCP + - command: + - /usr/local/bin/etcd + - --name + - etcd1 + - --initial-advertise-peer-urls + - http://etcd1:2380 + - --listen-peer-urls + - http://0.0.0.0:2380 + - --listen-client-urls + - http://0.0.0.0:2379 + - --advertise-client-urls + - http://etcd1:2379 + - --initial-cluster + - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 + - --initial-cluster-state + - new + image: quay.io/coreos/etcd:latest + name: etcd1 + ports: + - containerPort: 2379 + name: client + protocol: TCP + - containerPort: 2380 + name: etcd + protocol: TCP restartPolicy: Always --- @@ -118,14 +118,14 @@ metadata: name: etcd1 spec: ports: - - name: client - port: 2379 - protocol: TCP - targetPort: 2379 - - name: server - port: 
2380 - protocol: TCP - targetPort: 2380 + - name: client + port: 2379 + protocol: TCP + targetPort: 2379 + - name: etcd + port: 2380 + protocol: TCP + targetPort: 2380 selector: etcd_node: etcd1 @@ -140,31 +140,31 @@ metadata: name: etcd2 spec: containers: - - command: - - /usr/local/bin/etcd - - --name - - etcd2 - - --initial-advertise-peer-urls - - http://etcd2:2380 - - --listen-peer-urls - - http://0.0.0.0:2380 - - --listen-client-urls - - http://0.0.0.0:2379 - - --advertise-client-urls - - http://etcd2:2379 - - --initial-cluster - - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 - - --initial-cluster-state - - new - image: quay.io/coreos/etcd:latest - name: etcd2 - ports: - - containerPort: 2379 - name: client - protocol: TCP - - containerPort: 2380 - name: server - protocol: TCP + - command: + - /usr/local/bin/etcd + - --name + - etcd2 + - --initial-advertise-peer-urls + - http://etcd2:2380 + - --listen-peer-urls + - http://0.0.0.0:2380 + - --listen-client-urls + - http://0.0.0.0:2379 + - --advertise-client-urls + - http://etcd2:2379 + - --initial-cluster + - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 + - --initial-cluster-state + - new + image: quay.io/coreos/etcd:latest + name: etcd2 + ports: + - containerPort: 2379 + name: client + protocol: TCP + - containerPort: 2380 + name: etcd + protocol: TCP restartPolicy: Always --- @@ -177,13 +177,13 @@ metadata: name: etcd2 spec: ports: - - name: client - port: 2379 - protocol: TCP - targetPort: 2379 - - name: server - port: 2380 - protocol: TCP - targetPort: 2380 + - name: client + port: 2379 + protocol: TCP + targetPort: 2379 + - name: etcd + port: 2380 + protocol: TCP + targetPort: 2380 selector: etcd_node: etcd2 diff --git a/hack/kubernetes-deploy/vulcand.yml b/hack/kubernetes-deploy/vulcand.yml index bb61eec461b..fd45f41a19c 100644 --- a/hack/kubernetes-deploy/vulcand.yml +++ b/hack/kubernetes-deploy/vulcand.yml @@ -6,17 +6,17 @@ metadata: name: vulcand 
spec: containers: - - command: - - /go/bin/vulcand - - -apiInterface=0.0.0.0 - - --etcd=http://etcd-client:2379 - image: mailgun/vulcand:v0.8.0-beta.2 - name: vulcand - ports: - - containerPort: 8081 - name: api - protocol: TCP - - containerPort: 8082 - name: server - protocol: TCP + - command: + - /go/bin/vulcand + - -apiInterface=0.0.0.0 + - --etcd=http://etcd-client:2379 + image: mailgun/vulcand:v0.8.0-beta.2 + name: vulcand + ports: + - containerPort: 8081 + name: api + protocol: TCP + - containerPort: 8082 + name: etcd + protocol: TCP restartPolicy: Always diff --git a/hack/patch/README.md b/hack/patch/README.md deleted file mode 100644 index 32323f17996..00000000000 --- a/hack/patch/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# ./hack/patch/cherrypick.sh - -Handles cherry-picks of PR(s) from etcd main to a stable etcd release branch automatically. - -## Setup - -Set the `UPSTREAM_REMOTE` and `FORK_REMOTE` environment variables. -`UPSTREAM_REMOTE` should be set to git remote name of `github.com/etcd-io/etcd`, -and `FORK_REMOTE` should be set to the git remote name of the forked etcd -repo (`github.com/${github-username}/etcd`). Use `git remote -v` to -look up the git remote names. If etcd has not been forked, create -one on github.com and register it locally with `git remote add ...`. 
- - -``` -export UPSTREAM_REMOTE=upstream -export FORK_REMOTE=origin -export GITHUB_USER=${github-username} -``` - -Next, install hub from https://github.com/github/hub - -## Usage - -To cherry pick PR 12345 onto release-3.2 and propose is as a PR, run: - -```sh -./hack/patch/cherrypick.sh ${UPSTREAM_REMOTE}/release-3.2 12345 -``` - -To cherry pick 12345 then 56789 and propose them togther as a single PR, run: - -``` -./hack/patch/cherrypick.sh ${UPSTREAM_REMOTE}/release-3.2 12345 56789 -``` - - diff --git a/hack/patch/cherrypick.sh b/hack/patch/cherrypick.sh deleted file mode 100755 index ad143514f55..00000000000 --- a/hack/patch/cherrypick.sh +++ /dev/null @@ -1,229 +0,0 @@ -#!/usr/bin/env bash - -# Based on github.com/kubernetes/kubernetes/blob/v1.8.2/hack/cherry_pick_pull.sh - -# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How -# meta.) Assumes you care about pulls from remote "upstream" and -# checks thems out to a branch named: -# automated-cherry-pick-of--- - -set -o errexit -set -o nounset -set -o pipefail - -declare -r ETCD_ROOT="$(dirname "${BASH_SOURCE}")/../.." -cd "${ETCD_ROOT}" - -declare -r STARTINGBRANCH=$(git symbolic-ref --short HEAD) -declare -r REBASEMAGIC="${ETCD_ROOT}/.git/rebase-apply" -DRY_RUN=${DRY_RUN:-""} -REGENERATE_DOCS=${REGENERATE_DOCS:-""} -UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream} -FORK_REMOTE=${FORK_REMOTE:-origin} - -if [[ -z ${GITHUB_USER:-} ]]; then - echo "Please export GITHUB_USER= (or GH organization, if that's where your fork lives)" - exit 1 -fi - -if ! which hub > /dev/null; then - echo "Can't find 'hub' tool in PATH, please install from https://github.com/github/hub" - exit 1 -fi - -if [[ "$#" -lt 2 ]]; then - echo "${0} ...: cherry pick one or more onto and leave instructions for proposing pull request" - echo - echo " Checks out and handles the cherry-pick of (possibly multiple) for you." 
- echo " Examples:" - echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR." - echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR." - echo - echo " Set the DRY_RUN environment var to skip git push and creating PR." - echo " This is useful for creating patches to a release branch without making a PR." - echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked." - echo - echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits." - echo " This is useful when picking commits containing changes to API documentation." - echo - echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)" - echo " To override the default remote names to what you have locally." - exit 2 -fi - -if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then - echo "!!! Dirty tree. Clean up and try again." - exit 1 -fi - -if [[ -e "${REBASEMAGIC}" ]]; then - echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again." - exit 1 -fi - -declare -r BRANCH="$1" -shift 1 -declare -r PULLS=( "$@" ) - -function join { local IFS="$1"; shift; echo "$*"; } -declare -r PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789" -declare -r PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789" - -echo "+++ Updating remotes..." -git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}" - -if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then - echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21." 
- echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)" - exit 1 -fi - -declare -r NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools. -declare -r NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')" -declare -r NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)" -echo "+++ Creating local branch ${NEWBRANCHUNIQ}" - -cleanbranch="" -prtext="" -gitamcleanup=false -function return_to_kansas { - if [[ "${gitamcleanup}" == "true" ]]; then - echo - echo "+++ Aborting in-progress git am." - git am --abort >/dev/null 2>&1 || true - fi - - # return to the starting branch and delete the PR text file - if [[ -z "${DRY_RUN}" ]]; then - echo - echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up." - git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true - if [[ -n "${cleanbranch}" ]]; then - git branch -D "${cleanbranch}" >/dev/null 2>&1 || true - fi - if [[ -n "${prtext}" ]]; then - rm "${prtext}" - fi - fi -} -trap return_to_kansas EXIT - -SUBJECTS=() -function make-a-pr() { - local rel="$(basename "${BRANCH}")" - echo - echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}" - - # This looks like an unnecessary use of a tmpfile, but it avoids - # https://github.com/github/hub/issues/976 Otherwise stdin is stolen - # when we shove the heredoc at hub directly, tickling the ioctl - # crash. - prtext="$(mktemp -t prtext.XXXX)" # cleaned in return_to_kansas - cat >"${prtext}" <&2 - exit 1 - fi - done - - if [[ "${conflicts}" != "true" ]]; then - echo "!!! 
git am failed, likely because of an in-progress 'git am' or 'git rebase'" - exit 1 - fi - } - - # set the subject - subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //') - SUBJECTS+=("#${pull}: ${subject}") - - # remove the patch file from /tmp - rm -f "/tmp/${pull}.patch" -done -gitamcleanup=false - -# Re-generate docs (if needed) -if [[ -n "${REGENERATE_DOCS}" ]]; then - echo - echo "Regenerating docs..." - if ! hack/generate-docs.sh; then - echo - echo "hack/generate-docs.sh FAILED to complete." - exit 1 - fi -fi - -if [[ -n "${DRY_RUN}" ]]; then - echo "!!! Skipping git push and PR creation because you set DRY_RUN." - echo "To return to the branch you were in when you invoked this script:" - echo - echo " git checkout ${STARTINGBRANCH}" - echo - echo "To delete this branch:" - echo - echo " git branch -D ${NEWBRANCHUNIQ}" - exit 0 -fi - -if git remote -v | grep ^${FORK_REMOTE} | grep etcd/etcd.git; then - echo "!!! You have ${FORK_REMOTE} configured as your etcd/etcd.git" - echo "This isn't normal. Leaving you with push instructions:" - echo - echo "+++ First manually push the branch this script created:" - echo - echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}" - echo - echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)." - echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values." - echo - make-a-pr - cleanbranch="" - exit 0 -fi - -echo -echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):" -echo -echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}" -echo -read -p "+++ Proceed (anything but 'y' aborts the cherry-pick)? [y/n] " -r -if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then - echo "Aborting." 
>&2 - exit 1 -fi - -git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}" -make-a-pr diff --git a/hack/tls-setup/Makefile b/hack/tls-setup/Makefile index a1d48ac9bf3..6c9551cd84b 100644 --- a/hack/tls-setup/Makefile +++ b/hack/tls-setup/Makefile @@ -1,14 +1,14 @@ .PHONY: cfssl ca req clean -CFSSL = @env PATH=$(GOPATH)/bin:$(PATH) cfssl -JSON = env PATH=$(GOPATH)/bin:$(PATH) cfssljson +CFSSL = cfssl +JSON = cfssljson all: ca req cfssl: - HTTPS_PROXY=127.0.0.1:12639 go get -u -tags nopkcs11 github.com/cloudflare/cfssl/cmd/cfssl - HTTPS_PROXY=127.0.0.1:12639 go get -u github.com/cloudflare/cfssl/cmd/cfssljson - HTTPS_PROXY=127.0.0.1:12639 go get -u github.com/mattn/goreman + go get github.com/cloudflare/cfssl/cmd/cfssl + go get github.com/cloudflare/cfssl/cmd/cfssljson + go get github.com/mattn/goreman ca: mkdir -p certs diff --git a/hack/tls-setup/Procfile b/hack/tls-setup/Procfile index f3532ca8bad..7fca9fc3964 100644 --- a/hack/tls-setup/Procfile +++ b/hack/tls-setup/Procfile @@ -1,9 +1,9 @@ # Use goreman to run `go get github.com/mattn/goreman` -etcd1: ../../bin/etcd --name infra1 --listen-client-urls https://localhost:2379 --advertise-client-urls https://localhost:2379 --listen-peer-urls https://localhost:2380 --initial-advertise-peer-urls https://localhost:2380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --initial-cluster-state new --cert-file=certs/etcd1.pem --key-file=certs/etcd1-key.pem --peer-cert-file=certs/etcd1.pem --peer-key-file=certs/etcd1-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem +etcd1: etcd_backend --name infra1 --listen-client-urls https://localhost:2379 --advertise-client-urls https://localhost:2379 --listen-peer-urls https://localhost:2380 --initial-advertise-peer-urls https://localhost:2380 --initial-cluster-token etcd-cluster-1 --initial-cluster 
'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --initial-cluster-state new --cert-file=certs/etcd1.pem --key-file=certs/etcd1-key.pem --peer-cert-file=certs/etcd1.pem --peer-key-file=certs/etcd1-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem -etcd2: ../../bin/etcd --name infra2 --listen-client-urls https://localhost:12379 --advertise-client-urls https://localhost:12379 --listen-peer-urls https://localhost:12380 --initial-advertise-peer-urls https://localhost:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --initial-cluster-state new --cert-file=certs/etcd2.pem --key-file=certs/etcd2-key.pem --peer-cert-file=certs/etcd2.pem --peer-key-file=certs/etcd2-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem +etcd2: etcd_backend --name infra2 --listen-client-urls https://localhost:12379 --advertise-client-urls https://localhost:12379 --listen-peer-urls https://localhost:12380 --initial-advertise-peer-urls https://localhost:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --initial-cluster-state new --cert-file=certs/etcd2.pem --key-file=certs/etcd2-key.pem --peer-cert-file=certs/etcd2.pem --peer-key-file=certs/etcd2-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem -etcd3: ../../bin/etcd --name infra3 --listen-client-urls https://localhost:22379 --advertise-client-urls https://localhost:22379 --listen-peer-urls https://localhost:22380 --initial-advertise-peer-urls https://localhost:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --initial-cluster-state new --cert-file=certs/etcd3.pem --key-file=certs/etcd3-key.pem --peer-cert-file=certs/etcd3.pem 
--peer-key-file=certs/etcd3-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem +etcd3: etcd_backend --name infra3 --listen-client-urls https://localhost:22379 --advertise-client-urls https://localhost:22379 --listen-peer-urls https://localhost:22380 --initial-advertise-peer-urls https://localhost:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --initial-cluster-state new --cert-file=certs/etcd3.pem --key-file=certs/etcd3-key.pem --peer-cert-file=certs/etcd3.pem --peer-key-file=certs/etcd3-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem -proxy: ../../bin/etcd --name proxy1 --proxy=on --listen-client-urls https://localhost:8080 --initial-cluster 'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --cert-file=certs/proxy1.pem --key-file=certs/proxy1-key.pem --trusted-ca-file=certs/ca.pem --peer-cert-file=certs/proxy1.pem --peer-key-file=certs/proxy1-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem +proxy: etcd_backend --name proxy1 --proxy=on --listen-client-urls https://localhost:8080 --initial-cluster 'infra1=https://localhost:2380,infra2=https://localhost:12380,infra3=https://localhost:22380' --cert-file=certs/proxy1.pem --key-file=certs/proxy1-key.pem --trusted-ca-file=certs/ca.pem --peer-cert-file=certs/proxy1.pem --peer-key-file=certs/proxy1-key.pem --peer-client-cert-auth --peer-trusted-ca-file=certs/ca.pem diff --git a/hack/tls-setup/README.md b/hack/tls-setup/README.md index 7ff5233eaca..3db37f0b54e 100644 --- a/hack/tls-setup/README.md +++ b/hack/tls-setup/README.md @@ -1,11 +1,16 @@ -This demonstrates using Cloudflare's [cfssl](https://github.com/cloudflare/cfssl) to easily generate certificates for an etcd cluster. +This demonstrates using Cloudflare's [cfssl](https://github.com/cloudflare/cfssl) to easily generate certificates for an +etcd cluster. 
-Defaults generate an ECDSA-384 root and leaf certificates for `localhost`. etcd nodes will use the same certificates for both sides of mutual authentication, but won't require client certs for non-peer clients. +Defaults generate an ECDSA-384 root and leaf certificates for `localhost`. etcd nodes will use the same certificates for +both sides of mutual authentication, but won't require client certs for non-peer clients. **Instructions** 1. Install git, go, and make -2. Amend https://github.com/etcd-io/etcd/blob/main/hack/tls-setup/config/req-csr.json - IP's currently in the config should be replaced/added with IP addresses of each cluster node, please note 127.0.0.1 is always required for loopback purposes: +2. Amend https://github.com/etcd-io/etcd/blob/main/hack/tls-setup/config/req-csr.json - IP's currently in the config + should backend replaced/added with IP addresses of each cluster node, please note 127.0.0.1 is always required for + loopback purposes: + ```json Example: { @@ -28,10 +33,13 @@ Example: ] } ``` + 3. Set the following environment variables subsituting your IP address: + ```bash -export infra0={IP-0} -export infra1={IP-1} -export infra2={IP-2} +export infra0=etcd1 +export infra1=etcd2 +export infra2=etcd3 ``` + 4. 
Run `make` to generate the certs diff --git a/hack/tls-setup/config/ca-config.json b/hack/tls-setup/config/ca-config.json index edd0c078e9f..58cdebe1d6e 100644 --- a/hack/tls-setup/config/ca-config.json +++ b/hack/tls-setup/config/ca-config.json @@ -1,13 +1,13 @@ { "signing": { "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "876000h" + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "876000h" } } } diff --git a/images/42eaa94b0f4f7a18895780e6f61ce381.webp b/images/42eaa94b0f4f7a18895780e6f61ce381.webp new file mode 100644 index 00000000000..e9507204855 Binary files /dev/null and b/images/42eaa94b0f4f7a18895780e6f61ce381.webp differ diff --git a/images/7e05c744ba292cf26c39d69101200554.webp b/images/7e05c744ba292cf26c39d69101200554.webp new file mode 100644 index 00000000000..b709ee10d42 Binary files /dev/null and b/images/7e05c744ba292cf26c39d69101200554.webp differ diff --git a/images/MsgReadIndex.png b/images/MsgReadIndex.png new file mode 100644 index 00000000000..3519998d9d1 Binary files /dev/null and b/images/MsgReadIndex.png differ diff --git a/images/cde3f155f51bfd3d7fd78fe8e7ac9bf0.webp b/images/cde3f155f51bfd3d7fd78fe8e7ac9bf0.webp new file mode 100644 index 00000000000..6430db326ba Binary files /dev/null and b/images/cde3f155f51bfd3d7fd78fe8e7ac9bf0.webp differ diff --git a/images/compact.webp b/images/compact.webp new file mode 100644 index 00000000000..b18d105253a Binary files /dev/null and b/images/compact.webp differ diff --git a/images/delay.webp b/images/delay.webp new file mode 100644 index 00000000000..5b487d2878d Binary files /dev/null and b/images/delay.webp differ diff --git a/images/design.jpeg b/images/design.jpeg new file mode 100644 index 00000000000..d9d3b04c10a Binary files /dev/null and b/images/design.jpeg differ diff --git a/images/disk_metrics.webp b/images/disk_metrics.webp new file mode 100644 index 
00000000000..2b5c2710124 Binary files /dev/null and b/images/disk_metrics.webp differ diff --git a/images/edf53f37c0725c9757e4ecb89982a7ea.webp b/images/edf53f37c0725c9757e4ecb89982a7ea.webp new file mode 100644 index 00000000000..3ffd5c8fd36 Binary files /dev/null and b/images/edf53f37c0725c9757e4ecb89982a7ea.webp differ diff --git a/images/etcd-raft-wal.jpg b/images/etcd-raft-wal.jpg new file mode 100644 index 00000000000..7036d4a5e67 Binary files /dev/null and b/images/etcd-raft-wal.jpg differ diff --git a/images/etcdserver_metrics.webp b/images/etcdserver_metrics.webp new file mode 100644 index 00000000000..2818d1ac427 Binary files /dev/null and b/images/etcdserver_metrics.webp differ diff --git a/images/feab756999bf4941a7bae1541895a494.png b/images/feab756999bf4941a7bae1541895a494.png new file mode 100644 index 00000000000..97dc5b1b7fa Binary files /dev/null and b/images/feab756999bf4941a7bae1541895a494.png differ diff --git a/images/issue.webp b/images/issue.webp new file mode 100644 index 00000000000..f4de08d7170 Binary files /dev/null and b/images/issue.webp differ diff --git a/images/mem.webp b/images/mem.webp new file mode 100644 index 00000000000..a0db34fbc07 Binary files /dev/null and b/images/mem.webp differ diff --git a/images/messageType.png b/images/messageType.png new file mode 100644 index 00000000000..4ba95ca92b7 Binary files /dev/null and b/images/messageType.png differ diff --git a/images/mvcc_metrics.webp b/images/mvcc_metrics.webp new file mode 100644 index 00000000000..4fd1ef7c5a6 Binary files /dev/null and b/images/mvcc_metrics.webp differ diff --git a/images/net_metrics.webp b/images/net_metrics.webp new file mode 100644 index 00000000000..758b808e2da Binary files /dev/null and b/images/net_metrics.webp differ diff --git a/images/opter.webp b/images/opter.webp new file mode 100644 index 00000000000..a52b265b448 Binary files /dev/null and b/images/opter.webp differ diff --git a/images/optimize.png b/images/optimize.png new file mode 100644 
index 00000000000..dcde6269bb7 Binary files /dev/null and b/images/optimize.png differ diff --git a/images/p.webp b/images/p.webp new file mode 100644 index 00000000000..dddd50c0e87 Binary files /dev/null and b/images/p.webp differ diff --git a/images/process.jpeg b/images/process.jpeg new file mode 100644 index 00000000000..138b43aa695 Binary files /dev/null and b/images/process.jpeg differ diff --git a/images/process2.jpeg b/images/process2.jpeg new file mode 100644 index 00000000000..4aaa129307e Binary files /dev/null and b/images/process2.jpeg differ diff --git a/images/r-design.jpeg b/images/r-design.jpeg new file mode 100644 index 00000000000..fb9068605c6 Binary files /dev/null and b/images/r-design.jpeg differ diff --git a/images/raft-leader.png b/images/raft-leader.png new file mode 100644 index 00000000000..bf57445a310 Binary files /dev/null and b/images/raft-leader.png differ diff --git a/images/raft.monopic b/images/raft.monopic new file mode 100644 index 00000000000..21ed6dee7a2 Binary files /dev/null and b/images/raft.monopic differ diff --git a/images/raft.png b/images/raft.png new file mode 100644 index 00000000000..dc443d0da87 Binary files /dev/null and b/images/raft.png differ diff --git a/images/raft.webp b/images/raft.webp new file mode 100644 index 00000000000..ccf9bc2d0f1 Binary files /dev/null and b/images/raft.webp differ diff --git a/images/raftLog.png b/images/raftLog.png new file mode 100644 index 00000000000..5132072abef Binary files /dev/null and b/images/raftLog.png differ diff --git a/images/unstable_index.png b/images/unstable_index.png new file mode 100644 index 00000000000..136520f36c7 Binary files /dev/null and b/images/unstable_index.png differ diff --git a/images/v2watch.png b/images/v2watch.png new file mode 100644 index 00000000000..5cb478d09ff Binary files /dev/null and b/images/v2watch.png differ diff --git a/images/v3watch.png b/images/v3watch.png new file mode 100644 index 00000000000..99363b771b1 Binary files /dev/null and 
b/images/v3watch.png differ diff --git a/images/x.webp b/images/x.webp new file mode 100644 index 00000000000..cf72c473ca1 Binary files /dev/null and b/images/x.webp differ diff --git a/images/you.webp b/images/you.webp new file mode 100644 index 00000000000..354431c9d41 Binary files /dev/null and b/images/you.webp differ diff --git a/logos/etcd-glyph-color.png b/logos/etcd-glyph-color.png deleted file mode 100644 index 8bf92948b63..00000000000 Binary files a/logos/etcd-glyph-color.png and /dev/null differ diff --git a/logos/etcd-glyph-color.svg b/logos/etcd-glyph-color.svg deleted file mode 100644 index 5884e92733b..00000000000 --- a/logos/etcd-glyph-color.svg +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - diff --git a/logos/etcd-horizontal-bw.png b/logos/etcd-horizontal-bw.png deleted file mode 100644 index 044d8cf7315..00000000000 Binary files a/logos/etcd-horizontal-bw.png and /dev/null differ diff --git a/logos/etcd-horizontal-bw.svg b/logos/etcd-horizontal-bw.svg deleted file mode 100644 index 1403d90c918..00000000000 --- a/logos/etcd-horizontal-bw.svg +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/logos/etcd-horizontal-color.png b/logos/etcd-horizontal-color.png deleted file mode 100644 index 2638ff37b18..00000000000 Binary files a/logos/etcd-horizontal-color.png and /dev/null differ diff --git a/logos/etcd-horizontal-color.svg b/logos/etcd-horizontal-color.svg deleted file mode 100644 index c6b056575bd..00000000000 --- a/logos/etcd-horizontal-color.svg +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/logos/etcd-offset-bw.png b/logos/etcd-offset-bw.png deleted file mode 100644 index f5cc3944ba5..00000000000 Binary files a/logos/etcd-offset-bw.png and /dev/null differ diff --git a/logos/etcd-offset-bw.svg b/logos/etcd-offset-bw.svg deleted file mode 100644 index 2dbdb416568..00000000000 --- a/logos/etcd-offset-bw.svg +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - - - - - - - - - - - - diff 
--git a/logos/etcd-offset-color.png b/logos/etcd-offset-color.png deleted file mode 100644 index a7f904283d9..00000000000 Binary files a/logos/etcd-offset-color.png and /dev/null differ diff --git a/logos/etcd-offset-color.svg b/logos/etcd-offset-color.svg deleted file mode 100644 index 722cd3e09bc..00000000000 --- a/logos/etcd-offset-color.svg +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/logos/etcd-stacked-bw.png b/logos/etcd-stacked-bw.png deleted file mode 100644 index ffcb379c004..00000000000 Binary files a/logos/etcd-stacked-bw.png and /dev/null differ diff --git a/logos/etcd-stacked-bw.svg b/logos/etcd-stacked-bw.svg deleted file mode 100644 index 7338327e0e4..00000000000 --- a/logos/etcd-stacked-bw.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/logos/etcd-stacked-color.png b/logos/etcd-stacked-color.png deleted file mode 100644 index fa342b58600..00000000000 Binary files a/logos/etcd-stacked-color.png and /dev/null differ diff --git a/logos/etcd-stacked-color.svg b/logos/etcd-stacked-color.svg deleted file mode 100644 index c73fd37f19e..00000000000 --- a/logos/etcd-stacked-color.svg +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/main.py b/main.py new file mode 100644 index 00000000000..b078e0dd408 --- /dev/null +++ b/main.py @@ -0,0 +1,44 @@ +import os + + +def get_files(path): + for item in os.listdir(path): + dir_file = os.path.join(path, item) + if os.path.isfile(dir_file): + yield dir_file + else: + for xx in get_files(dir_file): + yield xx + + +main_set = set() +for file in get_files('.'): + if file.endswith('py'): + continue + try: + flag = False + # data = '' + # with open(file,'r', encoding='utf8')as f: + # data = f.read() + # if 'clientv3' in data and 'clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"' not in data: + # data = data.replace('import (', 'import (\n clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"') + # flag = True + # + # # 
print(file) + # if flag: + # with open(file, 'w', encoding='utf8', )as f: + # f.write(data) + with open(file, 'r', encoding='utf8') as f: + if 'package main' in f.read(): + main_set.add(os.path.dirname(file)) + + except Exception: + pass + +for item in main_set: + # print(item) + print('cd %s ; go build . ; cd -' % item.replace('\\', '/')) + +for item in main_set: + # print(item) + print('rm %s' % os.path.join(item, item.split('/')[-1])) diff --git a/offical/api/v3/a.py b/offical/api/v3/a.py new file mode 100644 index 00000000000..c5297d7c23f --- /dev/null +++ b/offical/api/v3/a.py @@ -0,0 +1,36 @@ +res = '' + +map_ = { + 'XXX_Merge(src proto.Message)', + 'XXX_Unmarshal(b []byte) error', + 'XXX_Marshal(b []byte,', + 'XXX_Merge(', + 'XXX_Size() int', + 'XXX_DiscardUnknown()', + # 'MarshalTo(dAtA []byte) (int, error)', + # 'MarshalToSizedBuffer(dAtA []byte) (int, er', +} +file = './etcdserverpb/etcdserver.pb.go' +with open(file, 'r', encoding='utf8') as f: + flag = False + for line in f.readlines(): + if ') Marshal() (' in line: + print(line.strip()+'return json.Marshal(m)}') + if ') Size() (' in line: + print(line.strip() + 'marshal,_:= json.Marshal(m) return len(marshal) }') + if ') Unmarshal(' in line: + print(line.strip() + 'return json.Unmarshal(dAtA,m) }') + # if not flag: + # for item in map_: + # if item in line: + # flag = True + # if flag: + # if line == '}\n': + # flag = False + # continue + # if line.startswith('var xxx_'): + # continue + # if not flag: + # res += line +# with open(file, 'w', encoding='utf8') as f: +# f.write(res) diff --git a/offical/api/v3/authpb/auth.pb.go b/offical/api/v3/authpb/auth.pb.go new file mode 100644 index 00000000000..d743e5bc60a --- /dev/null +++ b/offical/api/v3/authpb/auth.pb.go @@ -0,0 +1,210 @@ +package authpb + +import ( + "encoding/json" + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not 
otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Permission_Type int32 + +const ( + READ Permission_Type = 0 + WRITE Permission_Type = 1 + READWRITE Permission_Type = 2 +) + +var PermissionTypeName = map[int32]string{ + 0: "READ", + 1: "WRITE", + 2: "READWRITE", +} + +var PermissionTypeValue = map[string]int32{ + "READ": 0, + "WRITE": 1, + "READWRITE": 2, +} + +func (x Permission_Type) String() string { + return proto.EnumName(PermissionTypeName, int32(x)) +} + +func (Permission_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{2, 0} +} + +type UserAddOptions struct { + NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserAddOptions) Reset() { *m = UserAddOptions{} } +func (m *UserAddOptions) String() string { return proto.CompactTextString(m) } +func (*UserAddOptions) ProtoMessage() {} +func (*UserAddOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{0} +} + +type User struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"` + Options *UserAddOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func 
(m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{1} +} + +// Permission 是赋予角色的权限. +type Permission struct { + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{2} +} + +// Role is a single entry in the bucket authRoles +type Role struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{3} +} + +func init() { + proto.RegisterEnum("authpb.Permission_Type", PermissionTypeName, PermissionTypeValue) + proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions") + proto.RegisterType((*User)(nil), "authpb.User") + proto.RegisterType((*Permission)(nil), "authpb.Permission") + proto.RegisterType((*Role)(nil), "authpb.Role") +} + +func init() { proto.RegisterFile("auth.proto", 
fileDescriptor_8bbd6f3875b0e874) } + +var fileDescriptor_8bbd6f3875b0e874 = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, + 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, + 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, + 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, + 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, + 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, + 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, + 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, + 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, + 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, + 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, + 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, + 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, + 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, + 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, + 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, + 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, + 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, + 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb, + 
0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, + 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, + 0x00, 0x00, +} + +func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *User) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *Permission) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *UserAddOptions) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *User) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Permission) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Role) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *UserAddOptions) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *User) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *Permission) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *Role) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/authpb/auth.proto b/offical/api/v3/authpb/auth.proto similarity index 100% rename from api/authpb/auth.proto rename to offical/api/v3/authpb/auth.proto diff --git a/offical/api/v3/membershippb/membership.pb.go b/offical/api/v3/membershippb/membership.pb.go new file mode 100644 index 00000000000..86b25cdfada --- /dev/null +++ b/offical/api/v3/membershippb/membership.pb.go @@ -0,0 +1,204 @@ +package membershippb + +import ( + "encoding/json" + 
fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// RaftAttributes represents the raft related attributes of an etcd member. +type RaftAttributes struct { + // peerURLs is the list of peers in the raft cluster. + PeerUrls []string `protobuf:"bytes,1,rep,name=peer_urls,json=peerUrls,proto3" json:"peer_urls,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,2,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RaftAttributes) Reset() { *m = RaftAttributes{} } +func (m *RaftAttributes) String() string { return proto.CompactTextString(m) } +func (*RaftAttributes) ProtoMessage() {} +func (*RaftAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{0} +} + +// Attributes represents all the non-raft related attributes of an etcd member. 
+type Attributes struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ClientUrls []string `protobuf:"bytes,2,rep,name=client_urls,json=clientUrls,proto3" json:"client_urls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attributes) Reset() { *m = Attributes{} } +func (m *Attributes) String() string { return proto.CompactTextString(m) } +func (*Attributes) ProtoMessage() {} +func (*Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{1} +} + +type Member struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + RaftAttributes *RaftAttributes `protobuf:"bytes,2,opt,name=raft_attributes,json=raftAttributes,proto3" json:"raft_attributes,omitempty"` + MemberAttributes *Attributes `protobuf:"bytes,3,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{2} +} + +type ClusterVersionSetRequest struct { + Ver string `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterVersionSetRequest) Reset() { *m = ClusterVersionSetRequest{} } +func (m *ClusterVersionSetRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterVersionSetRequest) ProtoMessage() {} +func (*ClusterVersionSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{3} +} + +type ClusterMemberAttrSetRequest struct { + Member_ID uint64 
`protobuf:"varint,1,opt,name=member_ID,json=memberID,proto3" json:"member_ID,omitempty"` + MemberAttributes *Attributes `protobuf:"bytes,2,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterMemberAttrSetRequest) Reset() { *m = ClusterMemberAttrSetRequest{} } +func (m *ClusterMemberAttrSetRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterMemberAttrSetRequest) ProtoMessage() {} +func (*ClusterMemberAttrSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{4} +} + +type DowngradeInfoSetRequest struct { + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Ver string `protobuf:"bytes,2,opt,name=ver,proto3" json:"ver,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DowngradeInfoSetRequest) Reset() { *m = DowngradeInfoSetRequest{} } +func (m *DowngradeInfoSetRequest) String() string { return proto.CompactTextString(m) } +func (*DowngradeInfoSetRequest) ProtoMessage() {} +func (*DowngradeInfoSetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_949fe0d019050ef5, []int{5} +} + +func init() { + proto.RegisterType((*RaftAttributes)(nil), "membershippb.RaftAttributes") + proto.RegisterType((*Attributes)(nil), "membershippb.Attributes") + proto.RegisterType((*Member)(nil), "membershippb.Member") + proto.RegisterType((*ClusterVersionSetRequest)(nil), "membershippb.ClusterVersionSetRequest") + proto.RegisterType((*ClusterMemberAttrSetRequest)(nil), "membershippb.ClusterMemberAttrSetRequest") + proto.RegisterType((*DowngradeInfoSetRequest)(nil), "membershippb.DowngradeInfoSetRequest") +} + +func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) } + +var 
fileDescriptor_949fe0d019050ef5 = []byte{ + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x4e, 0xf2, 0x40, + 0x14, 0x85, 0x99, 0x42, 0xf8, 0xdb, 0xcb, 0x1f, 0xc4, 0x09, 0x89, 0x8d, 0x68, 0x25, 0x5d, 0xb1, + 0x30, 0x98, 0xe8, 0x13, 0xa0, 0xb0, 0x20, 0x81, 0xcd, 0x18, 0xdd, 0x92, 0x56, 0x2e, 0xd8, 0xa4, + 0x74, 0xea, 0xcc, 0x54, 0xd7, 0xbe, 0x85, 0x4f, 0xe0, 0xb3, 0xb0, 0xf4, 0x11, 0x14, 0x5f, 0xc4, + 0x74, 0x5a, 0x4a, 0x49, 0xdc, 0xb8, 0xbb, 0x3d, 0xbd, 0xf7, 0x9c, 0xf3, 0x35, 0x85, 0xd6, 0x0a, + 0x57, 0x3e, 0x0a, 0xf9, 0x18, 0xc4, 0xfd, 0x58, 0x70, 0xc5, 0xe9, 0xff, 0x9d, 0x12, 0xfb, 0xc7, + 0xed, 0x25, 0x5f, 0x72, 0xfd, 0xe2, 0x22, 0x9d, 0xb2, 0x1d, 0x77, 0x02, 0x4d, 0xe6, 0x2d, 0xd4, + 0x40, 0x29, 0x11, 0xf8, 0x89, 0x42, 0x49, 0x3b, 0x60, 0xc5, 0x88, 0x62, 0x96, 0x88, 0x50, 0xda, + 0xa4, 0x5b, 0xed, 0x59, 0xcc, 0x4c, 0x85, 0x3b, 0x11, 0x4a, 0x7a, 0x0a, 0x10, 0xc8, 0x59, 0x88, + 0x9e, 0x88, 0x50, 0xd8, 0x46, 0x97, 0xf4, 0x4c, 0x66, 0x05, 0x72, 0x92, 0x09, 0xee, 0x00, 0xa0, + 0xe4, 0x44, 0xa1, 0x16, 0x79, 0x2b, 0xb4, 0x49, 0x97, 0xf4, 0x2c, 0xa6, 0x67, 0x7a, 0x06, 0x8d, + 0x87, 0x30, 0xc0, 0x48, 0x65, 0xfe, 0x86, 0xf6, 0x87, 0x4c, 0x4a, 0x13, 0xdc, 0x77, 0x02, 0xf5, + 0xa9, 0xee, 0x4d, 0x9b, 0x60, 0x8c, 0x87, 0xfa, 0xba, 0xc6, 0x8c, 0xf1, 0x90, 0x8e, 0xe0, 0x40, + 0x78, 0x0b, 0x35, 0xf3, 0x8a, 0x08, 0xdd, 0xa0, 0x71, 0x79, 0xd2, 0x2f, 0x93, 0xf6, 0xf7, 0x81, + 0x58, 0x53, 0xec, 0x03, 0x8e, 0xe0, 0x30, 0x5b, 0x2f, 0x1b, 0x55, 0xb5, 0x91, 0xbd, 0x6f, 0x54, + 0x32, 0xc9, 0xbf, 0xee, 0x4e, 0x71, 0xcf, 0xc1, 0xbe, 0x09, 0x13, 0xa9, 0x50, 0xdc, 0xa3, 0x90, + 0x01, 0x8f, 0x6e, 0x51, 0x31, 0x7c, 0x4a, 0x50, 0x2a, 0xda, 0x82, 0xea, 0x33, 0x8a, 0x1c, 0x3c, + 0x1d, 0xdd, 0x57, 0x02, 0x9d, 0x7c, 0x7d, 0x5a, 0x38, 0x95, 0x2e, 0x3a, 0x60, 0xe5, 0xa5, 0x0a, + 0x64, 0x33, 0x13, 0x34, 0xf8, 0x2f, 0x8d, 0x8d, 0x3f, 0x37, 0x1e, 0xc1, 0xd1, 0x90, 0xbf, 0x44, + 0x4b, 0xe1, 0xcd, 0x71, 0x1c, 0x2d, 0x78, 
0x29, 0xde, 0x86, 0x7f, 0x18, 0x79, 0x7e, 0x88, 0x73, + 0x1d, 0x6e, 0xb2, 0xed, 0xe3, 0x16, 0xc5, 0x28, 0x50, 0xae, 0xdb, 0xeb, 0x2f, 0xa7, 0xb2, 0xde, + 0x38, 0xe4, 0x63, 0xe3, 0x90, 0xcf, 0x8d, 0x43, 0xde, 0xbe, 0x9d, 0x8a, 0x5f, 0xd7, 0xff, 0xd3, + 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x7d, 0x0b, 0x87, 0x02, 0x00, 0x00, +} + +var ( + ErrInvalidLengthMembership = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMembership = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMembership = fmt.Errorf("proto: unexpected end of group") +) + +func (m *RaftAttributes) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *Attributes) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *Member) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *ClusterVersionSetRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *ClusterMemberAttrSetRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *DowngradeInfoSetRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *RaftAttributes) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Attributes) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Member) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *ClusterVersionSetRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *ClusterMemberAttrSetRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *DowngradeInfoSetRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *RaftAttributes) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *Attributes) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *Member) Unmarshal(dAtA []byte) 
error { return json.Unmarshal(dAtA, m) } +func (m *ClusterVersionSetRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *ClusterMemberAttrSetRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *DowngradeInfoSetRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } diff --git a/api/membershippb/membership.proto b/offical/api/v3/membershippb/membership.proto similarity index 75% rename from api/membershippb/membership.proto rename to offical/api/v3/membershippb/membership.proto index cb7254f1cf6..e63e9ecc994 100644 --- a/api/membershippb/membership.proto +++ b/offical/api/v3/membershippb/membership.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package membershippb; import "gogoproto/gogo.proto"; -import "etcd/api/versionpb/version.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; @@ -11,8 +10,6 @@ option (gogoproto.goproto_getters_all) = false; // RaftAttributes represents the raft related attributes of an etcd member. message RaftAttributes { - option (versionpb.etcd_version_msg) = "3.5"; - // peerURLs is the list of peers in the raft cluster. repeated string peer_urls = 1; // isLearner indicates if the member is raft learner. @@ -21,36 +18,26 @@ message RaftAttributes { // Attributes represents all the non-raft related attributes of an etcd member. 
message Attributes { - option (versionpb.etcd_version_msg) = "3.5"; - string name = 1; repeated string client_urls = 2; } message Member { - option (versionpb.etcd_version_msg) = "3.5"; - uint64 ID = 1; RaftAttributes raft_attributes = 2; Attributes member_attributes = 3; } message ClusterVersionSetRequest { - option (versionpb.etcd_version_msg) = "3.5"; - string ver = 1; } message ClusterMemberAttrSetRequest { - option (versionpb.etcd_version_msg) = "3.5"; - uint64 member_ID = 1; Attributes member_attributes = 2; } message DowngradeInfoSetRequest { - option (versionpb.etcd_version_msg) = "3.5"; - bool enabled = 1; string ver = 2; } \ No newline at end of file diff --git a/offical/api/v3/mvccpb/kv.pb.go b/offical/api/v3/mvccpb/kv.pb.go new file mode 100644 index 00000000000..aa23ad6e54b --- /dev/null +++ b/offical/api/v3/mvccpb/kv.pb.go @@ -0,0 +1,153 @@ +package mvccpb + +import ( + "encoding/json" + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Event_EventType int32 + +const ( + PUT Event_EventType = 0 + DELETE Event_EventType = 1 +) + +var Event_EventType_name = map[int32]string{ + 0: "PUT", + 1: "DELETE", +} + +var Event_EventType_value = map[string]int32{ + "PUT": 0, + "DELETE": 1, +} + +func (x Event_EventType) String() string { + return proto.EnumName(Event_EventType_name, int32(x)) +} + +func (Event_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_2216fe83c9c12408, []int{1, 0} +} + +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` + ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` + // Version是key的版本.删除键会将该键的版本重置为0,对键的任何修改都会增加它的版本. + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + Value string `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2216fe83c9c12408, []int{0} +} + +type Event struct { + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. 
+ // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` + // prev_kv holds the key-value pair before the event happens. + PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_2216fe83c9c12408, []int{1} +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) + proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") + proto.RegisterType((*Event)(nil), "mvccpb.Event") +} + +func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } + +var fileDescriptor_2216fe83c9c12408 = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 
0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} + +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *KeyValue) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Event) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *KeyValue) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *Event) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group") +) diff --git a/api/mvccpb/kv.proto b/offical/api/v3/mvccpb/kv.proto similarity index 100% rename from api/mvccpb/kv.proto rename to offical/api/v3/mvccpb/kv.proto diff --git a/api/v3rpc/rpctypes/doc.go b/offical/api/v3/v3rpc/rpctypes/doc.go similarity index 100% rename from api/v3rpc/rpctypes/doc.go 
rename to offical/api/v3/v3rpc/rpctypes/doc.go diff --git a/offical/api/v3/v3rpc/rpctypes/error.go b/offical/api/v3/v3rpc/rpctypes/error.go new file mode 100644 index 00000000000..7b56b1a29d1 --- /dev/null +++ b/offical/api/v3/v3rpc/rpctypes/error.go @@ -0,0 +1,220 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// server-side error +var ( + ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() + ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() + ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() + ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() + ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() + ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() + ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: 所需的修订版 已被压缩").Err() + ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: 所需的修订版是一个未来版本").Err() + ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() + + ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: 请求的租约不存在").Err() + 
ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() + ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() + + ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch 取消了").Err() + + ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() + ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() + ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() + ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() + ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() + ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() + ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() + ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() + + ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: 请求体太大").Err() + ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: 请求次数太多").Err() + + ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root用户不存在").Err() + ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root用户没有root角色").Err() + ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: 用户已存在").Err() + ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: 用户名为空").Err() + ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: 用户没找到").Err() + ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: 角色已存在").Err() + ErrGRPCRoleNotFound 
= status.New(codes.FailedPrecondition, "etcdserver: 角色没找到").Err() + ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err() + ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() + ErrGRPCPermissionNotGiven = status.New(codes.InvalidArgument, "etcdserver: permission not given").Err() + ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() + ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() + ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() + ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() + ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() + ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() + ErrGRPCAuthOldRevision = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err() + + ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: 没有leader").Err() + ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: 不是leader").Err() + ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader改变了").Err() + ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: 没有容量了").Err() + ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: 服务已停止").Err() + ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: 请求超时").Err() + ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: 请求超时,可能是之前的leader导致的").Err() + ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: 请求超时,可能是链接丢失").Err() + ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: 不健康的集群").Err() + ErrGRPCCorrupt = status.New(codes.DataLoss, 
"etcdserver: 集群损坏").Err() + ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: learner不支持rpc请求").Err() + ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: leader转移失败").Err() + + ErrGRPCClusterVersionUnavailable = status.New(codes.Unavailable, "etcdserver: cluster version not found during downgrade").Err() + ErrGRPCWrongDowngradeVersionFormat = status.New(codes.InvalidArgument, "etcdserver: wrong downgrade target version format").Err() + ErrGRPCInvalidDowngradeTargetVersion = status.New(codes.InvalidArgument, "etcdserver: invalid downgrade target version").Err() + ErrGRPCDowngradeInProcess = status.New(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress").Err() + ErrGRPCNoInflightDowngrade = status.New(codes.FailedPrecondition, "etcdserver: no inflight downgrade job").Err() + + ErrGRPCCanceled = status.New(codes.Canceled, "etcdserver: 取消请求").Err() + ErrGRPCDeadlineExceeded = status.New(codes.DeadlineExceeded, "etcdserver: 上下文超时").Err() + + errStringToError = map[string]error{ + ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + + ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, + ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, + ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, + ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, + ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, + + ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, + ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge, + + ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, + ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, + ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, + ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, + ErrorDesc(ErrGRPCMemberNotFound): 
ErrGRPCMemberNotFound, + ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, + ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, + ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, + + ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, + + ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, + ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, + ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, + ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, + ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, + ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, + ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, + ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty, + ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, + ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, + ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, + ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, + ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, + ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, + ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision, + + ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, + ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged, + ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, + ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, + ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, + ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, + ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, + ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, + ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, + ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, + + 
ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable, + ErrorDesc(ErrGRPCWrongDowngradeVersionFormat): ErrGRPCWrongDowngradeVersionFormat, + ErrorDesc(ErrGRPCInvalidDowngradeTargetVersion): ErrGRPCInvalidDowngradeTargetVersion, + ErrorDesc(ErrGRPCDowngradeInProcess): ErrGRPCDowngradeInProcess, + ErrorDesc(ErrGRPCNoInflightDowngrade): ErrGRPCNoInflightDowngrade, + } +) + +// client-side error +var ( + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + + ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) + + ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) + + ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) + + ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) + ErrUserEmpty = Error(ErrGRPCUserEmpty) + ErrPermissionDenied = Error(ErrGRPCPermissionDenied) + ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) + ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision) + + ErrNoLeader = Error(ErrGRPCNoLeader) +) + +// EtcdError defines gRPC server errors. +// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) +type EtcdError struct { + code codes.Code + desc string +} + +// Code returns grpc/codes.Code. +// TODO: define clientv3/codes.Code. 
+func (e EtcdError) Code() codes.Code { + return e.code +} + +func (e EtcdError) Error() string { + return e.desc +} + +func Error(err error) error { + if err == nil { + return nil + } + verr, ok := errStringToError[ErrorDesc(err)] + if !ok { // not gRPC error + return err + } + ev, ok := status.FromError(verr) + var desc string + if ok { + desc = ev.Message() + } else { + desc = verr.Error() + } + return EtcdError{code: ev.Code(), desc: desc} +} + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/api/v3rpc/rpctypes/md.go b/offical/api/v3/v3rpc/rpctypes/md.go similarity index 100% rename from api/v3rpc/rpctypes/md.go rename to offical/api/v3/v3rpc/rpctypes/md.go diff --git a/api/v3rpc/rpctypes/metadatafields.go b/offical/api/v3/v3rpc/rpctypes/metadatafields.go similarity index 100% rename from api/v3rpc/rpctypes/metadatafields.go rename to offical/api/v3/v3rpc/rpctypes/metadatafields.go diff --git a/offical/api/v3/version/version.go b/offical/api/v3/version/version.go new file mode 100644 index 00000000000..38c529864b9 --- /dev/null +++ b/offical/api/v3/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. 
+package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.5.2" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/offical/etcdserverpb/alarm.json b/offical/etcdserverpb/alarm.json new file mode 100644 index 00000000000..c168b4b673b --- /dev/null +++ b/offical/etcdserverpb/alarm.json @@ -0,0 +1,11 @@ +{ + "AlarmType_NONE": { + }, + "AlarmType_NOSPACE": { + "1": { + "MemberID": "1", + "AlarmType": "AlarmType_NOSPACE" + } + }, + "AlarmType_CORRUPT": {} +} diff --git a/offical/etcdserverpb/etcdserver.pb.go b/offical/etcdserverpb/etcdserver.pb.go new file mode 100644 index 00000000000..965e67401c4 --- /dev/null +++ b/offical/etcdserverpb/etcdserver.pb.go @@ -0,0 +1,124 @@ +package etcdserverpb + +import ( + "encoding/json" + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Request struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_09ffbeb3bebbce7e, []int{0} +} + +type Metadata struct { + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) 
String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_09ffbeb3bebbce7e, []int{1} +} + +func init() { + proto.RegisterType((*Request)(nil), "etcdserverpb.Request") + proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") +} + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) } + +var fileDescriptor_09ffbeb3bebbce7e = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 
0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} + +var ( + ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEtcdserver = fmt.Errorf("proto: unexpected end of group") +) + +func (m *Request) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *Metadata) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *Request) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Metadata) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *Request) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *Metadata) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } diff --git a/api/etcdserverpb/etcdserver.proto b/offical/etcdserverpb/etcdserver.proto similarity index 100% rename from api/etcdserverpb/etcdserver.proto rename to offical/etcdserverpb/etcdserver.proto diff --git a/api/etcdserverpb/gw/rpc.pb.gw.go b/offical/etcdserverpb/gw/rpc.pb.gw.go similarity index 93% rename from api/etcdserverpb/gw/rpc.pb.gw.go rename to 
offical/etcdserverpb/gw/rpc.pb.gw.go index 042a4bee2bd..71edba83da3 100644 --- a/api/etcdserverpb/gw/rpc.pb.gw.go +++ b/offical/etcdserverpb/gw/rpc.pb.gw.go @@ -1,6 +1,3 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: api/etcdserverpb/rpc.proto - /* Package etcdserverpb is a reverse proxy. @@ -10,10 +7,11 @@ package gw import ( "context" - "go.etcd.io/etcd/api/v3/etcdserverpb" "io" "net/http" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/golang/protobuf/descriptor" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" @@ -21,18 +19,19 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) // Suppress "imported and not used" errors var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join + +var ( + _ io.Reader + _ status.Status + _ = runtime.String + _ = utilities.NewDoubleArray + _ = descriptor.ForMessage +) func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq etcdserverpb.RangeRequest @@ -48,7 +47,6 @@ func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -65,7 +63,6 @@ func local_request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := server.Range(ctx, &protoReq) return msg, metadata, err - } func 
request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -82,7 +79,6 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client e msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -99,7 +95,6 @@ func local_request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, se msg, err := server.Put(ctx, &protoReq) return msg, metadata, err - } func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -116,7 +111,6 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -133,7 +127,6 @@ func local_request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marsh msg, err := server.DeleteRange(ctx, &protoReq) return msg, metadata, err - } func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -150,7 +143,6 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client e msg, err := client.Txn(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -167,7 +159,6 @@ func local_request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, se msg, err := server.Txn(ctx, &protoReq) return msg, metadata, err - } func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -184,7 +175,6 @@ func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, clie msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -201,7 +191,6 @@ func local_request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler msg, err := server.Compact(ctx, &protoReq) return msg, metadata, err - } func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) { @@ -270,7 +259,6 @@ func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ 
-287,7 +275,6 @@ func local_request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Mar msg, err := server.LeaseGrant(ctx, &protoReq) return msg, metadata, err - } func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -304,7 +291,6 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -321,7 +307,6 @@ func local_request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Ma msg, err := server.LeaseRevoke(ctx, &protoReq) return msg, metadata, err - } func request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -338,7 +323,6 @@ func request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshale msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -355,7 +339,6 @@ func local_request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Ma msg, err := server.LeaseRevoke(ctx, &protoReq) return msg, metadata, err - } func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client 
etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { @@ -424,7 +407,6 @@ func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Mars msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -441,7 +423,6 @@ func local_request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtim msg, err := server.LeaseTimeToLive(ctx, &protoReq) return msg, metadata, err - } func request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -458,7 +439,6 @@ func request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Mars msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -475,7 +455,6 @@ func local_request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtim msg, err := server.LeaseTimeToLive(ctx, &protoReq) return msg, metadata, err - } func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -492,7 +471,6 @@ func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshale msg, 
err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -509,7 +487,6 @@ func local_request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Ma msg, err := server.LeaseLeases(ctx, &protoReq) return msg, metadata, err - } func request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -526,7 +503,6 @@ func request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshale msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -543,7 +519,6 @@ func local_request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Ma msg, err := server.LeaseLeases(ctx, &protoReq) return msg, metadata, err - } func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -560,7 +535,6 @@ func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, 
pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -577,7 +551,6 @@ func local_request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Ma msg, err := server.MemberAdd(ctx, &protoReq) return msg, metadata, err - } func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -594,7 +567,6 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -611,7 +583,6 @@ func local_request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime msg, err := server.MemberRemove(ctx, &protoReq) return msg, metadata, err - } func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -628,7 +599,6 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -645,7 +615,6 @@ func local_request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime msg, err := server.MemberUpdate(ctx, &protoReq) return msg, metadata, err - } func 
request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -662,7 +631,6 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -679,7 +647,6 @@ func local_request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.M msg, err := server.MemberList(ctx, &protoReq) return msg, metadata, err - } func request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -696,7 +663,6 @@ func request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Mars msg, err := client.MemberPromote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -713,7 +679,6 @@ func local_request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtim msg, err := server.MemberPromote(ctx, &protoReq) return msg, metadata, err - } func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -730,7 +695,6 @@ func 
request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshale msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -747,7 +711,6 @@ func local_request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Ma msg, err := server.Alarm(ctx, &protoReq) return msg, metadata, err - } func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -764,7 +727,6 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -781,7 +743,6 @@ func local_request_Maintenance_Status_0(ctx context.Context, marshaler runtime.M msg, err := server.Status(ctx, &protoReq) return msg, metadata, err - } func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -798,7 +759,6 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Maintenance_Defragment_0(ctx 
context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -815,7 +775,6 @@ func local_request_Maintenance_Defragment_0(ctx context.Context, marshaler runti msg, err := server.Defragment(ctx, &protoReq) return msg, metadata, err - } func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -832,7 +791,6 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -849,7 +807,6 @@ func local_request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Mar msg, err := server.Hash(ctx, &protoReq) return msg, metadata, err - } func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -866,7 +823,6 @@ func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshal msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -883,7 +839,6 @@ func local_request_Maintenance_HashKV_0(ctx context.Context, marshaler 
runtime.M msg, err := server.HashKV(ctx, &protoReq) return msg, metadata, err - } func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) { @@ -908,7 +863,6 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh } metadata.HeaderMD = header return stream, metadata, nil - } func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -925,7 +879,6 @@ func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Mar msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -942,7 +895,6 @@ func local_request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runti msg, err := server.MoveLeader(ctx, &protoReq) return msg, metadata, err - } func request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -959,7 +911,6 @@ func request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Mars msg, err := client.Downgrade(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, 
pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -976,7 +927,6 @@ func local_request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtim msg, err := server.Downgrade(ctx, &protoReq) return msg, metadata, err - } func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -993,7 +943,6 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1010,7 +959,6 @@ func local_request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Mars msg, err := server.AuthEnable(ctx, &protoReq) return msg, metadata, err - } func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1027,7 +975,6 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1044,7 +991,6 @@ func local_request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Mar msg, err := server.AuthDisable(ctx, &protoReq) return msg, metadata, err - } func request_Auth_AuthStatus_0(ctx 
context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1061,7 +1007,6 @@ func request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := client.AuthStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1078,7 +1023,6 @@ func local_request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Mars msg, err := server.AuthStatus(ctx, &protoReq) return msg, metadata, err - } func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1095,7 +1039,6 @@ func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1112,7 +1055,6 @@ func local_request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Ma msg, err := server.Authenticate(ctx, &protoReq) return msg, metadata, err - } func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1129,7 +1071,6 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl msg, 
err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1146,7 +1087,6 @@ func local_request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshal msg, err := server.UserAdd(ctx, &protoReq) return msg, metadata, err - } func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1163,7 +1103,6 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1180,7 +1119,6 @@ func local_request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshal msg, err := server.UserGet(ctx, &protoReq) return msg, metadata, err - } func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1197,7 +1135,6 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { @@ -1214,7 +1151,6 @@ func local_request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marsha msg, err := server.UserList(ctx, &protoReq) return msg, metadata, err - } func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1231,7 +1167,6 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1248,7 +1183,6 @@ func local_request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Mars msg, err := server.UserDelete(ctx, &protoReq) return msg, metadata, err - } func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1265,7 +1199,6 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1282,7 +1215,6 @@ func local_request_Auth_UserChangePassword_0(ctx context.Context, marshaler runt msg, err := server.UserChangePassword(ctx, &protoReq) return msg, metadata, err - } func 
request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1299,7 +1231,6 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1316,7 +1247,6 @@ func local_request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.M msg, err := server.UserGrantRole(ctx, &protoReq) return msg, metadata, err - } func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1333,7 +1263,6 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1350,7 +1279,6 @@ func local_request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime. 
msg, err := server.UserRevokeRole(ctx, &protoReq) return msg, metadata, err - } func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1367,7 +1295,6 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1384,7 +1311,6 @@ func local_request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshal msg, err := server.RoleAdd(ctx, &protoReq) return msg, metadata, err - } func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1401,7 +1327,6 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1418,7 +1343,6 @@ func local_request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshal msg, err := server.RoleGet(ctx, &protoReq) return msg, metadata, err - } func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1435,7 +1359,6 @@ func 
request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1452,7 +1375,6 @@ func local_request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marsha msg, err := server.RoleList(ctx, &protoReq) return msg, metadata, err - } func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1469,7 +1391,6 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1486,7 +1407,6 @@ func local_request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Mars msg, err := server.RoleDelete(ctx, &protoReq) return msg, metadata, err - } func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1503,7 +1423,6 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_RoleGrantPermission_0(ctx 
context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1520,7 +1439,6 @@ func local_request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler run msg, err := server.RoleGrantPermission(ctx, &protoReq) return msg, metadata, err - } func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1537,7 +1455,6 @@ func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime. msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -1554,20 +1471,15 @@ func local_request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler ru msg, err := server.RoleRevokePermission(ctx, &protoReq) return msg, metadata, err - } // etcdserverpb.RegisterKVHandlerServer registers the http handlers for service KV to "mux". // UnaryRPC :call etcdserverpb.KVServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterKVHandlerFromEndpoint instead. 
func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.KVServer) error { - mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1575,7 +1487,6 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server return } resp, md, err := local_request_KV_Range_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1583,14 +1494,11 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server } forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1598,7 +1506,6 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server return } resp, md, err := local_request_KV_Put_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1606,14 +1513,11 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server } forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1621,7 +1525,6 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server return } resp, md, err := local_request_KV_DeleteRange_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1629,14 +1532,11 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server } forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1644,7 +1544,6 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server return } resp, md, err := local_request_KV_Txn_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1652,14 +1551,11 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server } forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1667,7 +1563,6 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server return } resp, md, err := local_request_KV_Compact_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1675,7 +1570,6 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server } forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -1684,9 +1578,7 @@ func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server // etcdserverpb.RegisterWatchHandlerServer registers the http handlers for service Watch to "mux". // UnaryRPC :call etcdserverpb.WatchServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterWatchHandlerFromEndpoint instead. 
func RegisterWatchHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.WatchServer) error { - mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -1700,14 +1592,10 @@ func RegisterWatchHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv // etcdserverpb.RegisterLeaseHandlerServer registers the http handlers for service Lease to "mux". // UnaryRPC :call etcdserverpb.LeaseServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLeaseHandlerFromEndpoint instead. func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.LeaseServer) error { - mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1715,7 +1603,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } resp, md, err := local_request_Lease_LeaseGrant_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1723,14 +1610,11 @@ func 
RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv } forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1738,7 +1622,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } resp, md, err := local_request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1746,14 +1629,11 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv } forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1761,7 +1641,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } resp, md, err := local_request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1769,7 +1648,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv } forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -1782,8 +1660,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1791,7 +1667,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } resp, md, err := local_request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1799,14 +1674,11 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv } forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1814,7 +1686,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } resp, md, err := local_request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1822,14 +1693,11 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv } forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1837,7 +1705,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } resp, md, err := local_request_Lease_LeaseLeases_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1845,14 +1712,11 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv } forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1860,7 +1724,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv return } resp, md, err := local_request_Lease_LeaseLeases_1(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1868,7 +1731,6 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv } forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -1877,14 +1739,10 @@ func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv // etcdserverpb.RegisterClusterHandlerServer registers the http handlers for service Cluster to "mux". // UnaryRPC :call etcdserverpb.ClusterServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterClusterHandlerFromEndpoint instead. 
func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.ClusterServer) error { - mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1892,7 +1750,6 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se return } resp, md, err := local_request_Cluster_MemberAdd_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1900,14 +1757,11 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se } forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1915,7 +1769,6 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se return } resp, md, err := local_request_Cluster_MemberRemove_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1923,14 +1776,11 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se } forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1938,7 +1788,6 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se return } resp, md, err := local_request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1946,14 +1795,11 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se } forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1961,7 +1807,6 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se return } resp, md, err := local_request_Cluster_MemberList_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1969,14 +1814,11 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se } forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -1984,7 +1826,6 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se return } resp, md, err := local_request_Cluster_MemberPromote_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -1992,7 +1833,6 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se } forward_Cluster_MemberPromote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -2001,14 +1841,10 @@ func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, se // etcdserverpb.RegisterMaintenanceHandlerServer registers the http handlers for service Maintenance to "mux". // UnaryRPC :call etcdserverpb.MaintenanceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMaintenanceHandlerFromEndpoint instead. 
func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.MaintenanceServer) error { - mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2016,7 +1852,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux return } resp, md, err := local_request_Maintenance_Alarm_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2024,14 +1859,11 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2039,7 +1871,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux return } resp, md, err := local_request_Maintenance_Status_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2047,14 +1878,11 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2062,7 +1890,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux return } resp, md, err := local_request_Maintenance_Defragment_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2070,14 +1897,11 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2085,7 +1909,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux return } resp, md, err := local_request_Maintenance_Hash_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2093,14 +1916,11 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2108,7 +1928,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux return } resp, md, err := local_request_Maintenance_HashKV_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2116,7 +1935,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2129,8 +1947,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2138,7 +1954,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux return } resp, md, err := local_request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2146,14 +1961,11 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2161,7 +1973,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux return } resp, md, err := local_request_Maintenance_Downgrade_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2169,7 +1980,6 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Downgrade_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -2178,14 +1988,10 @@ func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux // etcdserverpb.RegisterAuthHandlerServer registers the http handlers for service Auth to "mux". // UnaryRPC :call etcdserverpb.AuthServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthHandlerFromEndpoint instead. 
func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.AuthServer) error { - mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2193,7 +1999,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_AuthEnable_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2201,14 +2006,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2216,7 +2018,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_AuthDisable_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2224,14 +2025,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2239,7 +2037,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_AuthStatus_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2247,14 +2044,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_AuthStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2262,7 +2056,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_Authenticate_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2270,14 +2063,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2285,7 +2075,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_UserAdd_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2293,14 +2082,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2308,7 +2094,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_UserGet_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2316,14 +2101,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2331,7 +2113,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_UserList_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2339,14 +2120,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2354,7 +2132,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_UserDelete_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2362,14 +2139,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2377,7 +2151,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_UserChangePassword_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2385,14 +2158,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2400,7 +2170,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_UserGrantRole_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2408,14 +2177,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2423,7 +2189,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2431,14 +2196,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2446,7 +2208,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_RoleAdd_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2454,14 +2215,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2469,7 +2227,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_RoleGet_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2477,14 +2234,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2492,7 +2246,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_RoleList_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2500,14 +2253,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2515,7 +2265,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_RoleDelete_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2523,14 +2272,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2538,7 +2284,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2546,14 +2291,11 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) if err != nil { @@ -2561,7 +2303,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve return } resp, md, err := local_request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) @@ -2569,7 +2310,6 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve } forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -2612,7 +2352,6 @@ func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.Cl // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "KVClient" to call the correct interceptors. func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error { - mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -2630,7 +2369,6 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client } forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2650,7 +2388,6 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client } forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2670,7 +2407,6 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client } forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2690,7 +2426,6 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client } forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2710,7 +2445,6 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client } forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -2777,7 +2511,6 @@ func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "WatchClient" to call the correct interceptors. 
func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error { - mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -2795,19 +2528,14 @@ func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - }) return nil } -var ( - pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, "", runtime.AssumeColonVerbOpt(true))) -) +var pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, "", runtime.AssumeColonVerbOpt(true))) -var ( - forward_Watch_Watch_0 = runtime.ForwardResponseStream -) +var forward_Watch_Watch_0 = runtime.ForwardResponseStream // RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. @@ -2846,7 +2574,6 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "LeaseClient" to call the correct interceptors. func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error { - mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -2864,7 +2591,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2884,7 +2610,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2904,7 +2629,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2924,7 +2648,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2944,7 +2667,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2964,7 +2686,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -2984,7 +2705,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3004,7 +2724,6 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -3083,7 +2802,6 @@ func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *gr // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "ClusterClient" to call the correct interceptors. func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error { - mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -3101,7 +2819,6 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl } forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3121,7 +2838,6 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl } forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3141,7 +2857,6 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl } forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3161,7 +2876,6 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl } forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3181,7 +2895,6 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl } forward_Cluster_MemberPromote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -3248,7 +2961,6 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "MaintenanceClient" to call the correct interceptors. func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error { - mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -3266,7 +2978,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3286,7 +2997,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3306,7 +3016,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3326,7 +3035,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3346,7 +3054,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3366,7 +3073,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3386,7 +3092,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3406,7 +3111,6 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_Maintenance_Downgrade_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -3485,7 +3189,6 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc. // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "AuthClient" to call the correct interceptors. func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error { - mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -3503,7 +3206,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3523,7 +3225,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3543,7 +3244,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_AuthStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3563,7 +3263,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3583,7 +3282,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3603,7 +3301,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3623,7 +3320,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3643,7 +3339,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3663,7 +3358,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3683,7 +3377,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3703,7 +3396,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3723,7 +3415,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3743,7 +3434,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3763,7 +3453,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3783,7 +3472,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3803,7 +3491,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { @@ -3823,7 +3510,6 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) return nil diff --git a/offical/etcdserverpb/lease.go b/offical/etcdserverpb/lease.go new file mode 100644 index 00000000000..b0e9969335a --- /dev/null +++ b/offical/etcdserverpb/lease.go @@ -0,0 +1,630 @@ +package etcdserverpb + +import ( + "context" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type LeaseGrantRequest struct { + // TTL is the advisory time-to-live in seconds. Expired lease will return -1. + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{25} +} + +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseGrantResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // ID is the lease ID for the granted lease. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the server chosen lease time-to-live in seconds. 
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{26} +} + +func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type LeaseRevokeRequest struct { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{27} +} + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{28} +} + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseCheckpoint struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` // 租约ID + RemainingTtl int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"` // 剩余的存活时间 +} + +func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} } +func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpoint) ProtoMessage() {} +func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{29} +} + +func (m *LeaseCheckpoint) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseCheckpoint) GetRemaining_TTL() int64 { + if m != nil { + return m.RemainingTtl + } + return 0 +} + +type LeaseCheckpointRequest struct { + Checkpoints []*LeaseCheckpoint 
`protobuf:"bytes,1,rep,name=checkpoints,proto3" json:"checkpoints,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} } +func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointRequest) ProtoMessage() {} +func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{30} +} + +func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type LeaseCheckpointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} } +func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointResponse) ProtoMessage() {} +func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{31} +} + +func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseKeepAliveRequest struct { + // ID is the lease ID for the lease to keep alive. 
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{32} +} + +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseKeepAliveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` // 租约ID + // TTL is the new time-to-live for the lease. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` +} + +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{33} +} + +func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +type LeaseTimeToLiveRequest struct { + // ID is the lease ID for the lease. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // keys is true to query all the keys attached to this lease. 
+ Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{34} +} + +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + +type LeaseTimeToLiveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` + // Keys is the list of keys attached to this lease. 
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{35} +} + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + +type LeaseLeasesRequest struct{} + +func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } +func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesRequest) ProtoMessage() {} +func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{36} +} + +type LeaseStatus struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } +func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } +func (*LeaseStatus) ProtoMessage() {} +func (*LeaseStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{37} +} + +func (m *LeaseStatus) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseLeasesResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" 
json:"leases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } +func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesResponse) ProtoMessage() {} +func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{38} +} + +func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { + if m != nil { + return m.Leases + } + return nil +} + +type LeaseServer interface { + LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) // 创建租约 + LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) // 移除租约 + LeaseKeepAlive(Lease_LeaseKeepAliveServer) error // 租约 续租 + LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) // 检索租约信息 + LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) // 显示所有存在的租约 +} + +// UnimplementedLeaseServer can be embedded to have forward compatible implementations. 
+type UnimplementedLeaseServer struct{} + +func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented") +} + +func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented") +} + +func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error { + return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented") +} + +func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented") +} + +func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented") +} + +func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { + s.RegisterService(&_Lease_serviceDesc, srv) +} + +type Lease_LeaseKeepAliveServer interface { + Send(*LeaseKeepAliveResponse) error + Recv() (*LeaseKeepAliveRequest, error) + grpc.ServerStream +} + +type leaseLeaseKeepAliveServer struct { + grpc.ServerStream +} + +func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { + m := new(LeaseKeepAliveRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseTimeToLiveRequest) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseTimeToLive(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseLeases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseLeases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseGrantRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseGrant(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseGrant", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseRevokeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseRevoke(ctx, in) + 
} + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseRevoke", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) +} + +var _Lease_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Lease", + HandlerType: (*LeaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LeaseGrant", + Handler: _Lease_LeaseGrant_Handler, + }, + { + MethodName: "LeaseRevoke", + Handler: _Lease_LeaseRevoke_Handler, + }, + { + MethodName: "LeaseTimeToLive", + Handler: _Lease_LeaseTimeToLive_Handler, + }, + { + MethodName: "LeaseLeases", + Handler: _Lease_LeaseLeases_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "LeaseKeepAlive", + Handler: _Lease_LeaseKeepAlive_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +type LeaseClient interface { + LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) + LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) + LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) + LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) + LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) +} + +type leaseClient struct { + cc *grpc.ClientConn +} + +func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { + return &leaseClient{cc} +} + +func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) 
(*LeaseGrantResponse, error) { + out := new(LeaseGrantResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { + out := new(LeaseRevokeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { + stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...) + if err != nil { + return nil, err + } + x := &leaseLeaseKeepAliveClient{stream} + return x, nil +} + +type Lease_LeaseKeepAliveClient interface { + Send(*LeaseKeepAliveRequest) error + Recv() (*LeaseKeepAliveResponse, error) + grpc.ClientStream +} + +type leaseLeaseKeepAliveClient struct { + grpc.ClientStream +} + +func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { + m := new(LeaseKeepAliveResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { + out := new(LeaseTimeToLiveResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { + out := new(LeaseLeasesResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} diff --git a/offical/etcdserverpb/over.go b/offical/etcdserverpb/over.go new file mode 100644 index 00000000000..64eb9900a71 --- /dev/null +++ b/offical/etcdserverpb/over.go @@ -0,0 +1,161 @@ +package etcdserverpb + +import ( + "encoding/json" + "errors" + "strings" + + "github.com/ls-2018/etcd_cn/offical/api/v3/membershippb" +) + +type xx struct { + Key string + Value string + Lease int64 + PrevKv bool + IgnoreValue bool + IgnoreLease bool +} +type ASD struct { + Put *xx + Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"` + LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"` + LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint,proto3" json:"lease_checkpoint,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"` + AuthStatus *AuthStatusRequest 
`protobuf:"bytes,1013,opt,name=auth_status,json=authStatus,proto3" json:"auth_status,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"` + AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest 
`protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"` + ClusterVersionSet *membershippb.ClusterVersionSetRequest `protobuf:"bytes,1300,opt,name=cluster_version_set,json=clusterVersionSet,proto3" json:"cluster_version_set,omitempty"` + ClusterMemberAttrSet *membershippb.ClusterMemberAttrSetRequest `protobuf:"bytes,1301,opt,name=cluster_member_attr_set,json=clusterMemberAttrSet,proto3" json:"cluster_member_attr_set,omitempty"` + DowngradeInfoSet *membershippb.DowngradeInfoSetRequest `protobuf:"bytes,1302,opt,name=downgrade_info_set,json=downgradeInfoSet,proto3" json:"downgrade_info_set,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + _ = m.Unmarshal + a := ASD{ + Put: nil, + Header: m.Header, + ID: m.ID, + V2: m.V2, + Txn: m.Txn, + DeleteRange: m.DeleteRange, + AuthRoleRevokePermission: m.AuthRoleRevokePermission, + AuthRoleGet: m.AuthRoleGet, + AuthRoleDelete: m.AuthRoleDelete, + AuthUserList: m.AuthUserList, + AuthUserChangePassword: m.AuthUserChangePassword, + AuthStatus: m.AuthStatus, + LeaseCheckpoint: m.LeaseCheckpoint, + Alarm: m.Alarm, + AuthDisable: m.AuthDisable, + LeaseRevoke: m.LeaseRevoke, + AuthEnable: m.AuthEnable, + AuthUserDelete: m.AuthUserDelete, + Authenticate: m.Authenticate, + AuthUserGet: m.AuthUserGet, + AuthRoleGrantPermission: m.AuthRoleGrantPermission, + AuthUserRevokeRole: m.AuthUserRevokeRole, + LeaseGrant: m.LeaseGrant, + Compaction: m.Compaction, + AuthRoleList: m.AuthRoleList, + AuthRoleAdd: m.AuthRoleAdd, + AuthUserGrantRole: m.AuthUserGrantRole, + AuthUserAdd: m.AuthUserAdd, + ClusterVersionSet: 
m.ClusterVersionSet, + ClusterMemberAttrSet: m.ClusterMemberAttrSet, + DowngradeInfoSet: m.DowngradeInfoSet, + } + + if m.Put != nil { + a.Put = &xx{ + Key: m.Put.Key, + Value: m.Put.Value, + Lease: m.Put.Lease, + PrevKv: m.Put.PrevKv, + IgnoreValue: m.Put.IgnoreValue, + IgnoreLease: m.Put.IgnoreLease, + } + } + + return json.Marshal(a) +} + +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + // a := `{"header":{"ID":7587861231285799685},"put":{"key":"YQ==","value":"Yg=="}}` + // b := `{"ID":7587861231285799684,"Method":"PUT","Path":"/0/version","Val":"3.5.0","Dir":false,"PrevValue":"","PrevIndex":0,"Expiration":0,"Wait":false,"Since":0,"Recursive":false,"Sorted":false,"Quorum":false,"Time":0,"Stream":false}` + // fmt.Println(json.Unmarshal([]byte(a), &etcdserverpb.InternalRaftRequest{})) // 不能反序列化成功 + // fmt.Println(json.Unmarshal([]byte(b), &etcdserverpb.InternalRaftRequest{})) + + if strings.Contains(string(dAtA), "Method") { + return errors.New("特殊需求,不是使其反序列化成功") + } + a := ASD{} + err := json.Unmarshal(dAtA, &a) + if a.Put != nil { + m.Put = &PutRequest{ + Key: a.Put.Key, + Value: a.Put.Value, + Lease: a.Put.Lease, + PrevKv: a.Put.PrevKv, + IgnoreValue: a.Put.IgnoreValue, + IgnoreLease: a.Put.IgnoreLease, + } + } + m.Header = a.Header + m.ID = a.ID + m.Txn = a.Txn + m.V2 = a.V2 + m.DeleteRange = a.DeleteRange + m.AuthRoleRevokePermission = a.AuthRoleRevokePermission + m.AuthRoleGet = a.AuthRoleGet + m.AuthRoleDelete = a.AuthRoleDelete + m.AuthUserList = a.AuthUserList + m.AuthUserChangePassword = a.AuthUserChangePassword + m.AuthStatus = a.AuthStatus + m.LeaseCheckpoint = a.LeaseCheckpoint + m.Alarm = a.Alarm + m.AuthDisable = a.AuthDisable + m.LeaseRevoke = a.LeaseRevoke + m.AuthEnable = a.AuthEnable + m.AuthUserDelete = a.AuthUserDelete + m.AuthRoleGrantPermission = a.AuthRoleGrantPermission + m.Authenticate = a.Authenticate + m.AuthUserGet = a.AuthUserGet + m.AuthUserRevokeRole = a.AuthUserRevokeRole + m.LeaseGrant = a.LeaseGrant + m.Compaction 
= a.Compaction + m.AuthRoleList = a.AuthRoleList + m.AuthRoleAdd = a.AuthRoleAdd + m.AuthUserGrantRole = a.AuthUserGrantRole + m.AuthUserAdd = a.AuthUserAdd + m.ClusterVersionSet = a.ClusterVersionSet + m.ClusterMemberAttrSet = a.ClusterMemberAttrSet + m.DowngradeInfoSet = a.DowngradeInfoSet + return err +} + +// 不能更改 diff --git a/offical/etcdserverpb/raft_internal.pb.go b/offical/etcdserverpb/raft_internal.pb.go new file mode 100644 index 00000000000..b799c658a88 --- /dev/null +++ b/offical/etcdserverpb/raft_internal.pb.go @@ -0,0 +1,231 @@ +package etcdserverpb + +import ( + "encoding/json" + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + membershippb "github.com/ls-2018/etcd_cn/offical/api/v3/membershippb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type RequestHeader struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // username is a username that is associated with an auth token of gRPC connection + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + // auth_revision is a revision number of auth.authStore. 
It is not related to mvcc + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{0} +} + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. +type InternalRaftRequest struct { + Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"` + LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"` + LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint,proto3" json:"lease_checkpoint,omitempty"` + AuthEnable *AuthEnableRequest 
`protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"` + AuthStatus *AuthStatusRequest `protobuf:"bytes,1013,opt,name=auth_status,json=authStatus,proto3" json:"auth_status,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"` + AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" 
json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"` + ClusterVersionSet *membershippb.ClusterVersionSetRequest `protobuf:"bytes,1300,opt,name=cluster_version_set,json=clusterVersionSet,proto3" json:"cluster_version_set,omitempty"` + ClusterMemberAttrSet *membershippb.ClusterMemberAttrSetRequest `protobuf:"bytes,1301,opt,name=cluster_member_attr_set,json=clusterMemberAttrSet,proto3" json:"cluster_member_attr_set,omitempty"` + DowngradeInfoSet *membershippb.DowngradeInfoSetRequest `protobuf:"bytes,1302,opt,name=downgrade_info_set,json=downgradeInfoSet,proto3" json:"downgrade_info_set,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{1} +} + +type EmptyResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{2} +} + +// 
What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. +type InternalAuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // simple_token is generated in API layer (etcdserver/v3_server.go) + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } +func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*InternalAuthenticateRequest) ProtoMessage() {} +func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b4c9a9be0cfca103, []int{3} +} + +func init() { + proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") + proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") + proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") + proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") +} + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) } + +var fileDescriptor_b4c9a9be0cfca103 = []byte{ + // 1003 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xd9, 0x72, 0x1b, 0x45, + 0x14, 0x86, 0x23, 0xc5, 0x71, 0xac, 0x96, 0xed, 0x38, 0x6d, 0x87, 0x34, 0x72, 0x95, 0x70, 0x1c, + 0x12, 0xcc, 0x66, 0x53, 0xca, 0x03, 0x80, 0x90, 0x5c, 0x8e, 0xab, 0x42, 0x70, 
0x4d, 0xcc, 0x52, + 0xc5, 0xc5, 0xd0, 0x9a, 0x39, 0x96, 0x06, 0xcf, 0x46, 0x77, 0x4b, 0x31, 0xef, 0x11, 0x28, 0x1e, + 0x83, 0xed, 0x21, 0x72, 0xc1, 0x62, 0xe0, 0x05, 0xc0, 0xdc, 0x70, 0x0f, 0xdc, 0x53, 0xbd, 0xcc, + 0x26, 0xb5, 0x7c, 0xa7, 0xf9, 0xcf, 0x7f, 0xbe, 0x73, 0xba, 0xe7, 0xf4, 0xa8, 0xd1, 0x3a, 0xa3, + 0x27, 0xc2, 0x0d, 0x62, 0x01, 0x2c, 0xa6, 0xe1, 0x6e, 0xca, 0x12, 0x91, 0xe0, 0x65, 0x10, 0x9e, + 0xcf, 0x81, 0x4d, 0x80, 0xa5, 0x83, 0xd6, 0xc6, 0x30, 0x19, 0x26, 0x2a, 0xb0, 0x27, 0x7f, 0x69, + 0x4f, 0x6b, 0xad, 0xf0, 0x18, 0xa5, 0xc1, 0x52, 0xcf, 0xfc, 0xbc, 0x2f, 0x83, 0x7b, 0x34, 0x0d, + 0xf6, 0x22, 0x88, 0x06, 0xc0, 0xf8, 0x28, 0x48, 0xd3, 0x41, 0xe9, 0x41, 0xfb, 0xb6, 0x3f, 0x45, + 0x2b, 0x0e, 0x7c, 0x3e, 0x06, 0x2e, 0x1e, 0x02, 0xf5, 0x81, 0xe1, 0x55, 0x54, 0x3f, 0xec, 0x93, + 0xda, 0x56, 0x6d, 0x67, 0xc1, 0xa9, 0x1f, 0xf6, 0x71, 0x0b, 0x2d, 0x8d, 0xb9, 0x6c, 0x2d, 0x02, + 0x52, 0xdf, 0xaa, 0xed, 0x34, 0x9c, 0xfc, 0x19, 0xdf, 0x45, 0x2b, 0x74, 0x2c, 0x46, 0x2e, 0x83, + 0x49, 0xc0, 0x83, 0x24, 0x26, 0x57, 0x55, 0xda, 0xb2, 0x14, 0x1d, 0xa3, 0x6d, 0x3f, 0xc3, 0x68, + 0xfd, 0xd0, 0xac, 0xce, 0xa1, 0x27, 0xc2, 0x94, 0xc3, 0x0f, 0xd0, 0xe2, 0x48, 0x95, 0x24, 0xfe, + 0x56, 0x6d, 0xa7, 0xd9, 0xd9, 0xdc, 0x2d, 0xaf, 0x79, 0xb7, 0xd2, 0x95, 0x63, 0xac, 0x33, 0xdd, + 0xdd, 0x43, 0xf5, 0x49, 0x47, 0xf5, 0xd5, 0xec, 0xdc, 0xb2, 0x02, 0x9c, 0xfa, 0xa4, 0x83, 0xdf, + 0x42, 0xd7, 0x18, 0x8d, 0x87, 0xa0, 0x1a, 0x6c, 0x76, 0x5a, 0x53, 0x4e, 0x19, 0xca, 0xec, 0xda, + 0x88, 0x5f, 0x43, 0x57, 0xd3, 0xb1, 0x20, 0x0b, 0xca, 0x4f, 0xaa, 0xfe, 0xa3, 0x71, 0xb6, 0x08, + 0x47, 0x9a, 0x70, 0x0f, 0x2d, 0xfb, 0x10, 0x82, 0x00, 0x57, 0x17, 0xb9, 0xa6, 0x92, 0xb6, 0xaa, + 0x49, 0x7d, 0xe5, 0xa8, 0x94, 0x6a, 0xfa, 0x85, 0x26, 0x0b, 0x8a, 0xb3, 0x98, 0x2c, 0xda, 0x0a, + 0x1e, 0x9f, 0xc5, 0x79, 0x41, 0x71, 0x16, 0xe3, 0xb7, 0x11, 0xf2, 0x92, 0x28, 0xa5, 0x9e, 0x90, + 0x9b, 0x7e, 0x5d, 0xa5, 0xbc, 0x54, 0x4d, 0xe9, 0xe5, 0xf1, 0x2c, 0xb3, 0x94, 0x82, 0xdf, 0x41, + 0xcd, 0x10, 0x28, 
0x07, 0x77, 0xc8, 0x68, 0x2c, 0xc8, 0x92, 0x8d, 0xf0, 0x48, 0x1a, 0x0e, 0x64, + 0x3c, 0x27, 0x84, 0xb9, 0x24, 0xd7, 0xac, 0x09, 0x0c, 0x26, 0xc9, 0x29, 0x90, 0x86, 0x6d, 0xcd, + 0x0a, 0xe1, 0x28, 0x43, 0xbe, 0xe6, 0xb0, 0xd0, 0xe4, 0x6b, 0xa1, 0x21, 0x65, 0x11, 0x41, 0xb6, + 0xd7, 0xd2, 0x95, 0xa1, 0xfc, 0xb5, 0x28, 0x23, 0x7e, 0x1f, 0xad, 0xe9, 0xb2, 0xde, 0x08, 0xbc, + 0xd3, 0x34, 0x09, 0x62, 0x41, 0x9a, 0x2a, 0xf9, 0x65, 0x4b, 0xe9, 0x5e, 0x6e, 0xca, 0x30, 0x37, + 0xc2, 0xaa, 0x8e, 0xbb, 0xa8, 0xa9, 0x46, 0x18, 0x62, 0x3a, 0x08, 0x81, 0xfc, 0x6d, 0xdd, 0xcc, + 0xee, 0x58, 0x8c, 0xf6, 0x95, 0x21, 0xdf, 0x0a, 0x9a, 0x4b, 0xb8, 0x8f, 0xd4, 0xc0, 0xbb, 0x7e, + 0xc0, 0x15, 0xe3, 0x9f, 0xeb, 0xb6, 0xbd, 0x90, 0x8c, 0xbe, 0x76, 0xe4, 0x7b, 0x41, 0x0b, 0x2d, + 0x6f, 0x84, 0x0b, 0x2a, 0xc6, 0x9c, 0xfc, 0x37, 0xb7, 0x91, 0x27, 0xca, 0x50, 0x69, 0x44, 0x4b, + 0xf8, 0xb1, 0x6e, 0x04, 0x62, 0x11, 0x78, 0x54, 0x00, 0xf9, 0x57, 0x33, 0x5e, 0xad, 0x32, 0xb2, + 0xb3, 0xd8, 0x2d, 0x59, 0x33, 0x5a, 0x25, 0x1f, 0xef, 0x9b, 0xe3, 0x2d, 0xcf, 0xbb, 0x4b, 0x7d, + 0x9f, 0xfc, 0xb8, 0x34, 0x6f, 0x65, 0x1f, 0x70, 0x60, 0x5d, 0xdf, 0xaf, 0xac, 0xcc, 0x68, 0xf8, + 0x31, 0x5a, 0x2b, 0x30, 0x7a, 0xe4, 0xc9, 0x4f, 0x9a, 0x74, 0xd7, 0x4e, 0x32, 0x67, 0xc5, 0xc0, + 0x56, 0x69, 0x45, 0xae, 0xb6, 0x35, 0x04, 0x41, 0x7e, 0xbe, 0xb4, 0xad, 0x03, 0x10, 0x33, 0x6d, + 0x1d, 0x80, 0xc0, 0x43, 0xf4, 0x62, 0x81, 0xf1, 0x46, 0xf2, 0x10, 0xba, 0x29, 0xe5, 0xfc, 0x69, + 0xc2, 0x7c, 0xf2, 0x8b, 0x46, 0xbe, 0x6e, 0x47, 0xf6, 0x94, 0xfb, 0xc8, 0x98, 0x33, 0xfa, 0x0b, + 0xd4, 0x1a, 0xc6, 0x1f, 0xa3, 0x8d, 0x52, 0xbf, 0xf2, 0xf4, 0xb8, 0x2c, 0x09, 0x81, 0x9c, 0xeb, + 0x1a, 0xf7, 0xe7, 0xb4, 0xad, 0x4e, 0x5e, 0x52, 0x4c, 0xcb, 0x4d, 0x3a, 0x1d, 0xc1, 0x9f, 0xa0, + 0x5b, 0x05, 0x59, 0x1f, 0x44, 0x8d, 0xfe, 0x55, 0xa3, 0x5f, 0xb1, 0xa3, 0xcd, 0x89, 0x2c, 0xb1, + 0x31, 0x9d, 0x09, 0xe1, 0x87, 0x68, 0xb5, 0x80, 0x87, 0x01, 0x17, 0xe4, 0x37, 0x4d, 0xbd, 0x63, + 0xa7, 0x3e, 0x0a, 0xb8, 0xa8, 0xcc, 0x51, 0x26, 0xe6, 
0x24, 0xd9, 0x9a, 0x26, 0xfd, 0x3e, 0x97, + 0x24, 0x4b, 0xcf, 0x90, 0x32, 0x31, 0x7f, 0xf5, 0x8a, 0x24, 0x27, 0xf2, 0x9b, 0xc6, 0xbc, 0x57, + 0x2f, 0x73, 0xa6, 0x27, 0xd2, 0x68, 0xf9, 0x44, 0x2a, 0x8c, 0x99, 0xc8, 0x6f, 0x1b, 0xf3, 0x26, + 0x52, 0x66, 0x59, 0x26, 0xb2, 0x90, 0xab, 0x6d, 0xc9, 0x89, 0xfc, 0xee, 0xd2, 0xb6, 0xa6, 0x27, + 0xd2, 0x68, 0xf8, 0x33, 0xd4, 0x2a, 0x61, 0xd4, 0xa0, 0xa4, 0xc0, 0xa2, 0x80, 0xab, 0xff, 0xd6, + 0xef, 0x35, 0xf3, 0x8d, 0x39, 0x4c, 0x69, 0x3f, 0xca, 0xdd, 0x19, 0xff, 0x36, 0xb5, 0xc7, 0x71, + 0x84, 0x36, 0x8b, 0x5a, 0x66, 0x74, 0x4a, 0xc5, 0x7e, 0xd0, 0xc5, 0xde, 0xb4, 0x17, 0xd3, 0x53, + 0x32, 0x5b, 0x8d, 0xd0, 0x39, 0x06, 0xfc, 0x11, 0x5a, 0xf7, 0xc2, 0x31, 0x17, 0xc0, 0xdc, 0x09, + 0x30, 0x29, 0xb9, 0x1c, 0x04, 0x79, 0x86, 0xcc, 0x11, 0x28, 0x5f, 0x52, 0x76, 0x7b, 0xda, 0xf9, + 0xa1, 0x36, 0x3e, 0x29, 0x76, 0xeb, 0xa6, 0x37, 0x1d, 0xc1, 0x14, 0xdd, 0xce, 0xc0, 0x9a, 0xe1, + 0x52, 0x21, 0x98, 0x82, 0x7f, 0x89, 0xcc, 0xe7, 0xcf, 0x06, 0x7f, 0x4f, 0x69, 0x5d, 0x21, 0x58, + 0x89, 0xbf, 0xe1, 0x59, 0x82, 0xf8, 0x18, 0x61, 0x3f, 0x79, 0x1a, 0x0f, 0x19, 0xf5, 0xc1, 0x0d, + 0xe2, 0x93, 0x44, 0xd1, 0xbf, 0xd2, 0xf4, 0x7b, 0x55, 0x7a, 0x3f, 0x33, 0x1e, 0xc6, 0x27, 0x49, + 0x89, 0xbc, 0xe6, 0x4f, 0x05, 0xb6, 0x6f, 0xa0, 0x95, 0xfd, 0x28, 0x15, 0x5f, 0x38, 0xc0, 0xd3, + 0x24, 0xe6, 0xb0, 0x9d, 0xa2, 0xcd, 0x4b, 0x3e, 0xcd, 0x18, 0xa3, 0x05, 0x75, 0x07, 0xab, 0xa9, + 0x3b, 0x98, 0xfa, 0x2d, 0xef, 0x66, 0xf9, 0x17, 0xcb, 0xdc, 0xcd, 0xb2, 0x67, 0x7c, 0x07, 0x2d, + 0xf3, 0x20, 0x4a, 0x43, 0x70, 0x45, 0x72, 0x0a, 0xfa, 0x6a, 0xd6, 0x70, 0x9a, 0x5a, 0x3b, 0x96, + 0xd2, 0xbb, 0x1b, 0xcf, 0xff, 0x6c, 0x5f, 0x79, 0x7e, 0xd1, 0xae, 0x9d, 0x5f, 0xb4, 0x6b, 0x7f, + 0x5c, 0xb4, 0x6b, 0x5f, 0xff, 0xd5, 0xbe, 0x32, 0x58, 0x54, 0x17, 0xc3, 0x07, 0xff, 0x07, 0x00, + 0x00, 0xff, 0xff, 0x94, 0x6f, 0x64, 0x0a, 0x98, 0x0a, 0x00, 0x00, +} + +var ( + ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") + 
ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRaftInternal = fmt.Errorf("proto: unexpected end of group") +) + +func (m *RequestHeader) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *RequestHeader) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *InternalRaftRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *EmptyResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *InternalAuthenticateRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *RequestHeader) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *EmptyResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } diff --git a/api/etcdserverpb/raft_internal.proto b/offical/etcdserverpb/raft_internal.proto similarity index 81% rename from api/etcdserverpb/raft_internal.proto rename to offical/etcdserverpb/raft_internal.proto index f1036b9f619..68926e59f6c 100644 --- a/api/etcdserverpb/raft_internal.proto +++ b/offical/etcdserverpb/raft_internal.proto @@ -4,7 +4,6 @@ package etcdserverpb; import "gogoproto/gogo.proto"; import "etcdserver.proto"; import "rpc.proto"; -import "etcd/api/versionpb/version.proto"; import "etcd/api/membershippb/membership.proto"; option (gogoproto.marshaler_all) = true; @@ -13,20 +12,16 @@ option (gogoproto.unmarshaler_all) = true; option (gogoproto.goproto_getters_all) = false; message RequestHeader { - option (versionpb.etcd_version_msg) = "3.0"; - uint64 ID = 1; // username is a username that is associated with an auth token of gRPC 
connection string username = 2; // auth_revision is a revision number of auth.authStore. It is not related to mvcc - uint64 auth_revision = 3 [(versionpb.etcd_version_field) = "3.1"]; + uint64 auth_revision = 3; } // An InternalRaftRequest is the union of all requests which can be // sent via raft. message InternalRaftRequest { - option (versionpb.etcd_version_msg) = "3.0"; - RequestHeader header = 100; uint64 ID = 1; @@ -43,11 +38,11 @@ message InternalRaftRequest { AlarmRequest alarm = 10; - LeaseCheckpointRequest lease_checkpoint = 11 [(versionpb.etcd_version_field) = "3.4"]; + LeaseCheckpointRequest lease_checkpoint = 11; AuthEnableRequest auth_enable = 1000; AuthDisableRequest auth_disable = 1011; - AuthStatusRequest auth_status = 1013 [(versionpb.etcd_version_field) = "3.5"]; + AuthStatusRequest auth_status = 1013; InternalAuthenticateRequest authenticate = 1012; @@ -66,9 +61,9 @@ message InternalRaftRequest { AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; - membershippb.ClusterVersionSetRequest cluster_version_set = 1300 [(versionpb.etcd_version_field) = "3.5"]; - membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301 [(versionpb.etcd_version_field) = "3.5"]; - membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302 [(versionpb.etcd_version_field) = "3.5"]; + membershippb.ClusterVersionSetRequest cluster_version_set = 1300; + membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301; + membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302; } message EmptyResponse { @@ -78,7 +73,6 @@ message EmptyResponse { // InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. // For avoiding misusage the field, we have an internal version of AuthenticateRequest. 
message InternalAuthenticateRequest { - option (versionpb.etcd_version_msg) = "3.0"; string name = 1; string password = 2; diff --git a/api/etcdserverpb/raft_internal_stringer.go b/offical/etcdserverpb/raft_internal_stringer.go similarity index 89% rename from api/etcdserverpb/raft_internal_stringer.go rename to offical/etcdserverpb/raft_internal_stringer.go index 31e121ee0a6..d71c8f8dc12 100644 --- a/api/etcdserverpb/raft_internal_stringer.go +++ b/offical/etcdserverpb/raft_internal_stringer.go @@ -85,11 +85,9 @@ func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer { func (as *txnRequestStringer) String() string { var compare []string for _, c := range as.Request.Compare { - switch cv := c.TargetUnion.(type) { - case *Compare_Value: - compare = append(compare, newLoggableValueCompare(c, cv).String()) - default: - // nothing to redact + if c.Compare_Value != nil { + compare = append(compare, newLoggableValueCompare(c, c.Compare_Value).String()) + } else { compare = append(compare, c.String()) } } @@ -119,14 +117,13 @@ func newLoggableRequestOp(op *RequestOp) *requestOpStringer { } func (as *requestOpStringer) String() string { - switch op := as.Op.Request.(type) { - case *RequestOp_RequestPut: - return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String()) - case *RequestOp_RequestTxn: - return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String()) - default: - // nothing to redact + if as.Op.RequestOp_RequestPut != nil { + return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(as.Op.RequestOp_RequestPut.RequestPut).String()) } + if as.Op.RequestOp_RequestTxn != nil { + return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(as.Op.RequestOp_RequestTxn.RequestTxn).String()) + } + return as.Op.String() } @@ -136,9 +133,9 @@ func (as *requestOpStringer) String() string { type loggableValueCompare struct { Result Compare_CompareResult 
`protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"` Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"` - Key []byte `protobuf:"bytes,3,opt,name=key,proto3"` + Key string `protobuf:"bytes,3,opt,name=key,proto3"` ValueSize int64 `protobuf:"varint,7,opt,name=value_size,proto3"` - RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"` + RangeEnd string `protobuf:"bytes,64,opt,name=range_end,proto3"` } func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare { @@ -159,7 +156,7 @@ func (*loggableValueCompare) ProtoMessage() {} // size field. // To preserve proto encoding of the key bytes, a faked out proto type is used here. type loggablePutRequest struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3"` + Key string `protobuf:"bytes,1,opt,name=key,proto3"` ValueSize int64 `protobuf:"varint,2,opt,name=value_size,proto3"` Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"` PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"` diff --git a/offical/etcdserverpb/rpc.pb.go b/offical/etcdserverpb/rpc.pb.go new file mode 100644 index 00000000000..382d813a044 --- /dev/null +++ b/offical/etcdserverpb/rpc.pb.go @@ -0,0 +1,5173 @@ +package etcdserverpb + +import ( + context "context" + "encoding/json" + fmt "fmt" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" + authpb "github.com/ls-2018/etcd_cn/offical/api/v3/authpb" + mvccpb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AlarmType int32 + +const ( + AlarmType_NONE AlarmType = 0 + AlarmType_NOSPACE AlarmType = 1 + AlarmType_CORRUPT AlarmType = 2 +) + +var AlarmType_name = map[int32]string{ + 0: "NONE", + 1: "NOSPACE", + 2: "CORRUPT", +} + +var AlarmType_value = map[string]int32{ + "NONE": 0, + "NOSPACE": 1, + "CORRUPT": 2, +} + +func (x AlarmType) String() string { + return proto.EnumName(AlarmType_name, int32(x)) +} + +func (AlarmType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{0} +} + +type RangeRequest_SortOrder int32 + +const ( + RangeRequest_NONE RangeRequest_SortOrder = 0 // 不排序 + RangeRequest_ASCEND RangeRequest_SortOrder = 1 // 升序 + RangeRequest_DESCEND RangeRequest_SortOrder = 2 // 降序 +) + +var RangeRequest_SortOrder_name = map[int32]string{ + 0: "NONE", + 1: "ASCEND", + 2: "DESCEND", +} + +var RangeRequest_SortOrder_value = map[string]int32{ + "NONE": 0, + "ASCEND": 1, + "DESCEND": 2, +} + +func (x RangeRequest_SortOrder) String() string { + return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) +} + +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1, 0} +} + +type RangeRequest_SortTarget int32 + +const ( + RangeRequest_KEY RangeRequest_SortTarget = 0 + RangeRequest_VERSION RangeRequest_SortTarget = 1 + RangeRequest_CREATE RangeRequest_SortTarget = 2 + RangeRequest_MOD RangeRequest_SortTarget = 3 + RangeRequest_VALUE RangeRequest_SortTarget = 4 +) + +var RangeRequest_SortTarget_name = map[int32]string{ + 0: "KEY", + 1: "VERSION", + 2: "CREATE", + 3: "MOD", + 4: "VALUE", 
+} + +var RangeRequest_SortTarget_value = map[string]int32{ + "KEY": 0, + "VERSION": 1, + "CREATE": 2, + "MOD": 3, + "VALUE": 4, +} + +func (x RangeRequest_SortTarget) String() string { + return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) +} + +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1, 1} +} + +type Compare_CompareResult int32 + +const ( + Compare_EQUAL Compare_CompareResult = 0 + Compare_GREATER Compare_CompareResult = 1 + Compare_LESS Compare_CompareResult = 2 + Compare_NOT_EQUAL Compare_CompareResult = 3 +) + +var Compare_CompareResult_name = map[int32]string{ + 0: "EQUAL", + 1: "GREATER", + 2: "LESS", + 3: "NOT_EQUAL", +} + +var Compare_CompareResult_value = map[string]int32{ + "EQUAL": 0, + "GREATER": 1, + "LESS": 2, + "NOT_EQUAL": 3, +} + +func (x Compare_CompareResult) String() string { + return proto.EnumName(Compare_CompareResult_name, int32(x)) +} + +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{9, 0} +} + +type Compare_CompareTarget int32 + +const ( + Compare_VERSION Compare_CompareTarget = 0 + Compare_CREATE Compare_CompareTarget = 1 + Compare_MOD Compare_CompareTarget = 2 + Compare_VALUE Compare_CompareTarget = 3 + Compare_LEASE Compare_CompareTarget = 4 +) + +var Compare_CompareTarget_name = map[int32]string{ + 0: "VERSION", + 1: "CREATE", + 2: "MOD", + 3: "VALUE", + 4: "LEASE", +} + +var Compare_CompareTarget_value = map[string]int32{ + "VERSION": 0, + "CREATE": 1, + "MOD": 2, + "VALUE": 3, + "LEASE": 4, +} + +func (x Compare_CompareTarget) String() string { + return proto.EnumName(Compare_CompareTarget_name, int32(x)) +} + +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{9, 1} +} + +type WatchCreateRequest_FilterType int32 + +const ( + // filter out put event. 
+ WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 + // filter out delete event. + WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 +) + +var WatchCreateRequest_FilterType_name = map[int32]string{ + 0: "NOPUT", + 1: "NODELETE", +} + +var WatchCreateRequest_FilterType_value = map[string]int32{ + "NOPUT": 0, + "NODELETE": 1, +} + +func (x WatchCreateRequest_FilterType) String() string { + return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) +} + +func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{21, 0} +} + +type AlarmRequest_AlarmAction int32 + +const ( + AlarmRequest_GET AlarmRequest_AlarmAction = 0 + AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 // 激活 + AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 // 取消 +) + +var AlarmRequest_AlarmAction_name = map[int32]string{ + 0: "GET", + 1: "ACTIVATE", + 2: "DEACTIVATE", +} + +var AlarmRequest_AlarmAction_value = map[string]int32{ + "GET": 0, + "ACTIVATE": 1, + "DEACTIVATE": 2, +} + +func (x AlarmRequest_AlarmAction) String() string { + return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) +} + +func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{54, 0} +} + +type DowngradeRequest_DowngradeAction int32 + +const ( + DowngradeRequest_VALIDATE DowngradeRequest_DowngradeAction = 0 + DowngradeRequest_ENABLE DowngradeRequest_DowngradeAction = 1 + DowngradeRequest_CANCEL DowngradeRequest_DowngradeAction = 2 +) + +var DowngradeRequest_DowngradeAction_name = map[int32]string{ + 0: "VALIDATE", + 1: "ENABLE", + 2: "CANCEL", +} + +var DowngradeRequest_DowngradeAction_value = map[string]int32{ + "VALIDATE": 0, + "ENABLE": 1, + "CANCEL": 2, +} + +func (x DowngradeRequest_DowngradeAction) String() string { + return proto.EnumName(DowngradeRequest_DowngradeAction_name, int32(x)) +} + +func (DowngradeRequest_DowngradeAction) EnumDescriptor() ([]byte, 
[]int) { + return fileDescriptor_77a6da22d6a3feb1, []int{57, 0} +} + +type ResponseHeader struct { + // cluster_id is the ID of the cluster which sent the response. + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` + Revision int64 // 当前 watchResponse 实例创建时对应的 revision 值 + // raft_term is the raft term when the request was applied. + RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{0} +} + +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +type RangeRequest struct { + // key is the first key for the range. If range_end is not given, the request only looks up key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. 
+ RangeEnd string `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` + // 返回的数据 排序方式 + SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` + // sort_target is the key-value field to use for sorting. + SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` + // 表示设置range请求通过可串行化( serializable)的方式从接受请求的节点读取本地数据.默认情况下, range 请求是可线性化的,它反映了当前集群的一致性.为了获得更好的性能和可用 + // 性,可以考虑使用可串行化的读,以有一定的概率读到过期数据为代价,不需要经过一致性协议与集群中其他节点的协同,而是直接从本地节点读数据. + Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` + KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` // 表示是否只返回key 而不返回value + CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` // ,表示是否只返回range 请求返回的key 的数量. + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. 
+ MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. + MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` +} + +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1} +} + +func (m *RangeRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *RangeRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m 
!= nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + +type RangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"` // 表示符合range 请求的key-value 对列表.如果Count_Only 设置为true ,则kvs 就为空. + More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` // 是否还有更多的数据,没有返回 + Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` // 符合条件的总数 +} + +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{2} +} + +func (m *RangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { + if m != nil { + return m.Kvs + } + return nil +} + +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +type PutRequest struct { + // key is the key, in bytes, to put into the key-value store. 
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // value is the value, in bytes, to associate with the key in the key-value store. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` // 是否设置了前缀 + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{3} +} + +func (m *PutRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *PutRequest) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + +type PutResponse struct { + Header *ResponseHeader 
`protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // if prev_kv is set in the request, the previous key-value pair will be returned. + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{4} +} + +func (m *PutResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { + if m != nil { + return m.PrevKv + } + return nil +} + +type DeleteRangeRequest struct { + // [key,RangeEnd] + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd string `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // 如果设置了prev_kv,etcd在删除前会获取之前的键值对.先前的键值对将在删除响应中返回. 
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` +} + +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{5} +} + +func (m *DeleteRangeRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *DeleteRangeRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +type DeleteRangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // deleted is the number of keys deleted by the delete range request. + Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` + // if prev_kv is set in the request, the previous key-value pairs will be returned. 
+ PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{6} +} + +func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + +func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { + if m != nil { + return m.PrevKvs + } + return nil +} + +type RequestOp struct { + // request is a union of request types accepted by a transaction. + // + // Types that are valid to be assigned to Request: + RequestOp_RequestRange *RequestOp_RequestRange + RequestOp_RequestPut *RequestOp_RequestPut + RequestOp_RequestDeleteRange *RequestOp_RequestDeleteRange + RequestOp_RequestTxn *RequestOp_RequestTxn + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{7} +} + +type isRequestOp_Request interface { + isRequestOp_Request() + Size() int +} + +type RequestOp_RequestRange struct { + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof" json:"request_range,omitempty"` +} +type RequestOp_RequestPut struct { + RequestPut *PutRequest 
`protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof" json:"request_put,omitempty"` +} +type RequestOp_RequestDeleteRange struct { + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof" json:"request_delete_range,omitempty"` +} +type RequestOp_RequestTxn struct { + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof" json:"request_txn,omitempty"` +} + +func (*RequestOp_RequestRange) isRequestOp_Request() {} +func (*RequestOp_RequestPut) isRequestOp_Request() {} +func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} +func (*RequestOp_RequestTxn) isRequestOp_Request() {} + +func (m *RequestOp) GetRequestRange() *RangeRequest { + if m.RequestOp_RequestRange != nil { + return m.RequestOp_RequestRange.RequestRange + } + return nil +} + +func (m *RequestOp) GetRequestPut() *PutRequest { + if m.RequestOp_RequestPut != nil { + return m.RequestOp_RequestPut.RequestPut + } + return nil +} + +func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { + if m.RequestOp_RequestDeleteRange != nil { + return m.RequestOp_RequestDeleteRange.RequestDeleteRange + } + return nil +} + +func (m *RequestOp) GetRequestTxn() *TxnRequest { + if m.RequestOp_RequestTxn != nil { + return m.RequestOp_RequestTxn.RequestTxn + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*RequestOp) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*RequestOp_RequestRange)(nil), + (*RequestOp_RequestPut)(nil), + (*RequestOp_RequestDeleteRange)(nil), + (*RequestOp_RequestTxn)(nil), + } +} + +type ResponseOp struct { + // response is a union of response types returned by a transaction. 
+ // + // Types that are valid to be assigned to Response: + // *ResponseOp_ResponseRange + // *ResponseOp_ResponsePut + // *ResponseOp_ResponseDeleteRange + // *ResponseOp_ResponseTxn + ResponseOp_ResponseRange *ResponseOp_ResponseRange + ResponseOp_ResponsePut *ResponseOp_ResponsePut + ResponseOp_ResponseDeleteRange *ResponseOp_ResponseDeleteRange + ResponseOp_ResponseTxn *ResponseOp_ResponseTxn + // Response isResponseOp_Response `protobuf_oneof:"response"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{8} +} + +type isResponseOp_Response interface { + isResponseOp_Response() + Size() int +} + +type ResponseOp_ResponseRange struct { + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof" json:"response_range,omitempty"` +} +type ResponseOp_ResponsePut struct { + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof" json:"response_put,omitempty"` +} +type ResponseOp_ResponseDeleteRange struct { + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof" json:"response_delete_range,omitempty"` +} +type ResponseOp_ResponseTxn struct { + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof" json:"response_txn,omitempty"` +} + +func (*ResponseOp_ResponseRange) isResponseOp_Response() {} +func (*ResponseOp_ResponsePut) isResponseOp_Response() {} +func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} +func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} + +func (m *ResponseOp) GetResponseRange() *RangeResponse { + if 
m.ResponseOp_ResponseRange != nil { + return m.ResponseOp_ResponseRange.ResponseRange + } + return nil +} + +func (m *ResponseOp) GetResponsePut() *PutResponse { + if m.ResponseOp_ResponsePut != nil { + return m.ResponseOp_ResponsePut.ResponsePut + } + return nil +} + +func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { + if m.ResponseOp_ResponseDeleteRange != nil { + return m.ResponseOp_ResponseDeleteRange.ResponseDeleteRange + } + return nil +} + +func (m *ResponseOp) GetResponseTxn() *TxnResponse { + if m.ResponseOp_ResponseTxn != nil { + return m.ResponseOp_ResponseTxn.ResponseTxn + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ResponseOp) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ResponseOp_ResponseRange)(nil), + (*ResponseOp_ResponsePut)(nil), + (*ResponseOp_ResponseDeleteRange)(nil), + (*ResponseOp_ResponseTxn)(nil), + } +} + +type Compare struct { + Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Types that are valid to be assigned to TargetUnion: + // *Compare_Version + // *Compare_CreateRevision + // *Compare_ModRevision + // *Compare_Value + // *Compare_Lease + // TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` + + Compare_Value *Compare_Value + Compare_Version *Compare_Version + Compare_CreateRevision *Compare_CreateRevision + Compare_ModRevision *Compare_ModRevision + Compare_Lease *Compare_Lease + + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. 
+ RangeEnd string `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{9} +} + +type isCompare_TargetUnion interface { + isCompare_TargetUnion() + Size() int +} + +type Compare_Version struct { + Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof" json:"version,omitempty"` +} +type Compare_CreateRevision struct { + CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof" json:"create_revision,omitempty"` +} +type Compare_ModRevision struct { + ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof" json:"mod_revision,omitempty"` +} +type Compare_Value struct { + Value string `protobuf:"bytes,7,opt,name=value,proto3,oneof" json:"value,omitempty"` +} +type Compare_Lease struct { + Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof" json:"lease,omitempty"` +} + +func (*Compare_Version) isCompare_TargetUnion() {} +func (*Compare_CreateRevision) isCompare_TargetUnion() {} +func (*Compare_ModRevision) isCompare_TargetUnion() {} +func (*Compare_Value) isCompare_TargetUnion() {} +func (*Compare_Lease) isCompare_TargetUnion() {} + +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Compare) GetVersion() int64 { + if m.Compare_Version != nil { + return m.Compare_Version.Version + } + return 0 +} + +func (m *Compare) GetCreateRevision() int64 { + if m.Compare_CreateRevision != nil { + return 
m.Compare_CreateRevision.CreateRevision + } + return 0 +} + +func (m *Compare) GetModRevision() int64 { + if m.Compare_ModRevision != nil { + return m.Compare_ModRevision.ModRevision + } + return 0 +} + +func (m *Compare) GetValue() string { + if m.Compare_Value != nil { + return m.Compare_Value.Value + } + return "" +} + +func (m *Compare) GetLease() int64 { + if m.Compare_Lease != nil { + return m.Compare_Lease.Lease + } + return 0 +} + +func (m *Compare) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Compare) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Compare_Version)(nil), + (*Compare_CreateRevision)(nil), + (*Compare_ModRevision)(nil), + (*Compare_Value)(nil), + (*Compare_Lease)(nil), + } +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. 
Like t op, but executed if guard evaluates to false. +type TxnRequest struct { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"` + // success is a list of requests which will be applied when compare evaluates to true. + Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"` + // failure is a list of requests which will be applied when compare evaluates to false. + Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TxnRequest) Reset() { *m = TxnRequest{} } +func (m *TxnRequest) String() string { return proto.CompactTextString(m) } +func (*TxnRequest) ProtoMessage() {} +func (*TxnRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{10} +} + +func (m *TxnRequest) GetCompare() []*Compare { + if m != nil { + return m.Compare + } + return nil +} + +func (m *TxnRequest) GetSuccess() []*RequestOp { + if m != nil { + return m.Success + } + return nil +} + +func (m *TxnRequest) GetFailure() []*RequestOp { + if m != nil { + return m.Failure + } + return nil +} + +type TxnResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // succeeded is set to true if the compare evaluated to true or false otherwise. 
+ Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. + Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TxnResponse) Reset() { *m = TxnResponse{} } +func (m *TxnResponse) String() string { return proto.CompactTextString(m) } +func (*TxnResponse) ProtoMessage() {} +func (*TxnResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{11} +} + +func (m *TxnResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + +func (m *TxnResponse) GetResponses() []*ResponseOp { + if m != nil { + return m.Responses + } + return nil +} + +type CompactionRequest struct { + // Revision是用于压缩操作的键-值存储修订. 
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` + // + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` +} + +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{12} +} + +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + +type CompactionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{13} +} + +func (m *CompactionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type HashRequest struct{} + +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{14} +} + +type HashKVRequest struct { + // revision是哈希操作的键值存储修订版. 
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } +func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } +func (*HashKVRequest) ProtoMessage() {} +func (*HashKVRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{15} +} + +func (m *HashKVRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +type HashKVResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` + // compact_revision is the compacted revision of key-value store when hash begins. + CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` +} + +func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } +func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } +func (*HashKVResponse) ProtoMessage() {} +func (*HashKVResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{16} +} + +func (m *HashKVResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashKVResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +func (m *HashKVResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +type HashResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // hash is the hash value computed from the responding member's KV's backend. 
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{17} +} + +func (m *HashResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +type SnapshotRequest struct{} + +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{18} +} + +type SnapshotResponse struct { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` // 剩余数据量 + // blob contains the next chunk of the snapshot in the snapshot stream. 
+ Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` +} + +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{19} +} + +func (m *SnapshotResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + +type WatchRequest struct { + WatchRequest_CreateRequest *WatchRequest_CreateRequest + WatchRequest_CancelRequest *WatchRequest_CancelRequest + WatchRequest_ProgressRequest *WatchRequest_ProgressRequest + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + err := json.Unmarshal(dAtA, m) + return err +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{20} +} + +type WatchRequest_CreateRequest struct { + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof" json:"create_request,omitempty"` +} +type WatchRequest_CancelRequest struct { + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof" json:"cancel_request,omitempty"` +} +type WatchRequest_ProgressRequest struct { + ProgressRequest *WatchProgressRequest 
`protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof" json:"progress_request,omitempty"` +} + +func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {} + +func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { + if m.WatchRequest_CreateRequest != nil { + return m.WatchRequest_CreateRequest.CreateRequest + } + return nil +} + +func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { + if m.WatchRequest_CancelRequest != nil { + return m.WatchRequest_CancelRequest.CancelRequest + } + return nil +} + +func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest { + if m.WatchRequest_ProgressRequest != nil { + return m.WatchRequest_ProgressRequest.ProgressRequest + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*WatchRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*WatchRequest_CreateRequest)(nil), + (*WatchRequest_CancelRequest)(nil), + (*WatchRequest_ProgressRequest)(nil), + } +} + +type WatchCreateRequest struct { + // key is the key to register for watching. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + RangeEnd string `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` + // progress_notify被设置为etcd服务器将定期发送WatchResponse如果没有最近的事件,它是有用的希望恢复断开的观察者从最近的已知修订开始. 
+ // etcd服务器可以根据当前的负载决定发送通知的频率. + ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` + // filters filter the events at server side before it sends back to the watcher. + Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // 如果提供了watch_id且非零,则将它分配给这个监视程序.因为在etcd中创建监视器不是同步操作,所以当在同一流中创建多个监视器时, + // 可以使用它来确保顺序是正确的.在流上创建ID已在使用的监视程序将导致返回错误. + WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + // 拆分大的变更 成多个watch响应. + Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` +} + +func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } +func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCreateRequest) ProtoMessage() {} +func (*WatchCreateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{21} +} + +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return []byte(m.Key) + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return []byte(m.RangeEnd) + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return 
m.PrevKv + } + return false +} + +func (m *WatchCreateRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchCreateRequest) GetFragment() bool { + if m != nil { + return m.Fragment + } + return false +} + +type WatchCancelRequest struct { + // watch_id is the watcher id to cancel so that no more events are transmitted. + WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` +} + +func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } +func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCancelRequest) ProtoMessage() {} +func (*WatchCancelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{22} +} + +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +// WatchProgressRequest 获取watch的状态 +type WatchProgressRequest struct{} + +func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } +func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } +func (*WatchProgressRequest) ProtoMessage() {} +func (*WatchProgressRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{23} +} + +type WatchResponse struct { + CompactRevision int64 // 压缩操作对应的 revison + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` + Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + // framgment is true if large watch response was split over multiple responses. 
+ Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{24} +} + +func (m *WatchResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + +func (m *WatchResponse) GetFragment() bool { + if m != nil { + return m.Fragment + } + return false +} + +func (m *WatchResponse) GetEvents() []*mvccpb.Event { + if m != nil { + return m.Events + } + return nil +} + +type Member struct { + // ID is the member ID for this member. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // peerURLs is the list of URLs the member exposes to the cluster for communication. 
+ PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. + ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{39} +} + +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + +func (m *Member) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddRequest struct { + // peerURLs是新增成员用来与集群通信的URL列表. 
+ PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` + IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{40} +} + +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *MemberAddRequest) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type MemberAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // member is the member information for the added member. + Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{41} +} + +func (m *MemberAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberAddResponse) GetMember() *Member { + if m != nil { + return m.Member + } + return nil +} + +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberRemoveRequest struct { + // ID is the member ID of the member to remove. 
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{42} +} + +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberRemoveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members after removing the member. + Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{43} +} + +func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberUpdateRequest struct { + // ID is the member ID of the member to update. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // peerURLs is the new list of URLs the member will use to communicate with the cluster. 
+ PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` +} + +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{44} +} + +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberUpdateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members after updating the member. + Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{45} +} + +func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberListRequest struct { + Linearizable bool `protobuf:"varint,1,opt,name=linearizable,proto3" json:"linearizable,omitempty"` +} + +func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_77a6da22d6a3feb1, []int{46} +} + +func (m *MemberListRequest) GetLinearizable() bool { + if m != nil { + return m.Linearizable + } + return false +} + +type MemberListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members associated with the cluster. + Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{47} +} + +func (m *MemberListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberListResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberPromoteRequest struct { + // ID is the member ID of the member to promote. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} } +func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteRequest) ProtoMessage() {} +func (*MemberPromoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{48} +} + +func (m *MemberPromoteRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberPromoteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // members is a list of all members after promoting the member. 
+ Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} } +func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteResponse) ProtoMessage() {} +func (*MemberPromoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{49} +} + +func (m *MemberPromoteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberPromoteResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type DefragmentRequest struct{} + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{50} +} + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{51} +} + +func (m *DefragmentResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type MoveLeaderRequest struct { + // targetID is the node ID for the new leader. 
+ TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` +} + +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{52} +} + +func (m *MoveLeaderRequest) GetTargetID() uint64 { + if m != nil { + return m.TargetID + } + return 0 +} + +type MoveLeaderResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{53} +} + +func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AlarmRequest struct { + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. + Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm to consider for this request. 
+ Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{54} +} + +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmMember struct { + MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` + Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{55} +} + +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // alarms is a list of alarms associated with the alarm request. 
+ Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{56} +} + +func (m *AlarmResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AlarmResponse) GetAlarms() []*AlarmMember { + if m != nil { + return m.Alarms + } + return nil +} + +type DowngradeRequest struct { + // action is the kind of downgrade request to issue. The action may + // VALIDATE the target version, DOWNGRADE the cluster version, + // or CANCEL the current downgrading job. + Action DowngradeRequest_DowngradeAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.DowngradeRequest_DowngradeAction" json:"action,omitempty"` + // version is the target version to downgrade. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *DowngradeRequest) Reset() { *m = DowngradeRequest{} } +func (m *DowngradeRequest) String() string { return proto.CompactTextString(m) } +func (*DowngradeRequest) ProtoMessage() {} +func (*DowngradeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{57} +} + +func (m *DowngradeRequest) GetAction() DowngradeRequest_DowngradeAction { + if m != nil { + return m.Action + } + return DowngradeRequest_VALIDATE +} + +func (m *DowngradeRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type DowngradeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // version is the current cluster version. 
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *DowngradeResponse) Reset() { *m = DowngradeResponse{} } +func (m *DowngradeResponse) String() string { return proto.CompactTextString(m) } +func (*DowngradeResponse) ProtoMessage() {} +func (*DowngradeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{58} +} + +func (m *DowngradeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DowngradeResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type StatusRequest struct{} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{59} +} + +type StatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // version is the cluster protocol version used by the responding member. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. + DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` + // leader is the member ID which the responding member believes is the current leader. + Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` + // raftIndex is the current raft committed index of the responding member. + RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` + // raftTerm is the current raft term of the responding member. 
+ RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` + // raftAppliedIndex is the current raft applied index of the responding member. + RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` + // errors contains alarm/health information and status. + Errors []string `protobuf:"bytes,8,rep,name=errors,proto3" json:"errors,omitempty"` + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{60} +} + +func (m *StatusResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +func (m *StatusResponse) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex + } + return 0 +} + +func (m *StatusResponse) GetErrors() []string { + if m != nil { + return m.Errors + } + return nil +} + +func (m *StatusResponse) GetDbSizeInUse() int64 { + if m 
!= nil { + return m.DbSizeInUse + } + return 0 +} + +func (m *StatusResponse) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + +type AuthEnableRequest struct{} + +func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } +func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthEnableRequest) ProtoMessage() {} +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{61} +} + +type AuthDisableRequest struct{} + +func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } +func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthDisableRequest) ProtoMessage() {} +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{62} +} + +type AuthStatusRequest struct{} + +func (m *AuthStatusRequest) Reset() { *m = AuthStatusRequest{} } +func (m *AuthStatusRequest) String() string { return proto.CompactTextString(m) } +func (*AuthStatusRequest) ProtoMessage() {} +func (*AuthStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{63} +} + +type AuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } +func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*AuthenticateRequest) ProtoMessage() {} +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{64} +} + +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +type AuthUserAddRequest struct { + Name string 
`protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` + HashedPassword string `protobuf:"bytes,4,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } +func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddRequest) ProtoMessage() {} +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{65} +} + +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *AuthUserAddRequest) GetHashedPassword() string { + if m != nil { + return m.HashedPassword + } + return "" +} + +type AuthUserGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } +func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetRequest) ProtoMessage() {} +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{66} +} + +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserDeleteRequest struct { + // name is the name of the user to delete. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } +func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteRequest) ProtoMessage() {} +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{67} +} + +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthUserChangePasswordRequest struct { + // name is the name of the user whose password is being changed. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // password is the new password for the user. Note that this field will be removed in the API layer. + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. + HashedPassword string `protobuf:"bytes,3,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"` +} + +func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } +func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordRequest) ProtoMessage() {} +func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{68} +} + +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetHashedPassword() string { + if m != nil { + return m.HashedPassword + } + return "" +} + +type AuthUserGrantRoleRequest struct { + // user is the name of the user which should be granted a given role. 
+ User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + // role is the name of the role to grant to the user. + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } +func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleRequest) ProtoMessage() {} +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{69} +} + +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserRevokeRoleRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } +func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleRequest) ProtoMessage() {} +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{70} +} + +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleAddRequest struct { + // name is the name of the role to add to the authentication system. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } +func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddRequest) ProtoMessage() {} +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{71} +} + +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type AuthRoleGetRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } +func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetRequest) ProtoMessage() {} +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{72} +} + +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthUserListRequest struct{} + +func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } +func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserListRequest) ProtoMessage() {} +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{73} +} + +type AuthRoleListRequest struct{} + +func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } +func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListRequest) ProtoMessage() {} +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{74} +} + +type AuthRoleDeleteRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` +} + +func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } +func (m *AuthRoleDeleteRequest) String() string { return 
proto.CompactTextString(m) } +func (*AuthRoleDeleteRequest) ProtoMessage() {} +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{75} +} + +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +type AuthRoleGrantPermissionRequest struct { + // name is the name of the role which will be granted the permission. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // perm is the permission to grant to the role. + Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } +func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} +func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{76} +} + +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleRevokePermissionRequest struct { + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } +func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} +func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_77a6da22d6a3feb1, []int{77} +} + +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() []byte { + if m != nil { + return []byte(m.Key) + } + return nil +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { + if m != nil { + return []byte(m.RangeEnd) + } + return nil +} + +type AuthEnableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } +func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthEnableResponse) ProtoMessage() {} +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{78} +} + +func (m *AuthEnableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthDisableResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } +func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthDisableResponse) ProtoMessage() {} +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{79} +} + +func (m *AuthDisableResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthStatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + 
AuthRevision uint64 `protobuf:"varint,3,opt,name=authRevision,proto3" json:"authRevision,omitempty"` +} + +func (m *AuthStatusResponse) Reset() { *m = AuthStatusResponse{} } +func (m *AuthStatusResponse) String() string { return proto.CompactTextString(m) } +func (*AuthStatusResponse) ProtoMessage() {} +func (*AuthStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{80} +} + +func (m *AuthStatusResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthStatusResponse) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *AuthStatusResponse) GetAuthRevision() uint64 { + if m != nil { + return m.AuthRevision + } + return 0 +} + +type AuthenticateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // token is an authorized token that can be used in succeeding RPCs + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` +} + +func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } +func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } +func (*AuthenticateResponse) ProtoMessage() {} +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{81} +} + +func (m *AuthenticateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +type AuthUserAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } +func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } 
+func (*AuthUserAddResponse) ProtoMessage() {} +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{82} +} + +func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } +func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetResponse) ProtoMessage() {} +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{83} +} + +func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } +func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteResponse) ProtoMessage() {} +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{84} +} + +func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserChangePasswordResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } +func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserChangePasswordResponse) ProtoMessage() {} +func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{85} +} + +func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserGrantRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } +func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleResponse) ProtoMessage() {} +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{86} +} + +func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthUserRevokeRoleResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } +func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleResponse) ProtoMessage() {} +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{87} +} + +func (m *AuthUserRevokeRoleResponse) GetHeader() 
*ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } +func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddResponse) ProtoMessage() {} +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{88} +} + +func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGetResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } +func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetResponse) ProtoMessage() {} +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{89} +} + +func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { + if m != nil { + return m.Perm + } + return nil +} + +type AuthRoleListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } +func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListResponse) ProtoMessage() {} +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{90} +} + +func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + +type AuthUserListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } +func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserListResponse) ProtoMessage() {} +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{91} +} + +func (m *AuthUserListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + +type AuthRoleDeleteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } +func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteResponse) ProtoMessage() {} +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, 
[]int{92} +} + +func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleGrantPermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } +func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} +func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{93} +} + +func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AuthRoleRevokePermissionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } +func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} +func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{94} +} + +func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, 
RangeRequest_SortTarget_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) + proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) + proto.RegisterEnum("etcdserverpb.DowngradeRequest_DowngradeAction", DowngradeRequest_DowngradeAction_name, DowngradeRequest_DowngradeAction_value) + proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") + proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") + proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") + proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") + proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") + proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") + proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") + proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") + proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") + proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") + proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") + proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") + proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") + proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") + proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") + proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest") + proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse") + proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") 
+ proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") + proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") + proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") + proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") + proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") + proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest") + proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") + proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") + proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") + proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") + proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") + proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint") + proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest") + proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse") + proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") + proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") + proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") + proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") + proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest") + proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus") + proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse") + proto.RegisterType((*Member)(nil), "etcdserverpb.Member") + proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") + proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") + 
proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") + proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") + proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") + proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") + proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") + proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") + proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest") + proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse") + proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") + proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") + proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") + proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") + proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") + proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") + proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") + proto.RegisterType((*DowngradeRequest)(nil), "etcdserverpb.DowngradeRequest") + proto.RegisterType((*DowngradeResponse)(nil), "etcdserverpb.DowngradeResponse") + proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") + proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") + proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") + proto.RegisterType((*AuthStatusRequest)(nil), "etcdserverpb.AuthStatusRequest") + proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") + proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") + 
proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") + proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") + proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest") + proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") + proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") + proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") + proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") + proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") + proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") + proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") + proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") + proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") + proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") + proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") + proto.RegisterType((*AuthStatusResponse)(nil), "etcdserverpb.AuthStatusResponse") + proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") + proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") + proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") + proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") + proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") + proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") + proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), 
"etcdserverpb.AuthUserRevokeRoleResponse") + proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") + proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") + proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") + proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") + proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") + proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") + proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") +} + +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } + +var fileDescriptor_77a6da22d6a3feb1 = []byte{ + // 4107 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0xc9, + 0x75, 0xe6, 0x00, 0xc4, 0xed, 0xe0, 0x42, 0xb0, 0x79, 0x11, 0x84, 0x95, 0x28, 0x6e, 0x6b, 0xa5, + 0xe5, 0x4a, 0xbb, 0xc4, 0x9a, 0xb6, 0xb3, 0x55, 0x4a, 0xe2, 0x18, 0x22, 0xb1, 0x12, 0x97, 0x14, + 0xc9, 0x1d, 0x42, 0xda, 0x4b, 0xb9, 0xc2, 0x1a, 0x02, 0x2d, 0x72, 0x42, 0x60, 0x06, 0x9e, 0x19, + 0x40, 0xe4, 0xe6, 0xe2, 0x94, 0xcb, 0x71, 0x25, 0xaf, 0x76, 0x55, 0x2a, 0x79, 0x48, 0x5e, 0x52, + 0x29, 0x97, 0x1f, 0xfc, 0x9c, 0xbf, 0x90, 0xa7, 0x5c, 0x2a, 0x7f, 0x20, 0xb5, 0xf1, 0x4b, 0xf2, + 0x23, 0x52, 0xae, 0xbe, 0xcd, 0xf4, 0xdc, 0x40, 0xd9, 0xd8, 0xdd, 0x17, 0x11, 0x7d, 0xfa, 0xf4, + 0xf9, 0x4e, 0x9f, 0xee, 0x3e, 0xe7, 0xf4, 0xe9, 0x11, 0x94, 0x9c, 0x51, 0x6f, 0x73, 0xe4, 0xd8, + 0x9e, 0x8d, 0x2a, 0xc4, 0xeb, 0xf5, 0x5d, 0xe2, 0x4c, 0x88, 0x33, 0x3a, 0x6d, 0x2e, 0x9f, 0xd9, + 0x67, 0x36, 0xeb, 0x68, 0xd1, 0x5f, 0x9c, 0xa7, 0xd9, 0xa0, 0x3c, 0x2d, 0x63, 0x64, 0xb6, 0x86, + 0x93, 0x5e, 0x6f, 0x74, 0xda, 0xba, 0x98, 0x88, 0x9e, 0xa6, 0xdf, 0x63, 0x8c, 0xbd, 0xf3, 0xd1, + 0x29, 0xfb, 0x23, 0xfa, 0x6e, 
0x9d, 0xd9, 0xf6, 0xd9, 0x80, 0xf0, 0x5e, 0xcb, 0xb2, 0x3d, 0xc3, + 0x33, 0x6d, 0xcb, 0xe5, 0xbd, 0xf8, 0xaf, 0x34, 0xa8, 0xe9, 0xc4, 0x1d, 0xd9, 0x96, 0x4b, 0x9e, + 0x12, 0xa3, 0x4f, 0x1c, 0x74, 0x1b, 0xa0, 0x37, 0x18, 0xbb, 0x1e, 0x71, 0x4e, 0xcc, 0x7e, 0x43, + 0x5b, 0xd7, 0x36, 0xe6, 0xf5, 0x92, 0xa0, 0xec, 0xf6, 0xd1, 0x1b, 0x50, 0x1a, 0x92, 0xe1, 0x29, + 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf6, 0x51, 0x13, 0x8a, 0x0e, 0x99, 0x98, 0xae, 0x69, + 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, 0x2f, 0xbd, 0x13, 0x8f, + 0x38, 0xc3, 0xc6, 0x3c, 0x1f, 0x48, 0x09, 0x5d, 0xe2, 0x0c, 0xf1, 0x4f, 0x72, 0x50, 0xd1, 0x0d, + 0xeb, 0x8c, 0xe8, 0xe4, 0x87, 0x63, 0xe2, 0x7a, 0xa8, 0x0e, 0xd9, 0x0b, 0x72, 0xc5, 0xe0, 0x2b, + 0x3a, 0xfd, 0xc9, 0xc7, 0x5b, 0x67, 0xe4, 0x84, 0x58, 0x1c, 0xb8, 0x42, 0xc7, 0x5b, 0x67, 0xa4, + 0x63, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x04, 0x2a, 0x6f, 0x84, 0xd4, 0x99, 0x8f, + 0xa8, 0xb3, 0x0d, 0xe0, 0xda, 0x8e, 0x77, 0x62, 0x3b, 0x7d, 0xe2, 0x34, 0x72, 0xeb, 0xda, 0x46, + 0x6d, 0xeb, 0xad, 0x4d, 0x75, 0x19, 0x36, 0x55, 0x85, 0x36, 0x8f, 0x6d, 0xc7, 0x3b, 0xa4, 0xbc, + 0x7a, 0xc9, 0x95, 0x3f, 0xd1, 0x87, 0x50, 0x66, 0x42, 0x3c, 0xc3, 0x39, 0x23, 0x5e, 0x23, 0xcf, + 0xa4, 0xdc, 0xbb, 0x46, 0x4a, 0x97, 0x31, 0xeb, 0x0c, 0x9e, 0xff, 0x46, 0x18, 0x2a, 0x2e, 0x71, + 0x4c, 0x63, 0x60, 0x7e, 0x61, 0x9c, 0x0e, 0x48, 0xa3, 0xb0, 0xae, 0x6d, 0x14, 0xf5, 0x10, 0x8d, + 0xce, 0xff, 0x82, 0x5c, 0xb9, 0x27, 0xb6, 0x35, 0xb8, 0x6a, 0x14, 0x19, 0x43, 0x91, 0x12, 0x0e, + 0xad, 0xc1, 0x15, 0x5b, 0x34, 0x7b, 0x6c, 0x79, 0xbc, 0xb7, 0xc4, 0x7a, 0x4b, 0x8c, 0xc2, 0xba, + 0x37, 0xa0, 0x3e, 0x34, 0xad, 0x93, 0xa1, 0xdd, 0x3f, 0xf1, 0x0d, 0x02, 0xcc, 0x20, 0xb5, 0xa1, + 0x69, 0x3d, 0xb3, 0xfb, 0xba, 0x34, 0x0b, 0xe5, 0x34, 0x2e, 0xc3, 0x9c, 0x65, 0xc1, 0x69, 0x5c, + 0xaa, 0x9c, 0x9b, 0xb0, 0x44, 0x65, 0xf6, 0x1c, 0x62, 0x78, 0x24, 0x60, 0xae, 0x30, 0xe6, 0xc5, + 0xa1, 0x69, 0x6d, 0xb3, 0x9e, 0x10, 0xbf, 0x71, 0x19, 0xe3, 0xaf, 
0x0a, 0x7e, 0xe3, 0x32, 0xcc, + 0x8f, 0x37, 0xa1, 0xe4, 0xdb, 0x1c, 0x15, 0x61, 0xfe, 0xe0, 0xf0, 0xa0, 0x53, 0x9f, 0x43, 0x00, + 0xf9, 0xf6, 0xf1, 0x76, 0xe7, 0x60, 0xa7, 0xae, 0xa1, 0x32, 0x14, 0x76, 0x3a, 0xbc, 0x91, 0xc1, + 0x8f, 0x01, 0x02, 0xeb, 0xa2, 0x02, 0x64, 0xf7, 0x3a, 0x9f, 0xd5, 0xe7, 0x28, 0xcf, 0x8b, 0x8e, + 0x7e, 0xbc, 0x7b, 0x78, 0x50, 0xd7, 0xe8, 0xe0, 0x6d, 0xbd, 0xd3, 0xee, 0x76, 0xea, 0x19, 0xca, + 0xf1, 0xec, 0x70, 0xa7, 0x9e, 0x45, 0x25, 0xc8, 0xbd, 0x68, 0xef, 0x3f, 0xef, 0xd4, 0xe7, 0xf1, + 0xcf, 0x35, 0xa8, 0x8a, 0xf5, 0xe2, 0x67, 0x02, 0x7d, 0x07, 0xf2, 0xe7, 0xec, 0x5c, 0xb0, 0xad, + 0x58, 0xde, 0xba, 0x15, 0x59, 0xdc, 0xd0, 0xd9, 0xd1, 0x05, 0x2f, 0xc2, 0x90, 0xbd, 0x98, 0xb8, + 0x8d, 0xcc, 0x7a, 0x76, 0xa3, 0xbc, 0x55, 0xdf, 0xe4, 0xe7, 0x75, 0x73, 0x8f, 0x5c, 0xbd, 0x30, + 0x06, 0x63, 0xa2, 0xd3, 0x4e, 0x84, 0x60, 0x7e, 0x68, 0x3b, 0x84, 0xed, 0xd8, 0xa2, 0xce, 0x7e, + 0xd3, 0x6d, 0xcc, 0x16, 0x4d, 0xec, 0x56, 0xde, 0xc0, 0xbf, 0xd4, 0x00, 0x8e, 0xc6, 0x5e, 0xfa, + 0xd1, 0x58, 0x86, 0xdc, 0x84, 0x0a, 0x16, 0xc7, 0x82, 0x37, 0xd8, 0x99, 0x20, 0x86, 0x4b, 0xfc, + 0x33, 0x41, 0x1b, 0xe8, 0x06, 0x14, 0x46, 0x0e, 0x99, 0x9c, 0x5c, 0x4c, 0x18, 0x48, 0x51, 0xcf, + 0xd3, 0xe6, 0xde, 0x04, 0xbd, 0x09, 0x15, 0xf3, 0xcc, 0xb2, 0x1d, 0x72, 0xc2, 0x65, 0xe5, 0x58, + 0x6f, 0x99, 0xd3, 0x98, 0xde, 0x0a, 0x0b, 0x17, 0x9c, 0x57, 0x59, 0xf6, 0x29, 0x09, 0x5b, 0x50, + 0x66, 0xaa, 0xce, 0x64, 0xbe, 0x77, 0x02, 0x1d, 0x33, 0x6c, 0x58, 0xdc, 0x84, 0x42, 0x6b, 0xfc, + 0x03, 0x40, 0x3b, 0x64, 0x40, 0x3c, 0x32, 0x8b, 0xf7, 0x50, 0x6c, 0x92, 0x55, 0x6d, 0x82, 0x7f, + 0xa6, 0xc1, 0x52, 0x48, 0xfc, 0x4c, 0xd3, 0x6a, 0x40, 0xa1, 0xcf, 0x84, 0x71, 0x0d, 0xb2, 0xba, + 0x6c, 0xa2, 0x87, 0x50, 0x14, 0x0a, 0xb8, 0x8d, 0x6c, 0xca, 0xa6, 0x29, 0x70, 0x9d, 0x5c, 0xfc, + 0xcb, 0x0c, 0x94, 0xc4, 0x44, 0x0f, 0x47, 0xa8, 0x0d, 0x55, 0x87, 0x37, 0x4e, 0xd8, 0x7c, 0x84, + 0x46, 0xcd, 0x74, 0x27, 0xf4, 0x74, 0x4e, 0xaf, 0x88, 0x21, 0x8c, 0x8c, 0x7e, 0x1f, 0xca, 0x52, + 0xc4, 
0x68, 0xec, 0x09, 0x93, 0x37, 0xc2, 0x02, 0x82, 0xfd, 0xf7, 0x74, 0x4e, 0x07, 0xc1, 0x7e, + 0x34, 0xf6, 0x50, 0x17, 0x96, 0xe5, 0x60, 0x3e, 0x1b, 0xa1, 0x46, 0x96, 0x49, 0x59, 0x0f, 0x4b, + 0x89, 0x2f, 0xd5, 0xd3, 0x39, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0xaa, 0x92, 0x77, 0xc9, 0x9d, 0x77, + 0x4c, 0xa5, 0xee, 0xa5, 0x15, 0x57, 0xa9, 0x7b, 0x69, 0x3d, 0x2e, 0x41, 0x41, 0xb4, 0xf0, 0xbf, + 0x64, 0x00, 0xe4, 0x6a, 0x1c, 0x8e, 0xd0, 0x0e, 0xd4, 0x1c, 0xd1, 0x0a, 0x59, 0xeb, 0x8d, 0x44, + 0x6b, 0x89, 0x45, 0x9c, 0xd3, 0xab, 0x72, 0x10, 0x57, 0xee, 0x7b, 0x50, 0xf1, 0xa5, 0x04, 0x06, + 0xbb, 0x99, 0x60, 0x30, 0x5f, 0x42, 0x59, 0x0e, 0xa0, 0x26, 0xfb, 0x04, 0x56, 0xfc, 0xf1, 0x09, + 0x36, 0x7b, 0x73, 0x8a, 0xcd, 0x7c, 0x81, 0x4b, 0x52, 0x82, 0x6a, 0x35, 0x55, 0xb1, 0xc0, 0x6c, + 0x37, 0x13, 0xcc, 0x16, 0x57, 0x8c, 0x1a, 0x0e, 0x68, 0xbc, 0xe4, 0x4d, 0xfc, 0xbf, 0x59, 0x28, + 0x6c, 0xdb, 0xc3, 0x91, 0xe1, 0xd0, 0xd5, 0xc8, 0x3b, 0xc4, 0x1d, 0x0f, 0x3c, 0x66, 0xae, 0xda, + 0xd6, 0xdd, 0xb0, 0x44, 0xc1, 0x26, 0xff, 0xea, 0x8c, 0x55, 0x17, 0x43, 0xe8, 0x60, 0x11, 0x1e, + 0x33, 0xaf, 0x31, 0x58, 0x04, 0x47, 0x31, 0x44, 0x1e, 0xe4, 0x6c, 0x70, 0x90, 0x9b, 0x50, 0x98, + 0x10, 0x27, 0x08, 0xe9, 0x4f, 0xe7, 0x74, 0x49, 0x40, 0xef, 0xc0, 0x42, 0x34, 0xbc, 0xe4, 0x04, + 0x4f, 0xad, 0x17, 0x8e, 0x46, 0x77, 0xa1, 0x12, 0x8a, 0x71, 0x79, 0xc1, 0x57, 0x1e, 0x2a, 0x21, + 0x6e, 0x55, 0xfa, 0x55, 0x1a, 0x8f, 0x2b, 0x4f, 0xe7, 0xa4, 0x67, 0x5d, 0x95, 0x9e, 0xb5, 0x28, + 0x46, 0x09, 0xdf, 0x1a, 0x72, 0x32, 0xdf, 0x0f, 0x3b, 0x19, 0xfc, 0x7d, 0xa8, 0x86, 0x0c, 0x44, + 0xe3, 0x4e, 0xe7, 0xe3, 0xe7, 0xed, 0x7d, 0x1e, 0xa4, 0x9e, 0xb0, 0xb8, 0xa4, 0xd7, 0x35, 0x1a, + 0xeb, 0xf6, 0x3b, 0xc7, 0xc7, 0xf5, 0x0c, 0xaa, 0x42, 0xe9, 0xe0, 0xb0, 0x7b, 0xc2, 0xb9, 0xb2, + 0xf8, 0x89, 0x2f, 0x41, 0x04, 0x39, 0x25, 0xb6, 0xcd, 0x29, 0xb1, 0x4d, 0x93, 0xb1, 0x2d, 0x13, + 0xc4, 0x36, 0x16, 0xe6, 0xf6, 0x3b, 0xed, 0xe3, 0x4e, 0x7d, 0xfe, 0x71, 0x0d, 0x2a, 0xdc, 0xbe, + 0x27, 0x63, 0x8b, 0x86, 0xda, 0x7f, 0xd2, 
0x00, 0x82, 0xd3, 0x84, 0x5a, 0x50, 0xe8, 0x71, 0x9c, + 0x86, 0xc6, 0x9c, 0xd1, 0x4a, 0xe2, 0x92, 0xe9, 0x92, 0x0b, 0x7d, 0x0b, 0x0a, 0xee, 0xb8, 0xd7, + 0x23, 0xae, 0x0c, 0x79, 0x37, 0xa2, 0xfe, 0x50, 0x78, 0x2b, 0x5d, 0xf2, 0xd1, 0x21, 0x2f, 0x0d, + 0x73, 0x30, 0x66, 0x01, 0x70, 0xfa, 0x10, 0xc1, 0x87, 0xff, 0x5e, 0x83, 0xb2, 0xb2, 0x79, 0x7f, + 0x47, 0x27, 0x7c, 0x0b, 0x4a, 0x4c, 0x07, 0xd2, 0x17, 0x6e, 0xb8, 0xa8, 0x07, 0x04, 0xf4, 0x7b, + 0x50, 0x92, 0x27, 0x40, 0x7a, 0xe2, 0x46, 0xb2, 0xd8, 0xc3, 0x91, 0x1e, 0xb0, 0xe2, 0x3d, 0x58, + 0x64, 0x56, 0xe9, 0xd1, 0xe4, 0x5a, 0xda, 0x51, 0x4d, 0x3f, 0xb5, 0x48, 0xfa, 0xd9, 0x84, 0xe2, + 0xe8, 0xfc, 0xca, 0x35, 0x7b, 0xc6, 0x40, 0x68, 0xe1, 0xb7, 0xf1, 0x47, 0x80, 0x54, 0x61, 0xb3, + 0x4c, 0x17, 0x57, 0xa1, 0xfc, 0xd4, 0x70, 0xcf, 0x85, 0x4a, 0xf8, 0x21, 0x54, 0x69, 0x73, 0xef, + 0xc5, 0x6b, 0xe8, 0xc8, 0x2e, 0x07, 0x92, 0x7b, 0x26, 0x9b, 0x23, 0x98, 0x3f, 0x37, 0xdc, 0x73, + 0x36, 0xd1, 0xaa, 0xce, 0x7e, 0xa3, 0x77, 0xa0, 0xde, 0xe3, 0x93, 0x3c, 0x89, 0x5c, 0x19, 0x16, + 0x04, 0xdd, 0xcf, 0x04, 0x3f, 0x85, 0x0a, 0x9f, 0xc3, 0x57, 0xad, 0x04, 0x5e, 0x84, 0x85, 0x63, + 0xcb, 0x18, 0xb9, 0xe7, 0xb6, 0x8c, 0x6e, 0x74, 0xd2, 0xf5, 0x80, 0x36, 0x13, 0xe2, 0xdb, 0xb0, + 0xe0, 0x90, 0xa1, 0x61, 0x5a, 0xa6, 0x75, 0x76, 0x72, 0x7a, 0xe5, 0x11, 0x57, 0x5c, 0x98, 0x6a, + 0x3e, 0xf9, 0x31, 0xa5, 0x52, 0xd5, 0x4e, 0x07, 0xf6, 0xa9, 0x70, 0x73, 0xec, 0x37, 0xfe, 0x69, + 0x06, 0x2a, 0x9f, 0x18, 0x5e, 0x4f, 0x2e, 0x1d, 0xda, 0x85, 0x9a, 0xef, 0xdc, 0x18, 0x45, 0xe8, + 0x12, 0x09, 0xb1, 0x6c, 0x8c, 0x4c, 0xa5, 0x65, 0x74, 0xac, 0xf6, 0x54, 0x02, 0x13, 0x65, 0x58, + 0x3d, 0x32, 0xf0, 0x45, 0x65, 0xd2, 0x45, 0x31, 0x46, 0x55, 0x94, 0x4a, 0x40, 0x87, 0x50, 0x1f, + 0x39, 0xf6, 0x99, 0x43, 0x5c, 0xd7, 0x17, 0xc6, 0xc3, 0x18, 0x4e, 0x10, 0x76, 0x24, 0x58, 0x03, + 0x71, 0x0b, 0xa3, 0x30, 0xe9, 0xf1, 0x42, 0x90, 0xcf, 0x70, 0xe7, 0xf4, 0x9f, 0x19, 0x40, 0xf1, + 0x49, 0xfd, 0xb6, 0x29, 0xde, 0x3d, 0xa8, 0xb9, 0x9e, 0xe1, 0xc4, 0x36, 0x5b, 
0x95, 0x51, 0x7d, + 0x8f, 0xff, 0x36, 0xf8, 0x0a, 0x9d, 0x58, 0xb6, 0x67, 0xbe, 0xbc, 0x12, 0x59, 0x72, 0x4d, 0x92, + 0x0f, 0x18, 0x15, 0x75, 0xa0, 0xf0, 0xd2, 0x1c, 0x78, 0xc4, 0x71, 0x1b, 0xb9, 0xf5, 0xec, 0x46, + 0x6d, 0xeb, 0xe1, 0x75, 0xcb, 0xb0, 0xf9, 0x21, 0xe3, 0xef, 0x5e, 0x8d, 0x88, 0x2e, 0xc7, 0xaa, + 0x99, 0x67, 0x3e, 0x94, 0x8d, 0xdf, 0x84, 0xe2, 0x2b, 0x2a, 0x82, 0xde, 0xb2, 0x0b, 0x3c, 0x59, + 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0xe9, 0x18, 0x67, 0x43, 0x62, 0x79, 0xf2, 0x1e, 0x28, 0xdb, 0xf8, + 0x1e, 0x40, 0x00, 0x43, 0x5d, 0xfe, 0xc1, 0xe1, 0xd1, 0xf3, 0x6e, 0x7d, 0x0e, 0x55, 0xa0, 0x78, + 0x70, 0xb8, 0xd3, 0xd9, 0xef, 0xd0, 0xf8, 0x80, 0x5b, 0xd2, 0xa4, 0xa1, 0xb5, 0x54, 0x31, 0xb5, + 0x10, 0x26, 0x5e, 0x85, 0xe5, 0xa4, 0x05, 0xa4, 0xb9, 0x68, 0x55, 0xec, 0xd2, 0x99, 0x8e, 0x8a, + 0x0a, 0x9d, 0x09, 0x4f, 0xb7, 0x01, 0x05, 0xbe, 0x7b, 0xfb, 0x22, 0x39, 0x97, 0x4d, 0x6a, 0x08, + 0xbe, 0x19, 0x49, 0x5f, 0xac, 0x92, 0xdf, 0x4e, 0x74, 0x2f, 0xb9, 0x44, 0xf7, 0x82, 0xee, 0x42, + 0xd5, 0x3f, 0x0d, 0x86, 0x2b, 0x72, 0x81, 0x92, 0x5e, 0x91, 0x1b, 0x9d, 0xd2, 0x42, 0x46, 0x2f, + 0x84, 0x8d, 0x8e, 0xee, 0x41, 0x9e, 0x4c, 0x88, 0xe5, 0xb9, 0x8d, 0x32, 0x8b, 0x18, 0x55, 0x99, + 0xbb, 0x77, 0x28, 0x55, 0x17, 0x9d, 0xf8, 0xbb, 0xb0, 0xc8, 0xee, 0x48, 0x4f, 0x1c, 0xc3, 0x52, + 0x2f, 0x73, 0xdd, 0xee, 0xbe, 0x30, 0x37, 0xfd, 0x89, 0x6a, 0x90, 0xd9, 0xdd, 0x11, 0x46, 0xc8, + 0xec, 0xee, 0xe0, 0x1f, 0x6b, 0x80, 0xd4, 0x71, 0x33, 0xd9, 0x39, 0x22, 0x5c, 0xc2, 0x67, 0x03, + 0xf8, 0x65, 0xc8, 0x11, 0xc7, 0xb1, 0x1d, 0x66, 0xd1, 0x92, 0xce, 0x1b, 0xf8, 0x2d, 0xa1, 0x83, + 0x4e, 0x26, 0xf6, 0x85, 0x7f, 0x06, 0xb9, 0x34, 0xcd, 0x57, 0x75, 0x0f, 0x96, 0x42, 0x5c, 0x33, + 0x45, 0xae, 0x0f, 0x61, 0x81, 0x09, 0xdb, 0x3e, 0x27, 0xbd, 0x8b, 0x91, 0x6d, 0x5a, 0x31, 0x3c, + 0xba, 0x72, 0x81, 0x83, 0xa5, 0xf3, 0xe0, 0x13, 0xab, 0xf8, 0xc4, 0x6e, 0x77, 0x1f, 0x7f, 0x06, + 0xab, 0x11, 0x39, 0x52, 0xfd, 0x3f, 0x82, 0x72, 0xcf, 0x27, 0xba, 0x22, 0xd7, 0xb9, 0x1d, 0x56, + 0x2e, 0x3a, 0x54, 
0x1d, 0x81, 0x0f, 0xe1, 0x46, 0x4c, 0xf4, 0x4c, 0x73, 0x7e, 0x1b, 0x56, 0x98, + 0xc0, 0x3d, 0x42, 0x46, 0xed, 0x81, 0x39, 0x49, 0xb5, 0xf4, 0x48, 0x4c, 0x4a, 0x61, 0xfc, 0x7a, + 0xf7, 0x05, 0xfe, 0x03, 0x81, 0xd8, 0x35, 0x87, 0xa4, 0x6b, 0xef, 0xa7, 0xeb, 0x46, 0xa3, 0xd9, + 0x05, 0xb9, 0x72, 0x45, 0x5a, 0xc3, 0x7e, 0xe3, 0x7f, 0xd6, 0x84, 0xa9, 0xd4, 0xe1, 0x5f, 0xf3, + 0x4e, 0x5e, 0x03, 0x38, 0xa3, 0x47, 0x86, 0xf4, 0x69, 0x07, 0xaf, 0xa8, 0x28, 0x14, 0x5f, 0x4f, + 0xea, 0xbf, 0x2b, 0x42, 0xcf, 0x65, 0xb1, 0xcf, 0xd9, 0x3f, 0xbe, 0x97, 0xbb, 0x0d, 0x65, 0x46, + 0x38, 0xf6, 0x0c, 0x6f, 0xec, 0xc6, 0x16, 0xe3, 0x2f, 0xc4, 0xb6, 0x97, 0x83, 0x66, 0x9a, 0xd7, + 0xb7, 0x20, 0xcf, 0x2e, 0x13, 0x32, 0x95, 0xbe, 0x99, 0xb0, 0x1f, 0xb9, 0x1e, 0xba, 0x60, 0xc4, + 0x3f, 0xd5, 0x20, 0xff, 0x8c, 0x95, 0x60, 0x15, 0xd5, 0xe6, 0xe5, 0x5a, 0x58, 0xc6, 0x90, 0x17, + 0x86, 0x4a, 0x3a, 0xfb, 0xcd, 0x52, 0x4f, 0x42, 0x9c, 0xe7, 0xfa, 0x3e, 0x4f, 0x71, 0x4b, 0xba, + 0xdf, 0xa6, 0x36, 0xeb, 0x0d, 0x4c, 0x62, 0x79, 0xac, 0x77, 0x9e, 0xf5, 0x2a, 0x14, 0x9a, 0x3d, + 0x9b, 0xee, 0x3e, 0x31, 0x1c, 0x4b, 0x14, 0x4d, 0x8b, 0x7a, 0x40, 0xc0, 0xfb, 0x50, 0xe7, 0x7a, + 0xb4, 0xfb, 0x7d, 0x25, 0xc1, 0xf4, 0xd1, 0xb4, 0x08, 0x5a, 0x48, 0x5a, 0x26, 0x2a, 0xed, 0x17, + 0x1a, 0x2c, 0x2a, 0xe2, 0x66, 0xb2, 0xea, 0xbb, 0x90, 0xe7, 0x45, 0x6a, 0x91, 0xe9, 0x2c, 0x87, + 0x47, 0x71, 0x18, 0x5d, 0xf0, 0xa0, 0x4d, 0x28, 0xf0, 0x5f, 0xf2, 0x0e, 0x90, 0xcc, 0x2e, 0x99, + 0xf0, 0x3d, 0x58, 0x12, 0x24, 0x32, 0xb4, 0x93, 0x0e, 0x06, 0x5b, 0x0c, 0xfc, 0x67, 0xb0, 0x1c, + 0x66, 0x9b, 0x69, 0x4a, 0x8a, 0x92, 0x99, 0xd7, 0x51, 0xb2, 0x2d, 0x95, 0x7c, 0x3e, 0xea, 0x2b, + 0x79, 0x54, 0x74, 0xc7, 0xa8, 0xeb, 0x95, 0x09, 0xaf, 0x57, 0x30, 0x01, 0x29, 0xe2, 0x1b, 0x9d, + 0xc0, 0x07, 0x72, 0x3b, 0xec, 0x9b, 0xae, 0xef, 0xc3, 0x31, 0x54, 0x06, 0xa6, 0x45, 0x0c, 0x47, + 0x54, 0xce, 0x35, 0x5e, 0x39, 0x57, 0x69, 0xf8, 0x0b, 0x40, 0xea, 0xc0, 0x6f, 0x54, 0xe9, 0xfb, + 0xd2, 0x64, 0x47, 0x8e, 0x3d, 0xb4, 0x53, 0xcd, 0x8e, 
0xff, 0x1c, 0x56, 0x22, 0x7c, 0xdf, 0xa8, + 0x9a, 0x4b, 0xb0, 0xb8, 0x43, 0x64, 0x42, 0x23, 0xdd, 0xde, 0x47, 0x80, 0x54, 0xe2, 0x4c, 0x91, + 0xad, 0x05, 0x8b, 0xcf, 0xec, 0x09, 0x75, 0x91, 0x94, 0x1a, 0xf8, 0x06, 0x5e, 0x87, 0xf0, 0x4d, + 0xe1, 0xb7, 0x29, 0xb8, 0x3a, 0x60, 0x26, 0xf0, 0x7f, 0xd7, 0xa0, 0xd2, 0x1e, 0x18, 0xce, 0x50, + 0x02, 0x7f, 0x0f, 0xf2, 0xfc, 0x76, 0x2d, 0x0a, 0x5a, 0xf7, 0xc3, 0x62, 0x54, 0x5e, 0xde, 0x68, + 0xf3, 0xbb, 0xb8, 0x18, 0x45, 0x15, 0x17, 0x6f, 0x5e, 0x3b, 0x91, 0x37, 0xb0, 0x1d, 0xf4, 0x1e, + 0xe4, 0x0c, 0x3a, 0x84, 0x85, 0xa2, 0x5a, 0xb4, 0xae, 0xc1, 0xa4, 0xb1, 0x3b, 0x00, 0xe7, 0xc2, + 0xdf, 0x81, 0xb2, 0x82, 0x80, 0x0a, 0x90, 0x7d, 0xd2, 0x11, 0x09, 0x7b, 0x7b, 0xbb, 0xbb, 0xfb, + 0x82, 0x17, 0x74, 0x6a, 0x00, 0x3b, 0x1d, 0xbf, 0x9d, 0xc1, 0x9f, 0x8a, 0x51, 0xc2, 0xed, 0xab, + 0xfa, 0x68, 0x69, 0xfa, 0x64, 0x5e, 0x4b, 0x9f, 0x4b, 0xa8, 0x8a, 0xe9, 0xcf, 0x1a, 0xc6, 0x98, + 0xbc, 0x94, 0x30, 0xa6, 0x28, 0xaf, 0x0b, 0x46, 0xfc, 0x2b, 0x0d, 0xea, 0x3b, 0xf6, 0x2b, 0xeb, + 0xcc, 0x31, 0xfa, 0xfe, 0x39, 0xf9, 0x30, 0xb2, 0x52, 0x9b, 0x91, 0xe2, 0x68, 0x84, 0x3f, 0x20, + 0x44, 0x56, 0xac, 0x11, 0x94, 0x0d, 0x79, 0x2c, 0x94, 0x4d, 0xfc, 0x01, 0x2c, 0x44, 0x06, 0x51, + 0xdb, 0xbf, 0x68, 0xef, 0xef, 0xee, 0x50, 0x5b, 0xb3, 0xc2, 0x5a, 0xe7, 0xa0, 0xfd, 0x78, 0xbf, + 0x23, 0x1e, 0x90, 0xda, 0x07, 0xdb, 0x9d, 0xfd, 0x7a, 0x06, 0xf7, 0x60, 0x51, 0x81, 0x9f, 0xf5, + 0x65, 0x20, 0x45, 0xbb, 0x05, 0xa8, 0x8a, 0x68, 0x2f, 0x0e, 0xe5, 0xbf, 0x65, 0xa0, 0x26, 0x29, + 0x5f, 0x0f, 0x26, 0x5a, 0x85, 0x7c, 0xff, 0xf4, 0xd8, 0xfc, 0x42, 0xbe, 0x1c, 0x89, 0x16, 0xa5, + 0x0f, 0x38, 0x0e, 0x7f, 0xbe, 0x15, 0x2d, 0x1a, 0xc6, 0x1d, 0xe3, 0xa5, 0xb7, 0x6b, 0xf5, 0xc9, + 0x25, 0x4b, 0x0a, 0xe6, 0xf5, 0x80, 0xc0, 0x2a, 0x4c, 0xe2, 0x99, 0x97, 0xdd, 0xac, 0x94, 0x67, + 0x5f, 0xf4, 0x00, 0xea, 0xf4, 0x77, 0x7b, 0x34, 0x1a, 0x98, 0xa4, 0xcf, 0x05, 0x14, 0x18, 0x4f, + 0x8c, 0x4e, 0xd1, 0xd9, 0x5d, 0xc4, 0x6d, 0x14, 0x59, 0x58, 0x12, 0x2d, 0xb4, 0x0e, 0x65, 
0xae, + 0xdf, 0xae, 0xf5, 0xdc, 0x25, 0xec, 0xed, 0x33, 0xab, 0xab, 0xa4, 0x70, 0x9a, 0x01, 0xd1, 0x34, + 0x63, 0x09, 0x16, 0xdb, 0x63, 0xef, 0xbc, 0x63, 0xd1, 0x58, 0x21, 0xad, 0xbc, 0x0c, 0x88, 0x12, + 0x77, 0x4c, 0x57, 0xa5, 0x0a, 0xd6, 0xf0, 0x82, 0x74, 0x60, 0x89, 0x12, 0x89, 0xe5, 0x99, 0x3d, + 0x25, 0xae, 0xca, 0xcc, 0x4b, 0x8b, 0x64, 0x5e, 0x86, 0xeb, 0xbe, 0xb2, 0x9d, 0xbe, 0xb0, 0xb9, + 0xdf, 0xc6, 0xff, 0xa8, 0x71, 0xc8, 0xe7, 0x6e, 0x28, 0x7d, 0xfa, 0x2d, 0xc5, 0xa0, 0xf7, 0xa1, + 0x60, 0x8f, 0xd8, 0x0b, 0xbf, 0x28, 0xc3, 0xac, 0x6e, 0xf2, 0x6f, 0x02, 0x36, 0x85, 0xe0, 0x43, + 0xde, 0xab, 0x4b, 0x36, 0x74, 0x1f, 0x6a, 0xe7, 0x86, 0x7b, 0x4e, 0xfa, 0x47, 0x52, 0x26, 0xbf, + 0xf9, 0x45, 0xa8, 0x78, 0x23, 0xd0, 0xef, 0x09, 0xf1, 0xa6, 0xe8, 0x87, 0x1f, 0xc2, 0x8a, 0xe4, + 0x14, 0xaf, 0x13, 0x53, 0x98, 0x5f, 0xc1, 0x6d, 0xc9, 0xbc, 0x7d, 0x6e, 0x58, 0x67, 0x44, 0x02, + 0xfe, 0xae, 0x16, 0x88, 0xcf, 0x27, 0x9b, 0x38, 0x9f, 0xc7, 0xd0, 0xf0, 0xe7, 0xc3, 0x6e, 0xd6, + 0xf6, 0x40, 0x55, 0x74, 0xec, 0x8a, 0xf3, 0x54, 0xd2, 0xd9, 0x6f, 0x4a, 0x73, 0xec, 0x81, 0x9f, + 0x4a, 0xd3, 0xdf, 0x78, 0x1b, 0x6e, 0x4a, 0x19, 0xe2, 0xce, 0x1b, 0x16, 0x12, 0x53, 0x3c, 0x49, + 0x88, 0x30, 0x2c, 0x1d, 0x3a, 0x7d, 0xe1, 0x55, 0xce, 0xf0, 0x12, 0x30, 0x99, 0x9a, 0x22, 0x73, + 0x85, 0x6f, 0x4a, 0xaa, 0x98, 0x92, 0x2d, 0x49, 0x32, 0x15, 0xa0, 0x92, 0xc5, 0x82, 0x51, 0x72, + 0x6c, 0xc1, 0x62, 0xa2, 0x7f, 0x00, 0x6b, 0xbe, 0x12, 0xd4, 0x6e, 0x47, 0xc4, 0x19, 0x9a, 0xae, + 0xab, 0xd4, 0xbd, 0x93, 0x26, 0x7e, 0x1f, 0xe6, 0x47, 0x44, 0x04, 0xa1, 0xf2, 0x16, 0x92, 0x9b, + 0x52, 0x19, 0xcc, 0xfa, 0x71, 0x1f, 0xee, 0x48, 0xe9, 0xdc, 0xa2, 0x89, 0xe2, 0xa3, 0x4a, 0xc9, + 0x6a, 0x60, 0x26, 0xa5, 0x1a, 0x98, 0x8d, 0xbc, 0xc5, 0x7c, 0xc4, 0x0d, 0x29, 0xcf, 0xfc, 0x4c, + 0xc9, 0xc5, 0x1e, 0xb7, 0xa9, 0xef, 0x2a, 0x66, 0x12, 0xf6, 0xd7, 0xc2, 0x0b, 0x7c, 0x55, 0x1e, + 0x9e, 0xb0, 0x19, 0xca, 0x87, 0x0e, 0xd9, 0xa4, 0x59, 0x33, 0x5d, 0x00, 0x5d, 0xad, 0x85, 0xce, + 0xeb, 0x21, 0x1a, 0x3e, 0x85, 
0xe5, 0xb0, 0x5f, 0x9b, 0x49, 0x97, 0x65, 0xc8, 0x79, 0xf6, 0x05, + 0x91, 0xb1, 0x86, 0x37, 0xa4, 0xed, 0x7c, 0x9f, 0x37, 0x93, 0xed, 0x8c, 0x40, 0x18, 0x3b, 0x1d, + 0xb3, 0xea, 0x4b, 0x37, 0x96, 0xbc, 0x03, 0xf1, 0x06, 0x3e, 0x80, 0xd5, 0xa8, 0x67, 0x9b, 0x49, + 0xe5, 0x17, 0xfc, 0x2c, 0x25, 0x39, 0xbf, 0x99, 0xe4, 0x7e, 0x1c, 0xf8, 0x25, 0xc5, 0xb7, 0xcd, + 0x24, 0x52, 0x87, 0x66, 0x92, 0xab, 0xfb, 0x2a, 0x8e, 0x8e, 0xef, 0xf9, 0x66, 0x12, 0xe6, 0x06, + 0xc2, 0x66, 0x5f, 0xfe, 0xc0, 0x5d, 0x65, 0xa7, 0xba, 0x2b, 0x71, 0x48, 0x02, 0x87, 0xfa, 0x35, + 0x6c, 0x3a, 0x81, 0x11, 0xf8, 0xf2, 0x59, 0x31, 0x68, 0x38, 0xf3, 0x31, 0x58, 0x43, 0x6e, 0x6c, + 0x35, 0x02, 0xcc, 0xb4, 0x18, 0x9f, 0x04, 0x6e, 0x3c, 0x16, 0x24, 0x66, 0x12, 0xfc, 0x29, 0xac, + 0xa7, 0xc7, 0x87, 0x59, 0x24, 0x3f, 0x68, 0x41, 0xc9, 0xbf, 0x0c, 0x29, 0xdf, 0x9b, 0x95, 0xa1, + 0x70, 0x70, 0x78, 0x7c, 0xd4, 0xde, 0xee, 0xf0, 0x0f, 0xce, 0xb6, 0x0f, 0x75, 0xfd, 0xf9, 0x51, + 0xb7, 0x9e, 0xd9, 0xfa, 0x75, 0x16, 0x32, 0x7b, 0x2f, 0xd0, 0x67, 0x90, 0xe3, 0x5f, 0x5f, 0x4c, + 0xf9, 0xe4, 0xa6, 0x39, 0xed, 0x03, 0x13, 0x7c, 0xe3, 0xc7, 0xff, 0xf5, 0xeb, 0x9f, 0x67, 0x16, + 0x71, 0xa5, 0x35, 0xf9, 0x76, 0xeb, 0x62, 0xd2, 0x62, 0x61, 0xea, 0x91, 0xf6, 0x00, 0x7d, 0x0c, + 0xd9, 0xa3, 0xb1, 0x87, 0x52, 0x3f, 0xc5, 0x69, 0xa6, 0x7f, 0x73, 0x82, 0x57, 0x98, 0xd0, 0x05, + 0x0c, 0x42, 0xe8, 0x68, 0xec, 0x51, 0x91, 0x3f, 0x84, 0xb2, 0xfa, 0xc5, 0xc8, 0xb5, 0xdf, 0xe7, + 0x34, 0xaf, 0xff, 0x1a, 0x05, 0xdf, 0x66, 0x50, 0x37, 0x30, 0x12, 0x50, 0xfc, 0x9b, 0x16, 0x75, + 0x16, 0xdd, 0x4b, 0x0b, 0xa5, 0x7e, 0xbd, 0xd3, 0x4c, 0xff, 0x40, 0x25, 0x36, 0x0b, 0xef, 0xd2, + 0xa2, 0x22, 0xff, 0x44, 0x7c, 0x9b, 0xd2, 0xf3, 0xd0, 0x9d, 0x84, 0x6f, 0x13, 0xd4, 0x57, 0xf8, + 0xe6, 0x7a, 0x3a, 0x83, 0x00, 0xb9, 0xc5, 0x40, 0x56, 0xf1, 0xa2, 0x00, 0xe9, 0xf9, 0x2c, 0x8f, + 0xb4, 0x07, 0x5b, 0x3d, 0xc8, 0xb1, 0x17, 0x2e, 0xf4, 0xb9, 0xfc, 0xd1, 0x4c, 0x78, 0xea, 0x4b, + 0x59, 0xe8, 0xd0, 0xdb, 0x18, 0x5e, 0x66, 0x40, 0x35, 0x5c, 0xa2, 
0x40, 0xec, 0x7d, 0xeb, 0x91, + 0xf6, 0x60, 0x43, 0x7b, 0x5f, 0xdb, 0xfa, 0x55, 0x0e, 0x72, 0xac, 0xb4, 0x8b, 0x2e, 0x00, 0x82, + 0xd7, 0x9e, 0xe8, 0xec, 0x62, 0xef, 0x47, 0xd1, 0xd9, 0xc5, 0x1f, 0x8a, 0x70, 0x93, 0x81, 0x2e, + 0xe3, 0x05, 0x0a, 0xca, 0x2a, 0xc6, 0x2d, 0x56, 0x04, 0xa7, 0x76, 0xfc, 0x1b, 0x4d, 0x54, 0xb6, + 0xf9, 0x59, 0x42, 0x49, 0xd2, 0x42, 0x4f, 0x3e, 0xd1, 0xed, 0x90, 0xf0, 0xdc, 0x83, 0xbf, 0xcb, + 0x00, 0x5b, 0xb8, 0x1e, 0x00, 0x3a, 0x8c, 0xe3, 0x91, 0xf6, 0xe0, 0xf3, 0x06, 0x5e, 0x12, 0x56, + 0x8e, 0xf4, 0xa0, 0x1f, 0x41, 0x2d, 0xfc, 0xa4, 0x81, 0xee, 0x26, 0x60, 0x45, 0x5f, 0x46, 0x9a, + 0x6f, 0x4d, 0x67, 0x12, 0x3a, 0xad, 0x31, 0x9d, 0x04, 0x38, 0x47, 0xbe, 0x20, 0x64, 0x64, 0x50, + 0x26, 0xb1, 0x06, 0xe8, 0x1f, 0x34, 0xf1, 0xe2, 0x14, 0xbc, 0x51, 0xa0, 0x24, 0xe9, 0xb1, 0x17, + 0x90, 0xe6, 0xbd, 0x6b, 0xb8, 0x84, 0x12, 0x7f, 0xc8, 0x94, 0xf8, 0x00, 0x2f, 0x07, 0x4a, 0x78, + 0xe6, 0x90, 0x78, 0xb6, 0xd0, 0xe2, 0xf3, 0x5b, 0xf8, 0x46, 0xc8, 0x38, 0xa1, 0xde, 0x60, 0xb1, + 0xf8, 0x3b, 0x43, 0xe2, 0x62, 0x85, 0xde, 0x2d, 0x12, 0x17, 0x2b, 0xfc, 0x48, 0x91, 0xb4, 0x58, + 0xfc, 0x55, 0x21, 0x69, 0xb1, 0xfc, 0x9e, 0xad, 0xff, 0x9b, 0x87, 0xc2, 0x36, 0xff, 0x26, 0x1c, + 0xd9, 0x50, 0xf2, 0xcb, 0xf4, 0x68, 0x2d, 0xa9, 0xce, 0x18, 0x5c, 0x6b, 0x9a, 0x77, 0x52, 0xfb, + 0x85, 0x42, 0x6f, 0x32, 0x85, 0xde, 0xc0, 0xab, 0x14, 0x59, 0x7c, 0x76, 0xde, 0xe2, 0xc5, 0xac, + 0x96, 0xd1, 0xef, 0x53, 0x43, 0xfc, 0x29, 0x54, 0xd4, 0x3a, 0x3a, 0x7a, 0x33, 0xb1, 0xb6, 0xa9, + 0x96, 0xe2, 0x9b, 0x78, 0x1a, 0x8b, 0x40, 0x7e, 0x8b, 0x21, 0xaf, 0xe1, 0x9b, 0x09, 0xc8, 0x0e, + 0x63, 0x0d, 0x81, 0xf3, 0x1a, 0x78, 0x32, 0x78, 0xa8, 0xc4, 0x9e, 0x0c, 0x1e, 0x2e, 0xa1, 0x4f, + 0x05, 0x1f, 0x33, 0x56, 0x0a, 0xee, 0x02, 0x04, 0x95, 0x6c, 0x94, 0x68, 0x4b, 0xe5, 0x5e, 0x17, + 0x75, 0x0e, 0xf1, 0x22, 0x38, 0xc6, 0x0c, 0x56, 0xec, 0xbb, 0x08, 0xec, 0xc0, 0x74, 0x3d, 0x7e, + 0x30, 0xab, 0xa1, 0xd2, 0x34, 0x4a, 0x9c, 0x4f, 0xb8, 0xbe, 0xdd, 0xbc, 0x3b, 0x95, 0x47, 0xa0, + 0xdf, 
0x63, 0xe8, 0x77, 0x70, 0x33, 0x01, 0x7d, 0xc4, 0x79, 0xe9, 0x66, 0xfb, 0xff, 0x3c, 0x94, + 0x9f, 0x19, 0xa6, 0xe5, 0x11, 0xcb, 0xb0, 0x7a, 0x04, 0x9d, 0x42, 0x8e, 0x45, 0xea, 0xa8, 0x23, + 0x56, 0xcb, 0xb6, 0x51, 0x47, 0x1c, 0xaa, 0x69, 0xe2, 0x75, 0x06, 0xdc, 0xc4, 0x2b, 0x14, 0x78, + 0x18, 0x88, 0x6e, 0xb1, 0x52, 0x24, 0x9d, 0xf4, 0x4b, 0xc8, 0x8b, 0xd7, 0xbe, 0x88, 0xa0, 0x50, + 0xf1, 0xa7, 0x79, 0x2b, 0xb9, 0x33, 0x69, 0x2f, 0xab, 0x30, 0x2e, 0xe3, 0xa3, 0x38, 0x13, 0x80, + 0xa0, 0xc6, 0x1e, 0x5d, 0xd1, 0x58, 0x49, 0xbe, 0xb9, 0x9e, 0xce, 0x90, 0x64, 0x53, 0x15, 0xb3, + 0xef, 0xf3, 0x52, 0xdc, 0x3f, 0x86, 0xf9, 0xa7, 0x86, 0x7b, 0x8e, 0x22, 0xb1, 0x57, 0xf9, 0x56, + 0xac, 0xd9, 0x4c, 0xea, 0x12, 0x28, 0x77, 0x18, 0xca, 0x4d, 0xee, 0xca, 0x54, 0x94, 0x73, 0xc3, + 0xa5, 0x41, 0x0d, 0xf5, 0x21, 0xcf, 0x3f, 0x1d, 0x8b, 0xda, 0x2f, 0xf4, 0xf9, 0x59, 0xd4, 0x7e, + 0xe1, 0xaf, 0xcd, 0xae, 0x47, 0x19, 0x41, 0x51, 0x7e, 0xab, 0x85, 0x22, 0x0f, 0xf7, 0x91, 0xef, + 0xba, 0x9a, 0x6b, 0x69, 0xdd, 0x02, 0xeb, 0x2e, 0xc3, 0xba, 0x8d, 0x1b, 0xb1, 0xb5, 0x12, 0x9c, + 0x8f, 0xb4, 0x07, 0xef, 0x6b, 0xe8, 0x47, 0x00, 0xc1, 0xb3, 0x44, 0xec, 0x04, 0x46, 0x5f, 0x38, + 0x62, 0x27, 0x30, 0xf6, 0xa2, 0x81, 0x37, 0x19, 0xee, 0x06, 0xbe, 0x1b, 0xc5, 0xf5, 0x1c, 0xc3, + 0x72, 0x5f, 0x12, 0xe7, 0x3d, 0x5e, 0x65, 0x75, 0xcf, 0xcd, 0x11, 0x9d, 0xb2, 0x03, 0x25, 0xbf, + 0xea, 0x1c, 0xf5, 0xb6, 0xd1, 0x6a, 0x78, 0xd4, 0xdb, 0xc6, 0xca, 0xd5, 0x61, 0xb7, 0x13, 0xda, + 0x2d, 0x92, 0x95, 0x1e, 0xc0, 0x5f, 0xd4, 0x61, 0x9e, 0x66, 0xdd, 0x34, 0x39, 0x09, 0xea, 0x26, + 0xd1, 0xd9, 0xc7, 0xaa, 0xa8, 0xd1, 0xd9, 0xc7, 0x4b, 0x2e, 0xe1, 0xe4, 0x84, 0x5e, 0xb2, 0x5a, + 0xbc, 0x44, 0x41, 0x67, 0x6a, 0x43, 0x59, 0x29, 0xac, 0xa0, 0x04, 0x61, 0xe1, 0xf2, 0x6c, 0x34, + 0xdc, 0x25, 0x54, 0x65, 0xf0, 0x1b, 0x0c, 0x6f, 0x85, 0x87, 0x3b, 0x86, 0xd7, 0xe7, 0x1c, 0x14, + 0x50, 0xcc, 0x4e, 0x9c, 0xfb, 0x84, 0xd9, 0x85, 0xcf, 0xfe, 0x7a, 0x3a, 0x43, 0xea, 0xec, 0x82, + 0x83, 0xff, 0x0a, 0x2a, 0x6a, 0x79, 0x05, 
0x25, 0x28, 0x1f, 0x29, 0x29, 0x47, 0xe3, 0x48, 0x52, + 0x75, 0x26, 0xec, 0xd9, 0x18, 0xa4, 0xa1, 0xb0, 0x51, 0xe0, 0x01, 0x14, 0x44, 0xbd, 0x25, 0xc9, + 0xa4, 0xe1, 0xf2, 0x73, 0x92, 0x49, 0x23, 0xc5, 0x9a, 0x70, 0xf6, 0xcc, 0x10, 0xe9, 0x95, 0x52, + 0xc6, 0x6a, 0x81, 0xf6, 0x84, 0x78, 0x69, 0x68, 0x41, 0x25, 0x33, 0x0d, 0x4d, 0xb9, 0xce, 0xa7, + 0xa1, 0x9d, 0x11, 0x4f, 0xf8, 0x03, 0x79, 0x4d, 0x46, 0x29, 0xc2, 0xd4, 0xf8, 0x88, 0xa7, 0xb1, + 0x24, 0x5d, 0x6e, 0x02, 0x40, 0x19, 0x1c, 0x2f, 0x01, 0x82, 0x6a, 0x50, 0x34, 0x63, 0x4d, 0xac, + 0x82, 0x47, 0x33, 0xd6, 0xe4, 0x82, 0x52, 0xd8, 0xf7, 0x05, 0xb8, 0xfc, 0x6e, 0x45, 0x91, 0x7f, + 0xa6, 0x01, 0x8a, 0x17, 0x8e, 0xd0, 0xc3, 0x64, 0xe9, 0x89, 0xb5, 0xf5, 0xe6, 0xbb, 0xaf, 0xc7, + 0x9c, 0x14, 0xce, 0x02, 0x95, 0x7a, 0x8c, 0x7b, 0xf4, 0x8a, 0x2a, 0xf5, 0x97, 0x1a, 0x54, 0x43, + 0x55, 0x27, 0x74, 0x3f, 0x65, 0x4d, 0x23, 0x25, 0xf7, 0xe6, 0xdb, 0xd7, 0xf2, 0x25, 0xa5, 0xf2, + 0xca, 0x0e, 0x90, 0x77, 0x9a, 0x9f, 0x68, 0x50, 0x0b, 0x57, 0xa9, 0x50, 0x8a, 0xec, 0x58, 0xc9, + 0xbe, 0xb9, 0x71, 0x3d, 0xe3, 0xf4, 0xe5, 0x09, 0xae, 0x33, 0x03, 0x28, 0x88, 0xba, 0x56, 0xd2, + 0xc6, 0x0f, 0x17, 0xfb, 0x93, 0x36, 0x7e, 0xa4, 0x28, 0x96, 0xb0, 0xf1, 0x1d, 0x7b, 0x40, 0x94, + 0x63, 0x26, 0x0a, 0x5f, 0x69, 0x68, 0xd3, 0x8f, 0x59, 0xa4, 0x6a, 0x96, 0x86, 0x16, 0x1c, 0x33, + 0x59, 0xf1, 0x42, 0x29, 0xc2, 0xae, 0x39, 0x66, 0xd1, 0x82, 0x59, 0xc2, 0x31, 0x63, 0x80, 0xca, + 0x31, 0x0b, 0x6a, 0x53, 0x49, 0xc7, 0x2c, 0xf6, 0x76, 0x91, 0x74, 0xcc, 0xe2, 0xe5, 0xad, 0x84, + 0x75, 0x64, 0xb8, 0xa1, 0x63, 0xb6, 0x94, 0x50, 0xc6, 0x42, 0xef, 0xa6, 0x18, 0x31, 0xf1, 0x49, + 0xa4, 0xf9, 0xde, 0x6b, 0x72, 0xa7, 0xee, 0x71, 0x6e, 0x7e, 0xb9, 0xc7, 0xff, 0x56, 0x83, 0xe5, + 0xa4, 0x12, 0x18, 0x4a, 0xc1, 0x49, 0x79, 0x4a, 0x69, 0x6e, 0xbe, 0x2e, 0xfb, 0x74, 0x6b, 0xf9, + 0xbb, 0xfe, 0x71, 0xfd, 0x5f, 0xbf, 0x5c, 0xd3, 0xfe, 0xe3, 0xcb, 0x35, 0xed, 0xbf, 0xbf, 0x5c, + 0xd3, 0xfe, 0xee, 0x7f, 0xd6, 0xe6, 0x4e, 0xf3, 0xec, 0x3f, 0x1a, 0x7f, 0xfb, 
0x37, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xee, 0x4f, 0x63, 0x90, 0xed, 0x3c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// KVClient is the client API for KV service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KVClient interface { + Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) + Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) + DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) + Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) + Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) +} + +type kVClient struct { + cc *grpc.ClientConn +} + +func NewKVClient(cc *grpc.ClientConn) KVClient { + return &kVClient{cc} +} + +func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { + out := new(RangeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { + out := new(PutResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { + out := new(DeleteRangeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { + out := new(TxnResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { + out := new(CompactionResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KVServer k,v服务 +type KVServer interface { + Range(context.Context, *RangeRequest) (*RangeResponse, error) // 范围查询 + Put(context.Context, *PutRequest) (*PutResponse, error) // 更新、创建 + DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) // 范围删除 + // Txn 在一个事务中处理多个请求.一个txn请求会增加键值存储的版本并为每个完成的请求生成具有相同版本的事件.不允许在一个txn中多次修改同一个键. + Txn(context.Context, *TxnRequest) (*TxnResponse, error) + Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) // 压缩 etcd 键值存储中的事件历史 +} + +// UnimplementedKVServer can be embedded to have forward compatible implementations. 
+type UnimplementedKVServer struct{} + +func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Range not implemented") +} + +func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} + +func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented") +} + +func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented") +} + +func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented") +} + +func RegisterKVServer(s *grpc.Server, srv KVServer) { + s.RegisterService(&_KV_serviceDesc, srv) +} + +func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Range(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Range", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Range(ctx, req.(*RangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Put(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Put(ctx, req.(*PutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).DeleteRange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/DeleteRange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TxnRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Txn(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Txn", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompactionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KVServer).Compact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.KV/Compact", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KVServer).Compact(ctx, 
req.(*CompactionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _KV_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.KV", + HandlerType: (*KVServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Range", + Handler: _KV_Range_Handler, + }, + { + MethodName: "Put", + Handler: _KV_Put_Handler, + }, + { + MethodName: "DeleteRange", + Handler: _KV_DeleteRange_Handler, + }, + { + MethodName: "Txn", + Handler: _KV_Txn_Handler, + }, + { + MethodName: "Compact", + Handler: _KV_Compact_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// WatchClient is the client API for Watch service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type WatchClient interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...) 
+ if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + return x, nil +} + +type Watch_WatchClient interface { + Send(*WatchRequest) error + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Send(m *WatchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *watchWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +type WatchServer interface { + // Watch 观察正在发生或已经发生的事件.输入和输出都是流;输入流用于创建和取消监视和输出 + // 流发送事件.一个watch RPC可以在多个key range上watch ,一次为几个watch stream event .整个事件历史可以从最后的压缩修订开始观看. + Watch(Watch_WatchServer) error +} + +// UnimplementedWatchServer can be embedded to have forward compatible implementations. +type UnimplementedWatchServer struct{} + +func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WatchServer).Watch(&watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchResponse) error + Recv() (*WatchRequest, error) + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *watchWatchServer) Recv() (*WatchRequest, error) { + m := new(WatchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + 
ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// ClusterClient is the client API for Cluster service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ClusterClient interface { + MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) + MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) + MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) + MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) + MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) +} + +type clusterClient struct { + cc *grpc.ClientConn +} + +func NewClusterClient(cc *grpc.ClientConn) ClusterClient { + return &clusterClient{cc} +} + +func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { + out := new(MemberAddResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { + out := new(MemberRemoveResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { + out := new(MemberUpdateResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { + out := new(MemberListResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) { + out := new(MemberPromoteResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ClusterServer is the server API for Cluster service. +type ClusterServer interface { + MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) + MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) + MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) + MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) + MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error) +} + +func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { + s.RegisterService(&_Cluster_serviceDesc, srv) +} + +func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberRemove_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberRemoveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberRemove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberRemove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberPromoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberPromote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberPromote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Cluster_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Cluster", + HandlerType: (*ClusterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MemberAdd", + Handler: _Cluster_MemberAdd_Handler, + }, + { + MethodName: "MemberRemove", + Handler: _Cluster_MemberRemove_Handler, + }, + { + MethodName: "MemberUpdate", + Handler: _Cluster_MemberUpdate_Handler, + }, + { + MethodName: "MemberList", + Handler: _Cluster_MemberList_Handler, + }, + { + MethodName: "MemberPromote", + Handler: _Cluster_MemberPromote_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// MaintenanceClient is the client API for Maintenance service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type MaintenanceClient interface { + Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) + Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) + HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) + Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) + MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) + Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) +} + +type maintenanceClient struct { + cc *grpc.ClientConn +} + +func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { + return &maintenanceClient{cc} +} + +func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { + out := new(AlarmResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Defragment 碎片整理 +func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { + out := new(DefragmentResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { + out := new(HashResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { + out := new(HashKVResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { + stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...) + if err != nil { + return nil, err + } + x := &maintenanceSnapshotClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Maintenance_SnapshotClient interface { + Recv() (*SnapshotResponse, error) + grpc.ClientStream +} + +type maintenanceSnapshotClient struct { + grpc.ClientStream +} + +func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { + m := new(SnapshotResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { + out := new(MoveLeaderResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) { + out := new(DowngradeResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Downgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +type MaintenanceServer interface { + Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) + Status(context.Context, *StatusRequest) (*StatusResponse, error) + Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) // 碎片整理 + Hash(context.Context, *HashRequest) (*HashResponse, error) + HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) // 计算所有MVCC键的哈希值直到一个给定的修订.只遍历key桶 + Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error + MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) + Downgrade(context.Context, *DowngradeRequest) (*DowngradeResponse, error) +} + +func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { + s.RegisterService(&_Maintenance_serviceDesc, srv) +} + +func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AlarmRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Alarm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Alarm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DefragmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Defragment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Defragment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Hash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Hash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashKVRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).HashKV(ctx, in) + } + 
info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/HashKV", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SnapshotRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) +} + +type Maintenance_SnapshotServer interface { + Send(*SnapshotResponse) error + grpc.ServerStream +} + +type maintenanceSnapshotServer struct { + grpc.ServerStream +} + +func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveLeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).MoveLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/MoveLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Downgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DowngradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Downgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Downgrade", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(MaintenanceServer).Downgrade(ctx, req.(*DowngradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Maintenance_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Maintenance", + HandlerType: (*MaintenanceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Alarm", + Handler: _Maintenance_Alarm_Handler, + }, + { + MethodName: "Status", + Handler: _Maintenance_Status_Handler, + }, + { + MethodName: "Defragment", + Handler: _Maintenance_Defragment_Handler, + }, + { + MethodName: "Hash", + Handler: _Maintenance_Hash_Handler, + }, + { + MethodName: "HashKV", + Handler: _Maintenance_HashKV_Handler, + }, + { + MethodName: "MoveLeader", + Handler: _Maintenance_MoveLeader_Handler, + }, + { + MethodName: "Downgrade", + Handler: _Maintenance_Downgrade_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Snapshot", + Handler: _Maintenance_Snapshot_Handler, + ServerStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// AuthClient is the client API for Auth service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type AuthClient interface { + AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) + AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) + AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) + Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) + UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) + UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) + UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) + UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) + UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) + UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) + UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) + RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) + RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) + RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) + RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) + RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) + RoleRevokePermission(ctx context.Context, in 
*AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { + out := new(AuthEnableResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { + out := new(AuthDisableResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) { + out := new(AuthStatusResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { + out := new(AuthenticateResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { + out := new(AuthUserAddResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { + out := new(AuthUserGetResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { + out := new(AuthUserListResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { + out := new(AuthUserDeleteResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { + out := new(AuthUserChangePasswordResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { + out := new(AuthUserGrantRoleResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { + out := new(AuthUserRevokeRoleResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { + out := new(AuthRoleAddResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { + out := new(AuthRoleGetResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { + out := new(AuthRoleListResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { + out := new(AuthRoleDeleteResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { + out := new(AuthRoleGrantPermissionResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { + out := new(AuthRoleRevokePermissionResponse) + err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +type AuthServer interface { + AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) + AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) + AuthStatus(context.Context, *AuthStatusRequest) (*AuthStatusResponse, error) + Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) + UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) + UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) + UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) + UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) + UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) + UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) + UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) + RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) + RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) + RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) + RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) + RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) + RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthEnableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(AuthServer).AuthEnable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthEnable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthDisableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthDisable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthDisable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthStatus(ctx, req.(*AuthStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthenticateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Authenticate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/etcdserverpb.Auth/Authenticate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserChangePasswordRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserChangePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserChangePassword", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGrantRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGrantRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGrantRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGrantRole(ctx, 
req.(*AuthUserGrantRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserRevokeRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserRevokeRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserRevokeRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGrantPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleRevokePermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleRevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AuthEnable", + Handler: _Auth_AuthEnable_Handler, + }, + { + MethodName: "AuthDisable", + Handler: _Auth_AuthDisable_Handler, + }, + { + MethodName: "AuthStatus", + Handler: _Auth_AuthStatus_Handler, + }, + { + MethodName: "Authenticate", + Handler: _Auth_Authenticate_Handler, + }, + { + MethodName: "UserAdd", + Handler: _Auth_UserAdd_Handler, + }, + { + MethodName: "UserGet", + Handler: _Auth_UserGet_Handler, + }, + { + MethodName: "UserList", + Handler: _Auth_UserList_Handler, + }, + { + MethodName: "UserDelete", + Handler: _Auth_UserDelete_Handler, + }, + { + MethodName: "UserChangePassword", + Handler: _Auth_UserChangePassword_Handler, + }, + { + MethodName: "UserGrantRole", + Handler: _Auth_UserGrantRole_Handler, + }, + { + MethodName: "UserRevokeRole", + Handler: _Auth_UserRevokeRole_Handler, + }, + { + MethodName: "RoleAdd", + Handler: _Auth_RoleAdd_Handler, + }, + { + MethodName: "RoleGet", + Handler: _Auth_RoleGet_Handler, + }, + { + MethodName: "RoleList", + Handler: _Auth_RoleList_Handler, + }, + { + MethodName: "RoleDelete", + Handler: _Auth_RoleDelete_Handler, + }, + { + MethodName: "RoleGrantPermission", + Handler: _Auth_RoleGrantPermission_Handler, + }, + { + MethodName: 
"RoleRevokePermission", + Handler: _Auth_RoleRevokePermission_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + offset -= sovRpc(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *RangeRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *RangeResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *PutRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *PutResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *RequestOp) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *ResponseOp) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *Compare) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *TxnRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *TxnResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *HashRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *HashResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { 
return json.Marshal(m) } +func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } + +func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *WatchResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *Member) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberAddResponse) Marshal() 
(dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *DowngradeRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *DowngradeResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthStatusRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthenticateRequest) 
Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthStatusResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m 
*AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } +func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { return json.Marshal(m) } + +func (m *ResponseHeader) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *RangeRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *RangeResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *PutRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *PutResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *DeleteRangeRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *DeleteRangeResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *RequestOp) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *RequestOp_RequestRange) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *RequestOp_RequestPut) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *RequestOp_RequestDeleteRange) Size() (n int) 
{ + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *RequestOp_RequestTxn) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *ResponseOp) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *ResponseOp_ResponseRange) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *ResponseOp_ResponsePut) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *ResponseOp_ResponseTxn) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *Compare) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *Compare_Version) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *Compare_CreateRevision) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *Compare_ModRevision) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *Compare_Value) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *Compare_Lease) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *TxnRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *TxnResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *CompactionRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *CompactionResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *HashRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *HashKVRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *HashKVResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *HashResponse) Size() (n int) { marshal, _ := 
json.Marshal(m); return len(marshal) } +func (m *SnapshotRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *SnapshotResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *WatchRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *WatchRequest_CreateRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *WatchRequest_CancelRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *WatchRequest_ProgressRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *WatchCreateRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *WatchCancelRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *WatchProgressRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *WatchResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseGrantRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseGrantResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseRevokeRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseRevokeResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseCheckpoint) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseCheckpointRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseCheckpointResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseKeepAliveRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseKeepAliveResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m 
*LeaseTimeToLiveRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseTimeToLiveResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseLeasesRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseStatus) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *LeaseLeasesResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *Member) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberAddRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberAddResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberRemoveRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberRemoveResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberUpdateRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberUpdateResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberListRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberListResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberPromoteRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MemberPromoteResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *DefragmentRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *DefragmentResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MoveLeaderRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *MoveLeaderResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m 
*AlarmRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AlarmMember) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AlarmResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *DowngradeRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *DowngradeResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *StatusRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *StatusResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthEnableRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthDisableRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthStatusRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthenticateRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserAddRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserGetRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserDeleteRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserChangePasswordRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *AuthUserGrantRoleRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserRevokeRoleRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *AuthRoleAddRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthRoleGetRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserListRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } 
+func (m *AuthRoleListRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthRoleDeleteRequest) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthRoleGrantPermissionRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *AuthRoleRevokePermissionRequest) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *AuthEnableResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthDisableResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthStatusResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthenticateResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserAddResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserGetResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserDeleteResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserChangePasswordResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *AuthUserGrantRoleResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *AuthUserRevokeRoleResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} +func (m *AuthRoleAddResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthRoleGetResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthRoleListResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthUserListResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) } +func (m *AuthRoleDeleteResponse) Size() (n int) { marshal, _ := json.Marshal(m); return len(marshal) 
} +func (m *AuthRoleGrantPermissionResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *AuthRoleRevokePermissionResponse) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func sovRpc(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} + +var ( + ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group") +) + +func (m *ResponseHeader) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *RangeRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *RangeResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *PutRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *PutResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *RequestOp) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *ResponseOp) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *Compare) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *TxnRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *TxnResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *CompactionRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *CompactionResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *HashRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *HashKVRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *HashKVResponse) 
Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *HashResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *WatchResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseStatus) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) 
error { return json.Unmarshal(dAtA, m) } +func (m *Member) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AlarmRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } + +func (m *AlarmResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *DowngradeRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *DowngradeResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *StatusRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *StatusResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m 
*AuthEnableRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthStatusRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} +func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthStatusResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { 
return json.Unmarshal(dAtA, m) } +func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { return json.Unmarshal(dAtA, m) } +func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +type alarmMember struct { + MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` + Alarm string `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmMember) Marshal() (dAtA []byte, err error) { + a := alarmMember{MemberID: m.MemberID} + switch m.Alarm { + case 0: + a.Alarm = "NONE" + case 1: + a.Alarm = "NOSPACE" + case 2: + a.Alarm = "CORRUPT" + } + + return json.Marshal(&a) +} + +func (m *AlarmMember) Unmarshal(dAtA []byte) error { + a := alarmMember{} + err := json.Unmarshal(dAtA, &a) + if err == nil { + m.MemberID = a.MemberID + switch a.Alarm { + case 
"NONE": + m.Alarm = 0 + case "NOSPACE": + m.Alarm = 1 + case "CORRUPT": + m.Alarm = 2 + } + } + return err +} diff --git a/offical/etcdserverpb/rpc.proto b/offical/etcdserverpb/rpc.proto new file mode 100644 index 00000000000..b2b3936ddc4 --- /dev/null +++ b/offical/etcdserverpb/rpc.proto @@ -0,0 +1,1199 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcd/api/mvccpb/kv.proto"; +import "etcd/api/authpb/auth.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service KV { + // Range gets the keys in the range from the key-value store. + rpc Range(RangeRequest) returns (RangeResponse) { + option (google.api.http) = { + post: "/v3/kv/range" + body: "*" + }; + } + + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + rpc Put(PutRequest) returns (PutResponse) { + option (google.api.http) = { + post: "/v3/kv/put" + body: "*" + }; + } + + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { + option (google.api.http) = { + post: "/v3/kv/deleterange" + body: "*" + }; + } + + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + rpc Txn(TxnRequest) returns (TxnResponse) { + option (google.api.http) = { + post: "/v3/kv/txn" + body: "*" + }; + } + + // Compact compacts the event history in the etcd key-value store. 
The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. + rpc Compact(CompactionRequest) returns (CompactionResponse) { + option (google.api.http) = { + post: "/v3/kv/compaction" + body: "*" + }; + } +} + +service Watch { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + rpc Watch(stream WatchRequest) returns (stream WatchResponse) { + option (google.api.http) = { + post: "/v3/watch" + body: "*" + }; + } +} + +service Lease { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { + option (google.api.http) = { + post: "/v3/lease/grant" + body: "*" + }; + } + + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { + option (google.api.http) = { + post: "/v3/lease/revoke" + body: "*" + additional_bindings { + post: "/v3/kv/lease/revoke" + body: "*" + } + }; + } + + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { + option (google.api.http) = { + post: "/v3/lease/keepalive" + body: "*" + }; + } + + // LeaseTimeToLive retrieves lease information. 
+ rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { + option (google.api.http) = { + post: "/v3/lease/timetolive" + body: "*" + additional_bindings { + post: "/v3/kv/lease/timetolive" + body: "*" + } + }; + } + + // LeaseLeases lists all existing leases. + rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { + option (google.api.http) = { + post: "/v3/lease/leases" + body: "*" + additional_bindings { + post: "/v3/kv/lease/leases" + body: "*" + } + }; + } +} + +service Cluster { + // MemberAdd adds a member into the cluster. + rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/add" + body: "*" + }; + } + + // MemberRemove removes an existing member from the cluster. + rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/remove" + body: "*" + }; + } + + // MemberUpdate updates the member configuration. + rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/update" + body: "*" + }; + } + + // MemberList lists all the members in the cluster. + rpc MemberList(MemberListRequest) returns (MemberListResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/list" + body: "*" + }; + } + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/promote" + body: "*" + }; + } +} + +service Maintenance { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + rpc Alarm(AlarmRequest) returns (AlarmResponse) { + option (google.api.http) = { + post: "/v3/maintenance/alarm" + body: "*" + }; + } + + // Status gets the status of the member. 
+ rpc Status(StatusRequest) returns (StatusResponse) { + option (google.api.http) = { + post: "/v3/maintenance/status" + body: "*" + }; + } + + // Defragment defragments a member's backend database to recover storage space. + rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { + option (google.api.http) = { + post: "/v3/maintenance/defragment" + body: "*" + }; + } + + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. + rpc Hash(HashRequest) returns (HashResponse) { + option (google.api.http) = { + post: "/v3/maintenance/hash" + body: "*" + }; + } + + // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. + rpc HashKV(HashKVRequest) returns (HashKVResponse) { + option (google.api.http) = { + post: "/v3/maintenance/hash" + body: "*" + }; + } + + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { + option (google.api.http) = { + post: "/v3/maintenance/snapshot" + body: "*" + }; + } + + // MoveLeader requests current leader node to transfer its leadership to transferee. + rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { + option (google.api.http) = { + post: "/v3/maintenance/transfer-leadership" + body: "*" + }; + } + + // Downgrade requests downgrades, verifies feasibility or cancels downgrade + // on the cluster version. + // Supported since etcd 3.5. + rpc Downgrade(DowngradeRequest) returns (DowngradeResponse) { + option (google.api.http) = { + post: "/v3/maintenance/downgrade" + body: "*" + }; + } +} + +service Auth { + // AuthEnable enables authentication. 
+ rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { + option (google.api.http) = { + post: "/v3/auth/enable" + body: "*" + }; + } + + // AuthDisable disables authentication. + rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { + option (google.api.http) = { + post: "/v3/auth/disable" + body: "*" + }; + } + + // AuthStatus displays authentication status. + rpc AuthStatus(AuthStatusRequest) returns (AuthStatusResponse) { + option (google.api.http) = { + post: "/v3/auth/status" + body: "*" + }; + } + + // Authenticate processes an authenticate request. + rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { + option (google.api.http) = { + post: "/v3/auth/authenticate" + body: "*" + }; + } + + // UserAdd adds a new user. User name cannot be empty. + rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { + option (google.api.http) = { + post: "/v3/auth/user/add" + body: "*" + }; + } + + // UserGet gets detailed user information. + rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { + option (google.api.http) = { + post: "/v3/auth/user/get" + body: "*" + }; + } + + // UserList gets a list of all users. + rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { + option (google.api.http) = { + post: "/v3/auth/user/list" + body: "*" + }; + } + + // UserDelete deletes a specified user. + rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { + option (google.api.http) = { + post: "/v3/auth/user/delete" + body: "*" + }; + } + + // UserChangePassword changes the password of a specified user. + rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { + option (google.api.http) = { + post: "/v3/auth/user/changepw" + body: "*" + }; + } + + // UserGrant grants a role to a specified user. 
+ rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { + option (google.api.http) = { + post: "/v3/auth/user/grant" + body: "*" + }; + } + + // UserRevokeRole revokes a role of specified user. + rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { + option (google.api.http) = { + post: "/v3/auth/user/revoke" + body: "*" + }; + } + + // RoleAdd 添加一个角色. Role name cannot be empty. + rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { + option (google.api.http) = { + post: "/v3/auth/role/add" + body: "*" + }; + } + + // RoleGet gets detailed role information. + rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { + option (google.api.http) = { + post: "/v3/auth/role/get" + body: "*" + }; + } + + // RoleList gets lists of all roles. + rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { + option (google.api.http) = { + post: "/v3/auth/role/list" + body: "*" + }; + } + + // RoleDelete deletes a specified role. + rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { + option (google.api.http) = { + post: "/v3/auth/role/delete" + body: "*" + }; + } + + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { + option (google.api.http) = { + post: "/v3/auth/role/grant" + body: "*" + }; + } + + // RoleRevokePermission revokes a key or range permission of a specified role. + rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { + option (google.api.http) = { + post: "/v3/auth/role/revoke" + body: "*" + }; + } +} + +message ResponseHeader { + // cluster_id is the ID of the cluster which sent the response. + uint64 cluster_id = 1; + // member_id is the ID of the member which sent the response. 
+ uint64 member_id = 2; + // revision is the key-value store revision when the request was applied. + // For watch progress responses, the header.revision indicates progress. All future events + // recieved in this stream are guaranteed to have a higher revision number than the + // header.revision number. + int64 revision = 3; + // raft_term is the raft term when the request was applied. + uint64 raft_term = 4; +} + +message RangeRequest { + enum SortOrder { + NONE = 0; // default, no sorting + ASCEND = 1; // lowest target value first + DESCEND = 2; // highest target value first + } + enum SortTarget { + KEY = 0; + VERSION = 1; + CREATE = 2; + MOD = 3; + VALUE = 4; + } + + // key is the first key for the range. If range_end is not given, the request only looks up key. + bytes key = 1; + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. + bytes range_end = 2; + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + int64 limit = 3; + // revision is the point-in-time of the key-value store to use for the range. + // If revision is less or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + int64 revision = 4; + + // sort_order is the order for returned sorted results. + SortOrder sort_order = 5; + + // sort_target is the key-value field to use for sorting. + SortTarget sort_target = 6; + + // serializable sets the range request to use serializable member-local reads. 
+ // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + bool serializable = 7; + + // keys_only when set returns only the keys and not the values. + bool keys_only = 8; + + // count_only when set returns only the count of the keys in the range. + bool count_only = 9; + + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + int64 min_mod_revision = 10; + + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + int64 max_mod_revision = 11; + + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + int64 min_create_revision = 12; + + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. + int64 max_create_revision = 13; +} + +message RangeResponse { + ResponseHeader header = 1; + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + repeated mvccpb.KeyValue kvs = 2; + // more indicates if there are more keys to return in the requested range. + bool more = 3; + // count is set to the number of keys within the range when requested. + int64 count = 4; +} + +message PutRequest { + // key is the key, in bytes, to put into the key-value store. + bytes key = 1; + // value is the value, in bytes, to associate with the key in the key-value store. + bytes value = 2; + // lease is the lease ID to associate with the key in the key-value store. 
A lease + // value of 0 indicates no lease. + int64 lease = 3; + + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + bool prev_kv = 4; + + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + bool ignore_value = 5; + + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + bool ignore_lease = 6; +} + +message PutResponse { + ResponseHeader header = 1; + // if prev_kv is set in the request, the previous key-value pair will be returned. + mvccpb.KeyValue prev_kv = 2; +} + +message DeleteRangeRequest { + // key is the first key to delete in the range. + bytes key = 1; + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + bytes range_end = 2; + + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delete response. + bool prev_kv = 3; +} + +message DeleteRangeResponse { + ResponseHeader header = 1; + // deleted is the number of keys deleted by the delete range request. + int64 deleted = 2; + // if prev_kv is set in the request, the previous key-value pairs will be returned. + repeated mvccpb.KeyValue prev_kvs = 3; +} + +message RequestOp { + // request is a union of request types accepted by a transaction. 
+ oneof request { + RangeRequest request_range = 1; + PutRequest request_put = 2; + DeleteRangeRequest request_delete_range = 3; + TxnRequest request_txn = 4; + } +} + +message ResponseOp { + // response is a union of response types returned by a transaction. + oneof response { + RangeResponse response_range = 1; + PutResponse response_put = 2; + DeleteRangeResponse response_delete_range = 3; + TxnResponse response_txn = 4; + } +} + +message Compare { + enum CompareResult { + EQUAL = 0; + GREATER = 1; + LESS = 2; + NOT_EQUAL = 3; + } + enum CompareTarget { + VERSION = 0; + CREATE = 1; + MOD = 2; + VALUE = 3; + LEASE = 4; + } + // result is logical comparison operation for this comparison. + CompareResult result = 1; + // target is the key-value field to inspect for the comparison. + CompareTarget target = 2; + // key is the subject key for the comparison operation. + bytes key = 3; + oneof target_union { + // version is the version of the given key + int64 version = 4; + // create_revision is the creation revision of the given key + int64 create_revision = 5; + // mod_revision is the last modified revision of the given key. + int64 mod_revision = 6; + // value is the value of the given key, in bytes. + bytes value = 7; + // lease is the lease id of the given key. + int64 lease = 8; + // leave room for more target_union field tags, jump to 64 + } + + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + bytes range_end = 64; + // TODO: fill out with most of the rest of RangeRequest fields when needed. +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. 
It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +message TxnRequest { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + repeated Compare compare = 1; + // success is a list of requests which will be applied when compare evaluates to true. + repeated RequestOp success = 2; + // failure is a list of requests which will be applied when compare evaluates to false. + repeated RequestOp failure = 3; +} + +message TxnResponse { + ResponseHeader header = 1; + // succeeded is set to true if the compare evaluated to true or false otherwise. + bool succeeded = 2; + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. + repeated ResponseOp responses = 3; +} + +// CompactionRequest compacts the key-value store up to a given revision. 
All superseded keys +// with a revision less than the compaction revision will be removed. +message CompactionRequest { + // revision is the key-value store revision for the compaction operation. + int64 revision = 1; + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + bool physical = 2; +} + +message CompactionResponse { + ResponseHeader header = 1; +} + +message HashRequest { +} + +message HashKVRequest { + // revision is the key-value store revision for the hash operation. + int64 revision = 1; +} + +message HashKVResponse { + ResponseHeader header = 1; + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. + uint32 hash = 2; + // compact_revision is the compacted revision of key-value store when hash begins. + int64 compact_revision = 3; +} + +message HashResponse { + ResponseHeader header = 1; + // hash is the hash value computed from the responding member's KV's backend. + uint32 hash = 2; +} + +message SnapshotRequest { +} + +message SnapshotResponse { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + ResponseHeader header = 1; + + // remaining_bytes is the number of blob bytes to be sent after this message + uint64 remaining_bytes = 2; + + // blob contains the next chunk of the snapshot in the snapshot stream. + bytes blob = 3; +} + +message WatchRequest { + // request_union is a request to either create a new watcher or cancel an existing watcher. + oneof request_union { + WatchCreateRequest create_request = 1; + WatchCancelRequest cancel_request = 2; + WatchProgressRequest progress_request = 3; + } +} + +message WatchCreateRequest { + // key is the key to register for watching. + bytes key = 1; + + // range_end is the end of the range [key, range_end) to watch. 
If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + bytes range_end = 2; + + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + int64 start_revision = 3; + + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. + bool progress_notify = 4; + + enum FilterType { + // filter out put event. + NOPUT = 0; + // filter out delete event. + NODELETE = 1; + } + + // filters filter the events at server side before it sends back to the watcher. + repeated FilterType filters = 5; + + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + bool prev_kv = 6; + + // If watch_id is provided and non-zero, it will be assigned to this watcher. + // Since creating a watcher in etcd is not a synchronous operation, + // this can be used ensure that ordering is correct when creating multiple + // watchers on the same stream. Creating a watcher with an ID already in + // use on the stream will cause an error to be returned. + int64 watch_id = 7; + + // fragment enables splitting large revisions into multiple watch responses. + bool fragment = 8; +} + +message WatchCancelRequest { + // watch_id is the watcher id to cancel so that no more events are transmitted. + int64 watch_id = 1; +} + +// Requests the a watch stream progress status be sent in the watch response stream as soon as +// possible. 
+message WatchProgressRequest { +} + +message WatchResponse { + ResponseHeader header = 1; + // watch_id is the ID of the watcher that corresponds to the response. + int64 watch_id = 2; + + // created is set to true if the response is for a create watch request. + // The client should record the watch_id and expect to receive events for + // the created watcher from the same stream. + // All events sent to the created watcher will attach with the same watch_id. + bool created = 3; + + // canceled is set to true if the response is for a cancel watch request. + // No further events will be sent to the canceled watcher. + bool canceled = 4; + + // compact_revision is set to the minimum index if a watcher tries to watch + // at a compacted index. + // + // This happens when creating a watcher at a compacted revision or the watcher cannot + // catch up with the progress of the key-value store. + // + // The client should treat the watcher as canceled and should not try to create any + // watcher with the same start_revision again. + int64 compact_revision = 5; + + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + + // framgment is true if large watch response was split over multiple responses. + bool fragment = 7; + + repeated mvccpb.Event events = 11; +} + +message LeaseGrantRequest { + // TTL is the advisory time-to-live in seconds. Expired lease will return -1. + int64 TTL = 1; + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. + int64 ID = 2; +} + +message LeaseGrantResponse { + ResponseHeader header = 1; + // ID is the lease ID for the granted lease. + int64 ID = 2; + // TTL is the server chosen lease time-to-live in seconds. + int64 TTL = 3; + string error = 4; +} + +message LeaseRevokeRequest { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
+ int64 ID = 1; +} + +message LeaseRevokeResponse { + ResponseHeader header = 1; +} + +message LeaseCheckpoint { + // ID is the lease ID to checkpoint. + int64 ID = 1; + + // Remaining_TTL is the remaining time until expiry of the lease. + int64 remaining_TTL = 2; +} + +message LeaseCheckpointRequest { + repeated LeaseCheckpoint checkpoints = 1; +} + +message LeaseCheckpointResponse { + ResponseHeader header = 1; +} + +message LeaseKeepAliveRequest { + // ID is the lease ID for the lease to keep alive. + int64 ID = 1; +} + +message LeaseKeepAliveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the new time-to-live for the lease. + int64 TTL = 3; +} + +message LeaseTimeToLiveRequest { + // ID is the lease ID for the lease. + int64 ID = 1; + // keys is true to query all the keys attached to this lease. + bool keys = 2; +} + +message LeaseTimeToLiveResponse { + ResponseHeader header = 1; + // ID is the lease ID from the keep alive request. + int64 ID = 2; + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + int64 TTL = 3; + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + int64 grantedTTL = 4; + // Keys is the list of keys attached to this lease. + repeated bytes keys = 5; +} + +message LeaseLeasesRequest { +} + +message LeaseStatus { + int64 ID = 1; + // TODO: int64 TTL = 2; +} + +message LeaseLeasesResponse { + ResponseHeader header = 1; + repeated LeaseStatus leases = 2; +} + +message Member { + // ID is the member ID for this member. + uint64 ID = 1; + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + string name = 2; + // peerURLs is the list of URLs the member exposes to the cluster for communication. + repeated string peerURLs = 3; + // clientURLs is the list of URLs the member exposes to clients for communication. 
If the member is not started, clientURLs will be empty. + repeated string clientURLs = 4; + // isLearner indicates if the member is raft learner. + bool isLearner = 5; +} + +message MemberAddRequest { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + repeated string peerURLs = 1; + // isLearner indicates if the added member is raft learner. + bool isLearner = 2; +} + +message MemberAddResponse { + ResponseHeader header = 1; + // member is the member information for the added member. + Member member = 2; + // members is a list of all members after adding the new member. + repeated Member members = 3; +} + +message MemberRemoveRequest { + // ID is the member ID of the member to remove. + uint64 ID = 1; +} + +message MemberRemoveResponse { + ResponseHeader header = 1; + // members is a list of all members after removing the member. + repeated Member members = 2; +} + +message MemberUpdateRequest { + // ID is the member ID of the member to update. + uint64 ID = 1; + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + repeated string peerURLs = 2; +} + +message MemberUpdateResponse{ + ResponseHeader header = 1; + // members is a list of all members after updating the member. + repeated Member members = 2; +} + +message MemberListRequest { + bool linearizable = 1; +} + +message MemberListResponse { + ResponseHeader header = 1; + // members is a list of all members associated with the cluster. + repeated Member members = 2; +} + +message MemberPromoteRequest { + // ID is the member ID of the member to promote. + uint64 ID = 1; +} + +message MemberPromoteResponse { + ResponseHeader header = 1; + // members is a list of all members after promoting the member. + repeated Member members = 2; +} + +message DefragmentRequest { +} + +message DefragmentResponse { + ResponseHeader header = 1; +} + +message MoveLeaderRequest { + // targetID is the node ID for the new leader. 
+ uint64 targetID = 1; +} + +message MoveLeaderResponse { + ResponseHeader header = 1; +} + +enum AlarmType { + NONE = 0; // default, used to query if any alarm is active + NOSPACE = 1; // space quota is exhausted + CORRUPT = 2; // kv store corruption detected +} + +message AlarmRequest { + enum AlarmAction { + GET = 0; + ACTIVATE = 1; + DEACTIVATE = 2; + } + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. + AlarmAction action = 1; + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + uint64 memberID = 2; + // alarm is the type of alarm to consider for this request. + AlarmType alarm = 3; +} + +message AlarmMember { + // memberID is the ID of the member associated with the raised alarm. + uint64 memberID = 1; + // alarm is the type of alarm which has been raised. + AlarmType alarm = 2; +} + +message AlarmResponse { + ResponseHeader header = 1; + // alarms is a list of alarms associated with the alarm request. + repeated AlarmMember alarms = 2; +} + +message DowngradeRequest { + enum DowngradeAction { + VALIDATE = 0; + ENABLE = 1; + CANCEL = 2; + } + + // action is the kind of downgrade request to issue. The action may + // VALIDATE the target version, DOWNGRADE the cluster version, + // or CANCEL the current downgrading job. + DowngradeAction action = 1; + // version is the target version to downgrade. + string version = 2; +} + +message DowngradeResponse { + ResponseHeader header = 1; + // version is the current cluster version. + string version = 2; +} + +message StatusRequest { +} + +message StatusResponse { + ResponseHeader header = 1; + // version is the cluster protocol version used by the responding member. + string version = 2; + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. 
+ int64 dbSize = 3; + // leader is the member ID which the responding member believes is the current leader. + uint64 leader = 4; + // raftIndex is the current raft committed index of the responding member. + uint64 raftIndex = 5; + // raftTerm is the current raft term of the responding member. + uint64 raftTerm = 6; + // raftAppliedIndex is the current raft applied index of the responding member. + uint64 raftAppliedIndex = 7; + // errors contains alarm/health information and status. + repeated string errors = 8; + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + int64 dbSizeInUse = 9; + // isLearner indicates if the member is raft learner. + bool isLearner = 10; +} + +message AuthEnableRequest { +} + +message AuthDisableRequest { +} + +message AuthStatusRequest { +} + +message AuthenticateRequest { + string name = 1; + string password = 2; +} + +message AuthUserAddRequest { + string name = 1; + string password = 2; + authpb.UserAddOptions options = 3; + string hashedPassword = 4; +} + +message AuthUserGetRequest { + string name = 1; +} + +message AuthUserDeleteRequest { + // name is the name of the user to delete. + string name = 1; +} + +message AuthUserChangePasswordRequest { + // name is the name of the user whose password is being changed. + string name = 1; + // password is the new password for the user. Note that this field will be removed in the API layer. + string password = 2; + // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. + string hashedPassword = 3; +} + +message AuthUserGrantRoleRequest { + // user is the name of the user which should be granted a given role. + string user = 1; + // role is the name of the role to grant to the user. 
+ string role = 2; +} + +message AuthUserRevokeRoleRequest { + string name = 1; + string role = 2; +} + +message AuthRoleAddRequest { + // name is the name of the role to add to the authentication system. + string name = 1; +} + +message AuthRoleGetRequest { + string role = 1; +} + +message AuthUserListRequest { +} + +message AuthRoleListRequest { +} + +message AuthRoleDeleteRequest { + string role = 1; +} + +message AuthRoleGrantPermissionRequest { + // name is the name of the role which will be granted the permission. + string name = 1; + // perm is the permission to grant to the role. + authpb.Permission perm = 2; +} + +message AuthRoleRevokePermissionRequest { + string role = 1; + bytes key = 2; + bytes range_end = 3; +} + +message AuthEnableResponse { + ResponseHeader header = 1; +} + +message AuthDisableResponse { + ResponseHeader header = 1; +} + +message AuthStatusResponse { + ResponseHeader header = 1; + bool enabled = 2; + // authRevision is the current revision of auth store + uint64 authRevision = 3; +} + +message AuthenticateResponse { + ResponseHeader header = 1; + // token is an authorized token that can be used in succeeding RPCs + string token = 2; +} + +message AuthUserAddResponse { + ResponseHeader header = 1; +} + +message AuthUserGetResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserDeleteResponse { + ResponseHeader header = 1; +} + +message AuthUserChangePasswordResponse { + ResponseHeader header = 1; +} + +message AuthUserGrantRoleResponse { + ResponseHeader header = 1; +} + +message AuthUserRevokeRoleResponse { + ResponseHeader header = 1; +} + +message AuthRoleAddResponse { + ResponseHeader header = 1; +} + +message AuthRoleGetResponse { + ResponseHeader header = 1; + + repeated authpb.Permission perm = 2; +} + +message AuthRoleListResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserListResponse { + ResponseHeader header = 1; + + repeated string users = 2; +} 
+ +message AuthRoleDeleteResponse { + ResponseHeader header = 1; +} + +message AuthRoleGrantPermissionResponse { + ResponseHeader header = 1; +} + +message AuthRoleRevokePermissionResponse { + ResponseHeader header = 1; +} diff --git a/pkg/LICENSE b/pkg/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/pkg/README.md b/pkg/README.md deleted file mode 100644 index d7de4d33bc4..00000000000 --- a/pkg/README.md +++ /dev/null @@ -1,2 +0,0 @@ -pkg/ is a collection of utility packages used by etcd without being specific to etcd itself. A package belongs here -only if it could possibly be moved out into its own repository in the future. diff --git a/pkg/adt/README.md b/pkg/adt/README.md index 107c6bcae28..bb1e1785883 100644 --- a/pkg/adt/README.md +++ b/pkg/adt/README.md @@ -1,4 +1,3 @@ - ## Red-Black Tree *"Introduction to Algorithms" (Cormen et al, 3rd ed.), Chapter 13* @@ -7,27 +6,27 @@ 2. The root is black. 3. Every leaf (NIL) is black. 4. If a node is red, then both its children are black. -5. For each node, all simple paths from the node to descendant leaves contain the -same number of black nodes. +5. For each node, all simple paths from the node to descendant leaves contain the same number of black nodes. For example, ```go import ( - "fmt" +"fmt" - "go.etcd.io/etcd/pkg/v3/adt" +"github.com/ls-2018/etcd_cn/pkg/adt" ) func main() { - ivt := adt.NewIntervalTree() - ivt.Insert(NewInt64Interval(510, 511), 0) - ivt.Insert(NewInt64Interval(82, 83), 0) - ivt.Insert(NewInt64Interval(830, 831), 0) - ... +ivt := adt.NewIntervalTree() +ivt.Insert(NewInt64Interval(510, 511), 0) +ivt.Insert(NewInt64Interval(82, 83), 0) +ivt.Insert(NewInt64Interval(830, 831), 0) +... 
``` -After inserting the values `510`, `82`, `830`, `11`, `383`, `647`, `899`, `261`, `410`, `514`, `815`, `888`, `972`, `238`, `292`, `953`. +After inserting the values `510`, `82`, `830`, `11`, `383`, `647`, `899`, `261`, `410`, `514`, `815`, `888`, `972` +, `238`, `292`, `953`. ![red-black-tree-01-insertion.png](img/red-black-tree-01-insertion.png) diff --git a/pkg/adt/adt.go b/pkg/adt/adt.go index 1a9559145b3..0998175ddc1 100644 --- a/pkg/adt/adt.go +++ b/pkg/adt/adt.go @@ -14,3 +14,5 @@ // Package adt implements useful abstract data types. package adt + +// 红黑树 diff --git a/pkg/adt/example_test.go b/pkg/adt/example_test.go deleted file mode 100644 index 034f7b3c418..00000000000 --- a/pkg/adt/example_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package adt_test - -import ( - "fmt" - - "go.etcd.io/etcd/pkg/v3/adt" -) - -func Example() { - ivt := adt.NewIntervalTree() - ivt.Insert(adt.NewInt64Interval(1, 3), 123) - ivt.Insert(adt.NewInt64Interval(9, 13), 456) - ivt.Insert(adt.NewInt64Interval(7, 20), 789) - - rs := ivt.Stab(adt.NewInt64Point(10)) - for _, v := range rs { - fmt.Printf("Overlapping range: %+v\n", v) - } - // output: - // Overlapping range: &{Ivl:{Begin:7 End:20} Val:789} - // Overlapping range: &{Ivl:{Begin:9 End:13} Val:456} -} diff --git a/pkg/adt/interval_tree.go b/pkg/adt/interval_tree.go index bfd13fb73d5..e1081b9cdff 100644 --- a/pkg/adt/interval_tree.go +++ b/pkg/adt/interval_tree.go @@ -199,8 +199,7 @@ type IntervalTree interface { Visit(ivl Interval, ivv IntervalVisitor) // Find gets the IntervalValue for the node matching the given interval Find(ivl Interval) *IntervalValue - // Intersects returns true if there is some tree node intersecting the given interval. - Intersects(iv Interval) bool + Intersects(iv Interval) bool // 如果有一些树节点与给定的区间相交,则返回true. // Contains returns true if the interval tree's keys cover the entire given interval. Contains(ivl Interval) bool // Stab returns a slice with all elements in the tree intersecting the interval. @@ -209,7 +208,7 @@ type IntervalTree interface { Union(inIvt IntervalTree, ivl Interval) } -// NewIntervalTree returns a new interval tree. +// NewIntervalTree 线段树( interval tree )来支持范围查询、前缀查询等. 
func NewIntervalTree() IntervalTree { sentinel := &intervalNode{ iv: IntervalValue{}, @@ -226,6 +225,8 @@ func NewIntervalTree() IntervalTree { } } +// gorm + type intervalTree struct { root *intervalNode count int @@ -241,34 +242,34 @@ type intervalTree struct { // // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324 // -// RB-DELETE(T, z) -// -// y = z -// y-original-color = y.color -// -// if z.left == T.nil -// x = z.right -// RB-TRANSPLANT(T, z, z.right) -// else if z.right == T.nil -// x = z.left -// RB-TRANSPLANT(T, z, z.left) -// else -// y = TREE-MINIMUM(z.right) -// y-original-color = y.color -// x = y.right -// if y.p == z -// x.p = y -// else -// RB-TRANSPLANT(T, y, y.right) -// y.right = z.right -// y.right.p = y -// RB-TRANSPLANT(T, z, y) -// y.left = z.left -// y.left.p = y -// y.color = z.color -// -// if y-original-color == BLACK -// RB-DELETE-FIXUP(T, x) +// 0. RB-DELETE(T, z) +// 1. +// 2. y = z +// 3. y-original-color = y.color +// 4. +// 5. if z.left == T.nil +// 6. x = z.right +// 7. RB-TRANSPLANT(T, z, z.right) +// 8. else if z.right == T.nil +// 9. x = z.left +// 10. RB-TRANSPLANT(T, z, z.left) +// 11. else +// 12. y = TREE-MINIMUM(z.right) +// 13. y-original-color = y.color +// 14. x = y.right +// 15. if y.p == z +// 16. x.p = y +// 17. else +// 18. RB-TRANSPLANT(T, y, y.right) +// 19. y.right = z.right +// 20. y.right.p = y +// 21. RB-TRANSPLANT(T, z, y) +// 22. y.left = z.left +// 23. y.left.p = y +// 24. y.color = z.color +// 25. +// 26. if y-original-color == BLACK +// 27. RB-DELETE-FIXUP(T, x) // Delete removes the node with the given interval from the tree, returning // true if a node is in fact removed. 
@@ -317,47 +318,48 @@ func (ivt *intervalTree) Delete(ivl Interval) bool { // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326 // -// RB-DELETE-FIXUP(T, z) -// -// while x ≠ T.root and x.color == BLACK -// if x == x.p.left -// w = x.p.right -// if w.color == RED -// w.color = BLACK -// x.p.color = RED -// LEFT-ROTATE(T, x, p) -// if w.left.color == BLACK and w.right.color == BLACK -// w.color = RED -// x = x.p -// else if w.right.color == BLACK -// w.left.color = BLACK -// w.color = RED -// RIGHT-ROTATE(T, w) -// w = w.p.right -// w.color = x.p.color -// x.p.color = BLACK -// LEFT-ROTATE(T, w.p) -// x = T.root -// else -// w = x.p.left -// if w.color == RED -// w.color = BLACK -// x.p.color = RED -// RIGHT-ROTATE(T, x, p) -// if w.right.color == BLACK and w.left.color == BLACK -// w.color = RED -// x = x.p -// else if w.left.color == BLACK -// w.right.color = BLACK -// w.color = RED -// LEFT-ROTATE(T, w) -// w = w.p.left -// w.color = x.p.color -// x.p.color = BLACK -// RIGHT-ROTATE(T, w.p) -// x = T.root +// 0. RB-DELETE-FIXUP(T, z) +// 1. +// 2. while x ≠ T.root and x.color == BLACK +// 3. if x == x.p.left +// 4. w = x.p.right +// 5. if w.color == RED +// 6. w.color = BLACK +// 7. x.p.color = RED +// 8. LEFT-ROTATE(T, x, p) +// 9. if w.left.color == BLACK and w.right.color == BLACK +// 10. w.color = RED +// 11. x = x.p +// 12. else if w.right.color == BLACK +// 13. w.left.color = BLACK +// 14. w.color = RED +// 15. RIGHT-ROTATE(T, w) +// 16. w = w.p.right +// 17. w.color = x.p.color +// 18. x.p.color = BLACK +// 19. LEFT-ROTATE(T, w.p) +// 20. x = T.root +// 21. else +// 22. w = x.p.left +// 23. if w.color == RED +// 24. w.color = BLACK +// 25. x.p.color = RED +// 26. RIGHT-ROTATE(T, x, p) +// 27. if w.right.color == BLACK and w.left.color == BLACK +// 28. w.color = RED +// 29. x = x.p +// 30. else if w.left.color == BLACK +// 31. w.right.color = BLACK +// 32. w.color = RED +// 33. LEFT-ROTATE(T, w) +// 34. w = w.p.left +// 35. 
w.color = x.p.color +// 36. x.p.color = BLACK +// 37. RIGHT-ROTATE(T, w.p) +// 38. x = T.root +// 39. +// 40. x.color = BLACK // -// x.color = BLACK func (ivt *intervalTree) deleteFixup(x *intervalNode) { for x != ivt.root && x.color(ivt.sentinel) == black { if x == x.parent.left { // line 3-20 @@ -438,32 +440,32 @@ func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *inte // // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315 // -// RB-INSERT(T, z) -// -// y = T.nil -// x = T.root -// -// while x ≠ T.nil -// y = x -// if z.key < x.key -// x = x.left -// else -// x = x.right -// -// z.p = y -// -// if y == T.nil -// T.root = z -// else if z.key < y.key -// y.left = z -// else -// y.right = z -// -// z.left = T.nil -// z.right = T.nil -// z.color = RED -// -// RB-INSERT-FIXUP(T, z) +// 0. RB-INSERT(T, z) +// 1. +// 2. y = T.nil +// 3. x = T.root +// 4. +// 5. while x ≠ T.nil +// 6. y = x +// 7. if z.key < x.key +// 8. x = x.left +// 9. else +// 10. x = x.right +// 11. +// 12. z.p = y +// 13. +// 14. if y == T.nil +// 15. T.root = z +// 16. else if z.key < y.key +// 17. y.left = z +// 18. else +// 19. y.right = z +// 20. +// 21. z.left = T.nil +// 22. z.right = T.nil +// 23. z.color = RED +// 24. +// 25. RB-INSERT-FIXUP(T, z) // Insert adds a node with the given interval into the tree. 
func (ivt *intervalTree) Insert(ivl Interval, val interface{}) { @@ -498,37 +500,38 @@ func (ivt *intervalTree) Insert(ivl Interval, val interface{}) { // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316 // -// RB-INSERT-FIXUP(T, z) -// -// while z.p.color == RED -// if z.p == z.p.p.left -// y = z.p.p.right -// if y.color == RED -// z.p.color = BLACK -// y.color = BLACK -// z.p.p.color = RED -// z = z.p.p -// else if z == z.p.right -// z = z.p -// LEFT-ROTATE(T, z) -// z.p.color = BLACK -// z.p.p.color = RED -// RIGHT-ROTATE(T, z.p.p) -// else -// y = z.p.p.left -// if y.color == RED -// z.p.color = BLACK -// y.color = BLACK -// z.p.p.color = RED -// z = z.p.p -// else if z == z.p.right -// z = z.p -// RIGHT-ROTATE(T, z) -// z.p.color = BLACK -// z.p.p.color = RED -// LEFT-ROTATE(T, z.p.p) +// 0. RB-INSERT-FIXUP(T, z) +// 1. +// 2. while z.p.color == RED +// 3. if z.p == z.p.p.left +// 4. y = z.p.p.right +// 5. if y.color == RED +// 6. z.p.color = BLACK +// 7. y.color = BLACK +// 8. z.p.p.color = RED +// 9. z = z.p.p +// 10. else if z == z.p.right +// 11. z = z.p +// 12. LEFT-ROTATE(T, z) +// 13. z.p.color = BLACK +// 14. z.p.p.color = RED +// 15. RIGHT-ROTATE(T, z.p.p) +// 16. else +// 17. y = z.p.p.left +// 18. if y.color == RED +// 19. z.p.color = BLACK +// 20. y.color = BLACK +// 21. z.p.p.color = RED +// 22. z = z.p.p +// 23. else if z == z.p.right +// 24. z = z.p +// 25. RIGHT-ROTATE(T, z) +// 26. z.p.color = BLACK +// 27. z.p.p.color = RED +// 28. LEFT-ROTATE(T, z.p.p) +// 29. +// 30. 
T.root.color = BLACK // -// T.root.color = BLACK func (ivt *intervalTree) insertFixup(z *intervalNode) { for z.parent.color(ivt.sentinel) == red { if z.parent == z.parent.parent.left { // line 3-15 @@ -576,25 +579,26 @@ func (ivt *intervalTree) insertFixup(z *intervalNode) { // // "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313 // -// LEFT-ROTATE(T, x) -// -// y = x.right -// x.right = y.left -// -// if y.left ≠ T.nil -// y.left.p = x -// -// y.p = x.p +// 0. LEFT-ROTATE(T, x) +// 1. +// 2. y = x.right +// 3. x.right = y.left +// 4. +// 5. if y.left ≠ T.nil +// 6. y.left.p = x +// 7. +// 8. y.p = x.p +// 9. +// 10. if x.p == T.nil +// 11. T.root = y +// 12. else if x == x.p.left +// 13. x.p.left = y +// 14. else +// 15. x.p.right = y +// 16. +// 17. y.left = x +// 18. x.p = y // -// if x.p == T.nil -// T.root = y -// else if x == x.p.left -// x.p.left = y -// else -// x.p.right = y -// -// y.left = x -// x.p = y func (ivt *intervalTree) rotateLeft(x *intervalNode) { // rotateLeft x must have right child if x.right == ivt.sentinel { @@ -621,25 +625,26 @@ func (ivt *intervalTree) rotateLeft(x *intervalNode) { // rotateRight moves x so it is right of its left child // -// RIGHT-ROTATE(T, x) -// -// y = x.left -// x.left = y.right -// -// if y.right ≠ T.nil -// y.right.p = x -// -// y.p = x.p -// -// if x.p == T.nil -// T.root = y -// else if x == x.p.right -// x.p.right = y -// else -// x.p.left = y +// 0. RIGHT-ROTATE(T, x) +// 1. +// 2. y = x.left +// 3. x.left = y.right +// 4. +// 5. if y.right ≠ T.nil +// 6. y.right.p = x +// 7. +// 8. y.p = x.p +// 9. +// 10. if x.p == T.nil +// 11. T.root = y +// 12. else if x == x.p.right +// 13. x.p.right = y +// 14. else +// 15. x.p.left = y +// 16. +// 17. y.right = x +// 18. 
x.p = y // -// y.right = x -// x.p = y func (ivt *intervalTree) rotateRight(x *intervalNode) { // rotateRight x must have left child if x.left == ivt.sentinel { diff --git a/pkg/adt/interval_tree_test.go b/pkg/adt/interval_tree_test.go deleted file mode 100644 index 1e1a05a6452..00000000000 --- a/pkg/adt/interval_tree_test.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adt - -import ( - "math/rand" - "reflect" - "testing" - "time" -) - -// TestIntervalTreeInsert tests interval tree insertion. -func TestIntervalTreeInsert(t *testing.T) { - // "Introduction to Algorithms" (Cormen et al, 3rd ed.) 
chapter 14, Figure 14.4 - ivt := NewIntervalTree() - ivt.Insert(NewInt64Interval(16, 21), 30) - ivt.Insert(NewInt64Interval(8, 9), 23) - ivt.Insert(NewInt64Interval(0, 3), 3) - ivt.Insert(NewInt64Interval(5, 8), 10) - ivt.Insert(NewInt64Interval(6, 10), 10) - ivt.Insert(NewInt64Interval(15, 23), 23) - ivt.Insert(NewInt64Interval(17, 19), 20) - ivt.Insert(NewInt64Interval(25, 30), 30) - ivt.Insert(NewInt64Interval(26, 26), 26) - ivt.Insert(NewInt64Interval(19, 20), 20) - - expected := []visitedInterval{ - {root: NewInt64Interval(16, 21), color: black, left: NewInt64Interval(8, 9), right: NewInt64Interval(25, 30), depth: 0}, - - {root: NewInt64Interval(8, 9), color: red, left: NewInt64Interval(5, 8), right: NewInt64Interval(15, 23), depth: 1}, - {root: NewInt64Interval(25, 30), color: red, left: NewInt64Interval(17, 19), right: NewInt64Interval(26, 26), depth: 1}, - - {root: NewInt64Interval(5, 8), color: black, left: NewInt64Interval(0, 3), right: NewInt64Interval(6, 10), depth: 2}, - {root: NewInt64Interval(15, 23), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 2}, - {root: NewInt64Interval(17, 19), color: black, left: newInt64EmptyInterval(), right: NewInt64Interval(19, 20), depth: 2}, - {root: NewInt64Interval(26, 26), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 2}, - - {root: NewInt64Interval(0, 3), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(6, 10), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(19, 20), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - } - - tr := ivt.(*intervalTree) - visits := tr.visitLevel() - if !reflect.DeepEqual(expected, visits) { - t.Fatalf("level order expected %v, got %v", expected, visits) - } -} - -// TestIntervalTreeSelfBalanced ensures range tree is self-balanced after inserting ranges 
to the tree. -// Use https://www.cs.usfca.edu/~galles/visualization/RedBlack.html for test case creation. -// -// Regular Binary Search Tree -// -// [0,1] -// \ -// [1,2] -// \ -// [3,4] -// \ -// [5,6] -// \ -// [7,8] -// \ -// [8,9] -// -// Self-Balancing Binary Search Tree -// -// [1,2] -// / \ -// [0,1] [5,6] -// / \ -// [3,4] [7,8] -// \ -// [8,9] -func TestIntervalTreeSelfBalanced(t *testing.T) { - ivt := NewIntervalTree() - ivt.Insert(NewInt64Interval(0, 1), 0) - ivt.Insert(NewInt64Interval(1, 2), 0) - ivt.Insert(NewInt64Interval(3, 4), 0) - ivt.Insert(NewInt64Interval(5, 6), 0) - ivt.Insert(NewInt64Interval(7, 8), 0) - ivt.Insert(NewInt64Interval(8, 9), 0) - - expected := []visitedInterval{ - {root: NewInt64Interval(1, 2), color: black, left: NewInt64Interval(0, 1), right: NewInt64Interval(5, 6), depth: 0}, - - {root: NewInt64Interval(0, 1), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 1}, - {root: NewInt64Interval(5, 6), color: red, left: NewInt64Interval(3, 4), right: NewInt64Interval(7, 8), depth: 1}, - - {root: NewInt64Interval(3, 4), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 2}, - {root: NewInt64Interval(7, 8), color: black, left: newInt64EmptyInterval(), right: NewInt64Interval(8, 9), depth: 2}, - - {root: NewInt64Interval(8, 9), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - } - - tr := ivt.(*intervalTree) - visits := tr.visitLevel() - if !reflect.DeepEqual(expected, visits) { - t.Fatalf("level order expected %v, got %v", expected, visits) - } - - if visits[len(visits)-1].depth != 3 { - t.Fatalf("expected self-balanced tree with last level 3, but last level got %d", visits[len(visits)-1].depth) - } -} - -// TestIntervalTreeDelete ensures delete operation maintains red-black tree properties. -// Use https://www.cs.usfca.edu/~galles/visualization/RedBlack.html for test case creation. 
-// See https://github.com/etcd-io/etcd/issues/10877 for more detail. -// -// After insertion: -// -// [510,511] -// / \ -// ---------- ----------------------- -// / \ -// [82,83] [830,831] -// / \ / \ -// / \ / \ -// [11,12] [383,384](red) [647,648] [899,900](red) -// / \ / \ / \ -// / \ / \ / \ -// [261,262] [410,411] [514,515](red) [815,816](red) [888,889] [972,973] -// / \ / -// / \ / -// [238,239](red) [292,293](red) [953,954](red) -// -// After deleting 514 (no rebalance): -// -// [510,511] -// / \ -// ---------- ----------------------- -// / \ -// [82,83] [830,831] -// / \ / \ -// / \ / \ -// [11,12] [383,384](red) [647,648] [899,900](red) -// / \ \ / \ -// / \ \ / \ -// [261,262] [410,411] [815,816](red) [888,889] [972,973] -// / \ / -// / \ / -// [238,239](red) [292,293](red) [953,954](red) -// -// After deleting 11 (requires rebalancing): -// -// [510,511] -// / \ -// ---------- -------------------------- -// / \ -// [383,384] [830,831] -// / \ / \ -// / \ / \ -// [261,262](red) [410,411] [647,648] [899,900](red) -// / \ \ / \ -// / \ \ / \ -// [82,83] [292,293] [815,816](red) [888,889] [972,973] -// \ / -// \ / -// [238,239](red) [953,954](red) -func TestIntervalTreeDelete(t *testing.T) { - ivt := NewIntervalTree() - ivt.Insert(NewInt64Interval(510, 511), 0) - ivt.Insert(NewInt64Interval(82, 83), 0) - ivt.Insert(NewInt64Interval(830, 831), 0) - ivt.Insert(NewInt64Interval(11, 12), 0) - ivt.Insert(NewInt64Interval(383, 384), 0) - ivt.Insert(NewInt64Interval(647, 648), 0) - ivt.Insert(NewInt64Interval(899, 900), 0) - ivt.Insert(NewInt64Interval(261, 262), 0) - ivt.Insert(NewInt64Interval(410, 411), 0) - ivt.Insert(NewInt64Interval(514, 515), 0) - ivt.Insert(NewInt64Interval(815, 816), 0) - ivt.Insert(NewInt64Interval(888, 889), 0) - ivt.Insert(NewInt64Interval(972, 973), 0) - ivt.Insert(NewInt64Interval(238, 239), 0) - ivt.Insert(NewInt64Interval(292, 293), 0) - ivt.Insert(NewInt64Interval(953, 954), 0) - - tr := ivt.(*intervalTree) - - 
expectedBeforeDelete := []visitedInterval{ - {root: NewInt64Interval(510, 511), color: black, left: NewInt64Interval(82, 83), right: NewInt64Interval(830, 831), depth: 0}, - - {root: NewInt64Interval(82, 83), color: black, left: NewInt64Interval(11, 12), right: NewInt64Interval(383, 384), depth: 1}, - {root: NewInt64Interval(830, 831), color: black, left: NewInt64Interval(647, 648), right: NewInt64Interval(899, 900), depth: 1}, - - {root: NewInt64Interval(11, 12), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 2}, - {root: NewInt64Interval(383, 384), color: red, left: NewInt64Interval(261, 262), right: NewInt64Interval(410, 411), depth: 2}, - {root: NewInt64Interval(647, 648), color: black, left: NewInt64Interval(514, 515), right: NewInt64Interval(815, 816), depth: 2}, - {root: NewInt64Interval(899, 900), color: red, left: NewInt64Interval(888, 889), right: NewInt64Interval(972, 973), depth: 2}, - - {root: NewInt64Interval(261, 262), color: black, left: NewInt64Interval(238, 239), right: NewInt64Interval(292, 293), depth: 3}, - {root: NewInt64Interval(410, 411), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(514, 515), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(815, 816), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(888, 889), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(972, 973), color: black, left: NewInt64Interval(953, 954), right: newInt64EmptyInterval(), depth: 3}, - - {root: NewInt64Interval(238, 239), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - {root: NewInt64Interval(292, 293), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - {root: NewInt64Interval(953, 954), color: red, 
left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - } - visitsBeforeDelete := tr.visitLevel() - if !reflect.DeepEqual(expectedBeforeDelete, visitsBeforeDelete) { - t.Fatalf("level order after insertion expected %v, got %v", expectedBeforeDelete, visitsBeforeDelete) - } - - // delete the node "514" - range514 := NewInt64Interval(514, 515) - if deleted := tr.Delete(NewInt64Interval(514, 515)); !deleted { - t.Fatalf("range %v not deleted", range514) - } - - expectedAfterDelete514 := []visitedInterval{ - {root: NewInt64Interval(510, 511), color: black, left: NewInt64Interval(82, 83), right: NewInt64Interval(830, 831), depth: 0}, - - {root: NewInt64Interval(82, 83), color: black, left: NewInt64Interval(11, 12), right: NewInt64Interval(383, 384), depth: 1}, - {root: NewInt64Interval(830, 831), color: black, left: NewInt64Interval(647, 648), right: NewInt64Interval(899, 900), depth: 1}, - - {root: NewInt64Interval(11, 12), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 2}, - {root: NewInt64Interval(383, 384), color: red, left: NewInt64Interval(261, 262), right: NewInt64Interval(410, 411), depth: 2}, - {root: NewInt64Interval(647, 648), color: black, left: newInt64EmptyInterval(), right: NewInt64Interval(815, 816), depth: 2}, - {root: NewInt64Interval(899, 900), color: red, left: NewInt64Interval(888, 889), right: NewInt64Interval(972, 973), depth: 2}, - - {root: NewInt64Interval(261, 262), color: black, left: NewInt64Interval(238, 239), right: NewInt64Interval(292, 293), depth: 3}, - {root: NewInt64Interval(410, 411), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(815, 816), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(888, 889), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(972, 973), color: black, left: 
NewInt64Interval(953, 954), right: newInt64EmptyInterval(), depth: 3}, - - {root: NewInt64Interval(238, 239), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - {root: NewInt64Interval(292, 293), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - {root: NewInt64Interval(953, 954), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - } - visitsAfterDelete514 := tr.visitLevel() - if !reflect.DeepEqual(expectedAfterDelete514, visitsAfterDelete514) { - t.Fatalf("level order after deleting '514' expected %v, got %v", expectedAfterDelete514, visitsAfterDelete514) - } - - // delete the node "11" - range11 := NewInt64Interval(11, 12) - if deleted := tr.Delete(NewInt64Interval(11, 12)); !deleted { - t.Fatalf("range %v not deleted", range11) - } - - expectedAfterDelete11 := []visitedInterval{ - {root: NewInt64Interval(510, 511), color: black, left: NewInt64Interval(383, 384), right: NewInt64Interval(830, 831), depth: 0}, - - {root: NewInt64Interval(383, 384), color: black, left: NewInt64Interval(261, 262), right: NewInt64Interval(410, 411), depth: 1}, - {root: NewInt64Interval(830, 831), color: black, left: NewInt64Interval(647, 648), right: NewInt64Interval(899, 900), depth: 1}, - - {root: NewInt64Interval(261, 262), color: red, left: NewInt64Interval(82, 83), right: NewInt64Interval(292, 293), depth: 2}, - {root: NewInt64Interval(410, 411), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 2}, - {root: NewInt64Interval(647, 648), color: black, left: newInt64EmptyInterval(), right: NewInt64Interval(815, 816), depth: 2}, - {root: NewInt64Interval(899, 900), color: red, left: NewInt64Interval(888, 889), right: NewInt64Interval(972, 973), depth: 2}, - - {root: NewInt64Interval(82, 83), color: black, left: newInt64EmptyInterval(), right: NewInt64Interval(238, 239), depth: 3}, - {root: NewInt64Interval(292, 293), color: black, left: 
newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(815, 816), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(888, 889), color: black, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 3}, - {root: NewInt64Interval(972, 973), color: black, left: NewInt64Interval(953, 954), right: newInt64EmptyInterval(), depth: 3}, - - {root: NewInt64Interval(238, 239), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - {root: NewInt64Interval(953, 954), color: red, left: newInt64EmptyInterval(), right: newInt64EmptyInterval(), depth: 4}, - } - visitsAfterDelete11 := tr.visitLevel() - if !reflect.DeepEqual(expectedAfterDelete11, visitsAfterDelete11) { - t.Fatalf("level order after deleting '11' expected %v, got %v", expectedAfterDelete11, visitsAfterDelete11) - } -} - -func TestIntervalTreeIntersects(t *testing.T) { - ivt := NewIntervalTree() - ivt.Insert(NewStringInterval("1", "3"), 123) - - if ivt.Intersects(NewStringPoint("0")) { - t.Errorf("contains 0") - } - if !ivt.Intersects(NewStringPoint("1")) { - t.Errorf("missing 1") - } - if !ivt.Intersects(NewStringPoint("11")) { - t.Errorf("missing 11") - } - if !ivt.Intersects(NewStringPoint("2")) { - t.Errorf("missing 2") - } - if ivt.Intersects(NewStringPoint("3")) { - t.Errorf("contains 3") - } -} - -func TestIntervalTreeStringAffine(t *testing.T) { - ivt := NewIntervalTree() - ivt.Insert(NewStringAffineInterval("8", ""), 123) - if !ivt.Intersects(NewStringAffinePoint("9")) { - t.Errorf("missing 9") - } - if ivt.Intersects(NewStringAffinePoint("7")) { - t.Errorf("contains 7") - } -} - -func TestIntervalTreeStab(t *testing.T) { - ivt := NewIntervalTree() - ivt.Insert(NewStringInterval("0", "1"), 123) - ivt.Insert(NewStringInterval("0", "2"), 456) - ivt.Insert(NewStringInterval("5", "6"), 789) - ivt.Insert(NewStringInterval("6", "8"), 999) - 
ivt.Insert(NewStringInterval("0", "3"), 0) - - tr := ivt.(*intervalTree) - if tr.root.max.Compare(StringComparable("8")) != 0 { - t.Fatalf("wrong root max got %v, expected 8", tr.root.max) - } - if x := len(ivt.Stab(NewStringPoint("0"))); x != 3 { - t.Errorf("got %d, expected 3", x) - } - if x := len(ivt.Stab(NewStringPoint("1"))); x != 2 { - t.Errorf("got %d, expected 2", x) - } - if x := len(ivt.Stab(NewStringPoint("2"))); x != 1 { - t.Errorf("got %d, expected 1", x) - } - if x := len(ivt.Stab(NewStringPoint("3"))); x != 0 { - t.Errorf("got %d, expected 0", x) - } - if x := len(ivt.Stab(NewStringPoint("5"))); x != 1 { - t.Errorf("got %d, expected 1", x) - } - if x := len(ivt.Stab(NewStringPoint("55"))); x != 1 { - t.Errorf("got %d, expected 1", x) - } - if x := len(ivt.Stab(NewStringPoint("6"))); x != 1 { - t.Errorf("got %d, expected 1", x) - } -} - -type xy struct { - x int64 - y int64 -} - -func TestIntervalTreeRandom(t *testing.T) { - // generate unique intervals - ivs := make(map[xy]struct{}) - ivt := NewIntervalTree() - maxv := 128 - rand.Seed(time.Now().UnixNano()) - - for i := rand.Intn(maxv) + 1; i != 0; i-- { - x, y := int64(rand.Intn(maxv)), int64(rand.Intn(maxv)) - if x > y { - t := x - x = y - y = t - } else if x == y { - y++ - } - iv := xy{x, y} - if _, ok := ivs[iv]; ok { - // don't double insert - continue - } - ivt.Insert(NewInt64Interval(x, y), 123) - ivs[iv] = struct{}{} - } - - for ab := range ivs { - for xy := range ivs { - v := xy.x + int64(rand.Intn(int(xy.y-xy.x))) - if slen := len(ivt.Stab(NewInt64Point(v))); slen == 0 { - t.Fatalf("expected %v stab non-zero for [%+v)", v, xy) - } - if !ivt.Intersects(NewInt64Point(v)) { - t.Fatalf("did not get %d as expected for [%+v)", v, xy) - } - } - if !ivt.Delete(NewInt64Interval(ab.x, ab.y)) { - t.Errorf("did not delete %v as expected", ab) - } - delete(ivs, ab) - } - - if ivt.Len() != 0 { - t.Errorf("got ivt.Len() = %v, expected 0", ivt.Len()) - } -} - -// TestIntervalTreeSortedVisit tests that 
intervals are visited in sorted order. -func TestIntervalTreeSortedVisit(t *testing.T) { - tests := []struct { - ivls []Interval - visitRange Interval - }{ - { - ivls: []Interval{NewInt64Interval(1, 10), NewInt64Interval(2, 5), NewInt64Interval(3, 6)}, - visitRange: NewInt64Interval(0, 100), - }, - { - ivls: []Interval{NewInt64Interval(1, 10), NewInt64Interval(10, 12), NewInt64Interval(3, 6)}, - visitRange: NewInt64Interval(0, 100), - }, - { - ivls: []Interval{NewInt64Interval(2, 3), NewInt64Interval(3, 4), NewInt64Interval(6, 7), NewInt64Interval(5, 6)}, - visitRange: NewInt64Interval(0, 100), - }, - { - ivls: []Interval{ - NewInt64Interval(2, 3), - NewInt64Interval(2, 4), - NewInt64Interval(3, 7), - NewInt64Interval(2, 5), - NewInt64Interval(3, 8), - NewInt64Interval(3, 5), - }, - visitRange: NewInt64Interval(0, 100), - }, - } - for i, tt := range tests { - ivt := NewIntervalTree() - for _, ivl := range tt.ivls { - ivt.Insert(ivl, struct{}{}) - } - last := tt.ivls[0].Begin - count := 0 - chk := func(iv *IntervalValue) bool { - if last.Compare(iv.Ivl.Begin) > 0 { - t.Errorf("#%d: expected less than %d, got interval %+v", i, last, iv.Ivl) - } - last = iv.Ivl.Begin - count++ - return true - } - ivt.Visit(tt.visitRange, chk) - if count != len(tt.ivls) { - t.Errorf("#%d: did not cover all intervals. expected %d, got %d", i, len(tt.ivls), count) - } - } -} - -// TestIntervalTreeVisitExit tests that visiting can be stopped. 
-func TestIntervalTreeVisitExit(t *testing.T) { - ivls := []Interval{NewInt64Interval(1, 10), NewInt64Interval(2, 5), NewInt64Interval(3, 6), NewInt64Interval(4, 8)} - ivlRange := NewInt64Interval(0, 100) - tests := []struct { - f IntervalVisitor - - wcount int - }{ - { - f: func(n *IntervalValue) bool { return false }, - wcount: 1, - }, - { - f: func(n *IntervalValue) bool { return n.Ivl.Begin.Compare(ivls[0].Begin) <= 0 }, - wcount: 2, - }, - { - f: func(n *IntervalValue) bool { return n.Ivl.Begin.Compare(ivls[2].Begin) < 0 }, - wcount: 3, - }, - { - f: func(n *IntervalValue) bool { return true }, - wcount: 4, - }, - } - - for i, tt := range tests { - ivt := NewIntervalTree() - for _, ivl := range ivls { - ivt.Insert(ivl, struct{}{}) - } - count := 0 - ivt.Visit(ivlRange, func(n *IntervalValue) bool { - count++ - return tt.f(n) - }) - if count != tt.wcount { - t.Errorf("#%d: expected count %d, got %d", i, tt.wcount, count) - } - } -} - -// TestIntervalTreeContains tests that contains returns true iff the ivt maps the entire interval. 
-func TestIntervalTreeContains(t *testing.T) { - tests := []struct { - ivls []Interval - chkIvl Interval - - wContains bool - }{ - { - ivls: []Interval{NewInt64Interval(1, 10)}, - chkIvl: NewInt64Interval(0, 100), - - wContains: false, - }, - { - ivls: []Interval{NewInt64Interval(1, 10)}, - chkIvl: NewInt64Interval(1, 10), - - wContains: true, - }, - { - ivls: []Interval{NewInt64Interval(1, 10)}, - chkIvl: NewInt64Interval(2, 8), - - wContains: true, - }, - { - ivls: []Interval{NewInt64Interval(1, 5), NewInt64Interval(6, 10)}, - chkIvl: NewInt64Interval(1, 10), - - wContains: false, - }, - { - ivls: []Interval{NewInt64Interval(1, 5), NewInt64Interval(3, 10)}, - chkIvl: NewInt64Interval(1, 10), - - wContains: true, - }, - { - ivls: []Interval{NewInt64Interval(1, 4), NewInt64Interval(4, 7), NewInt64Interval(3, 10)}, - chkIvl: NewInt64Interval(1, 10), - - wContains: true, - }, - { - ivls: []Interval{}, - chkIvl: NewInt64Interval(1, 10), - - wContains: false, - }, - } - for i, tt := range tests { - ivt := NewIntervalTree() - for _, ivl := range tt.ivls { - ivt.Insert(ivl, struct{}{}) - } - if v := ivt.Contains(tt.chkIvl); v != tt.wContains { - t.Errorf("#%d: ivt.Contains got %v, expected %v", i, v, tt.wContains) - } - } -} diff --git a/pkg/cobrautl/help.go b/pkg/cobrautl/help.go index 2f7e003dfa3..44cdc9aa886 100644 --- a/pkg/cobrautl/help.go +++ b/pkg/cobrautl/help.go @@ -99,7 +99,7 @@ GLOBAL OPTIONS: {{end}} `[1:] - commandUsageTemplate = template.Must(template.New("command_usage").Funcs(templFuncs).Parse(strings.ReplaceAll(commandUsage, "\\\n", ""))) + commandUsageTemplate = template.Must(template.New("command_usage").Funcs(templFuncs).Parse(strings.Replace(commandUsage, "\\\n", "", -1))) } func etcdFlagUsages(flagSet *pflag.FlagSet) string { diff --git a/pkg/cpuutil/endian.go b/pkg/cpuutil/endian.go index d654b747664..06c06cd4a5f 100644 --- a/pkg/cpuutil/endian.go +++ b/pkg/cpuutil/endian.go @@ -19,7 +19,7 @@ import ( "unsafe" ) -const intWidth = 
int(unsafe.Sizeof(0)) +const intWidth int = int(unsafe.Sizeof(0)) var byteOrder binary.ByteOrder @@ -27,7 +27,7 @@ var byteOrder binary.ByteOrder func ByteOrder() binary.ByteOrder { return byteOrder } func init() { - i := 0x1 + i := int(0x1) if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 { byteOrder = binary.BigEndian } else { diff --git a/pkg/crc/crc_test.go b/pkg/crc/crc_test.go deleted file mode 100644 index 45759640904..00000000000 --- a/pkg/crc/crc_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package crc - -import ( - "hash/crc32" - "reflect" - "testing" -) - -// TestHash32 tests that Hash32 provided by this package can take an initial -// crc and behaves exactly the same as the standard one in the following calls. -func TestHash32(t *testing.T) { - stdhash := crc32.New(crc32.IEEETable) - if _, err := stdhash.Write([]byte("test data")); err != nil { - t.Fatalf("unexpected write error: %v", err) - } - // create a new hash with stdhash.Sum32() as initial crc - hash := New(stdhash.Sum32(), crc32.IEEETable) - - wsize := stdhash.Size() - if g := hash.Size(); g != wsize { - t.Errorf("size = %d, want %d", g, wsize) - } - wbsize := stdhash.BlockSize() - if g := hash.BlockSize(); g != wbsize { - t.Errorf("block size = %d, want %d", g, wbsize) - } - wsum32 := stdhash.Sum32() - if g := hash.Sum32(); g != wsum32 { - t.Errorf("Sum32 = %d, want %d", g, wsum32) - } - wsum := stdhash.Sum(make([]byte, 32)) - if g := hash.Sum(make([]byte, 32)); !reflect.DeepEqual(g, wsum) { - t.Errorf("sum = %v, want %v", g, wsum) - } - - // write something - if _, err := stdhash.Write([]byte("test data")); err != nil { - t.Fatalf("unexpected write error: %v", err) - } - if _, err := hash.Write([]byte("test data")); err != nil { - t.Fatalf("unexpected write error: %v", err) - } - wsum32 = stdhash.Sum32() - if g := hash.Sum32(); 
g != wsum32 { - t.Errorf("Sum32 after write = %d, want %d", g, wsum32) - } - - // reset - stdhash.Reset() - hash.Reset() - wsum32 = stdhash.Sum32() - if g := hash.Sum32(); g != wsum32 { - t.Errorf("Sum32 after reset = %d, want %d", g, wsum32) - } -} diff --git a/pkg/expect/expect.go b/pkg/expect/expect.go index afc9dbe3b2d..12f95f98d9f 100644 --- a/pkg/expect/expect.go +++ b/pkg/expect/expect.go @@ -18,8 +18,6 @@ package expect import ( "bufio" - "context" - "errors" "fmt" "io" "os" @@ -27,189 +25,109 @@ import ( "strings" "sync" "syscall" - "time" "github.com/creack/pty" ) const DEBUG_LINES_TAIL = 40 -var ( - ErrProcessRunning = fmt.Errorf("process is still running") -) - type ExpectProcess struct { - cfg expectConfig - cmd *exec.Cmd fpty *os.File wg sync.WaitGroup - mu sync.Mutex // protects lines, count, cur, exitErr and exitCode - lines []string - count int // increment whenever new line gets added - cur int // current read position - exitErr error // process exit error - exitCode int + cond *sync.Cond // for broadcasting updates are available + mu sync.Mutex // protects lines and err + lines []string + count int // increment whenever new line gets added + err error + + // StopSignal is the signal Stop sends to the process; defaults to SIGKILL. + StopSignal os.Signal } // NewExpect creates a new process for expect testing. func NewExpect(name string, arg ...string) (ep *ExpectProcess, err error) { - // if env[] is nil, use current system env and the default command as name - return NewExpectWithEnv(name, arg, nil, name) + // if env[] is nil, use current system env + return NewExpectWithEnv(name, arg, nil) } // NewExpectWithEnv creates a new process with user defined env variables for expect testing. -func NewExpectWithEnv(name string, args []string, env []string, serverProcessConfigName string) (ep *ExpectProcess, err error) { +func NewExpectWithEnv(name string, args []string, env []string) (ep *ExpectProcess, err error) { + cmd := exec.Command(name, args...) 
+ cmd.Env = env ep = &ExpectProcess{ - cfg: expectConfig{ - name: serverProcessConfigName, - cmd: name, - args: args, - env: env, - }, + cmd: cmd, + StopSignal: syscall.SIGKILL, } - ep.cmd = commandFromConfig(ep.cfg) + ep.cond = sync.NewCond(&ep.mu) + ep.cmd.Stderr = ep.cmd.Stdout + ep.cmd.Stdin = nil if ep.fpty, err = pty.Start(ep.cmd); err != nil { return nil, err } - ep.wg.Add(2) + ep.wg.Add(1) go ep.read() - go ep.waitSaveExitErr() return ep, nil } -type expectConfig struct { - name string - cmd string - args []string - env []string -} - -func commandFromConfig(config expectConfig) *exec.Cmd { - cmd := exec.Command(config.cmd, config.args...) - cmd.Env = config.env - cmd.Stderr = cmd.Stdout - cmd.Stdin = nil - return cmd -} - -func (ep *ExpectProcess) Pid() int { - return ep.cmd.Process.Pid -} - func (ep *ExpectProcess) read() { defer ep.wg.Done() - defer func(fpty *os.File) { - err := fpty.Close() - if err != nil { - // we deliberately only log the error here, closing the PTY should mostly be (expected) broken pipes - fmt.Printf("error while closing fpty: %v", err) - } - }(ep.fpty) - + printDebugLines := os.Getenv("EXPECT_DEBUG") != "" r := bufio.NewReader(ep.fpty) - for { - err := ep.tryReadNextLine(r) - if err != nil { - break + for ep.err == nil { + l, rerr := r.ReadString('\n') + ep.mu.Lock() + ep.err = rerr + if l != "" { + if printDebugLines { + fmt.Printf("%s-%d: %s", ep.cmd.Path, ep.cmd.Process.Pid, l) + } + ep.lines = append(ep.lines, l) + ep.count++ + if len(ep.lines) == 1 { + ep.cond.Signal() + } } + ep.mu.Unlock() } + ep.cond.Signal() } -func (ep *ExpectProcess) tryReadNextLine(r *bufio.Reader) error { - printDebugLines := os.Getenv("EXPECT_DEBUG") != "" - l, err := r.ReadString('\n') - - ep.mu.Lock() - defer ep.mu.Unlock() - - if l != "" { - if printDebugLines { - fmt.Printf("%s (%s) (%d): %s", ep.cmd.Path, ep.cfg.name, ep.cmd.Process.Pid, l) - } - ep.lines = append(ep.lines, l) - ep.count++ - } - - // we're checking the error here at the bottom 
to ensure any leftover reads are still taken into account - return err -} - -func (ep *ExpectProcess) waitSaveExitErr() { - defer ep.wg.Done() - err := ep.waitProcess() +// ExpectFunc returns the first line satisfying the function f. +func (ep *ExpectProcess) ExpectFunc(f func(string) bool) (string, error) { + lastLinesBuffer := make([]string, 0) ep.mu.Lock() - defer ep.mu.Unlock() - if err != nil { - ep.exitErr = err - } -} - -// ExpectFunc returns the first line satisfying the function f. -func (ep *ExpectProcess) ExpectFunc(ctx context.Context, f func(string) bool) (string, error) { - i := 0 for { - line, errsFound := func() (string, bool) { - ep.mu.Lock() - defer ep.mu.Unlock() - - // check if this expect has been already closed - if ep.cmd == nil { - return "", true - } - - for i < len(ep.lines) { - line := ep.lines[i] - i++ - if f(line) { - return line, false - } - } - return "", ep.exitErr != nil - }() - - if line != "" { - return line, nil + for len(ep.lines) == 0 && ep.err == nil { + ep.cond.Wait() } - - if errsFound { + if len(ep.lines) == 0 { break } - - select { - case <-ctx.Done(): - return "", fmt.Errorf("failed to find match string: %w", ctx.Err()) - case <-time.After(time.Millisecond * 10): - // continue loop + l := ep.lines[0] + ep.lines = ep.lines[1:] + lastLinesBuffer = append(lastLinesBuffer, l) + if l := len(lastLinesBuffer); l > DEBUG_LINES_TAIL { + lastLinesBuffer = lastLinesBuffer[l-DEBUG_LINES_TAIL : l-1] + } + if f(l) { + ep.mu.Unlock() + return l, nil } } - - ep.mu.Lock() - defer ep.mu.Unlock() - - lastLinesIndex := len(ep.lines) - DEBUG_LINES_TAIL - if lastLinesIndex < 0 { - lastLinesIndex = 0 - } - lastLines := strings.Join(ep.lines[lastLinesIndex:], "") - return "", fmt.Errorf("match not found. "+ - " Set EXPECT_DEBUG for more info Errs: [%v], last lines:\n%s", - ep.exitErr, lastLines) -} - -// ExpectWithContext returns the first line containing the given string. 
-func (ep *ExpectProcess) ExpectWithContext(ctx context.Context, s string) (string, error) { - return ep.ExpectFunc(ctx, func(txt string) bool { return strings.Contains(txt, s) }) + ep.mu.Unlock() + return "", fmt.Errorf("match not found."+ + " Set EXPECT_DEBUG for more info Err: %v, last lines:\n%s", + ep.err, strings.Join(lastLinesBuffer, "")) } // Expect returns the first line containing the given string. -// Deprecated: please use ExpectWithContext instead. func (ep *ExpectProcess) Expect(s string) (string, error) { - return ep.ExpectWithContext(context.Background(), s) + return ep.ExpectFunc(func(txt string) bool { return strings.Contains(txt, s) }) } // LineCount returns the number of recorded lines since @@ -220,85 +138,42 @@ func (ep *ExpectProcess) LineCount() int { return ep.count } -// ExitCode returns the exit code of this process. -// If the process is still running, it returns exit code 0 and ErrProcessRunning. -func (ep *ExpectProcess) ExitCode() (int, error) { - ep.mu.Lock() - defer ep.mu.Unlock() - - if ep.cmd == nil { - return ep.exitCode, nil - } - - return 0, ErrProcessRunning -} - -// ExitError returns the exit error of this process (if any). -// If the process is still running, it returns ErrProcessRunning instead. -func (ep *ExpectProcess) ExitError() error { - ep.mu.Lock() - defer ep.mu.Unlock() - - if ep.cmd == nil { - return ep.exitErr - } - - return ErrProcessRunning -} - -// Stop signals the process to terminate via SIGTERM -func (ep *ExpectProcess) Stop() error { - err := ep.Signal(syscall.SIGTERM) - if err != nil && strings.Contains(err.Error(), "os: process already finished") { - return nil - } - return err -} +// Stop kills the expect process and waits for it to exit. 
+func (ep *ExpectProcess) Stop() error { return ep.close(true) } // Signal sends a signal to the expect process func (ep *ExpectProcess) Signal(sig os.Signal) error { - ep.mu.Lock() - defer ep.mu.Unlock() - - if ep.cmd == nil { - return errors.New("expect process already closed") - } - return ep.cmd.Process.Signal(sig) } -func (ep *ExpectProcess) waitProcess() error { - state, err := ep.cmd.Process.Wait() - if err != nil { - return err - } +// Close waits for the expect process to exit. +// Close currently does not return error if process exited with !=0 status. +// TODO: Close should expose underlying proces failure by default. +func (ep *ExpectProcess) Close() error { return ep.close(false) } - ep.mu.Lock() - defer ep.mu.Unlock() - ep.exitCode = state.ExitCode() - - if !state.Success() { - return fmt.Errorf("unexpected exit code [%d] after running [%s]", ep.exitCode, ep.cmd.String()) +func (ep *ExpectProcess) close(kill bool) error { + if ep.cmd == nil { + return ep.err + } + if kill { + ep.Signal(ep.StopSignal) } - return nil -} - -// Wait waits for the process to finish. -func (ep *ExpectProcess) Wait() { - ep.wg.Wait() -} - -// Close waits for the expect process to exit and return its error. -func (ep *ExpectProcess) Close() error { + err := ep.cmd.Wait() + ep.fpty.Close() ep.wg.Wait() - ep.mu.Lock() - defer ep.mu.Unlock() + if err != nil { + if !kill && strings.Contains(err.Error(), "exit status") { + // non-zero exit code + err = nil + } else if kill && strings.Contains(err.Error(), "signal:") { + err = nil + } + } - // this signals to other funcs that the process has finished ep.cmd = nil - return ep.exitErr + return err } func (ep *ExpectProcess) Send(command string) error { @@ -306,20 +181,11 @@ func (ep *ExpectProcess) Send(command string) error { return err } -func (ep *ExpectProcess) Lines() []string { - ep.mu.Lock() - defer ep.mu.Unlock() - return ep.lines -} - -// ReadLine returns line by line. 
-func (ep *ExpectProcess) ReadLine() string { - ep.mu.Lock() - defer ep.mu.Unlock() - if ep.count > ep.cur { - line := ep.lines[ep.cur] - ep.cur++ - return line +func (ep *ExpectProcess) ProcessError() error { + if strings.Contains(ep.err.Error(), "input/output error") { + // TODO: The expect library should not return + // `/dev/ptmx: input/output error` when process just exits. + return nil } - return "" + return ep.err } diff --git a/pkg/expect/expect_test.go b/pkg/expect/expect_test.go deleted file mode 100644 index b918df67c2f..00000000000 --- a/pkg/expect/expect_test.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// build !windows - -package expect - -import ( - "context" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestExpectFunc(t *testing.T) { - ep, err := NewExpect("echo", "hello world") - if err != nil { - t.Fatal(err) - } - wstr := "hello world\r\n" - l, eerr := ep.ExpectFunc(context.Background(), func(a string) bool { return len(a) > 10 }) - if eerr != nil { - t.Fatal(eerr) - } - if l != wstr { - t.Fatalf(`got "%v", expected "%v"`, l, wstr) - } - if cerr := ep.Close(); cerr != nil { - t.Fatal(cerr) - } -} - -func TestExpectFuncTimeout(t *testing.T) { - ep, err := NewExpect("tail", "-f", "/dev/null") - if err != nil { - t.Fatal(err) - } - go func() { - // It's enough to have "talkative" process to stuck in the infinite loop of reading - for { - err := ep.Send("new line\n") - if err != nil { - return - } - } - }() - - ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel() - - _, err = ep.ExpectFunc(ctx, func(a string) bool { return false }) - - require.ErrorAs(t, err, &context.DeadlineExceeded) - - if err := ep.Stop(); err != nil { - t.Fatal(err) - } - - err = ep.Close() - require.ErrorContains(t, err, "unexpected exit code [-1] after running [/usr/bin/tail -f /dev/null]") - require.Equal(t, -1, ep.exitCode) -} - -func TestExpectFuncExitFailure(t *testing.T) { - // tail -x should not exist and return a non-zero exit code - ep, err := NewExpect("tail", "-x") - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel() - - _, err = ep.ExpectFunc(ctx, func(s string) bool { - return strings.Contains(s, "something entirely unexpected") - }) - require.ErrorContains(t, err, "unexpected exit code [1] after running [/usr/bin/tail -x]") - require.Equal(t, 1, ep.exitCode) -} - -func TestExpectFuncExitFailureStop(t *testing.T) { - // tail -x should not exist and return a 
non-zero exit code - ep, err := NewExpect("tail", "-x") - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel() - - _, err = ep.ExpectFunc(ctx, func(s string) bool { - return strings.Contains(s, "something entirely unexpected") - }) - require.ErrorContains(t, err, "unexpected exit code [1] after running [/usr/bin/tail -x]") - exitCode, err := ep.ExitCode() - require.Equal(t, 0, exitCode) - require.Equal(t, err, ErrProcessRunning) - if err := ep.Stop(); err != nil { - t.Fatal(err) - } - err = ep.Close() - require.ErrorContains(t, err, "unexpected exit code [1] after running [/usr/bin/tail -x]") - exitCode, err = ep.ExitCode() - require.Equal(t, 1, exitCode) - require.NoError(t, err) -} - -func TestEcho(t *testing.T) { - ep, err := NewExpect("echo", "hello world") - if err != nil { - t.Fatal(err) - } - ctx := context.Background() - l, eerr := ep.ExpectWithContext(ctx, "world") - if eerr != nil { - t.Fatal(eerr) - } - wstr := "hello world" - if l[:len(wstr)] != wstr { - t.Fatalf(`got "%v", expected "%v"`, l, wstr) - } - if cerr := ep.Close(); cerr != nil { - t.Fatal(cerr) - } - if _, eerr = ep.ExpectWithContext(ctx, "..."); eerr == nil { - t.Fatalf("expected error on closed expect process") - } -} - -func TestLineCount(t *testing.T) { - ep, err := NewExpect("printf", "1\n2\n3") - if err != nil { - t.Fatal(err) - } - wstr := "3" - l, eerr := ep.ExpectWithContext(context.Background(), wstr) - if eerr != nil { - t.Fatal(eerr) - } - if l != wstr { - t.Fatalf(`got "%v", expected "%v"`, l, wstr) - } - if ep.LineCount() != 3 { - t.Fatalf("got %d, expected 3", ep.LineCount()) - } - if cerr := ep.Close(); cerr != nil { - t.Fatal(cerr) - } -} - -func TestSend(t *testing.T) { - ep, err := NewExpect("tr", "a", "b") - if err != nil { - t.Fatal(err) - } - if err := ep.Send("a\r"); err != nil { - t.Fatal(err) - } - if _, err := ep.ExpectWithContext(context.Background(), "b"); err != nil { - t.Fatal(err) 
- } - if err := ep.Stop(); err != nil { - t.Fatal(err) - } -} - -func TestSignal(t *testing.T) { - ep, err := NewExpect("sleep", "100") - if err != nil { - t.Fatal(err) - } - ep.Signal(os.Interrupt) - donec := make(chan struct{}) - go func() { - defer close(donec) - err = ep.Close() - assert.ErrorContains(t, err, "unexpected exit code [-1]") - assert.ErrorContains(t, err, "sleep 100") - }() - select { - case <-time.After(5 * time.Second): - t.Fatalf("signal test timed out") - case <-donec: - } -} diff --git a/pkg/flags/flag.go b/pkg/flags/flag.go index 5e60b72adc9..afde2fef394 100644 --- a/pkg/flags/flag.go +++ b/pkg/flags/flag.go @@ -25,11 +25,9 @@ import ( "go.uber.org/zap" ) -// SetFlagsFromEnv parses all registered flags in the given flagset, -// and if they are not already set it attempts to set their values from -// environment variables. Environment variables take the name of the flag but -// are UPPERCASE, have the given prefix and any dashes are replaced by -// underscores - for example: some-flag => ETCD_SOME_FLAG +// SetFlagsFromEnv + +// 环境变量采用flag的名称,但为大写字母,有给定的前缀,任何破折号都由下划线代替 - 例如:Some-flag => ETCD_SOME_FLAG func SetFlagsFromEnv(lg *zap.Logger, prefix string, fs *flag.FlagSet) error { var err error alreadySet := make(map[string]bool) @@ -42,6 +40,7 @@ func SetFlagsFromEnv(lg *zap.Logger, prefix string, fs *flag.FlagSet) error { err = serr } }) + // usedEnvKey 环境变量中有值,但是命令行没有设置的 并将其设置到了flagSet verifyEnv(lg, prefix, usedEnvKey, alreadySet) return err } @@ -64,9 +63,9 @@ func SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error { return err } -// FlagToEnv converts flag string to upper-case environment variable key string. +// FlagToEnv 将标志字符串转换为大写的环境变量密钥字符串. 
func FlagToEnv(prefix, name string) string { - return prefix + "_" + strings.ToUpper(strings.ReplaceAll(name, "-", "_")) + return prefix + "_" + strings.ToUpper(strings.Replace(name, "-", "_", -1)) } func verifyEnv(lg *zap.Logger, prefix string, usedEnvKey, alreadySet map[string]bool) { @@ -74,7 +73,7 @@ func verifyEnv(lg *zap.Logger, prefix string, usedEnvKey, alreadySet map[string] kv := strings.SplitN(env, "=", 2) if len(kv) != 2 { if lg != nil { - lg.Warn("found invalid environment variable", zap.String("environment-variable", env)) + lg.Warn("发现无效的环境变量", zap.String("environment-variable", env)) } } if usedEnvKey[kv[0]] { @@ -83,14 +82,13 @@ func verifyEnv(lg *zap.Logger, prefix string, usedEnvKey, alreadySet map[string] if alreadySet[kv[0]] { if lg != nil { lg.Fatal( - "conflicting environment variable is shadowed by corresponding command-line flag (either unset environment variable or disable flag))", - zap.String("environment-variable", kv[0]), + "冲突的环境变量被相应的命令行标志所掩盖(取消环境变量或禁用标志)", zap.String("environment-variable", kv[0]), ) } } if strings.HasPrefix(env, prefix+"_") { if lg != nil { - lg.Warn("unrecognized environment variable", zap.String("environment-variable", env)) + lg.Warn("没有注册的环境变量", zap.String("environment-variable", env)) } } } @@ -107,14 +105,10 @@ func setFlagFromEnv(lg *zap.Logger, fs flagSetter, prefix, fname string, usedEnv if val != "" { usedEnvKey[key] = true if serr := fs.Set(fname, val); serr != nil { - return fmt.Errorf("invalid value %q for %s: %v", val, key, serr) + return fmt.Errorf("无效的值 %q for %s: %v", val, key, serr) } if log && lg != nil { - lg.Info( - "recognized and used environment variable", - zap.String("variable-name", key), - zap.String("variable-value", val), - ) + lg.Info("确认和使用的环境变量", zap.String("variable-name", key), zap.String("variable-value", val)) } } } diff --git a/pkg/flags/flag_test.go b/pkg/flags/flag_test.go deleted file mode 100644 index b7030da727c..00000000000 --- a/pkg/flags/flag_test.go +++ /dev/null @@ 
-1,96 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "flag" - "os" - "strings" - "testing" - - "go.uber.org/zap/zaptest" -) - -func TestSetFlagsFromEnv(t *testing.T) { - fs := flag.NewFlagSet("testing", flag.ExitOnError) - fs.String("a", "", "") - fs.String("b", "", "") - fs.String("c", "", "") - fs.Parse([]string{}) - - os.Clearenv() - // flags should be settable using env vars - os.Setenv("ETCD_A", "foo") - // and command-line flags - if err := fs.Set("b", "bar"); err != nil { - t.Fatal(err) - } - - // first verify that flags are as expected before reading the env - for f, want := range map[string]string{ - "a": "", - "b": "bar", - } { - if got := fs.Lookup(f).Value.String(); got != want { - t.Fatalf("flag %q=%q, want %q", f, got, want) - } - } - - // now read the env and verify flags were updated as expected - err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs) - if err != nil { - t.Errorf("err=%v, want nil", err) - } - for f, want := range map[string]string{ - "a": "foo", - "b": "bar", - } { - if got := fs.Lookup(f).Value.String(); got != want { - t.Errorf("flag %q=%q, want %q", f, got, want) - } - } -} - -func TestSetFlagsFromEnvBad(t *testing.T) { - // now verify that an error is propagated - fs := flag.NewFlagSet("testing", flag.ExitOnError) - fs.Int("x", 0, "") - os.Setenv("ETCD_X", "not_a_number") - if err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs); err == 
nil { - t.Errorf("err=nil, want != nil") - } -} - -func TestSetFlagsFromEnvParsingError(t *testing.T) { - fs := flag.NewFlagSet("etcd", flag.ContinueOnError) - var tickMs uint - fs.UintVar(&tickMs, "heartbeat-interval", 0, "Time (in milliseconds) of a heartbeat interval.") - - if oerr := os.Setenv("ETCD_HEARTBEAT_INTERVAL", "100 # ms"); oerr != nil { - t.Fatal(oerr) - } - defer os.Unsetenv("ETCD_HEARTBEAT_INTERVAL") - - err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs) - for _, v := range []string{"invalid syntax", "parse error"} { - if strings.Contains(err.Error(), v) { - err = nil - break - } - } - if err != nil { - t.Fatalf("unexpected error %v", err) - } -} diff --git a/pkg/flags/over_selective_string.go b/pkg/flags/over_selective_string.go new file mode 100644 index 00000000000..19a750029ac --- /dev/null +++ b/pkg/flags/over_selective_string.go @@ -0,0 +1,99 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +type SelectiveStringValue struct { + v string + valids map[string]struct{} +} + +// Set 检验参数是否为允许值中的有效成员 的有效成员,然后再设置基本的标志值. 
+func (ss *SelectiveStringValue) Set(s string) error { + if _, ok := ss.valids[s]; ok { + ss.v = s + return nil + } + return errors.New("无效的值") +} + +func (ss *SelectiveStringValue) String() string { + return ss.v +} + +func (ss *SelectiveStringValue) Valids() []string { + s := make([]string, 0, len(ss.valids)) + for k := range ss.valids { + s = append(s, k) + } + sort.Strings(s) + return s +} + +func NewSelectiveStringValue(valids ...string) *SelectiveStringValue { + vm := make(map[string]struct{}) + for _, v := range valids { + vm[v] = struct{}{} + } + return &SelectiveStringValue{valids: vm, v: valids[0]} +} + +// SelectiveStringsValue 实现了 flag.Value 接口. +type SelectiveStringsValue struct { + vs []string + valids map[string]struct{} +} + +func (ss *SelectiveStringsValue) Set(s string) error { + vs := strings.Split(s, ",") + for i := range vs { + if _, ok := ss.valids[vs[i]]; ok { + ss.vs = append(ss.vs, vs[i]) + } else { + return fmt.Errorf("invalid value %q", vs[i]) + } + } + sort.Strings(ss.vs) + return nil +} + +// OK +func (ss *SelectiveStringsValue) String() string { + return strings.Join(ss.vs, ",") +} + +// Valids OK +func (ss *SelectiveStringsValue) Valids() []string { + s := make([]string, 0, len(ss.valids)) + for k := range ss.valids { + s = append(s, k) + } + sort.Strings(s) + return s +} + +func NewSelectiveStringsValue(valids ...string) *SelectiveStringsValue { + vm := make(map[string]struct{}) + for _, v := range valids { + vm[v] = struct{}{} + } + return &SelectiveStringsValue{valids: vm, vs: []string{}} +} diff --git a/pkg/flags/over_strings.go b/pkg/flags/over_strings.go new file mode 100644 index 00000000000..627c42766fd --- /dev/null +++ b/pkg/flags/over_strings.go @@ -0,0 +1,48 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "flag" + "fmt" + "sort" + "strings" +) + +type StringsValue sort.StringSlice + +var _ flag.Value = &StringsValue{} + +func (ss *StringsValue) Set(s string) error { + *ss = strings.Split(s, ",") + return nil +} + +func (ss *StringsValue) String() string { return strings.Join(*ss, ",") } + +func NewStringsValue(s string) (ss *StringsValue) { + if s == "" { + return &StringsValue{} + } + ss = new(StringsValue) + if err := ss.Set(s); err != nil { + panic(fmt.Sprintf("new StringsValue应该永远不会失败: %v", err)) + } + return ss +} + +func StringsFromFlag(fs *flag.FlagSet, flagName string) []string { + return []string(*fs.Lookup(flagName).Value.(*StringsValue)) +} diff --git a/pkg/flags/over_unique_strings.go b/pkg/flags/over_unique_strings.go new file mode 100644 index 00000000000..59b24dc2224 --- /dev/null +++ b/pkg/flags/over_unique_strings.go @@ -0,0 +1,70 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package flags + +import ( + "flag" + "fmt" + "sort" + "strings" +) + +// UniqueStringsValue wraps a list of unique strings. +// The values are set in order. +type UniqueStringsValue struct { + Values map[string]struct{} +} + +var _ flag.Value = &UniqueStringsValue{} + +func (us *UniqueStringsValue) Set(s string) error { + us.Values = make(map[string]struct{}) + for _, v := range strings.Split(s, ",") { + us.Values[v] = struct{}{} + } + return nil +} + +func (us *UniqueStringsValue) String() string { + return strings.Join(us.stringSlice(), ",") +} + +func (us *UniqueStringsValue) stringSlice() []string { + ss := make([]string, 0, len(us.Values)) + for v := range us.Values { + ss = append(ss, v) + } + sort.Strings(ss) + return ss +} + +func NewUniqueStringsValue(s string) (us *UniqueStringsValue) { + us = &UniqueStringsValue{Values: make(map[string]struct{})} + if s == "" { + return us + } + if err := us.Set(s); err != nil { + panic(fmt.Sprintf("new UniqueStringsValue不应该失败: %v", err)) + } + return us +} + +func UniqueStringsFromFlag(fs *flag.FlagSet, flagName string) []string { + return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).stringSlice() +} + +func UniqueStringsMapFromFlag(fs *flag.FlagSet, flagName string) map[string]struct{} { + return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).Values +} diff --git a/pkg/flags/over_unique_urls.go b/pkg/flags/over_unique_urls.go new file mode 100644 index 00000000000..944aa2e15bd --- /dev/null +++ b/pkg/flags/over_unique_urls.go @@ -0,0 +1,91 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "flag" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" +) + +// UniqueURLs 包含独特的URL,有非URL例外. +type UniqueURLs struct { + Values map[string]struct{} // url->struct{} + uss []url.URL + Allowed map[string]struct{} // url,url -> struct{} +} + +var _ flag.Value = &UniqueURLs{} + +// Set parses http://127.0.0.1:2380,http://10.1.1.2:80 +func (us *UniqueURLs) Set(s string) error { + if _, ok := us.Values[s]; ok { + return nil + } + if _, ok := us.Allowed[s]; ok { + us.Values[s] = struct{}{} + return nil + } + ss, err := types.NewURLs(strings.Split(s, ",")) + if err != nil { + return err + } + us.Values = make(map[string]struct{}) + us.uss = make([]url.URL, 0) + for _, v := range ss { + us.Values[v.String()] = struct{}{} + us.uss = append(us.uss, v) + } + return nil +} + +// String implements "flag.Value" interface. +func (us *UniqueURLs) String() string { + all := make([]string, 0, len(us.Values)) + for u := range us.Values { + all = append(all, u) + } + sort.Strings(all) + return strings.Join(all, ",") +} + +// NewUniqueURLsWithExceptions 实现 "url.URL "切片作为flag.Value接口. 
+func NewUniqueURLsWithExceptions(s string, exceptions ...string) *UniqueURLs { + us := &UniqueURLs{Values: make(map[string]struct{}), Allowed: make(map[string]struct{})} + for _, v := range exceptions { + us.Allowed[v] = struct{}{} + } + if s == "" { + return us + } + if err := us.Set(s); err != nil { + panic(fmt.Sprintf("new UniqueURLs不应该失败: %v", err)) + } + return us +} + +// UniqueURLsFromFlag 从该标志获取的url返回一个切片. +func UniqueURLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL { + return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).uss +} + +// UniqueURLsMapFromFlag returns a map from url strings got from the flag. +func UniqueURLsMapFromFlag(fs *flag.FlagSet, urlsFlagName string) map[string]struct{} { + return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).Values +} diff --git a/pkg/flags/selective_string.go b/pkg/flags/selective_string.go deleted file mode 100644 index 4b90fbf4b49..00000000000 --- a/pkg/flags/selective_string.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// SelectiveStringValue implements the flag.Value interface. -type SelectiveStringValue struct { - v string - valids map[string]struct{} -} - -// Set verifies the argument to be a valid member of the allowed values -// before setting the underlying flag value. 
-func (ss *SelectiveStringValue) Set(s string) error { - if _, ok := ss.valids[s]; ok { - ss.v = s - return nil - } - return errors.New("invalid value") -} - -// String returns the set value (if any) of the SelectiveStringValue -func (ss *SelectiveStringValue) String() string { - return ss.v -} - -// Valids returns the list of valid strings. -func (ss *SelectiveStringValue) Valids() []string { - s := make([]string, 0, len(ss.valids)) - for k := range ss.valids { - s = append(s, k) - } - sort.Strings(s) - return s -} - -// NewSelectiveStringValue creates a new string flag -// for which any one of the given strings is a valid value, -// and any other value is an error. -// -// valids[0] will be default value. Caller must be sure -// len(valids) != 0 or it will panic. -func NewSelectiveStringValue(valids ...string) *SelectiveStringValue { - vm := make(map[string]struct{}) - for _, v := range valids { - vm[v] = struct{}{} - } - return &SelectiveStringValue{valids: vm, v: valids[0]} -} - -// SelectiveStringsValue implements the flag.Value interface. -type SelectiveStringsValue struct { - vs []string - valids map[string]struct{} -} - -// Set verifies the argument to be a valid member of the allowed values -// before setting the underlying flag value. -func (ss *SelectiveStringsValue) Set(s string) error { - vs := strings.Split(s, ",") - for i := range vs { - if _, ok := ss.valids[vs[i]]; ok { - ss.vs = append(ss.vs, vs[i]) - } else { - return fmt.Errorf("invalid value %q", vs[i]) - } - } - sort.Strings(ss.vs) - return nil -} - -// String returns the set value (if any) of the SelectiveStringsValue. -func (ss *SelectiveStringsValue) String() string { - return strings.Join(ss.vs, ",") -} - -// Valids returns the list of valid strings. 
-func (ss *SelectiveStringsValue) Valids() []string { - s := make([]string, 0, len(ss.valids)) - for k := range ss.valids { - s = append(s, k) - } - sort.Strings(s) - return s -} - -// NewSelectiveStringsValue creates a new string slice flag -// for which any one of the given strings is a valid value, -// and any other value is an error. -func NewSelectiveStringsValue(valids ...string) *SelectiveStringsValue { - vm := make(map[string]struct{}) - for _, v := range valids { - vm[v] = struct{}{} - } - return &SelectiveStringsValue{valids: vm, vs: []string{}} -} diff --git a/pkg/flags/selective_string_test.go b/pkg/flags/selective_string_test.go deleted file mode 100644 index cc310ed63bf..00000000000 --- a/pkg/flags/selective_string_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "testing" -) - -func TestSelectiveStringValue(t *testing.T) { - tests := []struct { - vals []string - - val string - pass bool - }{ - // known values - {[]string{"abc", "def"}, "abc", true}, - {[]string{"on", "off", "false"}, "on", true}, - - // unrecognized values - {[]string{"abc", "def"}, "ghi", false}, - {[]string{"on", "off"}, "", false}, - } - for i, tt := range tests { - sf := NewSelectiveStringValue(tt.vals...) 
- if sf.v != tt.vals[0] { - t.Errorf("#%d: want default val=%v,but got %v", i, tt.vals[0], sf.v) - } - err := sf.Set(tt.val) - if tt.pass != (err == nil) { - t.Errorf("#%d: want pass=%t, but got err=%v", i, tt.pass, err) - } - } -} - -func TestSelectiveStringsValue(t *testing.T) { - tests := []struct { - vals []string - - val string - pass bool - }{ - {[]string{"abc", "def"}, "abc", true}, - {[]string{"abc", "def"}, "abc,def", true}, - {[]string{"abc", "def"}, "abc, def", false}, - {[]string{"on", "off", "false"}, "on,false", true}, - {[]string{"abc", "def"}, "ghi", false}, - {[]string{"on", "off"}, "", false}, - {[]string{"a", "b", "c", "d", "e"}, "a,c,e", true}, - } - for i, tt := range tests { - sf := NewSelectiveStringsValue(tt.vals...) - err := sf.Set(tt.val) - if tt.pass != (err == nil) { - t.Errorf("#%d: want pass=%t, but got err=%v", i, tt.pass, err) - } - } -} diff --git a/pkg/flags/strings.go b/pkg/flags/strings.go deleted file mode 100644 index e3d131f7902..00000000000 --- a/pkg/flags/strings.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "flag" - "fmt" - "sort" - "strings" -) - -// StringsValue wraps "sort.StringSlice". -type StringsValue sort.StringSlice - -// Set parses a command line set of strings, separated by comma. -// Implements "flag.Value" interface. 
-func (ss *StringsValue) Set(s string) error { - *ss = strings.Split(s, ",") - return nil -} - -// String implements "flag.Value" interface. -func (ss *StringsValue) String() string { return strings.Join(*ss, ",") } - -// NewStringsValue implements string slice as "flag.Value" interface. -// Given value is to be separated by comma. -func NewStringsValue(s string) (ss *StringsValue) { - if s == "" { - return &StringsValue{} - } - ss = new(StringsValue) - if err := ss.Set(s); err != nil { - panic(fmt.Sprintf("new StringsValue should never fail: %v", err)) - } - return ss -} - -// StringsFromFlag returns a string slice from the flag. -func StringsFromFlag(fs *flag.FlagSet, flagName string) []string { - return *fs.Lookup(flagName).Value.(*StringsValue) -} diff --git a/pkg/flags/strings_test.go b/pkg/flags/strings_test.go deleted file mode 100644 index 3835612b052..00000000000 --- a/pkg/flags/strings_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package flags - -import ( - "reflect" - "testing" -) - -func TestStringsValue(t *testing.T) { - tests := []struct { - s string - exp []string - }{ - {s: "a,b,c", exp: []string{"a", "b", "c"}}, - {s: "a, b,c", exp: []string{"a", " b", "c"}}, - {s: "", exp: []string{}}, - } - for i := range tests { - ss := []string(*NewStringsValue(tests[i].s)) - if !reflect.DeepEqual(tests[i].exp, ss) { - t.Fatalf("#%d: expected %q, got %q", i, tests[i].exp, ss) - } - } -} diff --git a/pkg/flags/uint32.go b/pkg/flags/uint32.go deleted file mode 100644 index 496730a4549..00000000000 --- a/pkg/flags/uint32.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "flag" - "strconv" -) - -type uint32Value uint32 - -// NewUint32Value creates an uint32 instance with the provided value. -func NewUint32Value(v uint32) *uint32Value { - val := new(uint32Value) - *val = uint32Value(v) - return val -} - -// Set parses a command line uint32 value. -// Implements "flag.Value" interface. 
-func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -// Uint32FromFlag return the uint32 value of a flag with the given name -func Uint32FromFlag(fs *flag.FlagSet, name string) uint32 { - val := *fs.Lookup(name).Value.(*uint32Value) - return uint32(val) -} diff --git a/pkg/flags/uint32_test.go b/pkg/flags/uint32_test.go deleted file mode 100644 index aa7487a2320..00000000000 --- a/pkg/flags/uint32_test.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package flags - -import ( - "flag" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestUint32Value(t *testing.T) { - cases := []struct { - name string - s string - expectedVal uint32 - expectError bool - }{ - { - name: "normal uint32 value", - s: "200", - expectedVal: 200, - }, - { - name: "zero value", - s: "0", - expectedVal: 0, - }, - { - name: "negative int value", - s: "-200", - expectError: true, - }, - { - name: "invalid integer value", - s: "invalid", - expectError: true, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - var val uint32Value - err := val.Set(tc.s) - - if tc.expectError { - if err == nil { - t.Errorf("Expected failure on parsing uint32 value from %s", tc.s) - } - } else { - if err != nil { - t.Errorf("Unexpected error when parsing %s: %v", tc.s, err) - } - assert.Equal(t, uint32(val), tc.expectedVal) - } - }) - } -} - -func TestUint32FromFlag(t *testing.T) { - const flagName = "max-concurrent-streams" - - cases := []struct { - name string - defaultVal uint32 - arguments []string - expectedVal uint32 - }{ - { - name: "only default value", - defaultVal: 15, - arguments: []string{}, - expectedVal: 15, - }, - { - name: "argument has different value from the default one", - defaultVal: 16, - arguments: []string{"--max-concurrent-streams", "200"}, - expectedVal: 200, - }, - { - name: "argument has the same value from the default one", - defaultVal: 105, - arguments: []string{"--max-concurrent-streams", "105"}, - expectedVal: 105, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - fs := flag.NewFlagSet("etcd", flag.ContinueOnError) - fs.Var(NewUint32Value(tc.defaultVal), flagName, "Maximum concurrent streams that each client can open at a time.") - if err := fs.Parse(tc.arguments); err != nil { - t.Fatalf("Unexpected error: %v\n", err) - } - actualMaxStream := Uint32FromFlag(fs, flagName) - assert.Equal(t, actualMaxStream, tc.expectedVal) - }) - } -} diff --git 
a/pkg/flags/unique_strings.go b/pkg/flags/unique_strings.go deleted file mode 100644 index e67af1f9b5a..00000000000 --- a/pkg/flags/unique_strings.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "flag" - "fmt" - "sort" - "strings" -) - -// UniqueStringsValue wraps a list of unique strings. -// The values are set in order. -type UniqueStringsValue struct { - Values map[string]struct{} -} - -// Set parses a command line set of strings, separated by comma. -// Implements "flag.Value" interface. -// The values are set in order. -func (us *UniqueStringsValue) Set(s string) error { - us.Values = make(map[string]struct{}) - for _, v := range strings.Split(s, ",") { - us.Values[v] = struct{}{} - } - return nil -} - -// String implements "flag.Value" interface. -func (us *UniqueStringsValue) String() string { - return strings.Join(us.stringSlice(), ",") -} - -func (us *UniqueStringsValue) stringSlice() []string { - ss := make([]string, 0, len(us.Values)) - for v := range us.Values { - ss = append(ss, v) - } - sort.Strings(ss) - return ss -} - -// NewUniqueStringsValue implements string slice as "flag.Value" interface. -// Given value is to be separated by comma. -// The values are set in order. 
-func NewUniqueStringsValue(s string) (us *UniqueStringsValue) { - us = &UniqueStringsValue{Values: make(map[string]struct{})} - if s == "" { - return us - } - if err := us.Set(s); err != nil { - panic(fmt.Sprintf("new UniqueStringsValue should never fail: %v", err)) - } - return us -} - -// UniqueStringsFromFlag returns a string slice from the flag. -func UniqueStringsFromFlag(fs *flag.FlagSet, flagName string) []string { - return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).stringSlice() -} - -// UniqueStringsMapFromFlag returns a map of strings from the flag. -func UniqueStringsMapFromFlag(fs *flag.FlagSet, flagName string) map[string]struct{} { - return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).Values -} diff --git a/pkg/flags/unique_strings_test.go b/pkg/flags/unique_strings_test.go deleted file mode 100644 index 86d2b0fc2b0..00000000000 --- a/pkg/flags/unique_strings_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package flags - -import ( - "reflect" - "testing" -) - -func TestNewUniqueStrings(t *testing.T) { - tests := []struct { - s string - exp map[string]struct{} - rs string - }{ - { // non-URL but allowed by exception - s: "*", - exp: map[string]struct{}{"*": {}}, - rs: "*", - }, - { - s: "", - exp: map[string]struct{}{}, - rs: "", - }, - { - s: "example.com", - exp: map[string]struct{}{"example.com": {}}, - rs: "example.com", - }, - { - s: "localhost,localhost", - exp: map[string]struct{}{"localhost": {}}, - rs: "localhost", - }, - { - s: "b.com,a.com", - exp: map[string]struct{}{"a.com": {}, "b.com": {}}, - rs: "a.com,b.com", - }, - { - s: "c.com,b.com", - exp: map[string]struct{}{"b.com": {}, "c.com": {}}, - rs: "b.com,c.com", - }, - } - for i := range tests { - uv := NewUniqueStringsValue(tests[i].s) - if !reflect.DeepEqual(tests[i].exp, uv.Values) { - t.Fatalf("#%d: expected %+v, got %+v", i, tests[i].exp, uv.Values) - } - if uv.String() != tests[i].rs { - t.Fatalf("#%d: expected %q, got %q", i, tests[i].rs, uv.String()) - } - } -} diff --git a/pkg/flags/unique_urls.go b/pkg/flags/unique_urls.go deleted file mode 100644 index 5b22ef21ad7..00000000000 --- a/pkg/flags/unique_urls.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package flags - -import ( - "flag" - "fmt" - "net/url" - "sort" - "strings" - - "go.etcd.io/etcd/client/pkg/v3/types" -) - -// UniqueURLs contains unique URLs -// with non-URL exceptions. -type UniqueURLs struct { - Values map[string]struct{} - uss []url.URL - Allowed map[string]struct{} -} - -// Set parses a command line set of URLs formatted like: -// http://127.0.0.1:2380,http://10.1.1.2:80 -// Implements "flag.Value" interface. -func (us *UniqueURLs) Set(s string) error { - if _, ok := us.Values[s]; ok { - return nil - } - if _, ok := us.Allowed[s]; ok { - us.Values[s] = struct{}{} - return nil - } - ss, err := types.NewURLs(strings.Split(s, ",")) - if err != nil { - return err - } - us.Values = make(map[string]struct{}) - us.uss = make([]url.URL, 0) - for _, v := range ss { - us.Values[v.String()] = struct{}{} - us.uss = append(us.uss, v) - } - return nil -} - -// String implements "flag.Value" interface. -func (us *UniqueURLs) String() string { - all := make([]string, 0, len(us.Values)) - for u := range us.Values { - all = append(all, u) - } - sort.Strings(all) - return strings.Join(all, ",") -} - -// NewUniqueURLsWithExceptions implements "url.URL" slice as flag.Value interface. -// Given value is to be separated by comma. -func NewUniqueURLsWithExceptions(s string, exceptions ...string) *UniqueURLs { - us := &UniqueURLs{Values: make(map[string]struct{}), Allowed: make(map[string]struct{})} - for _, v := range exceptions { - us.Allowed[v] = struct{}{} - } - if s == "" { - return us - } - if err := us.Set(s); err != nil { - panic(fmt.Sprintf("new UniqueURLs should never fail: %v", err)) - } - return us -} - -// UniqueURLsFromFlag returns a slice from urls got from the flag. -func UniqueURLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL { - return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).uss -} - -// UniqueURLsMapFromFlag returns a map from url strings got from the flag. 
-func UniqueURLsMapFromFlag(fs *flag.FlagSet, urlsFlagName string) map[string]struct{} { - return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).Values -} diff --git a/pkg/flags/unique_urls_test.go b/pkg/flags/unique_urls_test.go deleted file mode 100644 index adc4a6b5a19..00000000000 --- a/pkg/flags/unique_urls_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "reflect" - "testing" -) - -func TestNewUniqueURLsWithExceptions(t *testing.T) { - tests := []struct { - s string - exp map[string]struct{} - rs string - exception string - }{ - { // non-URL but allowed by exception - s: "*", - exp: map[string]struct{}{"*": {}}, - rs: "*", - exception: "*", - }, - { - s: "", - exp: map[string]struct{}{}, - rs: "", - exception: "*", - }, - { - s: "https://1.2.3.4:8080", - exp: map[string]struct{}{"https://1.2.3.4:8080": {}}, - rs: "https://1.2.3.4:8080", - exception: "*", - }, - { - s: "https://1.2.3.4:8080,https://1.2.3.4:8080", - exp: map[string]struct{}{"https://1.2.3.4:8080": {}}, - rs: "https://1.2.3.4:8080", - exception: "*", - }, - { - s: "http://10.1.1.1:80", - exp: map[string]struct{}{"http://10.1.1.1:80": {}}, - rs: "http://10.1.1.1:80", - exception: "*", - }, - { - s: "http://localhost:80", - exp: map[string]struct{}{"http://localhost:80": {}}, - rs: "http://localhost:80", - exception: "*", - }, - { - s: "http://:80", - exp: 
map[string]struct{}{"http://:80": {}}, - rs: "http://:80", - exception: "*", - }, - { - s: "https://localhost:5,https://localhost:3", - exp: map[string]struct{}{"https://localhost:3": {}, "https://localhost:5": {}}, - rs: "https://localhost:3,https://localhost:5", - exception: "*", - }, - { - s: "http://localhost:5,https://localhost:3", - exp: map[string]struct{}{"https://localhost:3": {}, "http://localhost:5": {}}, - rs: "http://localhost:5,https://localhost:3", - exception: "*", - }, - } - for i := range tests { - uv := NewUniqueURLsWithExceptions(tests[i].s, tests[i].exception) - if !reflect.DeepEqual(tests[i].exp, uv.Values) { - t.Fatalf("#%d: expected %+v, got %+v", i, tests[i].exp, uv.Values) - } - if uv.String() != tests[i].rs { - t.Fatalf("#%d: expected %q, got %q", i, tests[i].rs, uv.String()) - } - } -} diff --git a/pkg/flags/urls.go b/pkg/flags/urls.go index 27db58743be..ed47810945d 100644 --- a/pkg/flags/urls.go +++ b/pkg/flags/urls.go @@ -20,7 +20,7 @@ import ( "net/url" "strings" - "go.etcd.io/etcd/client/pkg/v3/types" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" ) // URLsValue wraps "types.URLs". @@ -62,5 +62,5 @@ func NewURLsValue(s string) *URLsValue { // URLsFromFlag returns a slices from url got from the flag. func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL { - return *fs.Lookup(urlsFlagName).Value.(*URLsValue) + return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue)) } diff --git a/pkg/flags/urls_test.go b/pkg/flags/urls_test.go deleted file mode 100644 index ebc9a267410..00000000000 --- a/pkg/flags/urls_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package flags - -import ( - "net/url" - "reflect" - "testing" -) - -func TestValidateURLsValueBad(t *testing.T) { - tests := []string{ - // bad IP specification - ":2379", - "127.0:8080", - "123:456", - // bad port specification - "127.0.0.1:foo", - "127.0.0.1:", - // bad strings - "somewhere", - "234#$", - "file://foo/bar", - "http://hello/asdf", - "http://10.1.1.1", - } - for i, in := range tests { - u := URLsValue{} - if err := u.Set(in); err == nil { - t.Errorf(`#%d: unexpected nil error for in=%q`, i, in) - } - } -} - -func TestNewURLsValue(t *testing.T) { - tests := []struct { - s string - exp []url.URL - }{ - {s: "https://1.2.3.4:8080", exp: []url.URL{{Scheme: "https", Host: "1.2.3.4:8080"}}}, - {s: "http://10.1.1.1:80", exp: []url.URL{{Scheme: "http", Host: "10.1.1.1:80"}}}, - {s: "http://localhost:80", exp: []url.URL{{Scheme: "http", Host: "localhost:80"}}}, - {s: "http://:80", exp: []url.URL{{Scheme: "http", Host: ":80"}}}, - {s: "unix://tmp/etcd.sock", exp: []url.URL{{Scheme: "unix", Host: "tmp", Path: "/etcd.sock"}}}, - {s: "unix:///tmp/127.27.84.4:23432", exp: []url.URL{{Scheme: "unix", Path: "/tmp/127.27.84.4:23432"}}}, - {s: "unix://127.0.0.5:1456", exp: []url.URL{{Scheme: "unix", Host: "127.0.0.5:1456"}}}, - { - s: "http://localhost:1,https://localhost:2", - exp: []url.URL{ - {Scheme: "http", Host: "localhost:1"}, - {Scheme: "https", Host: "localhost:2"}, - }, - }, - } - for i := range tests { - uu := []url.URL(*NewURLsValue(tests[i].s)) - if !reflect.DeepEqual(tests[i].exp, uu) { - t.Fatalf("#%d: expected %+v, got %+v", i, 
tests[i].exp, uu) - } - } -} diff --git a/pkg/go.mod b/pkg/go.mod deleted file mode 100644 index 8ad9741095f..00000000000 --- a/pkg/go.mod +++ /dev/null @@ -1,44 +0,0 @@ -module go.etcd.io/etcd/pkg/v3 - -go 1.19 - -require ( - github.com/creack/pty v1.1.18 - github.com/dustin/go-humanize v1.0.1 - github.com/golang/protobuf v1.5.2 // indirect - github.com/spf13/cobra v1.6.1 - github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.1 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - go.uber.org/zap v1.24.0 - google.golang.org/grpc v1.51.0 -) - -require ( - github.com/benbjohnson/clock v1.1.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace go.etcd.io/etcd/client/pkg/v3 => ../client/pkg - -// Bad imports are sometimes causing attempts to pull that code. -// This makes the error more explicit. -// Etcd contains lots of packages and dependency relationship. 
-// Shouldn't import unnecessary dependencies -replace ( - go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/api/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY - go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY -) diff --git a/pkg/go.sum b/pkg/go.sum deleted file mode 100644 index 600ce50563d..00000000000 --- a/pkg/go.sum +++ /dev/null @@ -1,171 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.18 
h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 
h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf 
v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/grpc_testing/stub_server.go b/pkg/grpc_testing/stub_server.go index e9f0d094f8d..03d0846e612 100644 --- a/pkg/grpc_testing/stub_server.go +++ b/pkg/grpc_testing/stub_server.go @@ -1,17 +1,3 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package grpc_testing import ( @@ -28,7 +14,7 @@ import ( // Since it cannot be imported directly, we have to copy and paste it here, // and useless code for our testing is removed. -// StubServer is a server that is easy to customize within individual test +// StubServer is a etcd that is easy to customize within individual test // cases. type StubServer struct { testService testpb.TestServiceServer @@ -40,17 +26,13 @@ type StubServer struct { s *grpc.Server cleanups []func() // Lambdas executed in Stop(); populated by Start(). - started chan struct{} } func New(testService testpb.TestServiceServer) *StubServer { - return &StubServer{ - testService: testService, - started: make(chan struct{}), - } + return &StubServer{testService: testService} } -// Start starts the server and creates a client connected to it. +// Start starts the etcd and creates a client connected to it. func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error { if ss.Network == "" { ss.Network = "tcp" @@ -68,10 +50,7 @@ func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) s := grpc.NewServer(sopts...) testpb.RegisterTestServiceServer(s, ss.testService) - go func() { - close(ss.started) - s.Serve(lis) - }() + go s.Serve(lis) ss.cleanups = append(ss.cleanups, s.Stop) ss.s = s @@ -80,13 +59,12 @@ func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) // Stop stops ss and cleans up all resources it consumed. func (ss *StubServer) Stop() { - <-ss.started for i := len(ss.cleanups) - 1; i >= 0; i-- { ss.cleanups[i]() } } -// Addr gets the address the server listening on. +// Addr gets the address the etcd listening on. 
func (ss *StubServer) Addr() string { return ss.Address } @@ -105,7 +83,7 @@ func (d dummyStubServer) UnaryCall(context.Context, *testpb.SimpleRequest) (*tes }, nil } -// NewDummyStubServer creates a simple test server that serves Unary calls with +// NewDummyStubServer creates a simple test etcd that serves Unary calls with // responses with the given payload. func NewDummyStubServer(body []byte) *StubServer { return New(dummyStubServer{body: body}) diff --git a/pkg/httputil/httputil.go b/pkg/httputil/httputil.go index 41758138a47..8b9bc98a155 100644 --- a/pkg/httputil/httputil.go +++ b/pkg/httputil/httputil.go @@ -21,6 +21,7 @@ package httputil import ( "io" + "io/ioutil" "net" "net/http" ) @@ -30,13 +31,11 @@ import ( // therefore available for reuse. // Borrowed from golang/net/context/ctxhttp/cancelreq.go. func GracefulClose(resp *http.Response) { - io.Copy(io.Discard, resp.Body) + io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() } -// GetHostname returns the hostname from request Host field. -// It returns empty string, if Host field contains invalid -// value (e.g. "localhost:::" with too many colons). +// GetHostname 获取请求的主机名 func GetHostname(req *http.Request) string { if req == nil { return "" diff --git a/pkg/httputil/httputil_test.go b/pkg/httputil/httputil_test.go deleted file mode 100644 index f14d597ed9c..00000000000 --- a/pkg/httputil/httputil_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package httputil - -import ( - "net/http" - "testing" -) - -func TestGetHostname(t *testing.T) { - tt := []struct { - req *http.Request - host string - }{ - {&http.Request{Host: "localhost"}, "localhost"}, - {&http.Request{Host: "localhost:2379"}, "localhost"}, - {&http.Request{Host: "localhost."}, "localhost."}, - {&http.Request{Host: "localhost.:2379"}, "localhost."}, - {&http.Request{Host: "127.0.0.1"}, "127.0.0.1"}, - {&http.Request{Host: "127.0.0.1:2379"}, "127.0.0.1"}, - - {&http.Request{Host: "localhos"}, "localhos"}, - {&http.Request{Host: "localhos:2379"}, "localhos"}, - {&http.Request{Host: "localhos."}, "localhos."}, - {&http.Request{Host: "localhos.:2379"}, "localhos."}, - {&http.Request{Host: "1.2.3.4"}, "1.2.3.4"}, - {&http.Request{Host: "1.2.3.4:2379"}, "1.2.3.4"}, - - // too many colons in address - {&http.Request{Host: "localhost:::::"}, "localhost:::::"}, - } - for i := range tt { - hv := GetHostname(tt[i].req) - if hv != tt[i].host { - t.Errorf("#%d: %q expected host %q, got '%v'", i, tt[i].req.Host, tt[i].host, hv) - } - } -} diff --git a/pkg/idutil/id_test.go b/pkg/idutil/id_test.go deleted file mode 100644 index 92be7fb3569..00000000000 --- a/pkg/idutil/id_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package idutil - -import ( - "testing" - "time" -) - -func TestNewGenerator(t *testing.T) { - g := NewGenerator(0x12, time.Unix(0, 0).Add(0x3456*time.Millisecond)) - id := g.Next() - wid := uint64(0x12000000345601) - if id != wid { - t.Errorf("id = %x, want %x", id, wid) - } -} - -func TestNewGeneratorUnique(t *testing.T) { - g := NewGenerator(0, time.Time{}) - id := g.Next() - // different server generates different ID - g1 := NewGenerator(1, time.Time{}) - if gid := g1.Next(); id == gid { - t.Errorf("generate the same id %x using different server ID", id) - } - // restarted server generates different ID - g2 := NewGenerator(0, time.Now()) - if gid := g2.Next(); id == gid { - t.Errorf("generate the same id %x after restart", id) - } -} - -func TestNext(t *testing.T) { - g := NewGenerator(0x12, time.Unix(0, 0).Add(0x3456*time.Millisecond)) - wid := uint64(0x12000000345601) - for i := 0; i < 1000; i++ { - id := g.Next() - if id != wid+uint64(i) { - t.Errorf("id = %x, want %x", id, wid+uint64(i)) - } - } -} - -func BenchmarkNext(b *testing.B) { - g := NewGenerator(0x12, time.Now()) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - g.Next() - } -} diff --git a/pkg/ioutil/pagewriter.go b/pkg/ioutil/pagewriter.go index ebab6487e08..cf9a8dc664d 100644 --- a/pkg/ioutil/pagewriter.go +++ b/pkg/ioutil/pagewriter.go @@ -16,8 +16,6 @@ package ioutil import ( "io" - - "go.etcd.io/etcd/client/pkg/v3/verify" ) var defaultBufferBytes = 128 * 1024 @@ -43,7 +41,6 @@ type PageWriter struct { // NewPageWriter creates a new PageWriter. pageBytes is the number of bytes // to write per page. pageOffset is the starting offset of io.Writer. 
func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter { - verify.Assert(pageBytes > 0, "invalid pageBytes (%d) value, it must be greater than 0", pageBytes) return &PageWriter{ w: w, pageOffset: pageOffset, @@ -104,6 +101,11 @@ func (pw *PageWriter) Flush() error { return err } +// FlushN flushes buffered data and returns the number of written bytes. +func (pw *PageWriter) FlushN() (int, error) { + return pw.flush() +} + func (pw *PageWriter) flush() (int, error) { if pw.bufferedBytes == 0 { return 0, nil diff --git a/pkg/ioutil/pagewriter_test.go b/pkg/ioutil/pagewriter_test.go deleted file mode 100644 index 3a5e0d79aab..00000000000 --- a/pkg/ioutil/pagewriter_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ioutil - -import ( - "math/rand" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPageWriterRandom(t *testing.T) { - // smaller buffer for stress testing - defaultBufferBytes = 8 * 1024 - pageBytes := 128 - buf := make([]byte, 4*defaultBufferBytes) - cw := &checkPageWriter{pageBytes: pageBytes, t: t} - w := NewPageWriter(cw, pageBytes, 0) - n := 0 - for i := 0; i < 4096; i++ { - c, err := w.Write(buf[:rand.Intn(len(buf))]) - if err != nil { - t.Fatal(err) - } - n += c - } - if cw.writeBytes > n { - t.Fatalf("wrote %d bytes to io.Writer, but only wrote %d bytes", cw.writeBytes, n) - } - if n-cw.writeBytes > pageBytes { - t.Fatalf("got %d bytes pending, expected less than %d bytes", n-cw.writeBytes, pageBytes) - } - t.Logf("total writes: %d", cw.writes) - t.Logf("total write bytes: %d (of %d)", cw.writeBytes, n) -} - -// TestPageWriterPartialSlack tests the case where a write overflows the buffer -// but there is not enough data to complete the slack write. -func TestPageWriterPartialSlack(t *testing.T) { - defaultBufferBytes = 1024 - pageBytes := 128 - buf := make([]byte, defaultBufferBytes) - cw := &checkPageWriter{pageBytes: 64, t: t} - w := NewPageWriter(cw, pageBytes, 0) - // put writer in non-zero page offset - if _, err := w.Write(buf[:64]); err != nil { - t.Fatal(err) - } - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if cw.writes != 1 { - t.Fatalf("got %d writes, expected 1", cw.writes) - } - // nearly fill buffer - if _, err := w.Write(buf[:1022]); err != nil { - t.Fatal(err) - } - // overflow buffer, but without enough to write as aligned - if _, err := w.Write(buf[:8]); err != nil { - t.Fatal(err) - } - if cw.writes != 1 { - t.Fatalf("got %d writes, expected 1", cw.writes) - } - // finish writing slack space - if _, err := w.Write(buf[:128]); err != nil { - t.Fatal(err) - } - if cw.writes != 2 { - t.Fatalf("got %d writes, expected 2", cw.writes) - } -} - -// TestPageWriterOffset tests if page writer correctly 
repositions when offset is given. -func TestPageWriterOffset(t *testing.T) { - defaultBufferBytes = 1024 - pageBytes := 128 - buf := make([]byte, defaultBufferBytes) - cw := &checkPageWriter{pageBytes: 64, t: t} - w := NewPageWriter(cw, pageBytes, 0) - if _, err := w.Write(buf[:64]); err != nil { - t.Fatal(err) - } - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if w.pageOffset != 64 { - t.Fatalf("w.pageOffset expected 64, got %d", w.pageOffset) - } - - w = NewPageWriter(cw, w.pageOffset, pageBytes) - if _, err := w.Write(buf[:64]); err != nil { - t.Fatal(err) - } - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if w.pageOffset != 0 { - t.Fatalf("w.pageOffset expected 0, got %d", w.pageOffset) - } -} - -func TestPageWriterPageBytes(t *testing.T) { - cases := []struct { - name string - pageBytes int - expectPanic bool - }{ - { - name: "normal page bytes", - pageBytes: 4096, - expectPanic: false, - }, - { - name: "negative page bytes", - pageBytes: -1, - expectPanic: true, - }, - { - name: "zero page bytes", - pageBytes: 0, - expectPanic: true, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - defaultBufferBytes = 1024 - cw := &checkPageWriter{pageBytes: tc.pageBytes, t: t} - if tc.expectPanic { - assert.Panicsf(t, func() { - NewPageWriter(cw, tc.pageBytes, 0) - }, "expected panic when pageBytes is %d", tc.pageBytes) - } else { - pw := NewPageWriter(cw, tc.pageBytes, 0) - assert.NotEqual(t, pw, nil) - } - }) - } -} - -// checkPageWriter implements an io.Writer that fails a test on unaligned writes. 
-type checkPageWriter struct { - pageBytes int - writes int - writeBytes int - t *testing.T -} - -func (cw *checkPageWriter) Write(p []byte) (int, error) { - if len(p)%cw.pageBytes != 0 { - cw.t.Fatalf("got write len(p) = %d, expected len(p) == k*cw.pageBytes", len(p)) - } - cw.writes++ - cw.writeBytes += len(p) - return len(p), nil -} diff --git a/pkg/ioutil/readcloser_test.go b/pkg/ioutil/readcloser_test.go deleted file mode 100644 index 6d13bdcec02..00000000000 --- a/pkg/ioutil/readcloser_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ioutil - -import ( - "bytes" - "io" - "testing" -) - -type readerNilCloser struct{ io.Reader } - -func (rc *readerNilCloser) Close() error { return nil } - -// TestExactReadCloserExpectEOF expects an eof when reading too much. 
-func TestExactReadCloserExpectEOF(t *testing.T) { - buf := bytes.NewBuffer(make([]byte, 10)) - rc := NewExactReadCloser(&readerNilCloser{buf}, 1) - if _, err := rc.Read(make([]byte, 10)); err != ErrExpectEOF { - t.Fatalf("expected %v, got %v", ErrExpectEOF, err) - } -} - -// TestExactReadCloserShort expects an eof when reading too little -func TestExactReadCloserShort(t *testing.T) { - buf := bytes.NewBuffer(make([]byte, 5)) - rc := NewExactReadCloser(&readerNilCloser{buf}, 10) - if _, err := rc.Read(make([]byte, 10)); err != nil { - t.Fatalf("Read expected nil err, got %v", err) - } - if err := rc.Close(); err != ErrShortRead { - t.Fatalf("Close expected %v, got %v", ErrShortRead, err) - } -} diff --git a/pkg/ioutil/reader.go b/pkg/ioutil/reader.go index 0703ed476d8..7cd8bdbf258 100644 --- a/pkg/ioutil/reader.go +++ b/pkg/ioutil/reader.go @@ -17,8 +17,7 @@ package ioutil import "io" -// NewLimitedBufferReader returns a reader that reads from the given reader -// but limits the amount of data returned to at most n bytes. +// NewLimitedBufferReader 限制返回的数据大小 func NewLimitedBufferReader(r io.Reader, n int) io.Reader { return &limitedBufferReader{ r: r, diff --git a/pkg/ioutil/reader_test.go b/pkg/ioutil/reader_test.go deleted file mode 100644 index 06ff2906c40..00000000000 --- a/pkg/ioutil/reader_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ioutil - -import ( - "bytes" - "testing" -) - -func TestLimitedBufferReaderRead(t *testing.T) { - buf := bytes.NewBuffer(make([]byte, 10)) - ln := 1 - lr := NewLimitedBufferReader(buf, ln) - n, err := lr.Read(make([]byte, 10)) - if err != nil { - t.Fatalf("unexpected read error: %v", err) - } - if n != ln { - t.Errorf("len(data read) = %d, want %d", n, ln) - } -} diff --git a/pkg/ioutil/util.go b/pkg/ioutil/util.go index dc36e183c27..2f2da933e61 100644 --- a/pkg/ioutil/util.go +++ b/pkg/ioutil/util.go @@ -18,7 +18,7 @@ import ( "io" "os" - "go.etcd.io/etcd/client/pkg/v3/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" ) // WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library, diff --git a/pkg/netutil/netutil.go b/pkg/netutil/netutil.go index 689927b4c67..fe88ca9ef0d 100644 --- a/pkg/netutil/netutil.go +++ b/pkg/netutil/netutil.go @@ -23,7 +23,7 @@ import ( "sort" "time" - "go.etcd.io/etcd/client/pkg/v3/types" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" "go.uber.org/zap" ) @@ -103,7 +103,7 @@ func resolveURL(ctx context.Context, lg *zap.Logger, u url.URL) (string, error) ) return "", err } - if host == "localhost" { + if host == "localhost" || net.ParseIP(host) != nil { return "", nil } for ctx.Err() == nil { @@ -148,50 +148,51 @@ func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (b if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b)) } - - sort.Sort(types.URLs(a)) - sort.Sort(types.URLs(b)) - var needResolve bool - for i := range a { - if !reflect.DeepEqual(a[i], b[i]) { - needResolve = true - break - } - } - if !needResolve { - return true, nil - } - - // If URLs are not equal, try to resolve it and compare again. 
urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b}) if err != nil { return false, err } + preva, prevb := a, b a, b = urls[0], urls[1] sort.Sort(types.URLs(a)) sort.Sort(types.URLs(b)) for i := range a { if !reflect.DeepEqual(a[i], b[i]) { - return false, fmt.Errorf("resolved urls: %q != %q", a[i].String(), b[i].String()) + return false, fmt.Errorf("%q(resolved from %q) != %q(resolved from %q)", + a[i].String(), preva[i].String(), + b[i].String(), prevb[i].String(), + ) } } return true, nil } -// URLStringsEqual returns "true" if given URLs are valid -// and resolved to same IP addresses. Otherwise, return "false" -// and error, if any. +// URLStringsEqual a,b解析出的ip是否一致 func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string) (bool, error) { if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", a, b) } - urlsA, err := stringsToURLs(a) - if err != nil { - return false, err + urlsA := make([]url.URL, 0) + for _, str := range a { + u, err := url.Parse(str) + if err != nil { + return false, fmt.Errorf("failed to parse %q", str) + } + urlsA = append(urlsA, *u) } - urlsB, err := stringsToURLs(b) - if err != nil { - return false, err + urlsB := make([]url.URL, 0) + for _, str := range b { + u, err := url.Parse(str) + if err != nil { + return false, fmt.Errorf("failed to parse %q", str) + } + urlsB = append(urlsB, *u) + } + if lg == nil { + lg, _ = zap.NewProduction() + if lg == nil { + lg = zap.NewExample() + } } return urlsEqual(ctx, lg, urlsA, urlsB) } @@ -204,18 +205,6 @@ func urlsToStrings(us []url.URL) []string { return rs } -func stringsToURLs(us []string) ([]url.URL, error) { - urls := make([]url.URL, 0, len(us)) - for _, str := range us { - u, err := url.Parse(str) - if err != nil { - return nil, fmt.Errorf("failed to parse string to URL: %q", str) - } - urls = append(urls, *u) - } - return urls, nil -} - func IsNetworkTimeoutError(err error) bool { nerr, ok := err.(net.Error) return ok && nerr.Timeout() diff --git 
a/pkg/netutil/netutil_test.go b/pkg/netutil/netutil_test.go deleted file mode 100644 index 119d06a352c..00000000000 --- a/pkg/netutil/netutil_test.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package netutil - -import ( - "context" - "errors" - "fmt" - "net" - "net/url" - "reflect" - "strconv" - "testing" - "time" - - "go.uber.org/zap/zaptest" -) - -func TestResolveTCPAddrs(t *testing.T) { - defer func() { resolveTCPAddr = resolveTCPAddrDefault }() - tests := []struct { - urls [][]url.URL - expected [][]url.URL - hostMap map[string]string - hasError bool - }{ - { - urls: [][]url.URL{ - { - {Scheme: "http", Host: "127.0.0.1:4001"}, - {Scheme: "http", Host: "127.0.0.1:2379"}, - }, - { - {Scheme: "http", Host: "127.0.0.1:7001"}, - {Scheme: "http", Host: "127.0.0.1:2380"}, - }, - }, - expected: [][]url.URL{ - { - {Scheme: "http", Host: "127.0.0.1:4001"}, - {Scheme: "http", Host: "127.0.0.1:2379"}, - }, - { - {Scheme: "http", Host: "127.0.0.1:7001"}, - {Scheme: "http", Host: "127.0.0.1:2380"}, - }, - }, - }, - { - urls: [][]url.URL{ - { - {Scheme: "http", Host: "infra0.example.com:4001"}, - {Scheme: "http", Host: "infra0.example.com:2379"}, - }, - { - {Scheme: "http", Host: "infra0.example.com:7001"}, - {Scheme: "http", Host: "infra0.example.com:2380"}, - }, - }, - expected: [][]url.URL{ - { - {Scheme: "http", Host: "10.0.1.10:4001"}, - {Scheme: "http", Host: "10.0.1.10:2379"}, - }, - { 
- {Scheme: "http", Host: "10.0.1.10:7001"}, - {Scheme: "http", Host: "10.0.1.10:2380"}, - }, - }, - hostMap: map[string]string{ - "infra0.example.com": "10.0.1.10", - }, - hasError: false, - }, - { - urls: [][]url.URL{ - { - {Scheme: "http", Host: "infra0.example.com:4001"}, - {Scheme: "http", Host: "infra0.example.com:2379"}, - }, - { - {Scheme: "http", Host: "infra0.example.com:7001"}, - {Scheme: "http", Host: "infra0.example.com:2380"}, - }, - }, - hostMap: map[string]string{ - "infra0.example.com": "", - }, - hasError: true, - }, - { - urls: [][]url.URL{ - { - {Scheme: "http", Host: "ssh://infra0.example.com:4001"}, - {Scheme: "http", Host: "ssh://infra0.example.com:2379"}, - }, - { - {Scheme: "http", Host: "ssh://infra0.example.com:7001"}, - {Scheme: "http", Host: "ssh://infra0.example.com:2380"}, - }, - }, - hasError: true, - }, - } - for _, tt := range tests { - resolveTCPAddr = func(ctx context.Context, addr string) (*net.TCPAddr, error) { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - i, err := strconv.Atoi(port) - if err != nil { - return nil, err - } - if ip := net.ParseIP(host); ip != nil { - return &net.TCPAddr{IP: ip, Port: i, Zone: ""}, nil - } - if tt.hostMap[host] == "" { - return nil, errors.New("cannot resolve host") - } - return &net.TCPAddr{IP: net.ParseIP(tt.hostMap[host]), Port: i, Zone: ""}, nil - } - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - urls, err := resolveTCPAddrs(ctx, zaptest.NewLogger(t), tt.urls) - cancel() - if tt.hasError { - if err == nil { - t.Errorf("expected error") - } - continue - } - if !reflect.DeepEqual(urls, tt.expected) { - t.Errorf("expected: %v, got %v", tt.expected, urls) - } - } -} - -func TestURLsEqual(t *testing.T) { - defer func() { resolveTCPAddr = resolveTCPAddrDefault }() - hostm := map[string]string{ - "example.com": "10.0.10.1", - "first.com": "10.0.11.1", - "second.com": "10.0.11.2", - } - resolveTCPAddr = func(ctx context.Context, addr 
string) (*net.TCPAddr, error) { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - i, err := strconv.Atoi(port) - if err != nil { - return nil, err - } - if ip := net.ParseIP(host); ip != nil { - return &net.TCPAddr{IP: ip, Port: i, Zone: ""}, nil - } - if hostm[host] == "" { - return nil, errors.New("cannot resolve host") - } - return &net.TCPAddr{IP: net.ParseIP(hostm[host]), Port: i, Zone: ""}, nil - } - - tests := []struct { - n int - a []url.URL - b []url.URL - expect bool - err error - }{ - { - n: 0, - a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}}, - b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}}, - expect: true, - }, - { - n: 1, - a: []url.URL{{Scheme: "http", Host: "example.com:2379"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}}, - expect: true, - }, - { - n: 2, - a: []url.URL{{Scheme: "http", Host: "example.com:2379"}}, - b: []url.URL{{Scheme: "https", Host: "10.0.10.1:2379"}}, - expect: false, - err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "https://10.0.10.1:2379"`), - }, - { - n: 3, - a: []url.URL{{Scheme: "https", Host: "example.com:2379"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}}, - expect: false, - err: errors.New(`resolved urls: "https://10.0.10.1:2379" != "http://10.0.10.1:2379"`), - }, - { - n: 4, - a: []url.URL{{Scheme: "unix", Host: "abc:2379"}}, - b: []url.URL{{Scheme: "unix", Host: "abc:2379"}}, - expect: true, - }, - { - n: 5, - a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: true, - }, - { - n: 6, - a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - b: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: true, - }, - { - n: 7, - a: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}, 
{Scheme: "http", Host: "127.0.0.1:2380"}}, - b: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: true, - }, - { - n: 8, - a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}}, - b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: false, - err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://127.0.0.1:2380"`), - }, - { - n: 9, - a: []url.URL{{Scheme: "http", Host: "example.com:2380"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}}, - expect: false, - err: errors.New(`resolved urls: "http://10.0.10.1:2380" != "http://10.0.10.1:2379"`), - }, - { - n: 10, - a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}}, - expect: false, - err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://10.0.0.1:2379"`), - }, - { - n: 11, - a: []url.URL{{Scheme: "http", Host: "example.com:2379"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}}, - expect: false, - err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://10.0.0.1:2379"`), - }, - { - n: 12, - a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: false, - err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://127.0.0.1:2380"`), - }, - { - n: 13, - a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: false, - err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://127.0.0.1:2380"`), - }, - { - n: 14, - a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: false, - 
err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://10.0.0.1:2379"`), - }, - { - n: 15, - a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: false, - err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://10.0.0.1:2379"`), - }, - { - n: 16, - a: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}}, - expect: false, - err: errors.New(`len(["http://10.0.0.1:2379"]) != len(["http://10.0.0.1:2379" "http://127.0.0.1:2380"])`), - }, - { - n: 17, - a: []url.URL{{Scheme: "http", Host: "first.com:2379"}, {Scheme: "http", Host: "second.com:2380"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}}, - expect: true, - }, - { - n: 18, - a: []url.URL{{Scheme: "http", Host: "second.com:2380"}, {Scheme: "http", Host: "first.com:2379"}}, - b: []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}}, - expect: true, - }, - } - - for i, test := range tests { - result, err := urlsEqual(context.TODO(), zaptest.NewLogger(t), test.a, test.b) - if result != test.expect { - t.Errorf("idx=%d #%d: a:%v b:%v, expected %v but %v", i, test.n, test.a, test.b, test.expect, result) - } - if test.err != nil { - if err.Error() != test.err.Error() { - t.Errorf("idx=%d #%d: err expected %v but %v", i, test.n, test.err, err) - } - } - } -} -func TestURLStringsEqual(t *testing.T) { - defer func() { resolveTCPAddr = resolveTCPAddrDefault }() - errOnResolve := func(ctx context.Context, addr string) (*net.TCPAddr, error) { - return nil, fmt.Errorf("unexpected attempt to resolve: %q", addr) - } - cases := []struct { - urlsA []string - urlsB []string - resolver func(ctx context.Context, addr string) (*net.TCPAddr, error) - }{ - 
{[]string{"http://127.0.0.1:8080"}, []string{"http://127.0.0.1:8080"}, resolveTCPAddrDefault}, - {[]string{ - "http://host1:8080", - "http://host2:8080", - }, []string{ - "http://host1:8080", - "http://host2:8080", - }, errOnResolve}, - { - urlsA: []string{"https://[c262:266f:fa53:0ee6:966e:e3f0:d68f:b046]:2380"}, - urlsB: []string{"https://[c262:266f:fa53:ee6:966e:e3f0:d68f:b046]:2380"}, - resolver: resolveTCPAddrDefault, - }, - } - for idx, c := range cases { - t.Logf("TestURLStringsEqual, case #%d", idx) - resolveTCPAddr = c.resolver - result, err := URLStringsEqual(context.TODO(), zaptest.NewLogger(t), c.urlsA, c.urlsB) - if !result { - t.Errorf("unexpected result %v", result) - } - if err != nil { - t.Errorf("unexpected error %v", err) - } - } -} diff --git a/pkg/netutil/routes.go b/pkg/netutil/routes.go index a7d67df3d41..f66719ea163 100644 --- a/pkg/netutil/routes.go +++ b/pkg/netutil/routes.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !linux +// +build !linux package netutil diff --git a/pkg/netutil/routes_linux.go b/pkg/netutil/routes_linux.go index 163063550c5..4902d1a940a 100644 --- a/pkg/netutil/routes_linux.go +++ b/pkg/netutil/routes_linux.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build linux +// +build linux package netutil @@ -24,12 +25,14 @@ import ( "sort" "syscall" - "go.etcd.io/etcd/pkg/v3/cpuutil" + "github.com/ls-2018/etcd_cn/pkg/cpuutil" ) -var errNoDefaultRoute = fmt.Errorf("could not find default route") -var errNoDefaultHost = fmt.Errorf("could not find default host") -var errNoDefaultInterface = fmt.Errorf("could not find default interface") +var ( + errNoDefaultRoute = fmt.Errorf("could not find default route") + errNoDefaultHost = fmt.Errorf("could not find default host") + errNoDefaultInterface = fmt.Errorf("could not find default interface") +) // GetDefaultHost obtains the first IP address of machine from the routing table and returns the IP address as string. 
// An IPv4 address is preferred to an IPv6 address for backward compatibility. @@ -153,7 +156,6 @@ func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) { } return nil, fmt.Errorf("could not find address for interface index %v", idx) - } // Used to get a name of interface. diff --git a/pkg/netutil/routes_linux_test.go b/pkg/netutil/routes_linux_test.go deleted file mode 100644 index a0056e990e7..00000000000 --- a/pkg/netutil/routes_linux_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux - -package netutil - -import "testing" - -func TestGetDefaultInterface(t *testing.T) { - ifc, err := GetDefaultInterfaces() - if err != nil { - t.Fatal(err) - } - t.Logf("default network interfaces: %+v\n", ifc) -} - -func TestGetDefaultHost(t *testing.T) { - ip, err := GetDefaultHost() - if err != nil { - t.Fatal(err) - } - t.Logf("default ip: %v", ip) -} diff --git a/pkg/notify/notify.go b/pkg/notify/notify.go deleted file mode 100644 index 8925a1ea218..00000000000 --- a/pkg/notify/notify.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package notify - -import ( - "sync" -) - -// Notifier is a thread safe struct that can be used to send notification about -// some event to multiple consumers. -type Notifier struct { - mu sync.RWMutex - channel chan struct{} -} - -// NewNotifier returns new notifier -func NewNotifier() *Notifier { - return &Notifier{ - channel: make(chan struct{}), - } -} - -// Receive returns channel that can be used to wait for notification. -// Consumers will be informed by closing the channel. -func (n *Notifier) Receive() <-chan struct{} { - n.mu.RLock() - defer n.mu.RUnlock() - return n.channel -} - -// Notify closes the channel passed to consumers and creates new channel to used -// for next notification. -func (n *Notifier) Notify() { - newChannel := make(chan struct{}) - n.mu.Lock() - channelToClose := n.channel - n.channel = newChannel - n.mu.Unlock() - close(channelToClose) -} diff --git a/pkg/osutil/interrupt_unix.go b/pkg/osutil/interrupt_unix.go index ec9dc7b67a6..1d7f87498e5 100644 --- a/pkg/osutil/interrupt_unix.go +++ b/pkg/osutil/interrupt_unix.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build !windows && !plan9 +// +build !windows,!plan9 package osutil @@ -22,8 +23,6 @@ import ( "sync" "syscall" - "go.etcd.io/etcd/client/pkg/v3/verify" - "go.uber.org/zap" ) @@ -35,11 +34,10 @@ var ( interruptRegisterMu, interruptExitMu sync.Mutex // interruptHandlers holds all registered InterruptHandlers in order // they will be executed. 
- interruptHandlers []InterruptHandler + interruptHandlers = []InterruptHandler{} ) -// RegisterInterruptHandler registers a new InterruptHandler. Handlers registered -// after interrupt handing was initiated will not be executed. +// RegisterInterruptHandler 注册中断处理程序,但不会执行 func RegisterInterruptHandler(h InterruptHandler) { interruptRegisterMu.Lock() defer interruptRegisterMu.Unlock() @@ -48,7 +46,6 @@ func RegisterInterruptHandler(h InterruptHandler) { // HandleInterrupts calls the handler functions on receiving a SIGINT or SIGTERM. func HandleInterrupts(lg *zap.Logger) { - verify.Assert(lg != nil, "the logger should not be nil") notifier := make(chan os.Signal, 1) signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM) @@ -62,7 +59,9 @@ func HandleInterrupts(lg *zap.Logger) { interruptExitMu.Lock() - lg.Info("received signal; shutting down", zap.String("signal", sig.String())) + if lg != nil { + lg.Info("received signal; shutting down", zap.String("signal", sig.String())) + } for _, h := range ihs { h() diff --git a/pkg/osutil/interrupt_windows.go b/pkg/osutil/interrupt_windows.go index a4c82b99e06..0e0bb19a06b 100644 --- a/pkg/osutil/interrupt_windows.go +++ b/pkg/osutil/interrupt_windows.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build windows +// +build windows package osutil @@ -24,7 +25,7 @@ import ( type InterruptHandler func() -// RegisterInterruptHandler is a no-op on windows +// RegisterInterruptHandler 注册中断处理程序,但不会执行 func RegisterInterruptHandler(h InterruptHandler) {} // HandleInterrupts is a no-op on windows diff --git a/pkg/osutil/osutil.go b/pkg/osutil/osutil.go index cbf96e2e04d..9efb02e891f 100644 --- a/pkg/osutil/osutil.go +++ b/pkg/osutil/osutil.go @@ -20,10 +20,8 @@ import ( "strings" ) -var ( - // support to override setting SIG_DFL so tests don't terminate early - setDflSignal = dflSignal -) +// support to override setting SIG_DFL so tests don't terminate early +var setDflSignal = dflSignal func Unsetenv(key string) error { envs := os.Environ() diff --git a/pkg/osutil/osutil_test.go b/pkg/osutil/osutil_test.go deleted file mode 100644 index 322d2770c3a..00000000000 --- a/pkg/osutil/osutil_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package osutil - -import ( - "os" - "os/signal" - "reflect" - "syscall" - "testing" - "time" - - "go.uber.org/zap/zaptest" -) - -func init() { setDflSignal = func(syscall.Signal) {} } - -func TestUnsetenv(t *testing.T) { - tests := []string{ - "data", - "space data", - "equal=data", - } - for i, tt := range tests { - key := "ETCD_UNSETENV_TEST" - if os.Getenv(key) != "" { - t.Fatalf("#%d: cannot get empty %s", i, key) - } - env := os.Environ() - if err := os.Setenv(key, tt); err != nil { - t.Fatalf("#%d: cannot set %s: %v", i, key, err) - } - if err := Unsetenv(key); err != nil { - t.Errorf("#%d: unsetenv %s error: %v", i, key, err) - } - if g := os.Environ(); !reflect.DeepEqual(g, env) { - t.Errorf("#%d: env = %+v, want %+v", i, g, env) - } - } -} - -func waitSig(t *testing.T, c <-chan os.Signal, sig os.Signal) { - select { - case s := <-c: - if s != sig { - t.Fatalf("signal was %v, want %v", s, sig) - } - case <-time.After(1 * time.Second): - t.Fatalf("timeout waiting for %v", sig) - } -} - -func TestHandleInterrupts(t *testing.T) { - for _, sig := range []syscall.Signal{syscall.SIGINT, syscall.SIGTERM} { - n := 1 - RegisterInterruptHandler(func() { n++ }) - RegisterInterruptHandler(func() { n *= 2 }) - - c := make(chan os.Signal, 2) - signal.Notify(c, sig) - - HandleInterrupts(zaptest.NewLogger(t)) - syscall.Kill(syscall.Getpid(), sig) - - // we should receive the signal once from our own kill and - // a second time from HandleInterrupts - waitSig(t, c, sig) - waitSig(t, c, sig) - - if n == 3 { - t.Fatalf("interrupt handlers were called in wrong order") - } - if n != 4 { - t.Fatalf("interrupt handlers were not called properly") - } - // reset interrupt handlers - interruptHandlers = interruptHandlers[:0] - interruptExitMu.Unlock() - } -} diff --git a/pkg/osutil/signal.go b/pkg/osutil/signal.go index 0e42b9c35f4..c324ea16ec1 100644 --- a/pkg/osutil/signal.go +++ b/pkg/osutil/signal.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build !linux || cov +// +build !linux cov package osutil diff --git a/pkg/osutil/signal_linux.go b/pkg/osutil/signal_linux.go index 766ed26f307..93e0f350805 100644 --- a/pkg/osutil/signal_linux.go +++ b/pkg/osutil/signal_linux.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build linux && !cov +// +build linux,!cov package osutil diff --git a/pkg/pbutil/over_pbutil.go b/pkg/pbutil/over_pbutil.go new file mode 100644 index 00000000000..a6cd83ccb43 --- /dev/null +++ b/pkg/pbutil/over_pbutil.go @@ -0,0 +1,63 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil defines interfaces for handling Protocol Buffer objects. 
+package pbutil + +import "fmt" + +type Marshaler interface { + Marshal() (data []byte, err error) +} + +type Unmarshaler interface { + Unmarshal(data []byte) error +} + +// MustMarshal OK +func MustMarshal(m Marshaler) []byte { + d, err := m.Marshal() + if err != nil { + panic(fmt.Sprintf("序列化不应该失败 (%v)", err)) + } + return d +} + +func MustUnmarshal(um Unmarshaler, data []byte) { + if len(data) == 0 { + return + } + if err := um.Unmarshal(data); err != nil { + panic(fmt.Sprintf("反序列化不应该失败(%v)", err)) + } +} + +func MaybeUnmarshal(um Unmarshaler, data []byte) bool { + if len(data) == 0 { + return false + } + if err := um.Unmarshal(data); err != nil { + return false + } + return true +} + +func GetBool(v *bool) (vv bool, set bool) { + if v == nil { + return false, false + } + return *v, true +} + +func Boolp(b bool) *bool { return &b } diff --git a/pkg/pbutil/pbutil.go b/pkg/pbutil/pbutil.go deleted file mode 100644 index 821f59703ae..00000000000 --- a/pkg/pbutil/pbutil.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil defines interfaces for handling Protocol Buffer objects. 
-package pbutil - -import "fmt" - -type Marshaler interface { - Marshal() (data []byte, err error) -} - -type Unmarshaler interface { - Unmarshal(data []byte) error -} - -func MustMarshal(m Marshaler) []byte { - d, err := m.Marshal() - if err != nil { - panic(fmt.Sprintf("marshal should never fail (%v)", err)) - } - return d -} - -func MustUnmarshal(um Unmarshaler, data []byte) { - if err := um.Unmarshal(data); err != nil { - panic(fmt.Sprintf("unmarshal should never fail (%v)", err)) - } -} - -func MaybeUnmarshal(um Unmarshaler, data []byte) bool { - if err := um.Unmarshal(data); err != nil { - return false - } - return true -} - -func GetBool(v *bool) (vv bool, set bool) { - if v == nil { - return false, false - } - return *v, true -} - -func Boolp(b bool) *bool { return &b } diff --git a/pkg/pbutil/pbutil_test.go b/pkg/pbutil/pbutil_test.go deleted file mode 100644 index 5a8dd9a17ef..00000000000 --- a/pkg/pbutil/pbutil_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pbutil - -import ( - "errors" - "reflect" - "testing" -) - -func TestMarshaler(t *testing.T) { - data := []byte("test data") - m := &fakeMarshaler{data: data} - if g := MustMarshal(m); !reflect.DeepEqual(g, data) { - t.Errorf("data = %s, want %s", g, m.data) - } -} - -func TestMarshalerPanic(t *testing.T) { - defer func() { - if r := recover(); r == nil { - t.Errorf("recover = nil, want error") - } - }() - m := &fakeMarshaler{err: errors.New("blah")} - MustMarshal(m) -} - -func TestUnmarshaler(t *testing.T) { - data := []byte("test data") - m := &fakeUnmarshaler{} - MustUnmarshal(m, data) - if !reflect.DeepEqual(m.data, data) { - t.Errorf("data = %s, want %s", m.data, data) - } -} - -func TestUnmarshalerPanic(t *testing.T) { - defer func() { - if r := recover(); r == nil { - t.Errorf("recover = nil, want error") - } - }() - m := &fakeUnmarshaler{err: errors.New("blah")} - MustUnmarshal(m, nil) -} - -func TestGetBool(t *testing.T) { - tests := []struct { - b *bool - wb bool - wset bool - }{ - {nil, false, false}, - {Boolp(true), true, true}, - {Boolp(false), false, true}, - } - for i, tt := range tests { - b, set := GetBool(tt.b) - if b != tt.wb { - t.Errorf("#%d: value = %v, want %v", i, b, tt.wb) - } - if set != tt.wset { - t.Errorf("#%d: set = %v, want %v", i, set, tt.wset) - } - } -} - -type fakeMarshaler struct { - data []byte - err error -} - -func (m *fakeMarshaler) Marshal() ([]byte, error) { - return m.data, m.err -} - -type fakeUnmarshaler struct { - data []byte - err error -} - -func (m *fakeUnmarshaler) Unmarshal(data []byte) error { - m.data = data - return m.err -} diff --git a/pkg/proxy/fixtures/gencert.json b/pkg/proxy/fixtures/gencert.json index 09b67267bb2..799622b64d5 100644 --- a/pkg/proxy/fixtures/gencert.json +++ b/pkg/proxy/fixtures/gencert.json @@ -1,13 +1,13 @@ { "signing": { "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" + "usages": [ + "signing", + "key 
encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" } } } diff --git a/pkg/proxy/server.go b/pkg/proxy/server.go index 72a0c7483d2..1c5658213fb 100644 --- a/pkg/proxy/server.go +++ b/pkg/proxy/server.go @@ -27,7 +27,9 @@ import ( "sync" "time" - "go.etcd.io/etcd/client/pkg/v3/transport" + cm "github.com/ls-2018/etcd_cn/code_debug/conn" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport" humanize "github.com/dustin/go-humanize" "go.uber.org/zap" @@ -37,9 +39,18 @@ var ( defaultDialTimeout = 3 * time.Second defaultBufferSize = 48 * 1024 defaultRetryInterval = 10 * time.Millisecond + defaultLogger *zap.Logger ) -// Server defines proxy server layer that simulates common network faults: +func init() { + var err error + defaultLogger, err = zap.NewProduction() + if err != nil { + panic(err) + } +} + +// Server defines proxy etcd layer that simulates common network faults: // latency spikes and packet drop or corruption. The proxy overhead is very // small overhead (<500μs per request). Please run tests to compute actual // overhead. @@ -128,7 +139,7 @@ type Server interface { ResetListener() error } -// ServerConfig defines proxy server configuration. +// ServerConfig defines proxy etcd configuration. 
type ServerConfig struct { Logger *zap.Logger From url.URL @@ -231,6 +242,9 @@ func NewServer(cfg ServerConfig) Server { if s.retryInterval == 0 { s.retryInterval = defaultRetryInterval } + if s.lg == nil { + s.lg = defaultLogger + } close(s.pauseAcceptc) close(s.pauseTxc) @@ -352,7 +366,7 @@ func (s *server) listenAndServe() { continue } - + cm.PrintConn("server", in) var out net.Conn if !s.tlsInfo.Empty() { var tp *http.Transport diff --git a/pkg/proxy/server_test.go b/pkg/proxy/server_test.go deleted file mode 100644 index 6690c619d11..00000000000 --- a/pkg/proxy/server_test.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package proxy - -import ( - "bytes" - "context" - "crypto/tls" - "fmt" - "io" - "log" - "math/rand" - "net" - "net/http" - "net/url" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/transport" -) - -func TestServer_Unix_Insecure(t *testing.T) { testServer(t, "unix", false, false) } -func TestServer_TCP_Insecure(t *testing.T) { testServer(t, "tcp", false, false) } -func TestServer_Unix_Secure(t *testing.T) { testServer(t, "unix", true, false) } -func TestServer_TCP_Secure(t *testing.T) { testServer(t, "tcp", true, false) } -func TestServer_Unix_Insecure_DelayTx(t *testing.T) { testServer(t, "unix", false, true) } -func TestServer_TCP_Insecure_DelayTx(t *testing.T) { testServer(t, "tcp", false, true) } -func TestServer_Unix_Secure_DelayTx(t *testing.T) { testServer(t, "unix", true, true) } -func TestServer_TCP_Secure_DelayTx(t *testing.T) { testServer(t, "tcp", true, true) } - -func testServer(t *testing.T, scheme string, secure bool, delayTx bool) { - lg := zaptest.NewLogger(t) - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - if scheme == "tcp" { - ln1, ln2 := listen(t, "tcp", "localhost:0", transport.TLSInfo{}), listen(t, "tcp", "localhost:0", transport.TLSInfo{}) - srcAddr, dstAddr = ln1.Addr().String(), ln2.Addr().String() - ln1.Close() - ln2.Close() - } else { - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - } - tlsInfo := createTLSInfo(lg, secure) - ln := listen(t, scheme, dstAddr, tlsInfo) - defer ln.Close() - - cfg := ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - } - if secure { - cfg.TLSInfo = tlsInfo - } - p := NewServer(cfg) - - waitForServer(t, p) - - defer p.Close() - - data1 := []byte("Hello World!") - donec, writec := make(chan struct{}), make(chan []byte) - - go func() { - defer close(donec) - for data := range writec { - send(t, 
data, scheme, srcAddr, tlsInfo) - } - }() - - recvc := make(chan []byte, 1) - go func() { - for i := 0; i < 2; i++ { - recvc <- receive(t, ln) - } - }() - - writec <- data1 - now := time.Now() - if d := <-recvc; !bytes.Equal(data1, d) { - close(writec) - t.Fatalf("expected %q, got %q", string(data1), string(d)) - } - took1 := time.Since(now) - t.Logf("took %v with no latency", took1) - - lat, rv := 50*time.Millisecond, 5*time.Millisecond - if delayTx { - p.DelayTx(lat, rv) - } - - data2 := []byte("new data") - writec <- data2 - now = time.Now() - if d := <-recvc; !bytes.Equal(data2, d) { - close(writec) - t.Fatalf("expected %q, got %q", string(data2), string(d)) - } - took2 := time.Since(now) - if delayTx { - t.Logf("took %v with latency %v+-%v", took2, lat, rv) - } else { - t.Logf("took %v with no latency", took2) - } - - if delayTx { - p.UndelayTx() - if took2 < lat-rv { - close(writec) - t.Fatalf("expected took2 %v (with latency) > delay: %v", took2, lat-rv) - } - } - - close(writec) - select { - case <-donec: - case <-time.After(3 * time.Second): - t.Fatal("took too long to write") - } - - select { - case <-p.Done(): - t.Fatal("unexpected done") - case err := <-p.Error(): - t.Fatal(err) - default: - } - - if err := p.Close(); err != nil { - t.Fatal(err) - } - - select { - case <-p.Done(): - case err := <-p.Error(): - if !strings.HasPrefix(err.Error(), "accept ") && - !strings.HasSuffix(err.Error(), "use of closed network connection") { - t.Fatal(err) - } - case <-time.After(3 * time.Second): - t.Fatal("took too long to close") - } -} - -func createTLSInfo(lg *zap.Logger, secure bool) transport.TLSInfo { - if secure { - return transport.TLSInfo{ - KeyFile: "../../tests/fixtures/server.key.insecure", - CertFile: "../../tests/fixtures/server.crt", - TrustedCAFile: "../../tests/fixtures/ca.crt", - ClientCertAuth: true, - Logger: lg, - } - } - return transport.TLSInfo{Logger: lg} -} - -func TestServer_Unix_Insecure_DelayAccept(t *testing.T) { 
testServerDelayAccept(t, false) } -func TestServer_Unix_Secure_DelayAccept(t *testing.T) { testServerDelayAccept(t, true) } -func testServerDelayAccept(t *testing.T, secure bool) { - lg := zaptest.NewLogger(t) - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - tlsInfo := createTLSInfo(lg, secure) - scheme := "unix" - ln := listen(t, scheme, dstAddr, tlsInfo) - defer ln.Close() - - cfg := ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - } - if secure { - cfg.TLSInfo = tlsInfo - } - p := NewServer(cfg) - - waitForServer(t, p) - - defer p.Close() - - data := []byte("Hello World!") - - now := time.Now() - send(t, data, scheme, srcAddr, tlsInfo) - if d := receive(t, ln); !bytes.Equal(data, d) { - t.Fatalf("expected %q, got %q", string(data), string(d)) - } - took1 := time.Since(now) - t.Logf("took %v with no latency", took1) - - lat, rv := 700*time.Millisecond, 10*time.Millisecond - p.DelayAccept(lat, rv) - defer p.UndelayAccept() - if err := p.ResetListener(); err != nil { - t.Fatal(err) - } - time.Sleep(200 * time.Millisecond) - - now = time.Now() - send(t, data, scheme, srcAddr, tlsInfo) - if d := receive(t, ln); !bytes.Equal(data, d) { - t.Fatalf("expected %q, got %q", string(data), string(d)) - } - took2 := time.Since(now) - t.Logf("took %v with latency %v±%v", took2, lat, rv) - - if took1 >= took2 { - t.Fatalf("expected took1 %v < took2 %v", took1, took2) - } -} - -func TestServer_PauseTx(t *testing.T) { - lg := zaptest.NewLogger(t) - scheme := "unix" - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - ln := listen(t, scheme, dstAddr, transport.TLSInfo{}) - defer ln.Close() - - p := NewServer(ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - }) - - waitForServer(t, p) - - 
defer p.Close() - - p.PauseTx() - - data := []byte("Hello World!") - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - - recvc := make(chan []byte, 1) - go func() { - recvc <- receive(t, ln) - }() - - select { - case d := <-recvc: - t.Fatalf("received unexpected data %q during pause", string(d)) - case <-time.After(200 * time.Millisecond): - } - - p.UnpauseTx() - - select { - case d := <-recvc: - if !bytes.Equal(data, d) { - t.Fatalf("expected %q, got %q", string(data), string(d)) - } - case <-time.After(2 * time.Second): - t.Fatal("took too long to receive after unpause") - } -} - -func TestServer_ModifyTx_corrupt(t *testing.T) { - lg := zaptest.NewLogger(t) - scheme := "unix" - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - ln := listen(t, scheme, dstAddr, transport.TLSInfo{}) - defer ln.Close() - - p := NewServer(ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - }) - - waitForServer(t, p) - - defer p.Close() - - p.ModifyTx(func(d []byte) []byte { - d[len(d)/2]++ - return d - }) - data := []byte("Hello World!") - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - if d := receive(t, ln); bytes.Equal(d, data) { - t.Fatalf("expected corrupted data, got %q", string(d)) - } - - p.UnmodifyTx() - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - if d := receive(t, ln); !bytes.Equal(d, data) { - t.Fatalf("expected uncorrupted data, got %q", string(d)) - } -} - -func TestServer_ModifyTx_packet_loss(t *testing.T) { - lg := zaptest.NewLogger(t) - scheme := "unix" - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - ln := listen(t, scheme, dstAddr, transport.TLSInfo{}) - defer ln.Close() - - p := NewServer(ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - }) - - 
waitForServer(t, p) - - defer p.Close() - - // 50% packet loss - p.ModifyTx(func(d []byte) []byte { - half := len(d) / 2 - return d[:half:half] - }) - data := []byte("Hello World!") - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - if d := receive(t, ln); bytes.Equal(d, data) { - t.Fatalf("expected corrupted data, got %q", string(d)) - } - - p.UnmodifyTx() - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - if d := receive(t, ln); !bytes.Equal(d, data) { - t.Fatalf("expected uncorrupted data, got %q", string(d)) - } -} - -func TestServer_BlackholeTx(t *testing.T) { - lg := zaptest.NewLogger(t) - scheme := "unix" - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - ln := listen(t, scheme, dstAddr, transport.TLSInfo{}) - defer ln.Close() - - p := NewServer(ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - }) - - waitForServer(t, p) - - defer p.Close() - - p.BlackholeTx() - - data := []byte("Hello World!") - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - - recvc := make(chan []byte, 1) - go func() { - recvc <- receive(t, ln) - }() - - select { - case d := <-recvc: - t.Fatalf("unexpected data receive %q during blackhole", string(d)) - case <-time.After(200 * time.Millisecond): - } - - p.UnblackholeTx() - - // expect different data, old data dropped - data[0]++ - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - - select { - case d := <-recvc: - if !bytes.Equal(data, d) { - t.Fatalf("expected %q, got %q", string(data), string(d)) - } - case <-time.After(2 * time.Second): - t.Fatal("took too long to receive after unblackhole") - } -} - -func TestServer_Shutdown(t *testing.T) { - lg := zaptest.NewLogger(t) - scheme := "unix" - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - ln := listen(t, scheme, dstAddr, transport.TLSInfo{}) - defer 
ln.Close() - - p := NewServer(ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - }) - - waitForServer(t, p) - - defer p.Close() - - s, _ := p.(*server) - s.listener.Close() - time.Sleep(200 * time.Millisecond) - - data := []byte("Hello World!") - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - if d := receive(t, ln); !bytes.Equal(d, data) { - t.Fatalf("expected %q, got %q", string(data), string(d)) - } -} - -func TestServer_ShutdownListener(t *testing.T) { - lg := zaptest.NewLogger(t) - scheme := "unix" - srcAddr, dstAddr := newUnixAddr(), newUnixAddr() - defer func() { - os.RemoveAll(srcAddr) - os.RemoveAll(dstAddr) - }() - - ln := listen(t, scheme, dstAddr, transport.TLSInfo{}) - defer ln.Close() - - p := NewServer(ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - }) - - waitForServer(t, p) - - defer p.Close() - - // shut down destination - ln.Close() - time.Sleep(200 * time.Millisecond) - - ln = listen(t, scheme, dstAddr, transport.TLSInfo{}) - defer ln.Close() - - data := []byte("Hello World!") - send(t, data, scheme, srcAddr, transport.TLSInfo{}) - if d := receive(t, ln); !bytes.Equal(d, data) { - t.Fatalf("expected %q, got %q", string(data), string(d)) - } -} - -func TestServerHTTP_Insecure_DelayTx(t *testing.T) { testServerHTTP(t, false, true) } -func TestServerHTTP_Secure_DelayTx(t *testing.T) { testServerHTTP(t, true, true) } -func TestServerHTTP_Insecure_DelayRx(t *testing.T) { testServerHTTP(t, false, false) } -func TestServerHTTP_Secure_DelayRx(t *testing.T) { testServerHTTP(t, true, false) } -func testServerHTTP(t *testing.T, secure, delayTx bool) { - lg := zaptest.NewLogger(t) - scheme := "tcp" - ln1, ln2 := listen(t, scheme, "localhost:0", transport.TLSInfo{}), listen(t, scheme, "localhost:0", transport.TLSInfo{}) - srcAddr, dstAddr := ln1.Addr().String(), ln2.Addr().String() - ln1.Close() - 
ln2.Close() - - mux := http.NewServeMux() - mux.HandleFunc("/hello", func(w http.ResponseWriter, req *http.Request) { - d, err := io.ReadAll(req.Body) - req.Body.Close() - if err != nil { - t.Fatal(err) - } - if _, err = w.Write([]byte(fmt.Sprintf("%q(confirmed)", string(d)))); err != nil { - t.Fatal(err) - } - }) - tlsInfo := createTLSInfo(lg, secure) - var tlsConfig *tls.Config - if secure { - _, err := tlsInfo.ServerConfig() - if err != nil { - t.Fatal(err) - } - } - srv := &http.Server{ - Addr: dstAddr, - Handler: mux, - TLSConfig: tlsConfig, - ErrorLog: log.New(io.Discard, "net/http", 0), - } - - donec := make(chan struct{}) - defer func() { - srv.Close() - <-donec - }() - go func() { - if !secure { - srv.ListenAndServe() - } else { - srv.ListenAndServeTLS(tlsInfo.CertFile, tlsInfo.KeyFile) - } - defer close(donec) - }() - time.Sleep(200 * time.Millisecond) - - cfg := ServerConfig{ - Logger: lg, - From: url.URL{Scheme: scheme, Host: srcAddr}, - To: url.URL{Scheme: scheme, Host: dstAddr}, - } - if secure { - cfg.TLSInfo = tlsInfo - } - p := NewServer(cfg) - - waitForServer(t, p) - - defer func() { - lg.Info("closing Proxy server...") - p.Close() - lg.Info("closed Proxy server.") - }() - - data := "Hello World!" 
- - var resp *http.Response - var err error - now := time.Now() - if secure { - tp, terr := transport.NewTransport(tlsInfo, 3*time.Second) - assert.NoError(t, terr) - cli := &http.Client{Transport: tp} - resp, err = cli.Post("https://"+srcAddr+"/hello", "", strings.NewReader(data)) - defer cli.CloseIdleConnections() - defer tp.CloseIdleConnections() - } else { - resp, err = http.Post("http://"+srcAddr+"/hello", "", strings.NewReader(data)) - defer http.DefaultClient.CloseIdleConnections() - } - assert.NoError(t, err) - d, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - resp.Body.Close() - took1 := time.Since(now) - t.Logf("took %v with no latency", took1) - - rs1 := string(d) - exp := fmt.Sprintf("%q(confirmed)", data) - if rs1 != exp { - t.Fatalf("got %q, expected %q", rs1, exp) - } - - lat, rv := 100*time.Millisecond, 10*time.Millisecond - if delayTx { - p.DelayTx(lat, rv) - defer p.UndelayTx() - } else { - p.DelayRx(lat, rv) - defer p.UndelayRx() - } - - now = time.Now() - if secure { - tp, terr := transport.NewTransport(tlsInfo, 3*time.Second) - if terr != nil { - t.Fatal(terr) - } - cli := &http.Client{Transport: tp} - resp, err = cli.Post("https://"+srcAddr+"/hello", "", strings.NewReader(data)) - defer cli.CloseIdleConnections() - defer tp.CloseIdleConnections() - } else { - resp, err = http.Post("http://"+srcAddr+"/hello", "", strings.NewReader(data)) - defer http.DefaultClient.CloseIdleConnections() - } - if err != nil { - t.Fatal(err) - } - d, err = io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - resp.Body.Close() - took2 := time.Since(now) - t.Logf("took %v with latency %v±%v", took2, lat, rv) - - rs2 := string(d) - if rs2 != exp { - t.Fatalf("got %q, expected %q", rs2, exp) - } - if took1 > took2 { - t.Fatalf("expected took1 %v < took2 %v", took1, took2) - } -} - -func newUnixAddr() string { - now := time.Now().UnixNano() - rand.Seed(now) - addr := fmt.Sprintf("%X%X.unix-conn", now, rand.Intn(35000)) - os.RemoveAll(addr) 
- return addr -} - -func listen(t *testing.T, scheme, addr string, tlsInfo transport.TLSInfo) (ln net.Listener) { - var err error - if !tlsInfo.Empty() { - ln, err = transport.NewListener(addr, scheme, &tlsInfo) - } else { - ln, err = net.Listen(scheme, addr) - } - if err != nil { - t.Fatal(err) - } - return ln -} - -func send(t *testing.T, data []byte, scheme, addr string, tlsInfo transport.TLSInfo) { - var out net.Conn - var err error - if !tlsInfo.Empty() { - tp, terr := transport.NewTransport(tlsInfo, 3*time.Second) - if terr != nil { - t.Fatal(terr) - } - out, err = tp.DialContext(context.Background(), scheme, addr) - } else { - out, err = net.Dial(scheme, addr) - } - if err != nil { - t.Fatal(err) - } - if _, err = out.Write(data); err != nil { - t.Fatal(err) - } - if err = out.Close(); err != nil { - t.Fatal(err) - } -} - -func receive(t *testing.T, ln net.Listener) (data []byte) { - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - for { - in, err := ln.Accept() - if err != nil { - t.Fatal(err) - } - var n int64 - n, err = buf.ReadFrom(in) - if err != nil { - t.Fatal(err) - } - if n > 0 { - break - } - } - return buf.Bytes() -} - -// Waits until a proxy is ready to serve. -// Aborts test on proxy start-up error. -func waitForServer(t *testing.T, s Server) { - select { - case <-s.Ready(): - case err := <-s.Error(): - t.Fatal(err) - } -} diff --git a/pkg/report/report.go b/pkg/report/report.go index 4d138f9744e..87831b4f364 100644 --- a/pkg/report/report.go +++ b/pkg/report/report.go @@ -36,7 +36,9 @@ type Result struct { Weight float64 } -func (res *Result) Duration() time.Duration { return res.End.Sub(res.Start) } +func (res *Result) Duration() time.Duration { + return res.End.Sub(res.Start) +} type report struct { results chan Result @@ -49,13 +51,13 @@ type report struct { // Stats exposes results raw data. 
type Stats struct { AvgTotal float64 - Fastest float64 - Slowest float64 + Fastest float64 // 请求最快的时间 + Slowest float64 // 请求最长的时间 Average float64 Stddev float64 RPS float64 - Total time.Duration - ErrorDist map[string]int + Total time.Duration // 总花费时间 + ErrorDist map[string]int // 错误计数 Lats []float64 TimeSeries TimeSeries } @@ -67,15 +69,10 @@ func (s *Stats) copy() Stats { return ss } -// Report processes a result stream until it is closed, then produces a -// string with information about the consumed result data. +// Report 处理结果流直到它被关闭,然后生成一个包含有关所使用的结果数据的信息的字符串. type Report interface { Results() chan<- Result - - // Run returns results in print-friendly format. Run() <-chan string - - // Stats returns results in raw data. Stats() <-chan Stats } @@ -96,7 +93,9 @@ func NewReportSample(precision string) Report { return r } -func (r *report) Results() chan<- Result { return r.results } +func (r *report) Results() chan<- Result { + return r.results +} func (r *report) Run() <-chan string { donec := make(chan string, 1) @@ -138,7 +137,7 @@ func copyFloats(s []float64) (c []float64) { func (r *report) String() (s string) { if len(r.stats.Lats) > 0 { - s += "\nSummary:\n" + s += fmt.Sprintf("\nSummary:\n") s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.stats.Total.Seconds())) s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.stats.Slowest)) s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.stats.Fastest)) @@ -226,7 +225,7 @@ func percentiles(nums []float64) (data []float64) { func (r *report) sprintLatencies() string { data := percentiles(r.stats.Lats) - s := "\nLatency distribution:\n" + s := fmt.Sprintf("\nLatency distribution:\n") for i := 0; i < len(pctls); i++ { if data[i] > 0 { s += fmt.Sprintf(" %v%% in %s.\n", pctls[i], r.sec2str(data[i])) @@ -257,7 +256,7 @@ func (r *report) histogram() string { bi++ } } - s := "\nResponse time histogram:\n" + s := fmt.Sprintf("\nResponse time histogram:\n") for i := 0; i < len(buckets); i++ { // Normalize bar lengths. 
var barLen int @@ -270,7 +269,7 @@ func (r *report) histogram() string { } func (r *report) errors() string { - s := "\nError distribution:\n" + s := fmt.Sprintf("\nError distribution:\n") for err, num := range r.stats.ErrorDist { s += fmt.Sprintf(" [%d]\t%s\n", num, err) } diff --git a/pkg/report/report_test.go b/pkg/report/report_test.go deleted file mode 100644 index 6f073f3e8d3..00000000000 --- a/pkg/report/report_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package report - -import ( - "fmt" - "reflect" - "strings" - "testing" - "time" -) - -func TestPercentiles(t *testing.T) { - nums := make([]float64, 100) - nums[99] = 1 // 99-percentile (1 out of 100) - data := percentiles(nums) - if data[len(pctls)-2] != 1 { - t.Fatalf("99-percentile expected 1, got %f", data[len(pctls)-2]) - } - - nums = make([]float64, 1000) - nums[999] = 1 // 99.9-percentile (1 out of 1000) - data = percentiles(nums) - if data[len(pctls)-1] != 1 { - t.Fatalf("99.9-percentile expected 1, got %f", data[len(pctls)-1]) - } -} - -func TestReport(t *testing.T) { - r := NewReportSample("%f") - go func() { - start := time.Now() - for i := 0; i < 5; i++ { - end := start.Add(time.Second) - r.Results() <- Result{Start: start, End: end} - start = end - } - r.Results() <- Result{Start: start, End: start.Add(time.Second), Err: fmt.Errorf("oops")} - close(r.Results()) - }() - - stats := <-r.Stats() - stats.TimeSeries = nil // ignore timeseries since it uses wall clock - wStats := Stats{ - AvgTotal: 5.0, - Fastest: 1.0, - Slowest: 1.0, - Average: 1.0, - Stddev: 0.0, - Total: stats.Total, - RPS: 5.0 / stats.Total.Seconds(), - ErrorDist: map[string]int{"oops": 1}, - Lats: []float64{1.0, 1.0, 1.0, 1.0, 1.0}, - } - if !reflect.DeepEqual(stats, wStats) { - t.Fatalf("got %+v, want %+v", stats, wStats) - } - - wstrs := []string{ - "Stddev:\t0", - "Average:\t1.0", - "Slowest:\t1.0", - "Fastest:\t1.0", - } - ss := <-r.Run() - for i, ws := range wstrs { - if !strings.Contains(ss, ws) { - t.Errorf("#%d: stats string missing %s", i, ws) - } - } -} - -func TestWeightedReport(t *testing.T) { - r := NewWeightedReport(NewReport("%f"), "%f") - go func() { - start := time.Now() - for i := 0; i < 5; i++ { - end := start.Add(time.Second) - r.Results() <- Result{Start: start, End: end, Weight: 2.0} - start = end - } - r.Results() <- Result{Start: start, End: start.Add(time.Second), Err: fmt.Errorf("oops")} - close(r.Results()) - }() - - stats := <-r.Stats() - stats.TimeSeries = 
nil // ignore timeseries since it uses wall clock - wStats := Stats{ - AvgTotal: 10.0, - Fastest: 0.5, - Slowest: 0.5, - Average: 0.5, - Stddev: 0.0, - Total: stats.Total, - RPS: 10.0 / stats.Total.Seconds(), - ErrorDist: map[string]int{"oops": 1}, - Lats: []float64{0.5, 0.5, 0.5, 0.5, 0.5}, - } - if !reflect.DeepEqual(stats, wStats) { - t.Fatalf("got %+v, want %+v", stats, wStats) - } -} diff --git a/pkg/report/timeseries.go b/pkg/report/timeseries.go index eb01a690bbb..a999c2dcc77 100644 --- a/pkg/report/timeseries.go +++ b/pkg/report/timeseries.go @@ -124,7 +124,7 @@ func (t TimeSeries) String() string { if err := wr.Write([]string{"UNIX-SECOND", "MIN-LATENCY-MS", "AVG-LATENCY-MS", "MAX-LATENCY-MS", "AVG-THROUGHPUT"}); err != nil { log.Fatal(err) } - var rows [][]string + rows := [][]string{} for i := range t { row := []string{ fmt.Sprintf("%d", t[i].Timestamp), diff --git a/pkg/report/timeseries_test.go b/pkg/report/timeseries_test.go deleted file mode 100644 index 13fcbfa397d..00000000000 --- a/pkg/report/timeseries_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package report - -import ( - "testing" - "time" -) - -func TestGetTimeseries(t *testing.T) { - sp := newSecondPoints() - now := time.Now() - sp.Add(now, time.Second) - sp.Add(now.Add(5*time.Second), time.Second) - n := sp.getTimeSeries().Len() - if n < 3 { - t.Fatalf("expected at 6 points of time series, got %s", sp.getTimeSeries()) - } - - // add a point with duplicate timestamp - sp.Add(now, 3*time.Second) - ts := sp.getTimeSeries() - if ts[0].MinLatency != time.Second { - t.Fatalf("ts[0] min latency expected %v, got %s", time.Second, ts[0].MinLatency) - } - if ts[0].AvgLatency != 2*time.Second { - t.Fatalf("ts[0] average latency expected %v, got %s", 2*time.Second, ts[0].AvgLatency) - } - if ts[0].MaxLatency != 3*time.Second { - t.Fatalf("ts[0] max latency expected %v, got %s", 3*time.Second, ts[0].MaxLatency) - } -} diff --git a/pkg/report/weighted.go b/pkg/report/weighted.go index 411214f6d16..f3c0ba229e3 100644 --- a/pkg/report/weighted.go +++ b/pkg/report/weighted.go @@ -38,7 +38,9 @@ func NewWeightedReport(r Report, precision string) Report { } } -func (wr *weightedReport) Results() chan<- Result { return wr.results } +func (wr *weightedReport) Results() chan<- Result { + return wr.results +} func (wr *weightedReport) Run() <-chan string { donec := make(chan string, 2) diff --git a/pkg/runtime/fds_other.go b/pkg/runtime/fds_other.go index 2311bb19725..034f3d42646 100644 --- a/pkg/runtime/fds_other.go +++ b/pkg/runtime/fds_other.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build !linux +// +build !linux package runtime diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go index ea19cf0181b..5940a1e7188 100644 --- a/pkg/schedule/schedule.go +++ b/pkg/schedule/schedule.go @@ -17,36 +17,9 @@ package schedule import ( "context" "sync" - - "go.etcd.io/etcd/client/pkg/v3/verify" - - "go.uber.org/zap" ) -type Job interface { - Name() string - Do(context.Context) -} - -type job struct { - name string - do func(context.Context) -} - -func (j job) Name() string { - return j.name -} - -func (j job) Do(ctx context.Context) { - j.do(ctx) -} - -func NewJob(name string, do func(ctx context.Context)) Job { - return job{ - name: name, - do: do, - } -} +type Job func(context.Context) // Scheduler can schedule jobs. type Scheduler interface { @@ -73,28 +46,24 @@ type Scheduler interface { type fifo struct { mu sync.Mutex - resume chan struct{} + resume chan struct{} // 重新开始 scheduled int finished int - pendings []Job + pendings []Job // 将每从raft获取到的一批待apply的消息封装成一个job ctx context.Context cancel context.CancelFunc finishCond *sync.Cond donec chan struct{} - lg *zap.Logger } // NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO // order sequentially -func NewFIFOScheduler(lg *zap.Logger) Scheduler { - verify.Assert(lg != nil, "the logger should not be nil") - +func NewFIFOScheduler() Scheduler { f := &fifo{ resume: make(chan struct{}, 1), donec: make(chan struct{}, 1), - lg: lg, } f.finishCond = sync.NewCond(&f.mu) f.ctx, f.cancel = context.WithCancel(context.Background()) @@ -102,13 +71,13 @@ func NewFIFOScheduler(lg *zap.Logger) Scheduler { return f } -// Schedule schedules a job that will be ran in FIFO order sequentially. +// Schedule 调度一个作业,该作业将按照FIFO顺序顺序运行. 
func (f *fifo) Schedule(j Job) { f.mu.Lock() defer f.mu.Unlock() if f.cancel == nil { - panic("schedule: schedule to stopped scheduler") + panic("调度:调度到停止的调度程序") } if len(f.pendings) == 0 { @@ -156,6 +125,7 @@ func (f *fifo) Stop() { } func (f *fifo) run() { + // TODO: recover from job panic? defer func() { close(f.donec) close(f.resume) @@ -179,29 +149,17 @@ func (f *fifo) run() { f.mu.Unlock() // clean up pending jobs for _, todo := range pendings { - f.executeJob(todo, true) + todo(f.ctx) } return } } else { - f.executeJob(todo, false) - } - } -} - -func (f *fifo) executeJob(todo Job, updatedFinishedStats bool) { - defer func() { - if !updatedFinishedStats { + todo(f.ctx) f.finishCond.L.Lock() f.finished++ f.pendings = f.pendings[1:] f.finishCond.Broadcast() f.finishCond.L.Unlock() } - if err := recover(); err != nil { - f.lg.Panic("execute job failed", zap.String("job", todo.Name()), zap.Any("panic", err)) - } - }() - - todo.Do(f.ctx) + } } diff --git a/pkg/schedule/schedule_test.go b/pkg/schedule/schedule_test.go deleted file mode 100644 index c6bb73ca9f8..00000000000 --- a/pkg/schedule/schedule_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schedule - -import ( - "context" - "fmt" - "testing" - - "go.uber.org/zap/zaptest" -) - -func TestFIFOSchedule(t *testing.T) { - s := NewFIFOScheduler(zaptest.NewLogger(t)) - defer s.Stop() - - next := 0 - jobCreator := func(i int) Job { - return NewJob(fmt.Sprintf("i_%d_increse", i), func(ctx context.Context) { - defer func() { - if err := recover(); err != nil { - fmt.Println("err: ", err) - } - }() - if next != i { - t.Fatalf("job#%d: got %d, want %d", i, next, i) - } - next = i + 1 - if next%3 == 0 { - panic("fifo panic") - } - }) - } - - var jobs []Job - for i := 0; i < 100; i++ { - jobs = append(jobs, jobCreator(i)) - } - - for _, j := range jobs { - s.Schedule(j) - } - - s.WaitFinish(100) - if s.Scheduled() != 100 { - t.Errorf("scheduled = %d, want %d", s.Scheduled(), 100) - } -} diff --git a/pkg/stringutil/rand_test.go b/pkg/stringutil/rand_test.go deleted file mode 100644 index 1b4a9dfd2c8..00000000000 --- a/pkg/stringutil/rand_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stringutil - -import ( - "fmt" - "testing" -) - -func TestUniqueStrings(t *testing.T) { - ss := UniqueStrings(10, 50) - for i := 1; i < len(ss); i++ { - if ss[i-1] == ss[i] { - t.Fatalf("ss[i-1] %q == ss[i] %q", ss[i-1], ss[i]) - } - } - fmt.Println(ss) -} diff --git a/pkg/traceutil/trace.go b/pkg/traceutil/trace.go index bdd8e9b66a2..8552bd4b213 100644 --- a/pkg/traceutil/trace.go +++ b/pkg/traceutil/trace.go @@ -16,10 +16,8 @@ package traceutil import ( - "bytes" "context" "fmt" - "math/rand" "time" "go.uber.org/zap" @@ -27,57 +25,44 @@ import ( const ( TraceKey = "trace" - StartTimeKey = "startTime" + StartTimeKey = "StartTime" ) // Field is a kv pair to record additional details of the trace. type Field struct { - Key string - Value interface{} + Key string `json:"key,omitempty"` + Value interface{} `json:"value,omitempty"` } func (f *Field) format() string { return fmt.Sprintf("%s:%v; ", f.Key, f.Value) } -func writeFields(fields []Field) string { - if len(fields) == 0 { - return "" - } - var buf bytes.Buffer - buf.WriteString("{") - for _, f := range fields { - buf.WriteString(f.format()) - } - buf.WriteString("}") - return buf.String() -} - type Trace struct { - operation string - lg *zap.Logger - fields []Field - startTime time.Time - steps []step - stepDisabled bool - isEmpty bool + Operation string `json:"operation,omitempty"` + lg *zap.Logger `json:"lg,omitempty"` + Fields []Field `json:"fields,omitempty"` + StartTime time.Time `json:"start_time"` + Steps []Step `json:"steps,omitempty"` + StepDisabled bool `json:"step_disabled,omitempty"` + IsEmpty bool `json:"is_empty,omitempty"` } -type step struct { - time time.Time - msg string - fields []Field - isSubTraceStart bool - isSubTraceEnd bool +type Step struct { + Time time.Time `json:"time"` + Msg string `json:"msg,omitempty"` + Fields []Field `json:"fields,omitempty"` + IsSubTraceStart bool `json:"is_sub_trace_start,omitempty"` + IsSubTraceEnd bool `json:"is_sub_trace_end,omitempty"` } func 
New(op string, lg *zap.Logger, fields ...Field) *Trace { - return &Trace{operation: op, lg: lg, startTime: time.Now(), fields: fields} + return &Trace{Operation: op, lg: lg, StartTime: time.Now(), Fields: fields} } // TODO returns a non-nil, empty Trace func TODO() *Trace { - return &Trace{isEmpty: true} + return &Trace{IsEmpty: true} } func Get(ctx context.Context) *Trace { @@ -88,43 +73,43 @@ func Get(ctx context.Context) *Trace { } func (t *Trace) GetStartTime() time.Time { - return t.startTime + return t.StartTime } func (t *Trace) SetStartTime(time time.Time) { - t.startTime = time + t.StartTime = time } func (t *Trace) InsertStep(at int, time time.Time, msg string, fields ...Field) { - newStep := step{time: time, msg: msg, fields: fields} - if at < len(t.steps) { - t.steps = append(t.steps[:at+1], t.steps[at:]...) - t.steps[at] = newStep + newStep := Step{Time: time, Msg: msg, Fields: fields} + if at < len(t.Steps) { + t.Steps = append(t.Steps[:at+1], t.Steps[at:]...) + t.Steps[at] = newStep } else { - t.steps = append(t.steps, newStep) + t.Steps = append(t.Steps, newStep) } } -// StartSubTrace adds step to trace as a start sign of sublevel trace -// All steps in the subtrace will log out the input fields of this function +// StartSubTrace adds Step to trace as a start sign of sublevel trace +// All Steps in the subtrace will log out the input Fields of this function func (t *Trace) StartSubTrace(fields ...Field) { - t.steps = append(t.steps, step{fields: fields, isSubTraceStart: true}) + t.Steps = append(t.Steps, Step{Fields: fields, IsSubTraceStart: true}) } -// StopSubTrace adds step to trace as a end sign of sublevel trace -// All steps in the subtrace will log out the input fields of this function +// StopSubTrace adds Step to trace as a end sign of sublevel trace +// All Steps in the subtrace will log out the input Fields of this function func (t *Trace) StopSubTrace(fields ...Field) { - t.steps = append(t.steps, step{fields: fields, isSubTraceEnd: 
true}) + t.Steps = append(t.Steps, Step{Fields: fields, IsSubTraceEnd: true}) } -// Step adds step to trace +// Step adds Step to trace func (t *Trace) Step(msg string, fields ...Field) { - if !t.stepDisabled { - t.steps = append(t.steps, step{time: time.Now(), msg: msg, fields: fields}) + if !t.StepDisabled { + t.Steps = append(t.Steps, Step{Time: time.Now(), Msg: msg, Fields: fields}) } } -// StepWithFunction will measure the input function as a single step +// StepWithFunction 将测量输入函数作为一个单一的步骤 func (t *Trace) StepWithFunction(f func(), msg string, fields ...Field) { t.disableStep() f() @@ -135,106 +120,27 @@ func (t *Trace) StepWithFunction(f func(), msg string, fields ...Field) { func (t *Trace) AddField(fields ...Field) { for _, f := range fields { if !t.updateFieldIfExist(f) { - t.fields = append(t.fields, f) + t.Fields = append(t.Fields, f) } } } -func (t *Trace) IsEmpty() bool { - return t.isEmpty -} - -// Log dumps all steps in the Trace -func (t *Trace) Log() { - t.LogWithStepThreshold(0) -} - -// LogIfLong dumps logs if the duration is longer than threshold -func (t *Trace) LogIfLong(threshold time.Duration) { - if time.Since(t.startTime) > threshold { - stepThreshold := threshold / time.Duration(len(t.steps)+1) - t.LogWithStepThreshold(stepThreshold) - } -} - -// LogAllStepsIfLong dumps all logs if the duration is longer than threshold -func (t *Trace) LogAllStepsIfLong(threshold time.Duration) { - if time.Since(t.startTime) > threshold { - t.LogWithStepThreshold(0) - } -} - -// LogWithStepThreshold only dumps step whose duration is longer than step threshold -func (t *Trace) LogWithStepThreshold(threshold time.Duration) { - msg, fs := t.logInfo(threshold) - if t.lg != nil { - t.lg.Info(msg, fs...) 
- } -} - -func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) { - endTime := time.Now() - totalDuration := endTime.Sub(t.startTime) - traceNum := rand.Int31() - msg := fmt.Sprintf("trace[%d] %s", traceNum, t.operation) - - var steps []string - lastStepTime := t.startTime - for i := 0; i < len(t.steps); i++ { - step := t.steps[i] - // add subtrace common fields which defined at the beginning to each sub-steps - if step.isSubTraceStart { - for j := i + 1; j < len(t.steps) && !t.steps[j].isSubTraceEnd; j++ { - t.steps[j].fields = append(step.fields, t.steps[j].fields...) - } - continue - } - // add subtrace common fields which defined at the end to each sub-steps - if step.isSubTraceEnd { - for j := i - 1; j >= 0 && !t.steps[j].isSubTraceStart; j-- { - t.steps[j].fields = append(step.fields, t.steps[j].fields...) - } - continue - } - } - for i := 0; i < len(t.steps); i++ { - step := t.steps[i] - if step.isSubTraceStart || step.isSubTraceEnd { - continue - } - stepDuration := step.time.Sub(lastStepTime) - if stepDuration > threshold { - steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)", - traceNum, step.msg, writeFields(step.fields), stepDuration)) - } - lastStepTime = step.time - } - - fs := []zap.Field{zap.String("detail", writeFields(t.fields)), - zap.Duration("duration", totalDuration), - zap.Time("start", t.startTime), - zap.Time("end", endTime), - zap.Strings("steps", steps), - zap.Int("step_count", len(steps))} - return msg, fs -} - func (t *Trace) updateFieldIfExist(f Field) bool { - for i, v := range t.fields { + for i, v := range t.Fields { if v.Key == f.Key { - t.fields[i].Value = f.Value + t.Fields[i].Value = f.Value return true } } return false } -// disableStep sets the flag to prevent the trace from adding steps +// disableStep sets the flag to prevent the trace from adding Steps func (t *Trace) disableStep() { - t.stepDisabled = true + t.StepDisabled = true } -// enableStep re-enable the trace to add steps +// 
enableStep re-enable the trace to add Steps func (t *Trace) enableStep() { - t.stepDisabled = false + t.StepDisabled = false } diff --git a/pkg/traceutil/trace_test.go b/pkg/traceutil/trace_test.go deleted file mode 100644 index d56fe295306..00000000000 --- a/pkg/traceutil/trace_test.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2019 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traceutil - -import ( - "bytes" - "context" - "fmt" - "os" - "path/filepath" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/logutil" -) - -func TestGet(t *testing.T) { - traceForTest := &Trace{operation: "Test"} - tests := []struct { - name string - inputCtx context.Context - outputTrace *Trace - }{ - { - name: "When the context does not have trace", - inputCtx: context.TODO(), - outputTrace: TODO(), - }, - { - name: "When the context has trace", - inputCtx: context.WithValue(context.Background(), TraceKey, traceForTest), - outputTrace: traceForTest, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - trace := Get(tt.inputCtx) - if trace == nil { - t.Errorf("Expected %v; Got nil", tt.outputTrace) - } - if trace.operation != tt.outputTrace.operation { - t.Errorf("Expected %v; Got %v", tt.outputTrace, trace) - } - }) - } -} - -func TestCreate(t *testing.T) { - var ( - op = "Test" - steps = []string{"Step1, Step2"} - fields = []Field{ - {"traceKey1", "traceValue1"}, - {"traceKey2", "traceValue2"}, - } - stepFields = 
[]Field{ - {"stepKey1", "stepValue2"}, - {"stepKey2", "stepValue2"}, - } - ) - - trace := New(op, nil, fields[0], fields[1]) - if trace.operation != op { - t.Errorf("Expected %v; Got %v", op, trace.operation) - } - for i, f := range trace.fields { - if f.Key != fields[i].Key { - t.Errorf("Expected %v; Got %v", fields[i].Key, f.Key) - } - if f.Value != fields[i].Value { - t.Errorf("Expected %v; Got %v", fields[i].Value, f.Value) - } - } - - for i, v := range steps { - trace.Step(v, stepFields[i]) - } - - for i, v := range trace.steps { - if steps[i] != v.msg { - t.Errorf("Expected %v; Got %v", steps[i], v.msg) - } - if stepFields[i].Key != v.fields[0].Key { - t.Errorf("Expected %v; Got %v", stepFields[i].Key, v.fields[0].Key) - } - if stepFields[i].Value != v.fields[0].Value { - t.Errorf("Expected %v; Got %v", stepFields[i].Value, v.fields[0].Value) - } - } -} - -func TestLog(t *testing.T) { - tests := []struct { - name string - trace *Trace - fields []Field - expectedMsg []string - }{ - { - name: "When dump all logs", - trace: &Trace{ - operation: "Test", - startTime: time.Now().Add(-100 * time.Millisecond), - steps: []step{ - {time: time.Now().Add(-80 * time.Millisecond), msg: "msg1"}, - {time: time.Now().Add(-50 * time.Millisecond), msg: "msg2"}, - }, - }, - expectedMsg: []string{ - "msg1", "msg2", - }, - }, - { - name: "When trace has fields", - trace: &Trace{ - operation: "Test", - startTime: time.Now().Add(-100 * time.Millisecond), - steps: []step{ - { - time: time.Now().Add(-80 * time.Millisecond), - msg: "msg1", - fields: []Field{{"stepKey1", "stepValue1"}}, - }, - { - time: time.Now().Add(-50 * time.Millisecond), - msg: "msg2", - fields: []Field{{"stepKey2", "stepValue2"}}, - }, - }, - }, - fields: []Field{ - {"traceKey1", "traceValue1"}, - {"count", 1}, - }, - expectedMsg: []string{ - "Test", - "msg1", "msg2", - "traceKey1:traceValue1", "count:1", - "stepKey1:stepValue1", "stepKey2:stepValue2", - "\"step_count\":2", - }, - }, - { - name: "When trace has 
subtrace", - trace: &Trace{ - operation: "Test", - startTime: time.Now().Add(-100 * time.Millisecond), - steps: []step{ - { - time: time.Now().Add(-80 * time.Millisecond), - msg: "msg1", - fields: []Field{{"stepKey1", "stepValue1"}}, - }, - { - fields: []Field{{"beginSubTrace", "true"}}, - isSubTraceStart: true, - }, - { - time: time.Now().Add(-50 * time.Millisecond), - msg: "submsg", - fields: []Field{{"subStepKey", "subStepValue"}}, - }, - { - fields: []Field{{"endSubTrace", "true"}}, - isSubTraceEnd: true, - }, - { - time: time.Now().Add(-30 * time.Millisecond), - msg: "msg2", - fields: []Field{{"stepKey2", "stepValue2"}}, - }, - }, - }, - fields: []Field{ - {"traceKey1", "traceValue1"}, - {"count", 1}, - }, - expectedMsg: []string{ - "Test", - "msg1", "msg2", "submsg", - "traceKey1:traceValue1", "count:1", - "stepKey1:stepValue1", "stepKey2:stepValue2", "subStepKey:subStepValue", - "beginSubTrace:true", "endSubTrace:true", - "\"step_count\":3", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano())) - defer os.RemoveAll(logPath) - - lcfg := logutil.DefaultZapLoggerConfig - lcfg.OutputPaths = []string{logPath} - lcfg.ErrorOutputPaths = []string{logPath} - lg, _ := lcfg.Build() - - for _, f := range tt.fields { - tt.trace.AddField(f) - } - tt.trace.lg = lg - tt.trace.Log() - data, err := os.ReadFile(logPath) - if err != nil { - t.Fatal(err) - } - - for _, msg := range tt.expectedMsg { - if !bytes.Contains(data, []byte(msg)) { - t.Errorf("Expected to find %v in log", msg) - } - } - }) - } -} - -func TestLogIfLong(t *testing.T) { - tests := []struct { - name string - threshold time.Duration - trace *Trace - expectedMsg []string - }{ - { - name: "When the duration is smaller than threshold", - threshold: 200 * time.Millisecond, - trace: &Trace{ - operation: "Test", - startTime: time.Now().Add(-100 * time.Millisecond), - steps: []step{ - {time: 
time.Now().Add(-50 * time.Millisecond), msg: "msg1"}, - {time: time.Now(), msg: "msg2"}, - }, - }, - expectedMsg: []string{}, - }, - { - name: "When the duration is longer than threshold", - threshold: 50 * time.Millisecond, - trace: &Trace{ - operation: "Test", - startTime: time.Now().Add(-100 * time.Millisecond), - steps: []step{ - {time: time.Now().Add(-50 * time.Millisecond), msg: "msg1"}, - {time: time.Now(), msg: "msg2"}, - }, - }, - expectedMsg: []string{ - "msg1", "msg2", - }, - }, - { - name: "When not all steps are longer than step threshold", - threshold: 50 * time.Millisecond, - trace: &Trace{ - operation: "Test", - startTime: time.Now().Add(-100 * time.Millisecond), - steps: []step{ - {time: time.Now(), msg: "msg1"}, - {time: time.Now(), msg: "msg2"}, - }, - }, - expectedMsg: []string{ - "msg1", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano())) - defer os.RemoveAll(logPath) - - lcfg := logutil.DefaultZapLoggerConfig - lcfg.OutputPaths = []string{logPath} - lcfg.ErrorOutputPaths = []string{logPath} - lg, _ := lcfg.Build() - - tt.trace.lg = lg - tt.trace.LogIfLong(tt.threshold) - data, err := os.ReadFile(logPath) - if err != nil { - t.Fatal(err) - } - for _, msg := range tt.expectedMsg { - if !bytes.Contains(data, []byte(msg)) { - t.Errorf("Expected to find %v in log", msg) - } - } - }) - } -} diff --git a/pkg/wait/wait.go b/pkg/wait/wait.go index a3e2aec7cb5..1cfc5c0c4bb 100644 --- a/pkg/wait/wait.go +++ b/pkg/wait/wait.go @@ -28,15 +28,10 @@ const ( defaultListElementLength = 64 ) -// Wait is an interface that provides the ability to wait and trigger events that -// are associated with IDs. +// Wait 是一个接口提供等待和触发与ID相关的事件的能力. type Wait interface { - // Register waits returns a chan that waits on the given ID. - // The chan will be triggered when Trigger is called with - // the same ID. 
- Register(id uint64) <-chan interface{} - // Trigger triggers the waiting chans with the given ID. - Trigger(id uint64, x interface{}) + Register(id uint64) <-chan interface{} // waits返回一个在给定ID上等待的chan.当Trigger以相同的ID被调用时这个chan将被触发. + Trigger(id uint64, x interface{}) // 触发具有给定ID的等待通道.设置结果 IsRegistered(id uint64) bool } @@ -44,15 +39,15 @@ type list struct { e []listElement } +// 64个槽位 接收 响应 type listElement struct { l sync.RWMutex - m map[uint64]chan interface{} + m map[uint64]chan interface{} // 每个请求,以及待返回的channel } -// New creates a Wait. func New() Wait { res := list{ - e: make([]listElement, defaultListElementLength), + e: make([]listElement, defaultListElementLength), // 64 } for i := 0; i < len(res.e); i++ { res.e[i].m = make(map[uint64]chan interface{}) @@ -61,14 +56,15 @@ func New() Wait { } func (w *list) Register(id uint64) <-chan interface{} { - idx := id % defaultListElementLength + idx := id % defaultListElementLength // 64 newCh := make(chan interface{}, 1) w.e[idx].l.Lock() defer w.e[idx].l.Unlock() + // 判断 请求存不存在 if _, ok := w.e[idx].m[id]; !ok { w.e[idx].m[id] = newCh } else { - log.Panicf("dup id %x", id) + log.Panicf("重复的 id %x", id) } return newCh } @@ -106,5 +102,5 @@ func (w *waitWithResponse) Register(id uint64) <-chan interface{} { } func (w *waitWithResponse) Trigger(id uint64, x interface{}) {} func (w *waitWithResponse) IsRegistered(id uint64) bool { - panic("waitWithResponse.IsRegistered() shouldn't be called") + panic("waitWithResponse.IsRegistered() 不应该被调用") } diff --git a/pkg/wait/wait_test.go b/pkg/wait/wait_test.go deleted file mode 100644 index 54395cb360c..00000000000 --- a/pkg/wait/wait_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wait - -import ( - "fmt" - "testing" - "time" -) - -func TestWait(t *testing.T) { - const eid = 1 - wt := New() - ch := wt.Register(eid) - wt.Trigger(eid, "foo") - v := <-ch - if g, w := fmt.Sprintf("%v (%T)", v, v), "foo (string)"; g != w { - t.Errorf("<-ch = %v, want %v", g, w) - } - - if g := <-ch; g != nil { - t.Errorf("unexpected non-nil value: %v (%T)", g, g) - } -} - -func TestRegisterDupPanic(t *testing.T) { - const eid = 1 - wt := New() - ch1 := wt.Register(eid) - - panicC := make(chan struct{}, 1) - - func() { - defer func() { - if r := recover(); r != nil { - panicC <- struct{}{} - } - }() - wt.Register(eid) - }() - - select { - case <-panicC: - case <-time.After(1 * time.Second): - t.Errorf("failed to receive panic") - } - - wt.Trigger(eid, "foo") - <-ch1 -} - -func TestTriggerDupSuppression(t *testing.T) { - const eid = 1 - wt := New() - ch := wt.Register(eid) - wt.Trigger(eid, "foo") - wt.Trigger(eid, "bar") - - v := <-ch - if g, w := fmt.Sprintf("%v (%T)", v, v), "foo (string)"; g != w { - t.Errorf("<-ch = %v, want %v", g, w) - } - - if g := <-ch; g != nil { - t.Errorf("unexpected non-nil value: %v (%T)", g, g) - } -} - -func TestIsRegistered(t *testing.T) { - wt := New() - - wt.Register(0) - wt.Register(1) - wt.Register(2) - - for i := uint64(0); i < 3; i++ { - if !wt.IsRegistered(i) { - t.Errorf("event ID %d isn't registered", i) - } - } - - if wt.IsRegistered(4) { - t.Errorf("event ID 4 shouldn't be registered") - } - - wt.Trigger(0, "foo") - if wt.IsRegistered(0) { - t.Errorf("event ID 0 is already triggered, shouldn't 
be registered") - } -} diff --git a/pkg/wait/wait_time_test.go b/pkg/wait/wait_time_test.go deleted file mode 100644 index 26164c4acee..00000000000 --- a/pkg/wait/wait_time_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wait - -import ( - "testing" - "time" -) - -func TestWaitTime(t *testing.T) { - wt := NewTimeList() - ch1 := wt.Wait(1) - wt.Trigger(2) - select { - case <-ch1: - default: - t.Fatalf("cannot receive from ch as expected") - } - - ch2 := wt.Wait(4) - wt.Trigger(3) - select { - case <-ch2: - t.Fatalf("unexpected to receive from ch2") - default: - } - wt.Trigger(4) - select { - case <-ch2: - default: - t.Fatalf("cannot receive from ch2 as expected") - } - - select { - // wait on a triggered deadline - case <-wt.Wait(4): - default: - t.Fatalf("unexpected blocking when wait on triggered deadline") - } -} - -func TestWaitTestStress(t *testing.T) { - chs := make([]<-chan struct{}, 0) - wt := NewTimeList() - for i := 0; i < 10000; i++ { - chs = append(chs, wt.Wait(uint64(i))) - } - wt.Trigger(10000 + 1) - - for _, ch := range chs { - select { - case <-ch: - case <-time.After(time.Second): - t.Fatalf("cannot receive from ch as expected") - } - } -} - -func BenchmarkWaitTime(b *testing.B) { - wt := NewTimeList() - for i := 0; i < b.N; i++ { - wt.Wait(1) - } -} - -func BenchmarkTriggerAnd10KWaitTime(b *testing.B) { - for i := 0; i < b.N; i++ { - wt := NewTimeList() - 
for j := 0; j < 10000; j++ { - wt.Wait(uint64(j)) - } - wt.Trigger(10000 + 1) - } -} diff --git a/raft/change.go b/raft/change.go new file mode 100644 index 00000000000..41665e6442d --- /dev/null +++ b/raft/change.go @@ -0,0 +1,74 @@ +package raft + +import ( + "github.com/ls-2018/etcd_cn/raft/confchange" + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// 应用变更配置 +func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { + changer := confchange.Changer{ + Tracker: r.prstrack, + LastIndex: r.raftLog.lastIndex(), + } + var cfg tracker.Config + var prs tracker.ProgressMap + var err error + if cc.LeaveJoint() { // 判断是不是一个空的ConfChangeV2,离开联合共识 + cfg, prs, err = changer.LeaveJoint() + } else if autoLeave, ok := cc.EnterJoint(); ok { // 进入联合共识 + // change >1 或 !ConfChangeTransitionAuto + cfg, prs, err = changer.EnterJoint(autoLeave, cc.Changes...) + } else { + cfg, prs, err = changer.Simple(cc.Changes...) // cfg是深拷贝,prs是浅拷贝;获取当前的配置,确保配置和进度是相互兼容的 + } + + if err != nil { + panic(err) + } + + return r.switchToConfig(cfg, prs) +} + +// switchToConfig 重新配置这个节点以使用所提供的配置.它更新内存中的状态,并在必要时进行额外的操作, +// 如对删除节点或改变的法定人数作出反应.要求.输入通常来自于恢复一个ConfState或应用一个ConfChange. +func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.ConfState { + // cfg是深拷贝,prs是浅拷贝;获取当前的配置,确保配置和进度是相互兼容的 + r.prstrack.Config = cfg + r.prstrack.Progress = prs + + r.logger.Infof("%x 切换配置 %s", r.id, r.prstrack.Config) + cs := r.prstrack.ConfState() // 当前集群配置的信息汇总 + pr, ok := r.prstrack.Progress[r.id] // 本机还在不在里边 + + // 更新localNode本身是否是学习者,当localNode被移除时重置为false. + r.isLearner = ok && pr.IsLearner + + if (!ok || r.isLearner) && r.state == StateLeader { + // leader 降级或者 移除 + return cs + } + + // 其余的步骤只有在这个localNode是领导者并且还有其他节点的情况下才有意义. 
+ if r.state != StateLeader || len(cs.Voters) == 0 { + return cs + } + + // bcastAppend、maybeSendAppend 都是异步发送的 + if r.maybeCommit() { // 节点配置变更,同步到了大多数节点上 + r.bcastAppend() // 广播消息,触发follower配置变更的落盘 + } else { + // 否则,仍然探测新添加的副本;没有理由让他们等待心跳间隔(或下一个传入的提议). + r.prstrack.Visit(func(id uint64, pr *tracker.Progress) { + r.maybeSendAppend(id, false /* sendIfEmpty */) // 再次发送消息 + }) + } + // 如果leader被免职或降职,则中止领导权转移. + if _, tOK := r.prstrack.Config.Voters.IDs()[r.leadTransferee]; !tOK && r.leadTransferee != 0 { + // leader转移时;leader转移的目标 不是可投票节点, 要停止领导者转移 + r.abortLeaderTransfer() + } + + return cs +} diff --git a/raft/confchange/confchange.go b/raft/confchange/confchange.go new file mode 100644 index 00000000000..cca3837ced7 --- /dev/null +++ b/raft/confchange/confchange.go @@ -0,0 +1,411 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + "errors" + "fmt" + "strings" + + "github.com/ls-2018/etcd_cn/raft/quorum" + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// Changer facilitates configuration changes. It exposes methods to handle +// simple and joint consensus while performing the proper validation that allows +// refusing invalid configuration changes before they affect the active +// configuration. 
+type Changer struct { + Tracker tracker.ProgressTracker + LastIndex uint64 +} + +// EnterJoint verifies that the outgoing (=right) majority config of the joint +// config is empty and initializes it with a copy of the incoming (=left) +// majority config. That is, it transitions from +// +// (1 2 3)&&() +// to +// (1 2 3)&&(1 2 3). +// +// The supplied changes are then applied to the incoming majority config, +// resulting in a joint configuration that in terms of the Raft thesis[1] +// (Section 4.3) corresponds to `C_{new,old}`. +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("config is already joint") + return c.err(err) + } + if len(incoming(cfg.Voters)) == 0 { + // We allow adding nodes to an empty config for convenience (testing and + // bootstrap), but you can't enter a joint state. + err := errors.New("can't make a zero-voter config joint") + return c.err(err) + } + // Clear the outgoing config. + *outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{} + // Copy incoming to outgoing. + for id := range incoming(cfg.Voters) { + outgoing(cfg.Voters)[id] = struct{}{} + } + + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + cfg.AutoLeave = autoLeave + return checkAndReturn(cfg, prs) +} + +// LeaveJoint transitions out of a joint configuration. It is an error to call +// this method if the configuration is not joint, i.e. if the outgoing majority +// config Voters[1] is empty. +// +// The outgoing majority config of the joint configuration will be removed, +// that is, the incoming config is promoted as the sole decision maker. In the +// notation of the Raft thesis[1] (Section 4.3), this method transitions from +// `C_{new,old}` into `C_new`. 
+// +// At the same time, any staged learners (LearnersNext) the addition of which +// was held back by an overlapping voter in the former outgoing config will be +// inserted into Learners. +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if !joint(cfg) { + err := errors.New("can't leave a non-joint config") + return c.err(err) + } + if len(outgoing(cfg.Voters)) == 0 { + err := fmt.Errorf("configuration is not joint: %v", cfg) + return c.err(err) + } + for id := range cfg.LearnersNext { + nilAwareAdd(&cfg.Learners, id) + prs[id].IsLearner = true + } + cfg.LearnersNext = nil + + for id := range outgoing(cfg.Voters) { + _, isVoter := incoming(cfg.Voters)[id] + _, isLearner := cfg.Learners[id] + + if !isVoter && !isLearner { + delete(prs, id) + } + } + *outgoingPtr(&cfg.Voters) = nil + cfg.AutoLeave = false + + return checkAndReturn(cfg, prs) +} + +// ------------------------------------ OVER -------------------------------------------------- + +// 是不是进入联合共识 +func joint(cfg tracker.Config) bool { + return len(outgoing(cfg.Voters)) > 0 +} + +// 节点未发生变更时,节点信息存储在JointConfig[0]即incoming的指向的集合中. +// 当EnterJoint时,将老节点拷贝至outgoing中,变更节点拷贝至incoming中. +// LeaveJoint时,删除下线的节点,合并在线的节点并合并至incoming中,完成节点变更过程. +func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] } +func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] } +func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] } + +// Describe 打印配置变更 +func Describe(ccs ...pb.ConfChangeSingle) string { + var buf strings.Builder + for _, cc := range ccs { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID) + } + return buf.String() +} + +// checkInvariants 确保配置和进度是相互兼容的. 
+func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { + // cfg是深拷贝,prs是浅拷贝' + for _, ids := range []map[uint64]struct{}{ + cfg.Voters.IDs(), // JointConfig里的所有节点ID + cfg.Learners, // + cfg.LearnersNext, // 即将变成learner的几点 + } { + for id := range ids { + if _, ok := prs[id]; !ok { + return fmt.Errorf("没有该节点的进度信息 %d", id) + } + } + } + + // LearnersNext 不能直接添加, 只能由leader、follower 变成 ; 即降级得到的. + for id := range cfg.LearnersNext { + // 之前的旧配置中必须存在它 + if _, ok := outgoing(cfg.Voters)[id]; !ok { + return fmt.Errorf("%d 是在LearnersNext中,但不是在Voters中.[1]", id) + } + // 是不是已经被标记位了learner + if prs[id].IsLearner { + return fmt.Errorf("%d 是在LearnersNext中,但已经被标记为learner.", id) + } + } + // 反之,learner和投票者根本没有交集. + for id := range cfg.Learners { + if _, ok := outgoing(cfg.Voters)[id]; ok { + return fmt.Errorf("%d 在 Learners 、 Voters[1]", id) + } + if _, ok := incoming(cfg.Voters)[id]; ok { + return fmt.Errorf("%d 在 Learners 、 Voters[0]", id) + } + if !prs[id].IsLearner { + return fmt.Errorf("%d 是 Learners, 但没有被标记为learner", id) + } + } + + if !joint(cfg) { // 没有进入共识状态 + // 我们强制规定,空map是nil而不是0. + if outgoing(cfg.Voters) != nil { + return fmt.Errorf("cfg.Voters[1]必须是nil 当没有进入联合共识") + } + if cfg.LearnersNext != nil { + return fmt.Errorf("cfg.LearnersNext必须是nil 当没有进入联合共识") + } + if cfg.AutoLeave { + return fmt.Errorf("AutoLeave必须是false 当没有进入联合共识") + } + } + return nil +} + +// checkAndCopy 复制跟踪器的配置和进度Map,并返回这些副本. +func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) { + cfg := c.Tracker.Config.Clone() // 现有的集群配置 + var _ tracker.ProgressMap = c.Tracker.Progress + prs := tracker.ProgressMap{} // 新的 + + for id, pr := range c.Tracker.Progress { + // pr是一个指针 + // 一个浅层拷贝就足够了,因为我们只对Learner字段进行变异. + ppr := *pr + prs[id] = &ppr + } + return checkAndReturn(cfg, prs) // cfg是深拷贝,prs是浅拷贝;确保配置和进度是相互兼容的 +} + +// checkAndReturn 在输入上调用checkInvariants,并返回结果错误或输入. 
+func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) { + // cfg是深拷贝,prs是浅拷贝 + if err := checkInvariants(cfg, prs); err != nil { // 确保配置和进度是相互兼容的 + return tracker.Config{}, tracker.ProgressMap{}, err + } + return cfg, prs, nil +} + +// initProgress 初始化给定Follower或Learner的Progress,该节点不能以任何一种形式存在,否则异常. +// ID是Peer的ID,match和next用来初始化Progress的. +func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) { + if !isLearner { + // 等同于 cfg.Voters[0][id] = struct{}{} + incoming(cfg.Voters)[id] = struct{}{} + } else { + nilAwareAdd(&cfg.Learners, id) + } + // Follower可以参与选举和投票,Learner不可以,只要知道这一点就可以了.无论是Follower还是 + // Learner都会有一个Progress,但是他们再次进行了分组管理. + prs[id] = &tracker.Progress{ + // 初始化Progress需要给定Next、Match、Inflights容量以及是否是learner,其他也没啥 + // 此处可以剧透一下,raft的代码初始化的时候Match=0,Next=1. + Next: c.LastIndex, + Match: 0, + Inflights: tracker.NewInflights(c.Tracker.MaxInflight), + IsLearner: isLearner, + // 当一个节点第一次被添加时,我们应该把它标记为最近活跃.否则,如果CheckQuorum在被调用之前,可能会导致我们停止工作. + // 在被添加的节点有机会与我们通信之前调用它,可能会导致我们降级. + RecentActive: true, + } +} + +// nilAwareAdd 填充一个map条目,如果需要的话,创建map +func nilAwareAdd(m *map[uint64]struct{}, id uint64) { + if *m == nil { + *m = map[uint64]struct{}{} + } + (*m)[id] = struct{}{} +} + +// nilAwareDelete 从一个map中删除,如果之后map是空的,则将其置空. +func nilAwareDelete(m *map[uint64]struct{}, id uint64) { + if *m == nil { + return + } + delete(*m, id) + if len(*m) == 0 { + *m = nil + } +} + +// err 返回零值和一个错误. +func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) { + return tracker.Config{}, nil, err +} + +// makeVoter 增加或提升给定的ID,使其成为入选的多数人配置中的选民. 
+func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + // cfg是深拷贝,prs是浅拷贝;获取当前的配置,确保配置和进度是相互兼容的 + pr := prs[id] + if pr == nil { + // 添加节点 + c.initProgress(cfg, prs, id, false /* isLearner */) + return + } + // 提升角色 [learner->follower] + pr.IsLearner = false + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + incoming(cfg.Voters)[id] = struct{}{} +} + +// remove 从 voter[0]、learner、learnersNext 中删除 +func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + if _, ok := prs[id]; !ok { + return + } + + delete(incoming(cfg.Voters), id) + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + + // 如果不再vote[1]中 删除;如果在vote[1]中,之后在leaveJoint处理 + if _, onRight := outgoing(cfg.Voters)[id]; !onRight { + // cfg.Voters[1][id] + delete(prs, id) + } +} + +// makeLearner 添加learner +func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + // 新增加一个learner + c.initProgress(cfg, prs, id, true /* isLearner */) + return + } + if pr.IsLearner { + return + } + // // 从 voters、learner、learnersNext 中删除 + c.remove(cfg, prs, id) // 从 voter[0]、learner、learnersNext 中删除 + prs[id] = pr + // 如果我们不能直接将learner添加到learner中,也就是说,该peer在veto[1]中,是降级来的 + // 则使用 LearnersNext. + // 在LeaveJoint()中,LearnersNext将被转变成一个learner. + // 否则,立即添加一个普通的learner. + if _, onRight := outgoing(cfg.Voters)[id]; onRight { + // 降级 + nilAwareAdd(&cfg.LearnersNext, id) + } else { + // 新增 + pr.IsLearner = true + nilAwareAdd(&cfg.Learners, id) + } +} + +// apply 一个对配置的改变.按照惯例,对voter的更改总是对 Voters[0] 进行. +// [变更节点集合,老节点集合] 或 [节点、nil] +// Voters[1]要么是空的,要么在联合状态下保留传出的多数配置. 
+func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error { + // cfg是深拷贝,prs是浅拷贝;获取当前的配置,确保配置和进度是相互兼容的 + for _, cc := range ccs { + if cc.NodeID == 0 { + continue + } + // 只需要 节点 id、角色; peer url 不需要 + // 更新cfg prs 数据 + switch cc.Type { + case pb.ConfChangeAddNode: + c.makeVoter(cfg, prs, cc.NodeID) + case pb.ConfChangeAddLearnerNode: + c.makeLearner(cfg, prs, cc.NodeID) + case pb.ConfChangeRemoveNode: + c.remove(cfg, prs, cc.NodeID) + case pb.ConfChangeUpdateNode: + default: + return fmt.Errorf("未知的conf type %d", cc.Type) + } + } + if len(incoming(cfg.Voters)) == 0 { + return errors.New("删除了所有选民") + } + return nil +} + +// symdiff 返回二者的差异数 len(a-b)+len(b-a):描述的不够准确 +func symdiff(l, r map[uint64]struct{}) int { + var n int + // [[1,2],[1,2]] + pairs := [][2]quorum.MajorityConfig{ + {l, r}, // count elems in l but not in r + {r, l}, // count elems in r but not in l + } + for _, p := range pairs { + for id := range p[0] { + if _, ok := p[1][id]; !ok { + n++ + } + } + } + return n +} + +// Simple 进行一系列的配置改变,(总的来说)使传入的多数配置Voters[0]最多变化一个. +// 如果不是这样,如果响应数:quorum为零,或者如果配置处于联合状态(即如果有一个传出的配置),这个方法将返回一个错误. 
+func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() // cfg是深拷贝,prs是浅拷贝;获取当前的配置,确保配置和进度是相互兼容的 + if err != nil { + return c.err(err) + } + if joint(cfg) { // 是不是进入联合共识 + err := errors.New("不能在联合配置中应用简单的配置更改") + return c.err(err) + } + if err := c.apply(&cfg, prs, ccs...); err != nil { // 只更新cfg、prs记录 + return c.err(err) + } + // incoming voters[0] + // prs = c.Tracker.Progress cfg = c.Tracker.Config + _ = c.Tracker.Voters // quorum.JointConfig + // c.Tracker.Config.Voters 没有区别 Config是匿名结构体 c.Tracker.Voters + + if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 { + // [12,123] 一般是 1 + // 存在不同的个数 + return tracker.Config{}, nil, errors.New("多个选民在没有进入联合配置的情况下发生变化") + } + + return checkAndReturn(cfg, prs) // 内容校验 +} diff --git a/raft/confchange/restore.go b/raft/confchange/restore.go new file mode 100644 index 00000000000..d9c3621acca --- /dev/null +++ b/raft/confchange/restore.go @@ -0,0 +1,149 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package confchange + +import ( + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// toConfChangeSingle translates a conf state into 1) a slice of operations creating +// first the config that will become the outgoing one, and then the incoming one, and +// b) another slice that, when applied to the config resulted from 1), represents the +// ConfState. +func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) { + // Example to follow along this code: + // voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4) + // + // This means that before entering the joint config, the configuration + // had voters (1 2 4 6) and perhaps some learners that are already gone. + // The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6) + // are no longer voters; however 4 is poised to become a learner upon leaving + // the joint state. + // We can't tell whether 5 was a learner before entering the joint config, + // but it doesn't matter (we'll pretend that it wasn't). + // + // The code below will construct + // outgoing = add 1; add 2; add 4; add 6 + // incoming = remove 1; remove 2; remove 4; remove 6 + // add 1; add 2; add 3; + // add-learner 5; + // add-learner 4; + // + // So, when starting with an empty config, after applying 'outgoing' we have + // + // quorum=(1 2 4 6) + // + // From which we enter a joint state via 'incoming' + // + // quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4) + // + // as desired. + + for _, id := range cs.VotersOutgoing { + // If there are outgoing voters, first add them one by one so that the + // (non-joint) config has them all. + out = append(out, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + } + + // We're done constructing the outgoing slice, now on to the incoming one + // (which will apply on top of the config created by the outgoing slice). 
+ + // First, we'll remove all of the outgoing voters. + for _, id := range cs.VotersOutgoing { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeRemoveNode, + NodeID: id, + }) + } + // Then we'll add the incoming voters and learners. + for _, id := range cs.Voters { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + } + for _, id := range cs.Learners { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + // Same for LearnersNext; these are nodes we want to be learners but which + // are currently voters in the outgoing config. + for _, id := range cs.LearnersNext { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + return out, in +} + +func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) { + for _, op := range ops { + cfg, prs, err := op(chg) + if err != nil { + return tracker.Config{}, nil, err + } + chg.Tracker.Config = cfg + chg.Tracker.Progress = prs + } + return chg.Tracker.Config, chg.Tracker.Progress, nil +} + +// Restore 接受一个Changer(它必须代表一个空的配置),并运行一连串的变化,颁布在 ConfState中描述的配置. +// 这是愚蠢的,这需要一个Changer.通过确保Changer只需要一个ProgressMap(而不是整个追踪器)来解开这个问题, +// 在这一点上,它可以直接获取LastIndex和MaxInflight,并仅从这一点上得出结果. +func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) { + outgoing, incoming := toConfChangeSingle(cs) + + var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error) + + if len(outgoing) == 0 { + // No outgoing config, so just apply the incoming changes one by one. + for _, cc := range incoming { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + } else { + // The ConfState describes a joint configuration. 
+ // + // First, apply all of the changes of the outgoing config one by one, so + // that it temporarily becomes the incoming active config. For example, + // if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&(). + for _, cc := range outgoing { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + // Now enter the joint state, which rotates the above additions into the + // outgoing config, and adds the incoming config in. Continuing the + // example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations + // would be removing 2,3,4 and then adding in 1,2,3 while transitioning + // into a joint state. + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.EnterJoint(cs.AutoLeave, incoming...) + }) + } + + return chain(chg, ops...) +} diff --git a/raft/confchange/testdata/joint_autoleave.txt b/raft/confchange/testdata/joint_autoleave.txt new file mode 100644 index 00000000000..9ec8cb0a467 --- /dev/null +++ b/raft/confchange/testdata/joint_autoleave.txt @@ -0,0 +1,29 @@ +# Test the autoleave argument to EnterJoint. It defaults to false in the +# datadriven tests. The flag has no associated semantics in this package, +# it is simply passed through. +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=0 + +# Autoleave is reflected in the config. +enter-joint autoleave=true +v2 v3 +---- +voters=(1 2 3)&&(1) autoleave +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 +3: StateProbe match=0 next=1 + +# Can't enter-joint twice, even if autoleave changes. 
+enter-joint autoleave=false +---- +config is already joint + +leave-joint +---- +voters=(1 2 3) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 +3: StateProbe match=0 next=1 diff --git a/raft/confchange/testdata/joint_idempotency.txt b/raft/confchange/testdata/joint_idempotency.txt new file mode 100644 index 00000000000..6d1346b7895 --- /dev/null +++ b/raft/confchange/testdata/joint_idempotency.txt @@ -0,0 +1,23 @@ +# Verify that operations upon entering the joint state are idempotent, i.e. +# removing an absent node is fine, etc. + +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=0 + +enter-joint +r1 r2 r9 v2 v3 v4 v2 v3 v4 l2 l2 r4 r4 l1 l1 +---- +voters=(3)&&(1) learners=(2) learners_next=(1) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 learner +3: StateProbe match=0 next=1 + +leave-joint +---- +voters=(3) learners=(1 2) +1: StateProbe match=0 next=0 learner +2: StateProbe match=0 next=1 learner +3: StateProbe match=0 next=1 diff --git a/raft/confchange/testdata/joint_learners_next.txt b/raft/confchange/testdata/joint_learners_next.txt new file mode 100644 index 00000000000..df1da7d0c9f --- /dev/null +++ b/raft/confchange/testdata/joint_learners_next.txt @@ -0,0 +1,24 @@ +# Verify that when a voter is demoted in a joint config, it will show up in +# learners_next until the joint config is left, and only then will the progress +# turn into that of a learner, without resetting the progress. Note that this +# last fact is verified by `next`, which can tell us which "round" the progress +# was originally created in. 
+ +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=0 + +enter-joint +v2 l1 +---- +voters=(2)&&(1) learners_next=(1) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 + +leave-joint +---- +voters=(2) learners=(1) +1: StateProbe match=0 next=0 learner +2: StateProbe match=0 next=1 diff --git a/raft/confchange/testdata/joint_safety.txt b/raft/confchange/testdata/joint_safety.txt new file mode 100644 index 00000000000..75d11b199e0 --- /dev/null +++ b/raft/confchange/testdata/joint_safety.txt @@ -0,0 +1,81 @@ +leave-joint +---- +can't leave a non-joint config + +enter-joint +---- +can't make a zero-voter config joint + +enter-joint +v1 +---- +can't make a zero-voter config joint + +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=3 + +leave-joint +---- +can't leave a non-joint config + +# Can enter into joint config. +enter-joint +---- +voters=(1)&&(1) +1: StateProbe match=0 next=3 + +enter-joint +---- +config is already joint + +leave-joint +---- +voters=(1) +1: StateProbe match=0 next=3 + +leave-joint +---- +can't leave a non-joint config + +# Can enter again, this time with some ops. 
+enter-joint +r1 v2 v3 l4 +---- +voters=(2 3)&&(1) learners=(4) +1: StateProbe match=0 next=3 +2: StateProbe match=0 next=9 +3: StateProbe match=0 next=9 +4: StateProbe match=0 next=9 learner + +enter-joint +---- +config is already joint + +enter-joint +v12 +---- +config is already joint + +simple +l15 +---- +can't apply simple config change in joint config + +leave-joint +---- +voters=(2 3) learners=(4) +2: StateProbe match=0 next=9 +3: StateProbe match=0 next=9 +4: StateProbe match=0 next=9 learner + +simple +l9 +---- +voters=(2 3) learners=(4 9) +2: StateProbe match=0 next=9 +3: StateProbe match=0 next=9 +4: StateProbe match=0 next=9 learner +9: StateProbe match=0 next=14 learner diff --git a/raft/confchange/testdata/simple_idempotency.txt b/raft/confchange/testdata/simple_idempotency.txt new file mode 100644 index 00000000000..2f7ca2e247b --- /dev/null +++ b/raft/confchange/testdata/simple_idempotency.txt @@ -0,0 +1,69 @@ +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=0 + +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=0 + +simple +v2 +---- +voters=(1 2) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=2 + +simple +l1 +---- +voters=(2) learners=(1) +1: StateProbe match=0 next=0 learner +2: StateProbe match=0 next=2 + +simple +l1 +---- +voters=(2) learners=(1) +1: StateProbe match=0 next=0 learner +2: StateProbe match=0 next=2 + +simple +r1 +---- +voters=(2) +2: StateProbe match=0 next=2 + +simple +r1 +---- +voters=(2) +2: StateProbe match=0 next=2 + +simple +v3 +---- +voters=(2 3) +2: StateProbe match=0 next=2 +3: StateProbe match=0 next=7 + +simple +r3 +---- +voters=(2) +2: StateProbe match=0 next=2 + +simple +r3 +---- +voters=(2) +2: StateProbe match=0 next=2 + +simple +r4 +---- +voters=(2) +2: StateProbe match=0 next=2 diff --git a/raft/confchange/testdata/simple_promote_demote.txt b/raft/confchange/testdata/simple_promote_demote.txt new file mode 100644 index 00000000000..52369b450e3 --- /dev/null +++ 
b/raft/confchange/testdata/simple_promote_demote.txt @@ -0,0 +1,60 @@ +# Set up three voters for this test. + +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=0 + +simple +v2 +---- +voters=(1 2) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 + +simple +v3 +---- +voters=(1 2 3) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 +3: StateProbe match=0 next=2 + +# Can atomically demote and promote without a hitch. +# This is pointless, but possible. +simple +l1 v1 +---- +voters=(1 2 3) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 +3: StateProbe match=0 next=2 + +# Can demote a voter. +simple +l2 +---- +voters=(1 3) learners=(2) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 learner +3: StateProbe match=0 next=2 + +# Can atomically promote and demote the same voter. +# This is pointless, but possible. +simple +v2 l2 +---- +voters=(1 3) learners=(2) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 learner +3: StateProbe match=0 next=2 + +# Can promote a voter. 
+simple +v2 +---- +voters=(1 2 3) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 +3: StateProbe match=0 next=2 diff --git a/raft/confchange/testdata/simple_safety.txt b/raft/confchange/testdata/simple_safety.txt new file mode 100644 index 00000000000..6566c5fccf7 --- /dev/null +++ b/raft/confchange/testdata/simple_safety.txt @@ -0,0 +1,64 @@ +simple +l1 +---- +removed all voters + +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=1 + +simple +v2 l3 +---- +voters=(1 2) learners=(3) +1: StateProbe match=0 next=1 +2: StateProbe match=0 next=2 +3: StateProbe match=0 next=2 learner + +simple +r1 v5 +---- +more than one voter changed without entering joint config + +simple +r1 r2 +---- +removed all voters + +simple +v3 v4 +---- +more than one voter changed without entering joint config + +simple +l1 v5 +---- +more than one voter changed without entering joint config + +simple +l1 l2 +---- +removed all voters + +simple +l2 l3 l4 l5 +---- +voters=(1) learners=(2 3 4 5) +1: StateProbe match=0 next=1 +2: StateProbe match=0 next=2 learner +3: StateProbe match=0 next=2 learner +4: StateProbe match=0 next=8 learner +5: StateProbe match=0 next=8 learner + +simple +r1 +---- +removed all voters + +simple +r2 r3 r4 r5 +---- +voters=(1) +1: StateProbe match=0 next=1 diff --git a/raft/confchange/testdata/update.txt b/raft/confchange/testdata/update.txt new file mode 100644 index 00000000000..50a703ccf1d --- /dev/null +++ b/raft/confchange/testdata/update.txt @@ -0,0 +1,23 @@ +# Nobody cares about ConfChangeUpdateNode, but at least use it once. It is used +# by etcd as a convenient way to pass a blob through their conf change machinery +# that updates information tracked outside of raft. 
+ +simple +v1 +---- +voters=(1) +1: StateProbe match=0 next=0 + +simple +v2 u1 +---- +voters=(1 2) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 + +simple +u1 u2 u3 u1 u2 u3 +---- +voters=(1 2) +1: StateProbe match=0 next=0 +2: StateProbe match=0 next=1 diff --git a/raft/confchange/testdata/zero.txt b/raft/confchange/testdata/zero.txt new file mode 100644 index 00000000000..5e0d46fe6b6 --- /dev/null +++ b/raft/confchange/testdata/zero.txt @@ -0,0 +1,6 @@ +# NodeID zero is ignored. +simple +v1 r0 v0 l0 +---- +voters=(1) +1: StateProbe match=0 next=0 diff --git a/raft/node.go b/raft/node.go new file mode 100644 index 00000000000..935ba46b1fe --- /dev/null +++ b/raft/node.go @@ -0,0 +1,443 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "context" + "errors" + + pb "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +type SnapshotStatus int + +const ( + SnapshotFinish SnapshotStatus = 1 + SnapshotFailure SnapshotStatus = 2 +) + +var ( + emptyState = pb.HardState{} + ErrStopped = errors.New("raft: 已停止") +) + +func isHardStateEqual(a, b pb.HardState) bool { + return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit +} + +// IsEmptyHardState 判断是不是空的 +func IsEmptyHardState(st pb.HardState) bool { + return isHardStateEqual(st, emptyState) +} + +// IsEmptySnap returns true if the given Snapshot is empty. 
func IsEmptySnap(sp pb.Snapshot) bool {
	// An index of 0 means the snapshot metadata was never populated.
	return sp.Metadata.Index == 0
}

// Peer describes one initial member of the cluster.
type Peer struct {
	ID      uint64 // member ID
	Context []byte // serialized member information
}

// StartNode starts a node from scratch. It appends a ConfChangeAddNode entry
// to the initial log for every given peer. Peer wraps a node's ID; peers
// records the IDs of all nodes in the current cluster.
func StartNode(c *Config, peers []Peer) RaftNodeInterFace {
	if len(peers) == 0 {
		panic("没有给定peers;使用RestartNode代替.")
	}
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	// peers, e.g. [{"id":10276657743932975437,"peerURLs":["http://localhost:2380"],"name":"default"}]
	rn.Bootstrap(peers)

	n := newLocalNode(rn) // the local node, used to send and receive messages
	go n.run()            // start the message-processing loop

	return &n
}

// RestartNode restarts a node whose current cluster membership is recovered
// from Storage. If the caller has an existing state machine, pass in the last
// log index that was applied to it; otherwise use 0.
func RestartNode(c *Config) RaftNodeInterFace {
	rn, err := NewRawNode(c)
	if err != nil {
		panic(err)
	}
	n := newLocalNode(rn)
	go n.run()
	return &n
}

// newReady builds a Ready from the previous soft and hard state by computing
// what changed relative to those two snapshots; all other fields come
// straight from raft.
func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
	rd := Ready{
		Entries:          r.raftLog.unstableEntries(), // not yet persisted; the caller must write these to stable storage
		CommittedEntries: r.raftLog.nextEnts(),        // committed but not yet applied; handed to the application layer
		Messages:         r.msgs,                      // messages that must be sent to the other nodes over the network
		SoftState:        nil,
		HardState:        pb.HardState{},
		Snapshot:         pb.Snapshot{},
		ReadStates:       []ReadState{},
		MustSync:         false,
	}
	// Report the soft state only if it changed.
	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
		rd.SoftState = softSt
	}
	// Report the hard state only if it changed.
	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
		rd.HardState = hardSt
	}
	// Pass along a snapshot if one was received.
	if r.raftLog.unstable.snapshot != nil {
		rd.Snapshot = *r.raftLog.unstable.snapshot
	}
	if len(r.readStates) != 0 {
		rd.ReadStates = r.readStates
	}
	// Whether handling this Ready requires an fsync to force data to disk.
	rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
	return rd
}

// localNode is embedded in raftNode and implements the Node interface: one
// goroutine plus several queues; the entry point for state-machine message
// processing.
type localNode struct {
	rn         *RawNode
	propc      chan msgWithResult   // Propose queue: raftNode.Propose pushes MsgProp messages here
	recvc      chan pb.Message      // message queue for everything other than Propose messages
	confc      chan pb.ConfChangeV2 // accepts configuration-change requests
	confstatec chan pb.ConfState    // returns the resulting ConfState after a configuration change
	readyc     chan Ready           // queue of Ready batches for the user to apply
	advancec   chan struct{}        // the user signals here after each apply, telling raft it may build the next Ready
	tickc      chan struct{}        // tick queue, drives heartbeats and election timeouts
	done       chan struct{}        //
	stop       chan struct{}        // supports the Stop API
	status     chan chan Status     //
	logger     Logger               // runtime logging
}

func newLocalNode(rn *RawNode) localNode {
	return localNode{
		confc:      make(chan pb.ConfChangeV2), // receives EntryConfChange-type messages, e.g. dynamically adding a node
		rn:         rn,
		propc:      make(chan msgWithResult), // receives MsgProp messages from the network layer
		recvc:      make(chan pb.Message),    // receives every non-MsgProp message from the network layer
		confstatec: make(chan pb.ConfState),
		readyc:     make(chan Ready),         // returns Ready batches to the upper layer
		advancec:   make(chan struct{}),      // upper layer acks a processed Ready back to raft
		tickc:      make(chan struct{}, 128), // buffered so ticks queued while busy can still be handled
		done:       make(chan struct{}),
		stop:       make(chan struct{}),
		status:     make(chan chan Status),
	}
}

func (n *localNode) Stop() {
	select {
	case n.stop <- struct{}{}:
		// Not already stopped, so trigger it
	case <-n.done:
		// RaftNodeInterFace has already been stopped - no need to do anything
		return
	}
	// Block until the stop has been acknowledged by run()
	<-n.done
}

// run is the node's event loop: it multiplexes proposals, incoming messages,
// configuration changes, ticks, Ready hand-off and Advance acks over channels.
func (n *localNode) run() {
	var propc chan msgWithResult // proposals
	var readyc chan Ready        // carries committed data out to the user
	var advancec chan struct{}   // tells raft it may continue
	var rd Ready

	r := n.rn.raft
	// Initially we do not know who the leader is; that is learned via Ready.
	lead := None
	for {
		// Only after the client has acked (Advance) may a new Ready be offered.
		if advancec != nil { // nil at start-up
			readyc = nil
		} else if n.rn.HasReady() { // is there Ready data (pending output)?
			rd = n.rn.readyWithoutAccept() // compute soft/hard state deltas; build the Ready struct
			readyc = n.readyc              // enabled: the select below may now send rd
		}
		// Both zero initially; detect leadership changes.
		if lead != r.lead {
			if r.hasLeader() {
				if lead == None {
					r.logger.Infof("raft.localNode: %x 成为了leader %x 在任期 %d", r.id, r.lead, r.Term)
				} else {
					r.logger.Infof("raft.localNode: %x leader变成了 %x to %x 在任期 %d", r.id, lead, r.lead, r.Term)
				}
				propc = n.propc // only accept proposals while there is a leader
			} else {
				r.logger.Infof("raft.localNode: %x 丢失leader %x 在任期 %d", r.id, lead, r.Term)
				propc = nil
			}
			lead = r.lead
		}

		select {
		// TODO: maybe buffer the config propose if there exists one (the way
		// described in raft dissertation)
		// Currently it is dropped in Step silently.

		case pm := <-propc: // a proposal generated by this node; propc is closed/disabled while leaderless
			_ = msgWithResult{}
			m := pm.m
			m.From = r.id
			err := r.Step(m) // asynchronous fan-out: this error is not the replication result
			if pm.result != nil {
				pm.result <- err
				close(pm.result)
			}
		case m := <-n.recvc: // every message other than proposals
			// Must come from a known node, or be a non-response message
			// (e.g. MsgReadIndex).
			if pr := r.prstrack.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) {
				r.Step(m)
			}
		case cc := <-n.confc: // configuration change (add/remove/update node)
			// If NodeID is None this degenerates into a membership query.
			_, okBefore := r.prstrack.Progress[r.id] // was this node a member before?
			cs := r.applyConfChange(cc)
			// If this node was removed, block incoming proposals — but only if it
			// was in the config beforehand. A node may become a member without
			// knowing it (while catching up on the log without the latest config),
			// in which case we do not want to block the proposal channel.
			// NB: propc is reset when the leader changes, which — if we learn of
			// it — sort of implies we were removed, maybe? Not very sound, and
			// likely buggy.
			if _, okAfter := r.prstrack.Progress[r.id]; okBefore && !okAfter {
				// present before the change, absent after it
				var found bool
				_ = r.prstrack.ConfState
			outer:
				for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
					// look for ourselves among all voters
					for _, id := range sl {
						if id == r.id {
							found = true
							break outer
						}
					}
				}
				if !found {
					propc = nil
				}
			}
			select {
			case n.confstatec <- cs:
			case <-n.done:
			}
		case <-n.tickc: // a timeout fired (heartbeat or election)
			n.rn.Tick()
		case readyc <- rd: // hand the Ready to the upper layer
			n.rn.acceptReady(rd)    // tell raft the Ready was accepted
			advancec = n.advancec   // arm the Advance channel; wait for the ack
		case <-advancec: // the user finished processing the Ready and called Advance()
			n.rn.Advance(rd) // tell the RawNode the application persisted/applied the last Ready
			rd = Ready{}     // reset
			advancec = nil
		case c := <-n.status: // a status query
			c <- getStatus(r)
		case <-n.stop: // stop requested
			close(n.done)
			return
		}
	}
}

// ApplyConfChange applies a configuration change and returns the resulting
// ConfState. It round-trips through run() via confc/confstatec.
func (n *localNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
	var cs pb.ConfState
	select {
	case n.confc <- cc.AsV2(): // send the change into run()
	case <-n.done:
	}
	select {
	case cs = <-n.confstatec: // read the post-change state back out
	case <-n.done:
	}
	return &cs
}

// ------------------------------------------ over --------------------------------------------------------------

// TransferLeadership transfers leadership from lead to transferee.
func (n *localNode) TransferLeadership(ctx context.Context, lead, transferee uint64) {
	select {
	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
	case <-n.done:
	case <-ctx.Done():
	}
}

// ReadIndex serves linearizable reads (e.g. etcdctl get); rctx is a generated
// request token that is echoed back with the response.
func (n *localNode) ReadIndex(ctx context.Context, rctx []byte) error {
	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}

// Ready returns the channel on which state-machine changes (log updates,
// heartbeats to send, ...) are delivered as Ready structs.
func (n *localNode) Ready() <-chan Ready {
	return n.readyc
}

// Advance acknowledges that the last Ready was fully processed.
func (n *localNode) Advance() {
	select {
	case n.advancec <- struct{}{}:
	case <-n.done:
	}
}

// Status returns the raft state machine's current status.
func (n *localNode) Status() Status {
	c := make(chan Status)
	select {
	case n.status <- c: // hand c to run() so it can write the Status (chan chan Status)
		_ = getStatus // getStatus produces the value we read back
		return <-c    // read the Status out of c
	case <-n.done:
		return Status{}
	}
}

// ReportUnreachable reports that the last send to node id failed.
func (n *localNode) ReportUnreachable(id uint64) {
	select {
	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
	case <-n.done:
	}
}

// ReportSnapshot reports the final status of a snapshot sent to node id.
func (n *localNode) ReportSnapshot(id uint64, status SnapshotStatus) {
	rej := status == SnapshotFailure

	select {
	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
	case <-n.done:
	}
}

// Propose proposes data; it waits until a quorum has responded.
func (n *localNode) Propose(ctx context.Context, data []byte) error {
	return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
}

// Step feeds an externally received message into the state machine.
func (n *localNode) Step(ctx context.Context, m pb.Message) error {
	// Ignore local messages arriving over the network.
	if IsLocalMsg(m.Type) {
		return nil
	}
	return n.step(ctx, m)
}

func (n *localNode) step(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, false)
}

func (n *localNode) stepWait(ctx context.Context, m pb.Message) error {
	return n.stepWithWaitOption(ctx, m, true)
}

// stepWithWaitOption routes m into run(); when wait is true it blocks until
// the proposal has been stepped and its result is known.
func (n *localNode) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
	if m.Type != pb.MsgProp {
		// All non-MsgProp messages go to run() via recvc; wait is irrelevant
		// there because recvc submissions are fire-and-forget calls with no
		// return value.
		select {
		case n.recvc <- m: // non-proposal: enqueue and we are done (the common path)
			return nil
		case <-ctx.Done():
			return ctx.Err()
		case <-n.done:
			return ErrStopped
		}
	}

	// Proposal: enqueue and optionally wait for the result.
	ch := n.propc
	pm := msgWithResult{m: m}
	if wait {
		pm.result = make(chan error, 1)
	}

	select {
	case ch <- pm:
		if !wait {
			return nil
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}

	select {
	case err := <-pm.result:
		if err != nil {
			return err
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
	return nil
}

// Tick advances the logical clock; if run() is too busy to accept the tick it
// is dropped with a warning rather than blocking the caller.
func (n *localNode) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
	default:
		n.rn.raft.logger.Warningf("%x 错过了开火的时间.RaftNodeInterFace 阻塞时间过长! ", n.rn.raft.id)
	}
}

// Campaign actively triggers an election by stepping a pb.MsgHup message.
func (n *localNode) Campaign(ctx context.Context) error {
	return n.step(ctx, pb.Message{Type: pb.MsgHup})
}

// MustSync returns whether the hard state and unstable log entries must be
// synchronously flushed to stable storage before proceeding.
func MustSync(st, prevst pb.HardState, entsnum int) bool {
	// New unstable entries, a vote change, or a term change (new election) all
	// require the sync flag: these are cluster-global facts that the leader
	// counts as durable only once a quorum persisted them. Writing them
	// asynchronously could produce inconsistency.
	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
}

// ProposeConfChange proposes a cluster configuration change (add/remove node).
func (n *localNode) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
	msg, err := confChangeToMsg(cc)
	if err != nil {
		return err
	}
	return n.Step(ctx, msg)
}

// msgWithResult pairs an outgoing message with a channel carrying its result.
type msgWithResult struct {
	m      pb.Message // the message being sent
	result chan error // the returned result
}

// confChangeToMsg wraps a raftpb conf change into a pb.Message for proposing.
func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
	typ, data, err := pb.MarshalConfChange(c)
	if err != nil {
		return pb.Message{}, err
	}
	return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
}
diff --git a/raft/over_bootstrap.go b/raft/over_bootstrap.go
new file mode 100644
index 00000000000..709e48b1855
--- /dev/null
+++ b/raft/over_bootstrap.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
package raft

import (
	"errors"

	pb "github.com/ls-2018/etcd_cn/raft/raftpb"
)

// Bootstrap seeds the cluster: it loads the initial membership into the
// MemoryStorage backing this RawNode.
func (rn *RawNode) Bootstrap(peers []Peer) error {
	// peers, e.g. [{"id":10276657743932975437,"peerURLs":["http://localhost:2380"],"name":"default"}]
	if len(peers) == 0 {
		return errors.New("必须提供至少一个peer")
	}
	lastIndex, err := rn.raft.raftLog.storage.LastIndex() // newest index in memory; 0 when empty
	if err != nil {
		return err
	}

	if lastIndex != 0 {
		return errors.New("不能引导一个非空的存储空间")
	}
	rn.prevHardSt = emptyState

	rn.raft.becomeFollower(1, None)
	ents := make([]pb.Entry, len(peers))
	for i, peer := range peers {
		cc := pb.ConfChangeV1{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: string(peer.Context)}
		data, err := cc.Marshal()
		if err != nil {
			return err
		}

		ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
	}
	rn.raft.raftLog.append(ents...) // one log entry per peer
	rn.raft.raftLog.committed = uint64(len(ents))
	for _, peer := range peers {
		rn.raft.applyConfChange(pb.ConfChangeV1{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2())
	}
	return nil
}
diff --git a/raft/over_interface.go b/raft/over_interface.go
new file mode 100644
index 00000000000..0dec0d7ca53
--- /dev/null
+++ b/raft/over_interface.go
@@ -0,0 +1,68 @@
package raft

import (
	"context"

	pb "github.com/ls-2018/etcd_cn/raft/raftpb"
)

// RaftNodeInterFace is a node in a raft cluster.
type RaftNodeInterFace interface {
	Tick()                                                           // advance the logical clock; drives heartbeats and elections
	Campaign(ctx context.Context) error                              // actively trigger an election
	Propose(ctx context.Context, data []byte) error                  // replicate data via the log to all nodes; nil does not mean success (asynchronous)
	ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error  // propose a cluster configuration change (add/remove node)
	Step(ctx context.Context, msg pb.Message) error                  // feed a message into the state machine
	Ready() <-chan Ready                                             // delivers committed data that is ready to be applied
	Advance()                                                        // ack a processed Ready; raft blocks until this is called
	ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState                 // apply a cluster change to the state machine
	TransferLeadership(ctx context.Context, lead, transferee uint64) // transfer leadership to transferee
	Status() Status                                                  // current state of the raft state machine
	ReportSnapshot(id uint64, status SnapshotStatus)                 // report the final outcome of a snapshot sent to node id
	ReadIndex(ctx context.Context, rctx []byte) error                // linearizable read support; result returned via Ready() once applied >= index
	ReportUnreachable(id uint64)                                     // report that the last send to node id failed
	Stop()                                                           // shut the node down
}

// Ready bundles the data raft has prepared for the caller to handle.
type Ready struct {
	// "Soft state": node-level runtime state — who the leader is, what role
	// this node plays. nil means the soft state did not change.
	*SoftState

	// "Hard state": cluster-level state — term, commit index and vote. The
	// distinction: hard state must be persisted by the caller, soft state need
	// not be (think disk value vs. memory value).
	pb.HardState

	ReadStates []ReadState // callback results for Node.ReadIndex(); carries the leader's committed index, proving raft liveness

	// Entries that must be written to stable storage; they come from unstable.
	Entries []pb.Entry

	// A snapshot that must be written to stable storage; also from unstable.
	Snapshot pb.Snapshot

	// Committed entries for the caller to apply. Note CommittedEntries may
	// overlap Entries: the leader commits once a quorum has accepted, so some
	// entries may still sit in unstable when a new commit index arrives.
	CommittedEntries []pb.Entry

	// Messages to send to other nodes once entries are committed to stable
	// storage. raft builds the messages but never sends them; transport is the
	// caller's job. If a MsgSnap is included, the application must call
	// ReportSnapshot on completion or failure. (This is raft.msgs.)
	Messages []pb.Message

	// MustSync: whether the hard state and unstable entries must be written to
	// disk synchronously — the caller may only call Advance() afterwards.
	MustSync bool
}

// appliedCursor extracts the highest index the client has applied from this
// Ready (once it is acknowledged via Advance).
func (rd Ready) appliedCursor() uint64 {
	if n := len(rd.CommittedEntries); n > 0 {
		return rd.CommittedEntries[n-1].Index
	}
	if index := rd.Snapshot.Metadata.Index; index > 0 {
		return index
	}
	return 0
}
diff --git a/raft/over_log.go b/raft/over_log.go
new file mode 100644
index 00000000000..da86c3b3246
--- /dev/null
+++ b/raft/over_log.go
@@ -0,0 +1,390 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

package raft

import (
	"fmt"
	"log"

	pb "github.com/ls-2018/etcd_cn/raft/raftpb"
)

// raftLog = snapshot + storage + unstable.
type raftLog struct {
	// storage holds entries already persisted to the WAL; unstable holds
	// not-yet-persisted entries and snapshots and drops entries promptly once
	// they are persisted, so memory use stays bounded.
	storage  Storage  // final store: an in-memory store of entries committed since the last snapshot
	unstable unstable // data after the snapshot: snapshot data and entries not yet written to Storage
	committed uint64  // commit position: the largest index among committed entries
	// applied is the highest index fed into the state machine. An entry must be
	// committed before it can be applied, so the invariant applied <= committed
	// always holds.
	applied uint64
	logger  Logger
	maxNextEntsSize uint64 // max total size of the entry set returned by nextEnts (entries ready to apply)
}

// append appends entries and returns the new last index.
func (l *raftLog) append(ents ...pb.Entry) uint64 {
	if len(ents) == 0 {
		return l.lastIndex()
	}
	// Overlapping already-committed entries is unacceptable.
	if after := ents[0].Index - 1; after < l.committed {
		l.logger.Panicf("after(%d) 超出范围[committed(%d)]", after, l.committed)
	}
	l.unstable.truncateAndAppend(ents)
	return l.lastIndex()
}

// findConflict returns the index of the first conflicting entry (same index,
// different term), or 0 if there is none.
func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
	for _, ne := range ents {
		if !l.matchTerm(ne.Index, ne.Term) {
			if ne.Index <= l.lastIndex() {
				l.logger.Infof("发现索引冲突[任期不一致] %d [existing term: %d, conflicting term: %d]", ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
			}
			return ne.Index
		}
	}
	return 0
}

// findConflictByTerm walks back from index to the first entry whose term is
// <= the given term, and returns that index.
func (l *raftLog) findConflictByTerm(index uint64, term uint64) uint64 {
	// case 1: follower, index:6 (stored locally), term:9 (leader's view) -> returns 6
	//   idx        1 2 3 4 5 6 7 8 9 10 11 12
	//              -------------------------
	//   term (L)   1 3 3 3 5 5 5 5 5
	//   term (F)   1 1 1 1 2 2
	// case 2: follower, index:12 (stored locally), term:9 (leader's view) -> returns 12
	//   idx        1 2 3 4 5 6 7 8 9 10 11 12
	//              -------------------------
	//   term (L)   1 3 3 3 5 5 5 5 5
	//   term (F)   1 1 1 1 2 2 2 2 2 2  2  2
	// case 3: leader, index:6 (stored locally), term:2 (follower's) -> returns 1
	//   idx        1 2 3 4 5 6 7 8 9 10 11 12
	//              -------------------------
	//   term (L)   1 3 3 3 5 5 5 5 5
	//   term (F)   1 1 1 1 2 2
	if li := l.lastIndex(); index > li {
		l.logger.Warningf("index(%d) 超出范围 [0, lastIndex(%d)] in findConflictByTerm", index, li)
		return index
	}
	for {
		logTerm, err := l.term(index)
		if logTerm <= term || err != nil {
			break
		}
		index--
	}
	return index
}

// unstableEntries returns all unstable entries, for the caller to persist.
func (l *raftLog) unstableEntries() []pb.Entry {
	if len(l.unstable.entries) == 0 {
		return nil
	}
	return l.unstable.entries
}

// nextEnts returns all entries in [applied+1, committed+1): committed but not
// yet applied to the state machine.
func (l *raftLog) nextEnts() (ents []pb.Entry) {
	off := max(l.applied+1, l.firstIndex())
	if l.committed+1 > off {
		ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize)
		if err != nil {
			l.logger.Panicf("在获取未应用的条目时出现意外错误 (%v)", err)
		}
		return ents
	}
	return nil
}

// hasNextEnts reports whether there are entries ready to apply.
func (l *raftLog) hasNextEnts() bool {
	off := max(l.applied+1, l.firstIndex())
	return l.committed+1 > off
}

// hasPendingSnapshot reports whether a snapshot is being processed.
func (l *raftLog) hasPendingSnapshot() bool {
	return l.unstable.snapshot != nil && !IsEmptySnap(*l.unstable.snapshot)
}

// snapshot returns the current snapshot, preferring the unstable one.
func (l *raftLog) snapshot() (pb.Snapshot, error) {
	if l.unstable.snapshot != nil {
		return *l.unstable.snapshot, nil
	}
	return l.storage.Snapshot()
}

// firstIndex returns the first index after the snapshot.
func (l *raftLog) firstIndex() uint64 {
	if i, ok := l.unstable.maybeFirstIndex(); ok {
		// there is an unstable snapshot
		return i
	}
	index, err := l.storage.FirstIndex()
	if err != nil {
		panic(err)
	}
	return index
}

// lastIndex returns the newest log index.
func (l *raftLog) lastIndex() uint64 {
	if i, ok := l.unstable.maybeLastIndex(); ok {
		return i
	}
	i, err := l.storage.LastIndex()
	if err != nil {
		panic(err)
	}
	return i
}

func (l *raftLog) lastTerm() uint64 {
	t, err := l.term(l.lastIndex())
	if err != nil {
		l.logger.Panicf("unexpected error when getting the last term (%v)", err)
	}
	return t
}

// maybeCommit advances the commit index to maxIndex if the entry there has
// the given term; returns whether the commit index moved.
func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
	if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
		l.commitTo(maxIndex)
		return true
	}
	return false
}

// zeroTermOnErrCompacted is a thin wrapper over l.term: 0 or the real term.
func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
	if err == nil {
		return t
	}
	if err == ErrCompacted {
		return 0
	}
	l.logger.Panicf("未知的 error (%v)", err)
	return 0
}

// newLogWithSize builds a fresh raftLog; unstable starts with no snapshot and
// no entries.
func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog {
	if storage == nil {
		log.Panic("存储不能为空")
	}
	log := &raftLog{
		storage:         storage, // e.g. MemoryStorage
		logger:          logger,
		maxNextEntsSize: maxNextEntsSize, // max batch size
		unstable:        unstable{},
	}
	// On start-up the caller must have loaded persisted snapshot and entries
	// into storage: storage acts as a cache of the caller's durable state.

	firstIndex, err := storage.FirstIndex() // index of the first entry
	if err != nil {
		panic(err)
	}
	lastIndex, err := storage.LastIndex() // index of the last entry
	if err != nil {
		panic(err)
	}
	// When unstable holds no entries, unstable.offset is the index the next
	// unstable entry will get.
	log.unstable.offset = lastIndex + 1 // holds not-yet-persisted entries/snapshots
	log.unstable.logger = logger
	// -------------------------------------
	// commit|apply position from storage
	log.committed = firstIndex - 1 // everything in storage counts as committed
	log.applied = firstIndex - 1

	return log
}

func (l *raftLog) String() string {
	return fmt.Sprintf("----> 【committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d】", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
}

// slice returns entries [lo, hi) with total size capped at maxSize.
func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
	err := l.mustCheckOutOfBounds(lo, hi)
	if err != nil {
		return nil, err
	}
	if lo == hi {
		return nil, nil
	}
	var ents []pb.Entry
	// Part of the range lives in storage.
	if lo < l.unstable.offset {
		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
		if err == ErrCompacted { // compacted away
			return nil, err
		} else if err == ErrUnavailable {
			l.logger.Panicf("日志[%d:%d] 索引中的请求条目不可用", lo, min(hi, l.unstable.offset))
		} else if err != nil {
			panic(err) // TODO(bdarnell)
		}

		// Check whether storedEnts hit the size limit: if storage returned
		// fewer entries than requested there is no point consulting unstable.
		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
			return storedEnts, nil
		}
		ents = storedEnts
	}

	// Part of the range lives in unstable.
	if hi > l.unstable.offset {
		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
		if len(ents) > 0 {
			combined := make([]pb.Entry, len(ents)+len(unstable))
			n := copy(combined, ents)
			copy(combined[n:], unstable)
			ents = combined
		} else {
			ents = unstable
		}
	}
	return limitSize(ents, maxSize), nil
}

// mustCheckOutOfBounds enforces l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries).
func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
	if lo > hi {
		l.logger.Panicf("无效的索引 %d > %d", lo, hi)
	}
	fi := l.firstIndex()
	if lo < fi {
		return ErrCompacted
	}

	length := l.lastIndex() + 1 - fi
	if hi > fi+length {
		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
	}
	return nil
}

// term returns the term of the entry at index i.
func (l *raftLog) term(i uint64) (uint64, error) {
	// If the index lies outside everything raftLog records —
	// [first index after snapshot, newest unstable index] — return 0.
	dummyIndex := l.firstIndex() - 1
	if i < dummyIndex || i > l.lastIndex() {
		return 0, nil
	}
	// Look in unstable first.
	if t, ok := l.unstable.maybeTerm(i); ok {
		return t, nil
	}
	// Not in unstable; try storage.
	t, err := l.storage.Term(i)
	if err == nil {
		return t, nil
	}
	// Absent from both storage and unstable counts as not found: storage may
	// compact, e.g. drop entries below the applied index, to save memory.
	if err == ErrCompacted || err == ErrUnavailable {
		return 0, err
	}
	panic(err)
}

// isUpToDate: when a follower receives a candidate's vote request, it compares
// the candidate's log freshness against its own to decide whether to vote.
// raftLog provides the isUpToDate() method for that comparison.
func (l *raftLog) isUpToDate(lasti, term uint64) bool {
	return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
}

// matchTerm checks that a MsgApp's Index and LogTerm fields are consistent
// with this log.
func (l *raftLog) matchTerm(i, term uint64) bool {
	t, err := l.term(i) // term of the entry at index i
	if err != nil {
		return false
	}
	return t == term
}

// commitTo advances the commit index to tocommit.
func (l *raftLog) commitTo(tocommit uint64) {
	if l.committed < tocommit {
		if l.lastIndex() < tocommit {
			l.logger.Panicf("tocommit(%d)超过了 [lastIndex(%d)] raft log是否被损坏、截断或丢失?.?", tocommit, l.lastIndex())
		}
		l.committed = tocommit
	}
}

// appliedTo records that entries up to i were applied to the state machine.
func (l *raftLog) appliedTo(i uint64) {
	if i == 0 {
		return
	}
	if l.committed < i || i < l.applied {
		l.logger.Panicf("applied(%d) 不再范围内[prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
	}
	l.applied = i
}

// entries returns all entries from index i on, capped at maxsize total.
func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
	if i > l.lastIndex() {
		return nil, nil
	}
	return l.slice(i, l.lastIndex()+1, maxsize)
}

// ------------------------------------------ entrance ----------------------------------------

// maybeAppend is called (via raft.handleAppendEntries) when a follower or
// candidate needs to append entries.
// index: ents[0].Index; logTerm: ents[0].Term; committed: the commit index the
// leader recorded for this node; ents: the actual entries.
func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
	if l.matchTerm(index, logTerm) { // does the term at index match logTerm?
		lastnewi = index + uint64(len(ents))
		ci := l.findConflict(ents) // first position where index/term conflict
		switch {
		case ci == 0: // no conflict
		case ci <= l.committed: // conflict inside the committed prefix: fatal
			l.logger.Panicf("日志 %d 与已承诺的条目冲突 [committed(%d)]", ci, l.committed)
		default: // conflict in the uncommitted suffix
			// e.g. [1,2] ----> [1,3,4]
			// this node holds some stale entries beyond the leader's
			offset := index + 1
			// append only the non-conflicting tail of ents
			// (cf. "etcd in depth", figure 1-11 f)
			l.append(ents[ci-offset:]...) // appended into unstable
		}
		l.commitTo(min(committed, lastnewi)) // committed: the leader's view of this node's commit index
		return lastnewi, true
	}
	return 0, false
}

// restore installs a received snapshot.
func (l *raftLog) restore(s pb.Snapshot) {
	l.logger.Infof("log [%s] 开始恢复快照 [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
	l.committed = s.Metadata.Index
	l.unstable.restore(s)
}
diff --git a/raft/over_log_storage.go b/raft/over_log_storage.go
new file mode 100644
index 00000000000..ba65b56cccf
--- /dev/null
+++ b/raft/over_log_storage.go
@@ -0,0 +1,255 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

package raft

import (
	"errors"
	"sync"

	pb "github.com/ls-2018/etcd_cn/raft/raftpb"
)

var ErrCompacted = errors.New("由于压缩,请求的索引无法到达")

var ErrSnapOutOfDate = errors.New("请求的索引比现有快照的老")

var ErrUnavailable = errors.New("索引中的请求条目不可用")

// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
// snapshot is temporarily unavailable.
var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")

// Storage is the raft state machine's stable-storage interface.
type Storage interface {
	// InitialState: the caller passes the initial state, kept in stable
	// storage, to raft at construction time via Storage.
+ InitialState() (pb.HardState, pb.ConfState, error) + // Entries 获取索引在[lo,hi)之间的日志,日志总量限制在maxSize + Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + // Term 获取日志索引为i的届.找不到的情况下error返回值不为空,其中当返回ErrCompacted表示传入的索引数据已经找不到, + // 说明已经被压缩成快照数据了;返回ErrUnavailable:表示传入的索引值大于当前的最大索引. + Term(index uint64) (uint64, error) + LastIndex() (uint64, error) // 返回最后一条日志的索引 + FirstIndex() (uint64, error) // 返回第一条日志的索引 + Snapshot() (pb.Snapshot, error) // 反回最近的快照数据 +} + +// MemoryStorage 大部分操作都需要加锁 +type MemoryStorage struct { + sync.Mutex + hardState pb.HardState // 状态信息(当前任期,当前节点投票给了谁,已提交的entry记录的位置) + snapshot pb.Snapshot // 当前内存里的快照信息 + // wal日志 内存存储 + ents []pb.Entry // snapshot之后的日志条目,第一条日志条目的index为snapshot.Metadata.Index 已经apply的日志项 +} + +// NewMemoryStorage 创建内存存储 +func NewMemoryStorage() *MemoryStorage { + return &MemoryStorage{ + // 当从头开始时,用一个假的条目来填充列表中的第零项. + ents: make([]pb.Entry, 1), + } +} + +// Entries 获取一定范围内的日志项 +func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if lo <= offset { + return nil, ErrCompacted + } + if hi > ms.lastIndex()+1 { + getLogger().Panicf("日志 hi(%d)超出范围的最后一个索引(%d)", hi, ms.lastIndex()) + } + // only contains dummy entries. 
+ if len(ms.ents) == 1 { + return nil, ErrUnavailable + } + // 获取下标的数据 + ents := ms.ents[lo-offset : hi-offset] + // limitsize把超过大小的数据剔除 + return limitSize(ents, maxSize), nil +} + +// Term 获取指定索引日志的任期 +func (ms *MemoryStorage) Term(i uint64) (uint64, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if i < offset { + return 0, ErrCompacted + } + if int(i-offset) >= len(ms.ents) { + return 0, ErrUnavailable + } + return ms.ents[i-offset].Term, nil +} + +// ApplySnapshot 更新快照数据,将snapshot实例保存到memorystorage中 +func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { + ms.Lock() + defer ms.Unlock() + + msIndex := ms.snapshot.Metadata.Index // 内存的 + snapIndex := snap.Metadata.Index // 文件系统的 + if msIndex >= snapIndex { + return ErrSnapOutOfDate + } + + ms.snapshot = snap + ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} + return nil +} + +// CreateSnapshot 创建新的快照 i是新建Snapshot包含的最大的索引值,cs是当前集群的状态,data是状态机里的快照数据 +// 更新快照信息 +func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { + // ents [a,b,c,d,e,f,g,h,i,j,k] + // i + // 前提条件: i 必须>= a日志的索引 + ms.Lock() + defer ms.Unlock() + // 新建立的快照是旧数据 + if i <= ms.snapshot.Metadata.Index { + return pb.Snapshot{}, ErrSnapOutOfDate + } + + offset := ms.ents[0].Index // snapshot之后的日志条目, apply之后的 + if i > ms.lastIndex() { // k 位置的索引 + getLogger().Panicf("快照 %d 超过了最新的日志(%d)", i, ms.lastIndex()) + } + + ms.snapshot.Metadata.Index = i + ms.snapshot.Metadata.Term = ms.ents[i-offset].Term // 获取数组指定偏移位置的索引 + if cs != nil { + ms.snapshot.Metadata.ConfState = *cs + } + ms.snapshot.Data = data + return ms.snapshot, nil +} + +// InitialState OK +func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + return ms.hardState, ms.snapshot.Metadata.ConfState, nil +} + +// SetHardState OK +func (ms *MemoryStorage) SetHardState(st pb.HardState) error { + ms.Lock() + defer ms.Unlock() + ms.hardState = st + return nil +} + +// 
Snapshot OK +func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + return ms.snapshot, nil +} + +// LastIndex todo 返回ents最新entry的索引 +func (ms *MemoryStorage) LastIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.lastIndex(), nil +} + +// todo 返回ents最新entry的索引 +func (ms *MemoryStorage) lastIndex() uint64 { + return ms.ents[0].Index + uint64(len(ms.ents)) - 1 +} + +// FirstIndex todo +func (ms *MemoryStorage) FirstIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.firstIndex(), nil +} + +// 第一条日志索引,默认ents里有一个索引为0的EntryNormal +func (ms *MemoryStorage) firstIndex() uint64 { + return ms.ents[0].Index + 1 +} + +// Compact 新建Snapshot之后,一般会调用MemoryStorage.Compact()方法将MemoryStorage.ents中指定索引之前的Entry记录全部抛弃, +// 从而实现压缩MemoryStorage.ents 的目的,具体实现如下: [GC] +func (ms *MemoryStorage) Compact(compactIndex uint64) error { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index // ents记录中第一条日志的索引 + if compactIndex <= offset { + return ErrCompacted + } + if compactIndex > ms.lastIndex() { // ents记录中最新日志的索引 + getLogger().Panicf("压缩 %d 超出范围 lastindex(%d)", compactIndex, ms.lastIndex()) + } + + i := compactIndex - offset + // 创建新的切片,用来存储compactIndex之后的Entry + ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + ents[0].Index = ms.ents[i].Index + ents[0].Term = ms.ents[i].Term + // 将compactlndex之后的Entry拷贝到ents中,并更新MemoryStorage.ents 字段 + ents = append(ents, ms.ents[i+1:]...) + ms.ents = ents + // 那么随着写请求增多,内存中保留的 Raft 日志条目会越来越多,如何防止 etcd 出现 OOM 呢?etcd 提供了快照和压缩功能来解决这个问题. + // 首先你可以通过调整 --snapshot-count 参数来控制生成快照的频率,其值默认是 100000(etcd v3.4.9,早期 etcd 版本是 10000), + // 也就是每 10 万个写请求触发一次快照生成操作.快照生成完之后,etcd 会通过压缩来删除旧的日志条目.那么是全部删除日志条目还是保留一小部分呢? + // 答案是保留一小部分 Raft 日志条目.数量由 DefaultSnapshotCatchUpEntries 参数控制,默认 5000,目前不支持自定义配置. + // 保留一小部分日志条目其实是为了帮助慢的 Follower 以较低的开销向 Leader 获取 Raft 日志条目,以尽快追上 Leader 进度. + // 若 raftLog 中不保留任何日志条目,就只能发送快照给慢的 Follower,这开销就非常大了. 
+ return nil +} + +// Append 向快照添加数据 +// 确保日志项是 连续的且entries[0].Index > ms.entries[0].Index +func (ms *MemoryStorage) Append(entries []pb.Entry) error { + if len(entries) == 0 { + return nil + } + + ms.Lock() + defer ms.Unlock() + + first := ms.firstIndex() // 当前第一个 + last := entries[0].Index + uint64(len(entries)) - 1 // 最后一个 + + if last < first { + return nil // entries切片中所有的Entry都已经过时,无须添加任何Entry + } + // first之前的Entry已经记入Snapshot中,不应该再记录到ents中,所以将这部分Entry截掉 + // entries[0] first entries[-1] + if first > entries[0].Index { + entries = entries[first-entries[0].Index:] + } + + // 计算entries切片中第一条可用的Entry与first之间的差距 + offset := entries[0].Index - ms.ents[0].Index + switch { + case uint64(len(ms.ents)) > offset: + // 保留MemoryStorage.ents中first~offset的部分,offset之后的部分被抛弃 + // 然后将待追加的Entry追加到MemoryStorage.ents中 + ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) + ms.ents = append(ms.ents, entries...) + case uint64(len(ms.ents)) == offset: + // 直接将待追加的日志记录(entries)追加到MemoryStorage中 + ms.ents = append(ms.ents, entries...) + default: + getLogger().Panicf("丢失日志项 [last: %d, append at: %d]", ms.lastIndex(), entries[0].Index) + } + return nil +} diff --git a/raft/over_log_unstable.go b/raft/over_log_unstable.go new file mode 100644 index 00000000000..db5fa503526 --- /dev/null +++ b/raft/over_log_unstable.go @@ -0,0 +1,159 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import pb "github.com/ls-2018/etcd_cn/raft/raftpb" + +// 使用内存数组维护其中所有的Entry记录,对于Leader节点而言,它维护了客户端请求对应的Entry记录; +// 对于Follower节点而言,它维护的是从Leader节点复制来的Entry记录. +// 无论是Leader节点还是Follower节点,对于刚刚接收到的Entry记录首先都会被存储在unstable中. +// 然后按照Raft协议将unstable中缓存的这些Entry记录交给上层模块进行处理,上层模块会将这些Entry记录发送到集群其他节点或进行保存(写入Storage中). +// 之后,上层模块会调用Advance()方法通知底层的raft模块将unstable 中对应的Entry记录删除(因为己经保存到了Storage中) +// +type unstable struct { + snapshot *pb.Snapshot // 快照数据,该快照数据也是未写入Storage中的. + entries []pb.Entry // 用于保存未写入Storage中的Entry记录.刚生成的日志,没确认的 + offset uint64 // entries数组中的第一条数据在raft日志中的索引 + logger Logger +} + +// maybeFirstIndex 返回unstable数据的第一条数据索引 +// 因为只有快照数据在最前面,因此这个函数只有当快照数据存在的时候才能拿到第一条数据索引,其他的情况下已经拿不到了. +func (u *unstable) maybeFirstIndex() (uint64, bool) { + if u.snapshot != nil { + return u.snapshot.Metadata.Index + 1, true + } + return 0, false +} + +// maybeLastIndex 尝试获取unstable 的最后一条Entry记录的索引值 +// 返回最后一条数据的索引.因为是entries数据在后,而快照数据在前,所以取最后一条数据索引是从entries开始查,查不到的情况下才查快照数据. +func (u *unstable) maybeLastIndex() (uint64, bool) { + // 如果日志数组中有日志条目,那就返回最后一个条目的索引. + if l := len(u.entries); l != 0 { + return u.offset + uint64(l) - 1, true + } + if u.snapshot != nil { + return u.snapshot.Metadata.Index, true + } + return 0, false +} + +// maybeTerm 尝试获取指定Entry记录的Term值,根据条件查找指定的Entry记录的位置. +func (u *unstable) maybeTerm(i uint64) (uint64, bool) { + // 打完快照之后,之前日志的数据就不保存了,包括任期、索引等等 + if i < u.offset { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + return u.snapshot.Metadata.Term, true + } + return 0, false + } + // 如果比最大日志索引还大,超出处理范围也只能返回失败. 
+ last, ok := u.maybeLastIndex() + if !ok { + return 0, false + } + if i > last { + return 0, false + } + + return u.entries[i-u.offset].Term, true +} + +// shrinkEntriesArray 释放数组无用空间 +func (u *unstable) shrinkEntriesArray() { + const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + // 重新创建切片,复制原有切片中的数据,重直entries字段 + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries + } +} + +// 这个函数是接收到leader发来的快照后调用的,暂时存入unstable等待使用者持久化. +func (u *unstable) restore(s pb.Snapshot) { + u.offset = s.Metadata.Index + 1 + u.entries = nil + u.snapshot = &s +} + +// 截断和追加 +// 本节点存在一些无效的数据,比leader多 +// 存储不可靠日志,这个函数是leader发来追加日志消息的时候触发调用的,raft先把这些日志存储在 +// unstable中等待使用者持久化.为什么是追加?因为日志是有序的,leader发来的日志一般是该节点 +// 紧随其后的日志亦或是有些重叠的日志,看似像是一直追加一样. +func (u *unstable) truncateAndAppend(ents []pb.Entry) { + after := ents[0].Index + switch { + // 刚好接在当前日志的后面,理想中的追加 + case after == u.offset+uint64(len(u.entries)): + u.entries = append(u.entries, ents...) + // 这种情况存储可靠存储的日志还没有被提交,此时新的leader不在认可这些日志,所以替换追加 + case after <= u.offset: + u.logger.Infof("直接用待追加的Entry记录替换当前的entries字段,并支新offset %d", after) + u.offset = after + u.entries = ents + default: + // 有重叠的日志,那就用最新的日志覆盖老日志,覆盖追加 + u.logger.Infof("截断在after之后数据 %d", after) + u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...) + u.entries = append(u.entries, ents...) + } +} + +// 截取(lo,hi]的日志 +func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { + u.mustCheckOutOfBounds(lo, hi) + return u.entries[lo-u.offset : hi-u.offset] +} + +// 范围检查 u.offset <= lo <= hi <= u.offset+len(u.entries) +func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { + if lo > hi { + u.logger.Panicf("无效的切片 %d > %d", lo, hi) + } + upper := u.offset + uint64(len(u.entries)) + if lo < u.offset || hi > upper { + u.logger.Panicf("unstable.slice[%d,%d) 超出范围 [%d,%d]", lo, hi, u.offset, upper) + } +} + +// 这个函数是在使用者持久化不可靠日志后触发的调用,可靠的日志索引已经到达了i. 
+func (u *unstable) stableTo(i, t uint64) { + // i:要持久化的日志索引; t 当前任期 + // 查找指定Entry记录的Term佳,若查找失败则表示对应的Entry不在unstable中,直接返回 + gt, ok := u.maybeTerm(i) + if !ok { + return + } + if gt == t && i >= u.offset { + // 指定索引位之前的Entry记录都已经完成持久化,则将其之前的全部Entry记录删除 + u.entries = u.entries[i+1-u.offset:] + u.offset = i + 1 + // 随着多次追加日志和截断日志的操作unstable.entires底层的数组会越来越大, + // shrinkEntriesArray方法会在底层数组长度超过实际占用的两倍时,对底层数据进行缩减 + u.shrinkEntriesArray() + } +} + +// 这个函数是快照持久完成后触发的调用 +func (u *unstable) stableSnapTo(i uint64) { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + u.snapshot = nil + } +} diff --git a/raft/over_logger.go b/raft/over_logger.go new file mode 100644 index 00000000000..3f4be2dbd7c --- /dev/null +++ b/raft/over_logger.go @@ -0,0 +1,135 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import ( + "fmt" + "log" + "os" + "sync" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + Info(v ...interface{}) + Infof(format string, v ...interface{}) + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func SetLogger(l Logger) { + raftLoggerMu.Lock() + raftLogger = l + raftLoggerMu.Unlock() +} + +func ResetDefaultLogger() { + SetLogger(defaultLogger) +} + +func getLogger() Logger { + raftLoggerMu.Lock() + defer raftLoggerMu.Unlock() + return raftLogger +} + +var ( + defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} + raftLoggerMu sync.Mutex + raftLogger = Logger(defaultLogger) +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. 
+type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) 
+} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/raft/over_raft.go b/raft/over_raft.go new file mode 100644 index 00000000000..8456ea179dd --- /dev/null +++ b/raft/over_raft.go @@ -0,0 +1,792 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + "fmt" + "math" + "math/rand" + "sort" + "strings" + "sync" + "time" + + "github.com/ls-2018/etcd_cn/raft/quorum" + + "github.com/ls-2018/etcd_cn/raft/confchange" + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// None 是一个占位的节点ID,在没有leader时使用. +const None uint64 = 0 +const noLimit = math.MaxUint64 + +// 状态类型 +const ( + StateFollower StateType = iota + StateCandidate + StateLeader + StatePreCandidate +) + +type ReadOnlyOption int + +const ( + ReadOnlySafe ReadOnlyOption = iota + ReadOnlyLeaseBased + // 1、 ReadOnlySafe + // 该线性读模式,每次 Follower 进行读请求时,需要和 Leader 同步日志提交位点信息,而 Leader需要向过半的 Follower 发起证明自己是 Leader 的轻量的 RPC 请求, + // 相当于一个 Follower 读,至少需要 1 +(n/2)+ 1 次的 RPC 请求. + // 2、ReadOnlyLeaseBased + // 该线性读模式,每次 Follower 进行读请求时, Leader 只需要判断自己的 Leader 租约是否过期了,如果没有过期,直接可以回复 Follower 自己是 Leader + // 但是该机制对于机器时钟要求很严格,如果有做时钟同步的话,可以考虑使用该线性读模式. + // 如果说对于配置的发布、修改操作比较频繁,可以将 Raft 快照的时间适当的进行调整,避免新节点加入或者节点重启时,由于 Raft 日志回放操作数太多导致节点可开始对外服务的时间过长. 
+) + +const ( + campaignPreElection CampaignType = "CampaignPreElection" // 竞选类型: pre-vote模式 + campaignElection CampaignType = "CampaignElection" // 竞选类型:vote模式 + campaignTransfer CampaignType = "CampaignTransfer" // 竞选类型:leader开始转移 +) + +// ErrProposalDropped 在某些情况下提案被忽略时返回,以便提案者可以得到通知并快速失败. +var ErrProposalDropped = errors.New("撤销raft提案") + +type lockedRand struct { + mu sync.Mutex + rand *rand.Rand +} + +func (r *lockedRand) Intn(n int) int { + r.mu.Lock() + v := r.rand.Intn(n) + r.mu.Unlock() + return v +} + +var globalRand = &lockedRand{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// CampaignType 竞选类型 +type CampaignType string + +// 封装了当前节点所有的核心数据. +type raft struct { + id uint64 // 是本节点raft的身份 + Term uint64 // 任期.如果Message的Term字段为0,则表示该消息是本地消息,例如,MsgHup、 MsgProp、 MsgReadlndex 等消息,都属于本地消息. + Vote uint64 // 当前任期中当前节点将选票投给了哪个节点 + raftLog *raftLog // 当前节点的log状态信息 + maxMsgSize uint64 // 每次发送消息的最大大小[多条日志] + maxUncommittedSize uint64 // 每条日志最大消息体 + prstrack tracker.ProgressTracker // 跟踪Follower节点的状态,比如日志复制的matchIndex + state StateType // 当前节点的状态可选值分为StateFollower、StateCandidate、 StateLeader和StatePreCandidat巳四种状态. + isLearner bool // 本节点是不是learner角色 + msgs []pb.Message // 缓存了当前节点等待发送的消息. + lead uint64 // 当前leaderID + leadTransferee uint64 // leader转移到的节点ID + pendingConfIndex uint64 // 记录配置变更日志的索引,可能该索引已经被apply + uncommittedSize uint64 // 还未提交的日志s的大小,非准确值 + checkQuorum bool // 检查需要维持的选票数,一旦小于,就会丢失leader + preVote bool // PreVote 是否启用PreVote + electionElapsed int // 选举计时器的指针,其单位是逻辑时钟的刻度,逻辑时钟每推进一次,该字段值就会增加1. + heartbeatElapsed int // 心跳计时器的指针,其单位也是逻辑时钟的刻度,逻辑时钟每推进一次,该字段值就会增加1 . + heartbeatTimeout int // 心跳间隔 上限 heartbeatTimeout是当前距离上次心跳的时间 + electionTimeout int // 选举超时时间,当electionE!apsed 宇段值到达该值时,就会触发新一轮的选举. 
+ randomizedElectionTimeout int // 随机选举超时 + disableProposalForwarding bool // 禁止将请求转发到leader,默认FALSE + tick func() // 逻辑计数器推进函数, 由 r.ticker = time.NewTicker(r.heartbeat) ;触发该函数的执行 r.start + step stepFunc // 阶段函数、在那个角色就执行那个角色的函数、处理接收到的消息 + logger Logger + // pendingReadIndexMessages is used to store messages of type MsgReadIndex + // that can't be answered as new leader didn't committed any log in + // current term. Those will be handled as fast as first log is committed in + // current term. + pendingReadIndexMessages []pb.Message + readStates []ReadState // leader会直接往这里存储; follower 转发MsgReadIndex至leader 的响应 + readOnly *readOnly +} + +// 通知RawNode 应用程序已经应用并保存了最后一个Ready结果的进度. +func (r *raft) advance(rd Ready) { + // 此时这些数据,应用到了wal,与应用程序状态机 + r.reduceUncommittedSize(rd.CommittedEntries) // 日志committed以后应该从这里扣除 + + // 如果应用了条目(或快照),则将游标更新为下一个Ready.请注意,如果当前的HardState包含一个新的Commit索引, + // 这并不意味着我们也应用了所有由于按大小提交分页而产生的新条目. + if newApplied := rd.appliedCursor(); newApplied > 0 { + oldApplied := r.raftLog.applied + r.raftLog.appliedTo(newApplied) + + if r.prstrack.Config.AutoLeave && oldApplied <= r.pendingConfIndex && newApplied >= r.pendingConfIndex && r.state == StateLeader { + // 如果当前配置(和最近的配置,至少在这个领导人的任期内)应该是自动离开的,现在启动它.我们使用一个空的数据, + // 它分解成一个空的ConfChangeV2,并且有一个好处,即appendEntry永远不会根据它的大小(寄存器为零)拒绝它. + ent := pb.Entry{ + Type: pb.EntryConfChangeV2, + Data: nil, + } + // 这个建议是不可能被拒绝的. 
+ if !r.appendEntry(ent) { + panic("拒绝不可拒绝的 auto-leaving ConfChangeV2") + } + r.pendingConfIndex = r.raftLog.lastIndex() + r.logger.Infof("启动自动过渡,脱离joint配置 %s", r.prstrack.Config) + } + } + // 让unstable 更新数据 + if len(rd.Entries) > 0 { + e := rd.Entries[len(rd.Entries)-1] + r.raftLog.unstable.stableTo(e.Index, e.Term) + } + // 更新快照数据 + if !IsEmptySnap(rd.Snapshot) { + r.raftLog.unstable.stableSnapTo(rd.Snapshot.Metadata.Index) + } +} + +// 检测是否有未应用的EntryConfChange记录 +func numOfPendingConf(ents []pb.Entry) int { + n := 0 + for i := range ents { + if ents[i].Type == pb.EntryConfChange || ents[i].Type == pb.EntryConfChangeV2 { + n++ + } + } + return n +} + +// 判断本节点是不是重新选举,因为丢失了leader +func (r *raft) pastElectionTimeout() bool { + // 选举过期计数(electionElapsed):主要用于follower来判断leader是不是正常工作, + // 当follower接受到leader的心跳的时候会把electionElapsed的时候就会置为0,electionElapsed的相加是通过外部调用实现的, + // node对外提供一个tick的接口,需要外部定时去调用,调用的周期由外部决定,每次调用就++, + // 然后检查是否会超时,上方的tickElection就是为follower状态的定时调用函数,leader状态的定时调用函数就是向follower发送心跳. 
+ // 计时次数 超过了 限定的 选举次数, 规定:在randomizedElectionTimeout次数内必须收到来自leader的消息 + return r.electionElapsed >= r.randomizedElectionTimeout +} + +// 设置随机选举超时 +func (r *raft) resetRandomizedElectionTimeout() { + r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) // 随机选举超时 +} + +// OK +func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } + +// OK +func (r *raft) hardState() pb.HardState { + return pb.HardState{ + Term: r.Term, + Vote: r.Vote, // 给谁投了票 + Commit: r.raftLog.committed, + } +} + +// send 将状态持久化到一个稳定的存储中,之后再发送消息 +func (r *raft) send(m pb.Message) { + if m.From == None { + m.From = r.id + } + // 数据校验,选举类消息必须带term属性 + // 竞选投票相关的消息类型,必须设置term + if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp { + if m.Term == 0 { + panic(fmt.Sprintf("任期应该被设置%s", m.Type)) + } + } else { + // 其它类消息不能带term属性 + if m.Term != 0 { + panic(fmt.Sprintf("任期不能被设置,当 %s (was %d)", m.Type, m.Term)) + } + // 除了MsgProp和MsgReadIndex消息外,设置term为raft当前周期 + if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex { + m.Term = r.Term + } + } + + r.msgs = append(r.msgs, m) // 将消息放入队列 写 +} + +// StateType 节点在集群中的状态 +type StateType uint64 + +var stmap = [...]string{ + "StateFollower", + "StateCandidate", + "StateLeader", + "StatePreCandidate", +} + +func (st StateType) String() string { + return stmap[uint64(st)] +} + +// Config 启动raft的配置参数 +type Config struct { + ID uint64 // ID 是本节点raft的身份.ID不能为0. + ElectionTick int // 返回选举权检查对应多少次tick触发次数 + HeartbeatTick int // 返回心跳检查对应多少次tick触发次数 + Storage Storage // Storage 存储 日志项、状态 + Applied uint64 // 提交到用户状态机的索引,起始为0 + MaxSizePerMsg uint64 // 每条消息的最大大小:math.MaxUint64表示无限制,0表示每条消息最多一个条目. 
1M + MaxCommittedSizePerReady uint64 // 限制 commited --> apply 之间的数量 MaxSizePerMsg 它们之前是同一个参数 + MaxUncommittedEntriesSize uint64 // 未提交的日志项上限 + MaxInflightMsgs int // 最大的处理中的消息数量 + CheckQuorum bool // CheckQuorum 检查需要维持的选票数,一旦小于,就会丢失leader + PreVote bool // PreVote 防止分区服务器[term会很大]重新加入集群时出现中断 是否启用PreVote + ReadOnlyOption ReadOnlyOption // 必须是enabled if ReadOnlyOption is ReadOnlyLeaseBased. + DisableProposalForwarding bool // 禁止将请求转发到leader,默认FALSE + Logger Logger +} + +// OK +func (c *Config) validate() error { + if c.ID == None { + return errors.New("补鞥呢使用None作为ID") + } + // 返回心跳检查对应多少次tick触发次数 + if c.HeartbeatTick <= 0 { // + return errors.New("心跳间隔必须是>0") + } + + if c.ElectionTick <= c.HeartbeatTick { + return errors.New("选举超时必须是大于心跳间隔") + } + + if c.Storage == nil { + return errors.New("不能没有存储") + } + + if c.MaxUncommittedEntriesSize == 0 { + c.MaxUncommittedEntriesSize = noLimit + } + + // 它们之前是同一个参数. + if c.MaxCommittedSizePerReady == 0 { + c.MaxCommittedSizePerReady = c.MaxSizePerMsg + } + + if c.MaxInflightMsgs <= 0 { + return errors.New("max inflight messages必须是>0") + } + + if c.Logger == nil { + c.Logger = getLogger() + } + // 作为leader时的检查 + if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum { + return errors.New("如果ReadOnlyOption 是 ReadOnlyLeaseBased 的时候必须开启CheckQuorum") + } + + return nil +} + +// ok +func newRaft(c *Config) *raft { + if err := c.validate(); err != nil { + panic(err.Error()) + } + raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady) // ✅ + // 搜 s = raft.NewMemoryStorage() + hs, cs, err := c.Storage.InitialState() + if err != nil { + panic(err) + } + + r := &raft{ + id: c.ID, // 是本节点raft的身份 + lead: None, // 当前leaderID + isLearner: false, // 本节点是不是learner角色 + raftLog: raftlog, // 当前节点的log状态信息 + maxMsgSize: c.MaxSizePerMsg, // 每条消息的最大大小 + maxUncommittedSize: c.MaxUncommittedEntriesSize, // 每条日志最大消息体 + prstrack: tracker.MakeProgressTracker(c.MaxInflightMsgs), + electionTimeout: c.ElectionTick, // 
返回选举权检查对应多少次tick触发次数 + heartbeatTimeout: c.HeartbeatTick, // 返回心跳检查对应多少次tick触发次数 + logger: c.Logger, + checkQuorum: c.CheckQuorum, // 检查需要维持的选票数,一旦小于,就会丢失leader + preVote: c.PreVote, // PreVote 是否启用PreVote + readOnly: newReadOnly(c.ReadOnlyOption), // etcd/etcdserver/over_raft.go:469 默认值0 ReadOnlySafe + disableProposalForwarding: c.DisableProposalForwarding, // 禁止将请求转发到leader,默认FALSE + } + // todo 没看懂 + // ----------------------- + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prstrack, + LastIndex: raftlog.lastIndex(), + }, cs) + if err != nil { + panic(err) + } + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) // 判断相不相等 + // ----------------------- + // 根据从Storage中获取的HardState,初始化raftLog.committed字段,以及raft.Term和Vote字段 + if !IsEmptyHardState(hs) { // 判断初始状态是不是空的 + r.loadState(hs) // 更新状态索引信息 + } + // 如采Config中己置了Applied,则将raftLog.applied字段重直为指定的Applied值上层模块自己的控制正确的己应用位置时使用该配置 + if c.Applied > 0 { + raftlog.appliedTo(c.Applied) // ✅ + } + r.becomeFollower(r.Term, None) // ✅ start + + var nodesStrs []string + for _, n := range r.prstrack.VoterNodes() { // 一开始没有 + nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) + } + + r.logger.Infof("【newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]】", + r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) + return r +} + +func (r *raft) hasLeader() bool { return r.lead != None } + +// 向指定的节点发送信息 +func (r *raft) sendHeartbeat(to uint64, ctx []byte) { + commit := min(r.prstrack.Progress[to].Match, r.raftLog.committed) + m := pb.Message{ + To: to, + Type: pb.MsgHeartbeat, + Commit: commit, // leader会为每个Follower都维护一个leaderCommit,表示leader认为Follower已经提交的日志条目索引值 + Context: ctx, + } + r.send(m) +} + +// bcastAppend 向集群中其他节点广播MsgApp消息 +func (r *raft) bcastAppend() { + // 遍历所有节点,给除自己外的节点发送日志Append消息 + r.prstrack.Visit(func(id uint64, _ *tracker.Progress) { + if id == r.id { + 
return + } + r.sendAppend(id) + }) +} + +// OK 向集群中特定节点发送MsgApp消息 +func (r *raft) sendAppend(to uint64) { + r.maybeSendAppend(to, true) +} + +// maybeSendAppend 向给定的peer发送一个带有新条目的追加RPC.如果有消息被发送,返回true. +// sendIfEmpty参数控制是否发送没有条目的消息("空 "消息对于传达更新的Commit索引很有用,但当我们批量发送多条消息时就不可取). +func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { + // 在消息发送之前会检测当前节点的状态,然后查找待发迭的Entry记录并封装成MsgApp消息, + // 之后根据对应节点的Progress.State值决定发送消息之后的操作 + + // 1. 获取对端节点当前同步进度 + pr := r.prstrack.Progress[to] + if pr.IsPaused() { + return false + } + m := pb.Message{} + m.To = to + // 2. 注意这里带的term是本次发送给follower的第一条日志条目的term + term, errt := r.raftLog.term(pr.Next - 1) // leader认为 follower所在的任期 + ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) // 要发给follower的日志 + if len(ents) == 0 && !sendIfEmpty { + // 这种情况就不发了 + return false + } + + if errt != nil || erre != nil { + // 3. 如果获取term或日志失败,说明follower落后太多,raftLog内存中日志已经做过快照后被删除了 + // 根据日志进度去取日志条目的时候发现,follower日志落后太多,这通常出现在新节点刚加入或者网络连接出现故障的情况下. + // 那么在这种情况下,leader改为发送最近一次快照给Follower,从而提高同步效率 + + if !pr.RecentActive { + r.logger.Debugf("忽略向%x发送快照,因为它最近没有活动.", to) + return false + } + // 4. 改为发送Snapshot消息 + m.Type = pb.MsgSnap + snapshot, err := r.raftLog.snapshot() + if err != nil { + if err == ErrSnapshotTemporarilyUnavailable { + r.logger.Debugf("%x 由于快照暂时不可用,未能向%x发送快照.", r.id, to) + return false + } + panic(err) + } + if IsEmptySnap(snapshot) { + panic("需要一个非空快照") + } + m.Snapshot = snapshot + sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term + r.logger.Debugf("%x [firstindex: %d, commit: %d] 发送快照[index: %d, term: %d] to %x [%s]", r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) + pr.BecomeSnapshot(sindex) // 变成发送快照的状态 + r.logger.Debugf("%x 暂停发送复制信息到 %x [%s]", r.id, to, pr) + } else { + // 5. 
发送Append消息 + m.Type = pb.MsgApp // 设置消息类型 + m.Index = pr.Next - 1 // 设置MsgApp消息的Index字段 + m.LogTerm = term // 设置MsgApp消息的LogTerm字段 + m.Entries = ents // 设置消息携带的Entry记录集合 + m.Commit = r.raftLog.committed // 设置消息的Commit字段,即当前节点的raftLog中最后一条已提交的记录索引值 + // 6. 每次发送日志或心跳都会带上最新的commitIndex + m.Commit = r.raftLog.committed + if n := len(m.Entries); n != 0 { + switch pr.State { + // 在StateReplicate中,乐观地增加 + case tracker.StateReplicate: + last := m.Entries[n-1].Index + pr.OptimisticUpdate(last) // 新目标节点对应的Next值(这里不会更新Match) + pr.Inflights.Add(last) // 记录已发送但是未收到响应的消息 + case tracker.StateProbe: + // 消息发送后,就将Progress.Paused字段设置成true,暂停后续消息的发送 + pr.ProbeSent = true + default: + r.logger.Panicf("%x 在未知的状态下发送%s", r.id, pr.State) + } + } + } + // 7. 发送消息 + r.send(m) + return true +} + +// 取消 先前的领导权移交 +func (r *raft) abortLeaderTransfer() { + r.leadTransferee = None +} + +// tickHeartbeat 只在leader执行,在r.heartbeatTimeout之后发送一个MsgBeat. +func (r *raft) tickHeartbeat() { + // 每次tick计时器触发,会调用这个函数 + r.heartbeatElapsed++ + r.electionElapsed++ + if r.electionElapsed >= r.electionTimeout { // 如果选举计时超时 + r.electionElapsed = 0 // 重置计时器 + if r.checkQuorum { // 给自己发送一条 MsgCheckQuorum 消息,检测是否出现网络隔离 + r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + } + // leader状态下; 重置leadTransferee + if r.state == StateLeader && r.leadTransferee != None { + r.abortLeaderTransfer() + } + } + + if r.state != StateLeader { + return + } + + if r.heartbeatElapsed >= r.heartbeatTimeout { + r.heartbeatElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) + } +} + +// 所提交的state 必须在 [r.raftLog.committed,r.raftLog.lastIndex()]之间 +func (r *raft) loadState(state pb.HardState) { + if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() { + r.logger.Panicf("%x state.commit %d 不再指定范围内 [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex()) + } + r.raftLog.committed = state.Commit + r.Term = state.Term + r.Vote = state.Vote // 当前节点的选票投给了谁做我Leader +} + +// maybeCommit 
尝试更新本机committed索引 +func (r *raft) maybeCommit() bool { + // 在所有的follower中 获取最大的公共的超过半数确认的index + mci := r.prstrack.Committed() + // 更新本机commitIndex + return r.raftLog.maybeCommit(mci, r.Term) +} + +// 角色变更时,都会调用此函数 +func (r *raft) reset(term uint64) { + if r.Term != term { + r.Term = term + r.Vote = None // 当前任期中当前节点将选票投给了哪个节点 + } + r.lead = None + + r.electionElapsed = 0 + r.heartbeatElapsed = 0 + r.resetRandomizedElectionTimeout() // 设置随机选举超时 + r.abortLeaderTransfer() // 置空 leader转移目标 + + r.prstrack.ResetVotes() // 将每个节点的记录信息 重置 + // 重直prs, 其中每个Progress中的Next设置为raftLog.lastindex + r.prstrack.Visit(func(id uint64, pr *tracker.Progress) { + *pr = tracker.Progress{ + Match: 0, + Next: r.raftLog.lastIndex() + 1, + Inflights: tracker.NewInflights(r.prstrack.MaxInflight), + IsLearner: pr.IsLearner, + } + if id == r.id { + pr.Match = r.raftLog.lastIndex() // 对应Follower节点当前己经成功复制的Entry记录的索引值,不知有没有同步大多数节点 + } + }) + + r.pendingConfIndex = 0 + r.uncommittedSize = 0 + r.readOnly = newReadOnly(r.readOnly.option) // 只读请求的相关摄者 +} + +// 通过减少记录未提交的条目大小 来处理新提交的条目. +func (r *raft) reduceUncommittedSize(ents []pb.Entry) { + // 日志committed以后应该从这里扣除 + if r.uncommittedSize == 0 { + // follower的快速路径,他们不跟踪或执行限制. + return + } + + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + if s > r.uncommittedSize { + // uncommittedSize可能会低估未提交的Raft日志的大小,但永远不会高估它. + r.uncommittedSize = 0 + } else { + r.uncommittedSize -= s + } +} + +// 判断未提交的日志条目是不是超过限制,是的话拒绝并返回失败 +func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + + if r.uncommittedSize > 0 && s > 0 && r.uncommittedSize+s > r.maxUncommittedSize { + return false + } + r.uncommittedSize += s + return true +} + +// 日志新增, 加日志放入unstable 没有落盘 +func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) { + // 1. 获取raft节点当前最后一条日志条目的index + li := r.raftLog.lastIndex() + // 2. 
给新的日志条目设置term和index + for i := range es { + es[i].Term = r.Term + es[i].Index = li + 1 + uint64(i) + } + // 3. 判断未提交的日志条目是不是超过限制,是的话拒绝并返回失败 + // etcd限制了leader上最多有多少未提交的条目,防止因为leader和follower之间出现网络问题时,导致条目一直累积. + if !r.increaseUncommittedSize(es) { + r.logger.Debugf("%x 在日志中添加新条目将超过未提交的条目大小限制;下降的建议", r.id) + return false + } + // 4. 将日志条目追加到raftLog中 + // 将日志条目追加到raftLog内存队列中,并且返回最大一条日志的index,对于leader追加日志的情况,这里返回的li肯定等于方法第1行中获取的li + li = r.raftLog.append(es...) + // 5. 检查并更新日志进度 + // raft的leader节点保存了所有节点的日志同步进度,这里面也包括它自己 + r.prstrack.Progress[r.id].MaybeUpdate(li) + // 6. 判断是否做一次commit + r.maybeCommit() + return true +} + +// 开启竞选条件判断 +func (r *raft) hup(t CampaignType) { + if r.state == StateLeader { + r.logger.Debugf("%x忽略MsgHup消息,因为已经是leader了", r.id) + return + } + + if !r.roleUp() { + r.logger.Warningf("%x角色不可以提升,不能参与竞选", r.id) + return + } + + // 获取raftLog中已提交但未apply( lip applied~committed) 的Entry记录 + ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) + if err != nil { + r.logger.Panicf("获取没有apply日志时出现错误(%v)", err) + } + + // 检测是否有未应用的EntryConfChange记录,如果有就放弃发起选举的机会 + if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied { + r.logger.Warningf("%x不能参与竞选在任期 %d 因为还有 %d 应用配置要更改 ", r.id, r.Term, n) + return + } + // 核对完成,开始选举 + r.logger.Infof("%x开启新的任期在任期%d", r.id, r.Term) + r.campaign(t) +} + +// campaign 开始竞选 +func (r *raft) campaign(t CampaignType) { // CampaignTransfer + if !r.roleUp() { + r.logger.Warningf("%x is 无法推动;不应该调用 campaign()", r.id) + } + var term uint64 + var voteMsg pb.MessageType + if t == campaignPreElection { // pre-vote模式 + r.becomePreCandidate() // 变成预竞选者角色,更新状态、step、 但不增加任期 + voteMsg = pb.MsgPreVote + // 在增加r.Term之前,将本节点打算增加到的任期数通过rpc发送出去 + term = r.Term + 1 + } else { + r.becomeCandidate() // 变成竞选者角色,更新状态、step、任期加1 + voteMsg = pb.MsgVote + term = r.Term + } + // 自己给自己投票 + // pre-vote 那么Votes会置空 + // 单机 : 那么此时给自己投一票,res是VoteWon + // 多机:此时是VotePending + // vote 直接给自己投票 + // 单机 : 
那么此时给自己投一票,res是VoteWon + // 多机:此时是VotePending + // 如果是leader转移的话,不会发送vote消息 + _, _, res := r.poll(r.id, voteRespMsgType(voteMsg), true) // 获取自己的竞选结果 + if res == quorum.VoteWon { + // 我们在为自己投票后赢得了选举(这肯定意味着 这是一个单一的本地节点集群).推进到下一个状态. + if t == campaignPreElection { + r.campaign(campaignElection) + } else { + r.becomeLeader() + } + return + } + // VotePending VoteLost 两种情况 + // VoteLost + var ids []uint64 + // 给节点排序 + { + idMap := r.prstrack.Voters.IDs() + ids = make([]uint64, 0, len(idMap)) + for id := range idMap { + ids = append(ids, id) + } + sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) + } + + for _, id := range ids { + if id == r.id { + // 不给自己投票 + continue + } + r.logger.Infof("%x [logterm: %d, index: %d] 发送 %s 请求到 %x在任期 %d", r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term) + + var ctx []byte + if t == campaignTransfer { // leader开始转移 + ctx = []byte(t) + } + // leader转移 follower 发送申请投票消息,但是任期不会增加, context 是CampaignTransfer + // 给所有节点发消息 + r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) + _ = r.Step + } +} + +// 返回 "节点ID" 的投票响应类型,true +func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) { + if v { + r.logger.Infof("%x 收到 %s 从 %x 在任期 %d", r.id, t, id, r.Term) + } else { + r.logger.Infof("%x 收到 %s 拒绝消息从 %x 在任期 %d", r.id, t, id, r.Term) + } + r.prstrack.RecordVote(id, v) // 记录投票结果 + return r.prstrack.TallyVotes() // 竞选情况 +} + +// 处理leader发送来的心跳信息 【follower、Candidate】 +func (r *raft) handleHeartbeat(m pb.Message) { + // 把msg中的commit提交,commit是只增不减的 + r.raftLog.commitTo(m.Commit) // leader commit 了,follower再commit + // 发送Response给Leader 按照raft协议的要求带上自己日志的进度. + r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context}) +} + +// 非leader角色的 tick函数, 每次逻辑计时器触发就会调用 +func (r *raft) tickElection() { + r.electionElapsed++ // 收到MsgBeat消息时会重置其选举计时器,从而防止节点发起新一轮选举. 
+ // roleUp返回是否可以被提升为leader + // pastElectionTimeout检测当前的候选超时间是否过期 + if r.roleUp() && r.pastElectionTimeout() { + // 自己可以被promote & election timeout 超时了,规定时间没有听到心跳发起选举;发送MsgHup// 选举超时 + r.electionElapsed = 0 // 避免两次计时器触发,仍然走这里 + r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) // 让自己选举 + } +} + +// 处理日志 +func (r *raft) handleAppendEntries(m pb.Message) { + // 在leader在发消息时,也会将消息写入本地日志文件中,不会等待follower确认 + // 判断是否是过时的消息; 日志索引 小于本地已经commit的消息 + if m.Index < r.raftLog.committed { + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + return + } + // 会进行一致性检查;尝试将消息携带的Entry记录追加到raftLog中 + // m.Index:携带的日志的最小日志索引, m.LogTerm:携带的第一条日志任期, m.Commit:leader记录的本机点已经commit的日志索引 + // m.Entries... 真正的日志数据 + if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { + // 返回收到的最后一条日志的索引,这样Leader节点就可以根据此值更新其对应的Next和Match值 + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) + } else { + // 收到的日志索引任期不满足以下条件:任期一样,日志索引比lastIndex大1 + + // 上面的maybeAppend() 方法只会将日志存储到RaftLog维护的内存队列中, + // 日志的持久化是异步进行的,这个和Leader节点的存储WAL逻辑基本相同. + // 有一点区别就是follower节点正式发送MsgAppResp消息会在wal保存成功后 + // 而leader节点是先发送消息,后保存的wal. + + // idx 1 2 3 4 5 6 7 8 9 + // ----------------- + // term (L) 1 3 3 3 5 5 5 5 5 + // term (F) 1 1 1 1 2 2 + // extern 当flower多一些未commit数据时, Leader是如何精准地找到每个Follower 与其日志条目首个不一致的那个槽位的呢 + // Follower 将之后的删除,重新同步leader之后的数据 + // 如采追加记录失败,则将失/败信息返回给Leader节点(即MsgAppResp 消息的Reject字段为true), + // 同时返回的还有一些提示信息(RejectHint字段保存了当前节点raftLog中最后一条记录的索引) + + index, err := r.raftLog.term(m.Index) // 判断leader传过来的index在本地是否有存储 + r.logger.Debugf("%x [logterm: %d, index: %d]拒绝消息MsgApp [logterm: %d, index: %d] from %x", + r.id, r.raftLog.zeroTermOnErrCompacted(index, err), m.Index, m.LogTerm, m.Index, m.From) + // 向leader返回一个关于两个日志可能出现分歧关于 index 和 term 的提示. 
+ // if m.LogTerm >= term && m.Index >= index 可以跳过一些follower拥有的未提交数据 + hintIndex := min(m.Index, r.raftLog.lastIndex()) // 发来的消息最小索引与当前最新消息, 一般来说后者会比较小,6 + hintIndex = r.raftLog.findConflictByTerm(hintIndex, m.LogTerm) // 核心逻辑 + hintTerm, err := r.raftLog.term(hintIndex) + if err != nil { + panic(fmt.Sprintf("term(%d)必须是valid, but got %v", hintIndex, err)) + } + r.send(pb.Message{ + To: m.From, + Type: pb.MsgAppResp, + Index: m.Index, + Reject: true, + RejectHint: hintIndex, + LogTerm: hintTerm, + }) + } +} + +// 判断提交的日志任期与当前任期 是否一致 +func (r *raft) committedEntryInCurrentTerm() bool { + // 获取最新提交的日志所对应的任期 == 当前的任期 + return r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) == r.Term +} + +// 发送超时消息 +func (r *raft) sendTimeoutNow(to uint64) { + r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) // 给指定节点发消息 +} diff --git a/raft/over_read_only.go b/raft/over_read_only.go new file mode 100644 index 00000000000..b46966f9ba2 --- /dev/null +++ b/raft/over_read_only.go @@ -0,0 +1,155 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// ReadState provides state for read only query. +// It's caller's responsibility to call ReadIndex first before getting +// this state from ready, it's also caller's duty to differentiate if this +// state is what it requests through RequestCtx, eg. 
given a unique id as +// RequestCtx +// 这个参数就是Node.ReadIndex()的结果回调. +type ReadState struct { + Index uint64 // leader 节点已经committed的索引 + RequestCtx []byte // 递增ID +} + +type readIndexStatus struct { + req pb.Message // 记录了对应的MsgReadIndex请求 + index uint64 // 该MsgReadIndex请求到达时,对应的已提交位置 + // NB: this never records 'false', but it's more convenient to use this + // instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If + // this becomes performance sensitive enough (doubtful), quorum.VoteResult + // can change to an API that is closer to that of CommittedIndex. + acks map[uint64]bool // 记录了该MsgReadIndex相关的MsgHeartbeatResp响应的信息 +} + +type readOnly struct { + option ReadOnlyOption // 当前只读请求的处理模式,ReadOnlySafe ReadOnlyOpt 和 ReadOnlyLeaseBased两种模式 + /* + 在etcd服务端收到MsgReadIndex消息时,会为其创建一个唯一的消息ID,并作为MsgReadIndex消息的第一条Entry记录. + 在pendingReadIndex维护了消息ID与对应请求readIndexStatus实例的映射 + */ + pendingReadIndex map[string]*readIndexStatus // MsgReadIndex请求对应的消息ID -->readIndexStatus + readIndexQueue []string // 记录了MsgReadIndex请求对应的消息ID,这样可以保证MsgReadIndex的顺序 +} + +func newReadOnly(option ReadOnlyOption) *readOnly { + return &readOnly{ + option: option, + pendingReadIndex: make(map[string]*readIndexStatus), + } +} + +/* +1.获取消息ID,在ReadIndex消息的第一个记录中记录了消息ID +2.判断该消息是否已经记录在pendingReadIndex中,如果已存在则直接返回 +3.如果不存在,则维护到pendingReadIndex中,index是当前Leader已提交的位置,m是请求的消息 +4.并将消息ID追加到readIndexQueue队列中 +*/ +func (ro *readOnly) addRequest(index uint64, m pb.Message) { + // index 当前节点的committed索引, m MsgReadIndex消息 【自增🆔】 + s := string(m.Entries[0].Data) + if _, ok := ro.pendingReadIndex[s]; ok { + return + } + // 记录发送的索引值, + ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)} + ro.readIndexQueue = append(ro.readIndexQueue, s) +} + +/* +recvAck通知readonly结构,即raft状态机接受了对MsgReadIndex上下文附加的心跳的确认. 
+1.消息的Context即消息ID,根据消息id获取对应的readIndexStatus +2.如果获取不到则返回0 +3.记录了该Follower节点返回的MsgHeartbeatResp响应的信息 +4.返回Follower响应的数量 +*/ +func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool { + rs, ok := ro.pendingReadIndex[string(context)] + if !ok { + return nil + } + rs.acks[id] = true + return rs.acks +} + +/* +1.遍历readIndexQueue队列,如果能找到该消息的Context,则返回该消息及之前的所有记录rss,并删除readIndexQueue队列和pendingReadIndex中对应的记录 +2.如果没有Context对应的消息ID,则返回nil +*/ +func (ro *readOnly) advance(m pb.Message) []*readIndexStatus { + var ( + i int + found bool + ) + + ctx := string(m.Context) + var rss []*readIndexStatus + + for _, okctx := range ro.readIndexQueue { + i++ + rs, ok := ro.pendingReadIndex[okctx] + if !ok { + panic("无法从挂起的映射中找到相应的读状态") + } + rss = append(rss, rs) + if okctx == ctx { + found = true + break + } + } + + if found { + ro.readIndexQueue = ro.readIndexQueue[i:] + for _, rs := range rss { + delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data)) + } + return rss + } + + return nil +} + +// lastPendingRequestCtx 返回MsgReadIndex记录中最后一个消息ID +func (ro *readOnly) lastPendingRequestCtx() string { + if len(ro.readIndexQueue) == 0 { + return "" + } + return ro.readIndexQueue[len(ro.readIndexQueue)-1] +} + +// bcastHeartbeat 发送RPC,没有日志给所有对等体. 
+func (r *raft) bcastHeartbeat() { + lastCtx := r.readOnly.lastPendingRequestCtx() + if len(lastCtx) == 0 { + r.bcastHeartbeatWithCtx(nil) + } else { + r.bcastHeartbeatWithCtx([]byte(lastCtx)) + } +} + +func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { + r.prstrack.Visit(func(id uint64, _ *tracker.Progress) { + if id == r.id { + return + } + r.sendHeartbeat(id, ctx) + }) +} diff --git a/raft/over_snapshot.go b/raft/over_snapshot.go new file mode 100644 index 00000000000..207da705da7 --- /dev/null +++ b/raft/over_snapshot.go @@ -0,0 +1,91 @@ +package raft + +import ( + "fmt" + + "github.com/ls-2018/etcd_cn/raft/confchange" + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// follower、candidate 都有可能会接受到快照消息; candidate 会变成follower +func (r *raft) handleSnapshot(m pb.Message) { + sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term + if r.restore(m.Snapshot) { + r.logger.Infof("%x [commit: %d] 重置快照 [index: %d, term: %d]", r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()}) + } else { + r.logger.Infof("%x [commit: %d] 忽略快照 [index: %d, term: %d]", r.id, r.raftLog.committed, sindex, sterm) + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) + } +} + +// restore 从一个快照中恢复状态机.它恢复了日志和状态机的配置.如果这个方法返回false,说明快照被忽略了,因为它已经过时了,或者是由于一个错误. +func (r *raft) restore(s pb.Snapshot) bool { + // snapshot的index比自身committed要小,说明已有这些数据,返回false + if s.Metadata.Index <= r.raftLog.committed { + return false + } + if r.state != StateFollower { // 在收到快照消息时,成为了leader + // 这是深度防御:如果领导者以某种方式结束了应用快照,它可以进入一个新的任期而不进入跟随者状态. + // 这应该永远不会发生,但如果它发生了,我们会通过提前返回来防止损害,所以只记录一个响亮的警告. + // 在写这篇文章的时候,当这个方法被调用时,实例被保证处于跟随者状态. + r.logger.Warningf("%x 试图将快照恢复为领导者;这不应该发生.", r.id) + r.becomeFollower(r.Term+1, None) + return false + } + + // 更多的深度防御:如果收件人不在配置中,就扔掉快照. + // 这不应该发生(在写这篇文章的时候),但这里和那里的很多代码都假定r.id在进度跟踪器中. 
+ found := false + cs := s.Metadata.ConfState + for _, set := range [][]uint64{ + cs.Voters, + cs.Learners, + cs.VotersOutgoing, + } { + for _, id := range set { + if id == r.id { + found = true + break + } + } + if found { + break + } + } + if !found { + r.logger.Warningf("%x 试图恢复快照,但它不在ConfState %v中;不应该发生这种情况.", r.id, cs) + return false + } + + // 现在,继续前进并实际恢复. + if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { + // 自身日志条目中有相应的term和index,说明已有这些数据,返回false + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] 快速提交到快照[index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + r.raftLog.commitTo(s.Metadata.Index) + return false + } + // 自身没有这一部分数据 + r.raftLog.restore(s) + + // 重置配置并重新添加(可能更新的)对等体. + r.prstrack = tracker.MakeProgressTracker(r.prstrack.MaxInflight) // 相当于重置prs信息 + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prstrack, + LastIndex: r.raftLog.lastIndex(), + }, cs) + if err != nil { + panic(fmt.Sprintf(" 要么是我们的配置变更处理有问题,要么是客户端破坏了配置变更. %+v: %s", cs, err)) + } + + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + + pr := r.prstrack.Progress[r.id] + pr.MaybeUpdate(pr.Next - 1) // TODO(tbg): 这是未经测试的,可能不需要的. + + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] 重置快照信息[index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + return true +} diff --git a/raft/over_status.go b/raft/over_status.go new file mode 100644 index 00000000000..f8470a4efd0 --- /dev/null +++ b/raft/over_status.go @@ -0,0 +1,109 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// Status 包含关于这个Raft对等体的信息和它对系统的看法."Progress "只在领导者上被填充. +type Status struct { + BasicStatus + Config tracker.Config + Progress map[uint64]tracker.Progress // 如果是Leader,还有其他节点的进度 ,不是的话,就是nil +} + +type BasicStatus struct { + ID uint64 + pb.HardState + SoftState + Applied uint64 // 应用索引,其实这个使用者自己也知道,因为Ready的回调里提交日志被应用都会有日志的索引. + LeadTransferee uint64 // Leader转移ID,如果正处于Leader转移期间. +} + +func getProgressCopy(r *raft) map[uint64]tracker.Progress { + m := make(map[uint64]tracker.Progress) + r.prstrack.Visit(func(id uint64, pr *tracker.Progress) { + p := *pr + p.Inflights = pr.Inflights.Clone() + pr = nil + + m[id] = p + }) + return m +} + +func getBasicStatus(r *raft) BasicStatus { + s := BasicStatus{ + ID: r.id, + LeadTransferee: r.leadTransferee, + } + s.HardState = r.hardState() + s.SoftState = *r.softState() + s.Applied = r.raftLog.applied + return s +} + +// getStatus 得到一份当前raft状态的副本. +func getStatus(r *raft) Status { + var s Status + s.BasicStatus = getBasicStatus(r) + if s.RaftState == StateLeader { + s.Progress = getProgressCopy(r) + } + s.Config = r.prstrack.Config.Clone() + return s +} + +// MarshalJSON 将raft的状态翻译成JSON格式.试图通过在raft中引入ID类型来简化这个过程. 
+func (s Status) MarshalJSON() ([]byte, error) { + j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`, + s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied) + + if len(s.Progress) == 0 { + j += "}," + } else { + for k, v := range s.Progress { + subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) + j += subj + } + // remove the trailing "," + j = j[:len(j)-1] + "}," + } + + j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee) + return []byte(j), nil +} + +func (s Status) String() string { + b, err := s.MarshalJSON() + if err != nil { + getLogger().Panicf("unexpected error: %v", err) + } + return string(b) +} + +// SoftState 提供对日志和调试有用的状态.该状态是不稳定的,不需要持久化到WAL中. +type SoftState struct { + Lead uint64 // 当前leader + RaftState StateType // 节点状态 +} + +func (a *SoftState) equal(b *SoftState) bool { + return a.Lead == b.Lead && a.RaftState == b.RaftState +} diff --git a/raft/over_util.go b/raft/over_util.go new file mode 100644 index 00000000000..6f0cd25c42d --- /dev/null +++ b/raft/over_util.go @@ -0,0 +1,235 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import ( + "bytes" + "fmt" + "strings" + + pb "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// IsLocalMsg OK +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +// IsResponseMsg 是不是响应信息 +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// 投票信息的响应类型 +func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +func DescribeHardState(hs pb.HardState) string { + var buf strings.Builder + fmt.Fprintf(&buf, "Term:%d", hs.Term) + if hs.Vote != 0 { + fmt.Fprintf(&buf, " Vote:%d", hs.Vote) + } + fmt.Fprintf(&buf, " Commit:%d", hs.Commit) + return buf.String() +} + +func DescribeSoftState(ss SoftState) string { + return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState) +} + +func DescribeConfState(state pb.ConfState) string { + return fmt.Sprintf( + "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v", + state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave, + ) +} + +func DescribeSnapshot(snap pb.Snapshot) string { + m := snap.Metadata + return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState)) +} + +func DescribeReady(rd Ready, f EntryFormatter) string { + var buf strings.Builder + if rd.SoftState != nil { + fmt.Fprint(&buf, 
DescribeSoftState(*rd.SoftState)) + buf.WriteByte('\n') + } + if !IsEmptyHardState(rd.HardState) { + fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState)) + buf.WriteByte('\n') + } + if len(rd.ReadStates) > 0 { + fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates) + } + if len(rd.Entries) > 0 { + buf.WriteString("Entries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.Entries, f)) + } + if !IsEmptySnap(rd.Snapshot) { + fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot)) + } + if len(rd.CommittedEntries) > 0 { + buf.WriteString("CommittedEntries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f)) + } + if len(rd.Messages) > 0 { + buf.WriteString("Messages:\n") + for _, msg := range rd.Messages { + fmt.Fprint(&buf, DescribeMessage(msg, f)) + buf.WriteByte('\n') + } + } + if buf.Len() > 0 { + return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String()) + } + return "" +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. 
+func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint) + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot)) + } + return buf.String() +} + +// PayloadSize 返回数据长度 +func PayloadSize(e pb.Entry) int { + return len(e.Data) +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. +func DescribeEntry(e pb.Entry, f EntryFormatter) string { + if f == nil { + f = func(data []byte) string { return fmt.Sprintf("%q", data) } + } + + formatConfChange := func(cc pb.ConfChangeI) string { + // TODO(tbg): give the EntryFormatter a type argument so that it gets + // a chance to expose the Context. + return pb.ConfChangesToString(cc.AsV2().Changes) + } + + var formatted string + switch e.Type { + case pb.EntryNormal: + formatted = f(e.Data) + case pb.EntryConfChange: + var cc pb.ConfChangeV1 + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + case pb.EntryConfChangeV2: + var cc pb.ConfChangeV2 + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + } + if formatted != "" { + formatted = " " + formatted + } + return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted) +} + +// DescribeEntries calls DescribeEntry for each Entry, adding a newline to +// each. 
+func DescribeEntries(ents []pb.Entry, f EntryFormatter) string { + var buf bytes.Buffer + for _, e := range ents { + _, _ = buf.WriteString(DescribeEntry(e, f) + "\n") + } + return buf.String() +} + +// limitsize把超过大小的数据剔除 +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} + +// OK +func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) { + err := cs1.Equivalent(cs2) // ✅ + if err == nil { + return + } + l.Panic(err) +} diff --git a/raft/quorum/over_majority.go b/raft/quorum/over_majority.go new file mode 100644 index 00000000000..6642c2f3379 --- /dev/null +++ b/raft/quorum/over_majority.go @@ -0,0 +1,192 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package quorum + +import ( + "fmt" + "math" + "sort" + "strings" +) + +// MajorityConfig 其实就是peerID的set +type MajorityConfig map[uint64]struct{} + +func (c MajorityConfig) String() string { + sl := make([]uint64, 0, len(c)) + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + var buf strings.Builder + buf.WriteByte('(') + for i := range sl { + if i > 0 { + buf.WriteByte(' ') + } + fmt.Fprint(&buf, sl[i]) + } + buf.WriteByte(')') + return buf.String() +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c MajorityConfig) Describe(l AckedIndexer) string { + if len(c) == 0 { + return "" + } + type tup struct { + id uint64 + idx Index + ok bool // idx found? + bar int // length of bar displayed for this tup + } + + // Below, populate .bar so that the i-th largest commit index has bar i (we + // plot this as sort of a progress bar). The actual code is a bit more + // complicated and also makes sure that equal index => equal bar. + + n := len(c) + info := make([]tup, 0, n) + for id := range c { + idx, ok := l.AckedIndex(id) + info = append(info, tup{id: id, idx: idx, ok: ok}) + } + + // Sort by index + sort.Slice(info, func(i, j int) bool { + if info[i].idx == info[j].idx { + return info[i].id < info[j].id + } + return info[i].idx < info[j].idx + }) + + // Populate .bar. + for i := range info { + if i > 0 && info[i-1].idx < info[i].idx { + info[i].bar = i + } + } + + // Sort by ID. + sort.Slice(info, func(i, j int) bool { + return info[i].id < info[j].id + }) + + var buf strings.Builder + + // Print. 
+ fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n") + for i := range info { + bar := info[i].bar + if !info[i].ok { + fmt.Fprint(&buf, "?"+strings.Repeat(" ", n)) + } else { + fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar)) + } + fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id) + } + return buf.String() +} + +func (c MajorityConfig) Slice() []uint64 { + var sl []uint64 + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + return sl +} + +// 插入排序 +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// CommittedIndex 获取已经提交了的 大多数索引 ;计算raft的提交索引 +func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index { + n := len(c) + if n == 0 { + // 这里很有意思,当没有任何peer的时候返回值居然是无穷大(64位无符号范围内),如果都没有任何 + // peer,0不是更合适?其实这跟JoinConfig类型有关,此处先放一放,后面会给出解释. + return math.MaxUint64 + } + + // 下面的代码对理解函数的实现原理没有多大影响,只是用了一个小技巧,在Peer数量不大于7个的情况下 + // 优先用栈数组,否则通过堆申请内存.因为raft集群超过7个的概率不大,用栈效率会更高 + var stk [7]uint64 + var srt []uint64 + if len(stk) >= n { + srt = stk[:n] + } else { + srt = make([]uint64, n) + } + + { + // 把所有的Peer.Progress.Match放入srt数组中 + i := n - 1 + for id := range c { + if idx, ok := l.AckedIndex(id); ok { + srt[i] = uint64(idx) + i-- + } + } + } + + // 插入排序 + insertionSort(srt) + + // 升序之后,取提交了大多数的日志索引 + pos := n - (n/2 + 1) + return Index(srt[pos]) +} + +// VoteResult 做选举结果统计 +func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult { + if len(c) == 0 { + return VoteWon + } + // 统计支持者(nv[1])和反对者(nv[0])的数量 + ny := [2]int{} + // 当然还有弃权的,raft的弃权不是peer主动弃权的,而是丢包或者超时造成的 + var missing int + for id := range c { + v, ok := votes[id] + if !ok { + missing++ + continue + } + if v { + ny[1]++ + } else { + ny[0]++ + } + } + + q := len(c)/2 + 1 + if ny[1] >= q { + return VoteWon + } + if ny[1]+missing >= q { + return VotePending + } + return VoteLost +} 
diff --git a/raft/quorum/over_quorum.go b/raft/quorum/over_quorum.go new file mode 100644 index 00000000000..985150f9f21 --- /dev/null +++ b/raft/quorum/over_quorum.go @@ -0,0 +1,43 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "math" + "strconv" +) + +// Index raft日志索引 +type Index uint64 + +func (i Index) String() string { + if i == math.MaxUint64 { + return "∞" + } + return strconv.FormatUint(uint64(i), 10) +} + +type AckedIndexer interface { + AckedIndex(voterID uint64) (idx Index, found bool) +} + +//go:generate stringer -type=VoteResult +type VoteResult uint8 + +const ( + VotePending VoteResult = 1 + iota // 竞选中 + VoteLost // 竞选失败 + VoteWon // 竞选获胜 +) diff --git a/raft/quorum/over_two_majority.go b/raft/quorum/over_two_majority.go new file mode 100644 index 00000000000..66727b84a52 --- /dev/null +++ b/raft/quorum/over_two_majority.go @@ -0,0 +1,69 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +// JointConfig JointConfig和MajorityConfig功能是一样的,只是JointConfig的做法是 +// 根据两个MajorityConfig的结果做一次融合性操作 +// MakeProgressTracker 初始化 +// case [map[id]=true,map] 集群中只有一个投票成员(领导者). +// [变更节点集合,老节点集合] 或 [节点、nil] +type JointConfig [2]MajorityConfig + +func (c JointConfig) String() string { + if len(c[1]) > 0 { + return c[0].String() + "&&" + c[1].String() + } + return c[0].String() +} + +func (c JointConfig) IDs() map[uint64]struct{} { + m := map[uint64]struct{}{} + for _, cc := range c { + for id := range cc { + m[id] = struct{}{} + } + } + return m +} + +func (c JointConfig) Describe(l AckedIndexer) string { + return MajorityConfig(c.IDs()).Describe(l) +} + +// CommittedIndex 已提交索引 +func (c JointConfig) CommittedIndex(l AckedIndexer) Index { + // 返回的是二者最小的那个,这时候可以理解MajorityConfig.CommittedIndex()为什么Peers数 + // 为0的时候返回无穷大了吧,如果返回0该函数就永远返回0了. + idx0 := c[0].CommittedIndex(l) + idx1 := c[1].CommittedIndex(l) + if idx0 < idx1 { + return idx0 + } + return idx1 +} + +func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult { + r1 := c[0].VoteResult(votes) + r2 := c[1].VoteResult(votes) + // 相同的,下里面的判断逻辑基就可以知道MajorityConfig.VoteResult()在peers数为0返回选举 + // 胜利的原因. + if r1 == r2 { + return r1 + } + if r1 == VoteLost || r2 == VoteLost { + return VoteLost + } + return VotePending +} diff --git a/raft/quorum/testdata/joint_commit.txt b/raft/quorum/testdata/joint_commit.txt new file mode 100644 index 00000000000..6e1635e59a4 --- /dev/null +++ b/raft/quorum/testdata/joint_commit.txt @@ -0,0 +1,481 @@ +# No difference between a simple majority quorum and a simple majority quorum +# joint with an empty majority quorum. (This is asserted for all datadriven tests +# by the framework, so we don't dwell on it more). +# +# Note that by specifying cfgj explicitly we tell the test harness to treat the +# input as a joint quorum and not a majority quorum. 
If we didn't specify
+# cfgj=zero the test would pass just the same, but it wouldn't be exercising the
+# joint quorum path.
0 (id=1) +x> 100 (id=2) +0 + +committed cfg=(1,2) cfgj=(2) idx=(50,100) +---- + idx +> 50 (id=1) +x> 100 (id=2) +50 + +committed cfg=(1,2) cfgj=(2) idx=(100,50) +---- + idx +x> 100 (id=1) +> 50 (id=2) +50 + + + +# Joint non-overlapping two-node configs. + +committed cfg=(1,2) cfgj=(3,4) idx=(50,_,_,_) +---- + idx +xxx> 50 (id=1) +? 0 (id=2) +? 0 (id=3) +? 0 (id=4) +0 + +committed cfg=(1,2) cfgj=(3,4) idx=(50,_,49,_) +---- + idx +xxx> 50 (id=1) +? 0 (id=2) +xx> 49 (id=3) +? 0 (id=4) +0 + +committed cfg=(1,2) cfgj=(3,4) idx=(50,48,49,_) +---- + idx +xxx> 50 (id=1) +x> 48 (id=2) +xx> 49 (id=3) +? 0 (id=4) +0 + +committed cfg=(1,2) cfgj=(3,4) idx=(50,48,49,47) +---- + idx +xxx> 50 (id=1) +x> 48 (id=2) +xx> 49 (id=3) +> 47 (id=4) +47 + +# Joint overlapping two-node configs. +committed cfg=(1,2) cfgj=(2,3) idx=(_,_,_) +---- + idx +? 0 (id=1) +? 0 (id=2) +? 0 (id=3) +0 + +committed cfg=(1,2) cfgj=(2,3) idx=(100,_,_) +---- + idx +xx> 100 (id=1) +? 0 (id=2) +? 0 (id=3) +0 + +committed cfg=(1,2) cfgj=(2,3) idx=(_,100,_) +---- + idx +? 0 (id=1) +xx> 100 (id=2) +? 0 (id=3) +0 + +committed cfg=(1,2) cfgj=(2,3) idx=(_,100,99) +---- + idx +? 0 (id=1) +xx> 100 (id=2) +x> 99 (id=3) +0 + +committed cfg=(1,2) cfgj=(2,3) idx=(101,100,99) +---- + idx +xx> 101 (id=1) +x> 100 (id=2) +> 99 (id=3) +99 + +# Joint identical two-node configs. +committed cfg=(1,2) cfgj=(1,2) idx=(_,_) +---- + idx +? 0 (id=1) +? 0 (id=2) +0 + +committed cfg=(1,2) cfgj=(1,2) idx=(_,40) +---- + idx +? 0 (id=1) +x> 40 (id=2) +0 + +committed cfg=(1,2) cfgj=(1,2) idx=(41,40) +---- + idx +x> 41 (id=1) +> 40 (id=2) +40 + + + +# Joint disjoint three-node configs. + +committed cfg=(1,2,3) cfgj=(4,5,6) idx=(_,_,_,_,_,_) +---- + idx +? 0 (id=1) +? 0 (id=2) +? 0 (id=3) +? 0 (id=4) +? 0 (id=5) +? 0 (id=6) +0 + +committed cfg=(1,2,3) cfgj=(4,5,6) idx=(100,_,_,_,_,_) +---- + idx +xxxxx> 100 (id=1) +? 0 (id=2) +? 0 (id=3) +? 0 (id=4) +? 0 (id=5) +? 
0 (id=6) +0 + +committed cfg=(1,2,3) cfgj=(4,5,6) idx=(100,_,_,90,_,_) +---- + idx +xxxxx> 100 (id=1) +? 0 (id=2) +? 0 (id=3) +xxxx> 90 (id=4) +? 0 (id=5) +? 0 (id=6) +0 + +committed cfg=(1,2,3) cfgj=(4,5,6) idx=(100,99,_,_,_,_) +---- + idx +xxxxx> 100 (id=1) +xxxx> 99 (id=2) +? 0 (id=3) +? 0 (id=4) +? 0 (id=5) +? 0 (id=6) +0 + +# First quorum <= 99, second one <= 97. Both quorums guarantee that 90 is +# committed. +committed cfg=(1,2,3) cfgj=(4,5,6) idx=(_,99,90,97,95,_) +---- + idx +? 0 (id=1) +xxxxx> 99 (id=2) +xx> 90 (id=3) +xxxx> 97 (id=4) +xxx> 95 (id=5) +? 0 (id=6) +90 + +# First quorum collapsed to 92. Second one already had at least 95 committed, +# so the result also collapses. +committed cfg=(1,2,3) cfgj=(4,5,6) idx=(92,99,90,97,95,_) +---- + idx +xx> 92 (id=1) +xxxxx> 99 (id=2) +x> 90 (id=3) +xxxx> 97 (id=4) +xxx> 95 (id=5) +? 0 (id=6) +92 + +# Second quorum collapses, but nothing changes in the output. +committed cfg=(1,2,3) cfgj=(4,5,6) idx=(92,99,90,97,95,77) +---- + idx +xx> 92 (id=1) +xxxxx> 99 (id=2) +x> 90 (id=3) +xxxx> 97 (id=4) +xxx> 95 (id=5) +> 77 (id=6) +92 + + +# Joint overlapping three-node configs. + +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(_,_,_,_,_) +---- + idx +? 0 (id=1) +? 0 (id=2) +? 0 (id=3) +? 0 (id=4) +? 0 (id=5) +0 + +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,_,_,_,_) +---- + idx +xxxx> 100 (id=1) +? 0 (id=2) +? 0 (id=3) +? 0 (id=4) +? 0 (id=5) +0 + +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,101,_,_,_) +---- + idx +xxx> 100 (id=1) +xxxx> 101 (id=2) +? 0 (id=3) +? 0 (id=4) +? 0 (id=5) +0 + +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,101,100,_,_) +---- + idx +xx> 100 (id=1) +xxxx> 101 (id=2) +> 100 (id=3) +? 0 (id=4) +? 0 (id=5) +0 + +# Second quorum could commit either 98 or 99, but first quorum is open. +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(_,100,_,99,98) +---- + idx +? 0 (id=1) +xxxx> 100 (id=2) +? 
0 (id=3) +xxx> 99 (id=4) +xx> 98 (id=5) +0 + +# Additionally, first quorum can commit either 100 or 99 +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(_,100,99,99,98) +---- + idx +? 0 (id=1) +xxxx> 100 (id=2) +xx> 99 (id=3) +> 99 (id=4) +x> 98 (id=5) +98 + +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(1,100,99,99,98) +---- + idx +> 1 (id=1) +xxxx> 100 (id=2) +xx> 99 (id=3) +> 99 (id=4) +x> 98 (id=5) +98 + +committed cfg=(1,2,3) cfgj=(1,4,5) idx=(100,100,99,99,98) +---- + idx +xxx> 100 (id=1) +> 100 (id=2) +x> 99 (id=3) +> 99 (id=4) +> 98 (id=5) +99 + + +# More overlap. + +committed cfg=(1,2,3) cfgj=(2,3,4) idx=(_,_,_,_) +---- + idx +? 0 (id=1) +? 0 (id=2) +? 0 (id=3) +? 0 (id=4) +0 + +committed cfg=(1,2,3) cfgj=(2,3,4) idx=(_,100,99,_) +---- + idx +? 0 (id=1) +xxx> 100 (id=2) +xx> 99 (id=3) +? 0 (id=4) +99 + +committed cfg=(1,2,3) cfgj=(2,3,4) idx=(98,100,99,_) +---- + idx +x> 98 (id=1) +xxx> 100 (id=2) +xx> 99 (id=3) +? 0 (id=4) +99 + +committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,100,99,_) +---- + idx +xx> 100 (id=1) +> 100 (id=2) +x> 99 (id=3) +? 0 (id=4) +99 + +committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,100,99,98) +---- + idx +xx> 100 (id=1) +> 100 (id=2) +x> 99 (id=3) +> 98 (id=4) +99 + +committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,_,_,101) +---- + idx +xx> 100 (id=1) +? 0 (id=2) +? 0 (id=3) +xxx> 101 (id=4) +0 + +committed cfg=(1,2,3) cfgj=(2,3,4) idx=(100,99,_,101) +---- + idx +xx> 100 (id=1) +x> 99 (id=2) +? 0 (id=3) +xxx> 101 (id=4) +99 + +# Identical. This is also exercised in the test harness, so it's listed here +# only briefly. +committed cfg=(1,2,3) cfgj=(1,2,3) idx=(50,45,_) +---- + idx +xx> 50 (id=1) +x> 45 (id=2) +? 0 (id=3) +45 diff --git a/raft/quorum/testdata/joint_vote.txt b/raft/quorum/testdata/joint_vote.txt new file mode 100644 index 00000000000..36cd0cabcff --- /dev/null +++ b/raft/quorum/testdata/joint_vote.txt @@ -0,0 +1,165 @@ +# Empty joint config wins all votes. This isn't used in production. 
Note that +# by specifying cfgj explicitly we tell the test harness to treat the input as +# a joint quorum and not a majority quorum. +vote cfgj=zero +---- +VoteWon + +# More examples with close to trivial configs. + +vote cfg=(1) cfgj=zero votes=(_) +---- +VotePending + +vote cfg=(1) cfgj=zero votes=(y) +---- +VoteWon + +vote cfg=(1) cfgj=zero votes=(n) +---- +VoteLost + +vote cfg=(1) cfgj=(1) votes=(_) +---- +VotePending + +vote cfg=(1) cfgj=(1) votes=(y) +---- +VoteWon + +vote cfg=(1) cfgj=(1) votes=(n) +---- +VoteLost + +vote cfg=(1) cfgj=(2) votes=(_,_) +---- +VotePending + +vote cfg=(1) cfgj=(2) votes=(y,_) +---- +VotePending + +vote cfg=(1) cfgj=(2) votes=(y,y) +---- +VoteWon + +vote cfg=(1) cfgj=(2) votes=(y,n) +---- +VoteLost + +vote cfg=(1) cfgj=(2) votes=(n,_) +---- +VoteLost + +vote cfg=(1) cfgj=(2) votes=(n,n) +---- +VoteLost + +vote cfg=(1) cfgj=(2) votes=(n,y) +---- +VoteLost + +# Two node configs. + +vote cfg=(1,2) cfgj=(3,4) votes=(_,_,_,_) +---- +VotePending + +vote cfg=(1,2) cfgj=(3,4) votes=(y,_,_,_) +---- +VotePending + +vote cfg=(1,2) cfgj=(3,4) votes=(y,y,_,_) +---- +VotePending + +vote cfg=(1,2) cfgj=(3,4) votes=(y,y,n,_) +---- +VoteLost + +vote cfg=(1,2) cfgj=(3,4) votes=(y,y,n,n) +---- +VoteLost + +vote cfg=(1,2) cfgj=(3,4) votes=(y,y,y,n) +---- +VoteLost + +vote cfg=(1,2) cfgj=(3,4) votes=(y,y,y,y) +---- +VoteWon + +vote cfg=(1,2) cfgj=(2,3) votes=(_,_,_) +---- +VotePending + +vote cfg=(1,2) cfgj=(2,3) votes=(_,n,_) +---- +VoteLost + +vote cfg=(1,2) cfgj=(2,3) votes=(y,y,_) +---- +VotePending + +vote cfg=(1,2) cfgj=(2,3) votes=(y,y,n) +---- +VoteLost + +vote cfg=(1,2) cfgj=(2,3) votes=(y,y,y) +---- +VoteWon + +vote cfg=(1,2) cfgj=(1,2) votes=(_,_) +---- +VotePending + +vote cfg=(1,2) cfgj=(1,2) votes=(y,_) +---- +VotePending + +vote cfg=(1,2) cfgj=(1,2) votes=(y,n) +---- +VoteLost + +vote cfg=(1,2) cfgj=(1,2) votes=(n,_) +---- +VoteLost + +vote cfg=(1,2) cfgj=(1,2) votes=(n,n) +---- +VoteLost + + +# Simple example for overlapping three 
+# The first committed index becomes known (for n1). Nothing changes in the
+# output because idx=12 is not known to be on a quorum (which is both nodes).
+committed cfg=(1, 2) idx=(12,_)
+----
+       idx
+x>     12    (id=1)
+?       0    (id=2)
+0
+
+# The second index comes in and finalizes the decision. The result will be the
+# smaller of the two indexes.
0 (id=3) +0 + +# We see a committed index, but a higher committed index for the last pending +# votes could change (increment) the outcome, so not final yet. +committed cfg=(1,2,3) idx=(12,5,_) +---- + idx +xx> 12 (id=1) +x> 5 (id=2) +? 0 (id=3) +5 + +# a) the case in which it does: +committed cfg=(1,2,3) idx=(12,5,6) +---- + idx +xx> 12 (id=1) +> 5 (id=2) +x> 6 (id=3) +6 + +# b) the case in which it does not: +committed cfg=(1,2,3) idx=(12,5,4) +---- + idx +xx> 12 (id=1) +x> 5 (id=2) +> 4 (id=3) +5 + +# c) a different case in which the last index is pending but it has no chance of +# swaying the outcome (because nobody in the current quorum agrees on anything +# higher than the candidate): +committed cfg=(1,2,3) idx=(5,5,_) +---- + idx +x> 5 (id=1) +> 5 (id=2) +? 0 (id=3) +5 + +# c) continued: Doesn't matter what shows up last. The result is final. +committed cfg=(1,2,3) idx=(5,5,12) +---- + idx +> 5 (id=1) +> 5 (id=2) +xx> 12 (id=3) +5 + +# With all committed idx known, the result is final. +committed cfg=(1, 2, 3) idx=(100, 101, 103) +---- + idx +> 100 (id=1) +x> 101 (id=2) +xx> 103 (id=3) +101 + + + +# Some more complicated examples. Similar to case c) above. The result is +# already final because no index higher than 103 is one short of quorum. +committed cfg=(1, 2, 3, 4, 5) idx=(101, 104, 103, 103,_) +---- + idx +x> 101 (id=1) +xxxx> 104 (id=2) +xx> 103 (id=3) +> 103 (id=4) +? 0 (id=5) +103 + +# A similar case which is not final because another vote for >= 103 would change +# the outcome. +committed cfg=(1, 2, 3, 4, 5) idx=(101, 102, 103, 103,_) +---- + idx +x> 101 (id=1) +xx> 102 (id=2) +xxx> 103 (id=3) +> 103 (id=4) +? 0 (id=5) +102 diff --git a/raft/quorum/testdata/majority_vote.txt b/raft/quorum/testdata/majority_vote.txt new file mode 100644 index 00000000000..5f9564b4f51 --- /dev/null +++ b/raft/quorum/testdata/majority_vote.txt @@ -0,0 +1,97 @@ +# The empty config always announces a won vote. 
+vote +---- +VoteWon + +vote cfg=(1) votes=(_) +---- +VotePending + +vote cfg=(1) votes=(n) +---- +VoteLost + +vote cfg=(123) votes=(y) +---- +VoteWon + + + + +vote cfg=(4,8) votes=(_,_) +---- +VotePending + +# With two voters, a single rejection loses the vote. +vote cfg=(4,8) votes=(n,_) +---- +VoteLost + +vote cfg=(4,8) votes=(y,_) +---- +VotePending + +vote cfg=(4,8) votes=(n,y) +---- +VoteLost + +vote cfg=(4,8) votes=(y,y) +---- +VoteWon + + + +vote cfg=(2,4,7) votes=(_,_,_) +---- +VotePending + +vote cfg=(2,4,7) votes=(n,_,_) +---- +VotePending + +vote cfg=(2,4,7) votes=(y,_,_) +---- +VotePending + +vote cfg=(2,4,7) votes=(n,n,_) +---- +VoteLost + +vote cfg=(2,4,7) votes=(y,n,_) +---- +VotePending + +vote cfg=(2,4,7) votes=(y,y,_) +---- +VoteWon + +vote cfg=(2,4,7) votes=(y,y,n) +---- +VoteWon + +vote cfg=(2,4,7) votes=(n,y,n) +---- +VoteLost + + + +# Test some random example with seven nodes (why not). +vote cfg=(1,2,3,4,5,6,7) votes=(y,y,n,y,_,_,_) +---- +VotePending + +vote cfg=(1,2,3,4,5,6,7) votes=(_,y,y,_,n,y,n) +---- +VotePending + +vote cfg=(1,2,3,4,5,6,7) votes=(y,y,n,y,_,n,y) +---- +VoteWon + +vote cfg=(1,2,3,4,5,6,7) votes=(y,y,_,n,y,n,n) +---- +VotePending + +vote cfg=(1,2,3,4,5,6,7) votes=(y,y,n,y,n,n,n) +---- +VoteLost diff --git a/raft/quorum/voteresult_string.go b/raft/quorum/voteresult_string.go new file mode 100644 index 00000000000..ab5fd481e6c --- /dev/null +++ b/raft/quorum/voteresult_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=VoteResult"; + +package quorum + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[VotePending-1] + _ = x[VoteLost-2] + _ = x[VoteWon-3] +} + +const _VoteResult_name = "VotePendingVoteLostVoteWon" + +var _VoteResult_index = [...]uint8{0, 11, 19, 26} + +func (i VoteResult) String() string { + i -= 1 + if i >= VoteResult(len(_VoteResult_index)-1) { + return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]] +} diff --git a/raft/raftpb/confchange.go b/raft/raftpb/confchange.go new file mode 100644 index 00000000000..934d8b34f61 --- /dev/null +++ b/raft/raftpb/confchange.go @@ -0,0 +1,126 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "strings" + + "github.com/gogo/protobuf/proto" +) + +// ConfChangeI 配置变更 +type ConfChangeI interface { + AsV2() ConfChangeV2 + AsV1() (ConfChangeV1, bool) +} + +var ( + _ ConfChangeI = ConfChangeV1{} + _ ConfChangeI = ConfChangeV2{} +) + +// EnterJoint 返回两个布尔.当且仅当此配置变更将使用联合共识时,第二个bool为真, +// 如果它包含一个以上的变更或明确要求使用联合共识,则属于这种情况. +// 第一个bool只有在第二个bool为真时才会出现,它表示联合状态是否会被自动留下. +func (c ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) { + // 注:理论上,更多的配置变化可以符合 "simple "协议的要求,但这取决于这些变化所基于的配置. + // 例如,如果两个节点都是基本配置的一部分,增加两个learner是不可以的(也就是说,在应用配置变化的过程中, + // 两个voter变成了learner).在实践中,这些区别应该是不重要的,所以我们保持简单,随意使用联合共识. 
+ if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 { + // 使用联合共识 + var autoLeave bool + switch c.Transition { + case ConfChangeTransitionAuto: + autoLeave = true + case ConfChangeTransitionJointImplicit: + autoLeave = true + case ConfChangeTransitionJointExplicit: + default: + panic(fmt.Sprintf("未知的过渡状态: %+v", c)) + } + return autoLeave, true + } + return false, false +} + +// LeaveJoint is true if the configuration change leaves a joint configuration. +// This is the case if the ConfChangeV2 is zero, with the possible exception of +// the Context field. +// 是真,如果配置改变留下了一个联合配置.如果ConfChangeV2为零,就会出现这种情况,但Context字段可能例外. +func (c ConfChangeV2) LeaveJoint() bool { + // NB: c已经是一个拷贝 + c.Context = "" + return proto.Equal(&c, &ConfChangeV2{}) +} + +// ------------------------------ over ---------------------------------------- + +func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) { + var typ EntryType + var ccdata []byte + var err error + if ccv1, ok := c.AsV1(); ok { + typ = EntryConfChange + ccdata, err = ccv1.Marshal() + } else { + ccv2 := c.AsV2() + typ = EntryConfChangeV2 + ccdata, err = ccv2.Marshal() + } + return typ, ccdata, err +} + +func (c ConfChangeV1) AsV2() ConfChangeV2 { + return ConfChangeV2{ + Changes: []ConfChangeSingle{{ + Type: c.Type, + NodeID: c.NodeID, + }}, + Context: c.Context, + } +} + +func (c ConfChangeV1) AsV1() (ConfChangeV1, bool) { + return c, true +} + +func (c ConfChangeV2) AsV2() ConfChangeV2 { return c } + +func (c ConfChangeV2) AsV1() (ConfChangeV1, bool) { return ConfChangeV1{}, false } + +// ConfChangesToString 与ConfChangesFromString正好相反. 
+func ConfChangesToString(ccs []ConfChangeSingle) string { + var buf strings.Builder + for i, cc := range ccs { + if i > 0 { + buf.WriteByte(' ') + } + switch cc.Type { + case ConfChangeAddNode: + buf.WriteString("add") + case ConfChangeAddLearnerNode: + buf.WriteString("learner") + case ConfChangeRemoveNode: + buf.WriteString("remove") + case ConfChangeUpdateNode: + buf.WriteString("update") + default: + buf.WriteString("unknown") + } + fmt.Fprintf(&buf, "%d", cc.NodeID) + } + return buf.String() +} diff --git a/raft/raftpb/confstate.go b/raft/raftpb/confstate.go new file mode 100644 index 00000000000..fd872bd77ef --- /dev/null +++ b/raft/raftpb/confstate.go @@ -0,0 +1,43 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "reflect" + "sort" +) + +// Equivalent 判断相不相等 +func (cs ConfState) Equivalent(cs2 ConfState) error { + cs1 := cs + orig1, orig2 := cs1, cs2 + s := func(sl *[]uint64) { + *sl = append([]uint64(nil), *sl...) 
+ sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] }) + } + + for _, cs := range []*ConfState{&cs1, &cs2} { + s(&cs.Voters) + s(&cs.Learners) + s(&cs.VotersOutgoing) + s(&cs.LearnersNext) + } + + if !reflect.DeepEqual(cs1, cs2) { + return fmt.Errorf("ConfStates 不相同sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2) + } + return nil +} diff --git a/raft/raftpb/over_self_serialize.go b/raft/raftpb/over_self_serialize.go new file mode 100644 index 00000000000..9a318596402 --- /dev/null +++ b/raft/raftpb/over_self_serialize.go @@ -0,0 +1,135 @@ +package raftpb + +import ( + "encoding/json" +) + +type A struct { + Type ConfChangeType + NodeID uint64 + Context string + ID uint64 +} + +func (m *ConfChangeV1) Marshal() (dAtA []byte, err error) { + a := A{ + Type: m.Type, + NodeID: m.NodeID, + Context: string(m.Context), + ID: m.ID, + } + return json.Marshal(a) +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +type temp struct { + Data string + Metadata SnapshotMetadata +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + t := temp{ + Data: string(m.Data), + Metadata: m.Metadata, + } + return json.Marshal(t) +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +type B struct { + Term uint64 + Index uint64 + Type EntryType + Data string +} + +func (m *Entry) Marshal() (dAtA []byte, err error) { + b := B{ + Term: m.Term, + Index: m.Index, + Type: m.Type, + Data: string(m.Data), + } + return json.Marshal(b) +} + +func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) { + return json.Marshal(m) +} + +func (m *Entry) Unmarshal(dAtA []byte) error { + b := B{ + Term: m.Term, + 
Index: m.Index, + Type: m.Type, + Data: string(m.Data), + } + err := json.Unmarshal(dAtA, &b) + m.Index = b.Index + m.Type = b.Type + m.Term = b.Term + m.Data = []byte(b.Data) // ok + return err +} + +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *Snapshot) Unmarshal(dAtA []byte) error { + t := temp{} + err := json.Unmarshal(dAtA, &t) + if err == nil { + m.Data = []byte(t.Data) + m.Metadata = t.Metadata + } + return err +} + +func (m *Message) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *HardState) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *ConfState) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *ConfChangeV1) Unmarshal(dAtA []byte) error { + a := A{} + err := json.Unmarshal(dAtA, &a) + m.Context = a.Context + m.Type = a.Type + m.ID = a.ID + m.NodeID = a.NodeID + return err +} + +func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} + +func (m *ConfChangeV2) Unmarshal(dAtA []byte) error { + return json.Unmarshal(dAtA, m) +} diff --git a/raft/raftpb/raft.pb.go b/raft/raftpb/raft.pb.go new file mode 100644 index 00000000000..baf4ea559cd --- /dev/null +++ b/raft/raftpb/raft.pb.go @@ -0,0 +1,589 @@ +// Code generated by protoc-gen-gogo. +// source: raft.proto + +package raftpb + +import ( + "encoding/json" + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +var ( + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 + EntryConfChangeV2 EntryType = 2 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", + 2: "EntryConfChangeV2", +} + +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, + "EntryConfChangeV2": 2, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} + +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} + +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} + +func (EntryType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{0} +} + +// For description of different message types, see: +// https://pkg.go.dev/github.com/ls-2018/etcd_cn/raft#hdr-MessageType +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: 
"MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} + +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} + +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} + +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} + +func (MessageType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{1} +} + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +type ConfChangeTransition int32 + +// 指定关于联合共识的配置更改的行为.自动、显式、隐式 +const ( + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto ConfChangeTransition = 0 + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). 
+ ConfChangeTransitionJointImplicit ConfChangeTransition = 1 + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit ConfChangeTransition = 2 +) + +var ConfChangeTransition_name = map[int32]string{ + 0: "ConfChangeTransitionAuto", + 1: "ConfChangeTransitionJointImplicit", + 2: "ConfChangeTransitionJointExplicit", +} + +var ConfChangeTransition_value = map[string]int32{ + "ConfChangeTransitionAuto": 0, + "ConfChangeTransitionJointImplicit": 1, + "ConfChangeTransitionJointExplicit": 2, +} + +func (x ConfChangeTransition) Enum() *ConfChangeTransition { + p := new(ConfChangeTransition) + *p = x + return p +} + +func (x ConfChangeTransition) String() string { + return proto.EnumName(ConfChangeTransition_name, int32(x)) +} + +func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition") + if err != nil { + return err + } + *x = ConfChangeTransition(value) + return nil +} + +func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{2} +} + +type ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 + ConfChangeAddLearnerNode ConfChangeType = 3 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", + 3: "ConfChangeAddLearnerNode", +} + +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, + "ConfChangeAddLearnerNode": 3, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + 
return p +} + +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} + +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} + +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{3} +} + +type Entry struct { + // Term:表示该Entry所在的任期. + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + // Index:当前这个entry在整个raft日志中的位置索引,有了Term和Index之后,一个`log entry`就能被唯一标识. + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + // 当前entry的类型 + // 目前etcd支持两种类型:EntryNormal和EntryConfChange + // EntryNormaln表示普通的数据操作 + // EntryConfChange表示集群的变更操作 + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + // 具体操作使用的数据 + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{0} +} + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{1} +} + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` +} + 
+func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{2} +} + +// Message 消息格式 +// Raft协议时提到,节点之间传递的是消息(Message), 每条消息中可以携带多条Entry记录,每条Entry记录对应一个独立的操作. +type Message struct { + // 该字段定义了不同的消息类型,etcd-raft就是通过不同的消息类型来进行处理的,etcd中一共定义了19种类型 + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + // 消息的目标节点 ID,在急群中每个节点都有一个唯一的id作为标识 + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + // 发送消息的节点ID.在集群中,每个节点都拥有一个唯一ID作为标识.,或者leader转移的下一个跳 + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + // 发送消息的节点的Term值. 如果Term值为0,则为本地消息,在etcd刊负模块的实现中,对本地消息进行特殊处理. + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + // 该消息携带的第一条Entry记录的Term值. + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` // 日志索引ID,用于节点向leader汇报自己已经commit的日志数据ID + // 如果是MsgApp类型的消息,则该字段中保存了Leader节点复制到Follower节点的Entry记录.在其他类型消息中,该字段的含义后面会详细介绍. + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + // 搜 ProgressTracker + // handleAppendEntries 处理函数 + // leader会为每个Follower都维护一个leaderCommit,表示leader认为Follower已经提交的日志条目索引值 + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + // 在传输快照时,该字段保存了快照数据 + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + // 主要用于响应类型的消息,表示是否拒绝收到的消息. 
+ Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` // 拒绝同步日志请求时返回的当前节点日志ID,用于被拒绝方快速定位到下一次合适的同步日志位置 + // 携带的一些上下文的信息, 例如,campaignTransfer + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{3} +} + +// HardState 封装了raft协议中规定的需要实时持久化的状态属性:当前选举周期、投票和已提交的Index +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` // 当前任期 + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` // 给谁投了票 + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` // 已提交的位置 +} + +// Reset 重置集群状态 +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{4} +} + +// ConfState tracker.Config的另一种体现形式 +type ConfState struct { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"` + // The learners in the incoming config. + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + // The voters in the outgoing config. + VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"` + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). 
+ LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"` + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"` +} + +// ConfChangeV1 只能传递一个节点变更 +type ConfChangeV1 struct { + Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"` // NodeID: 变更节点的id + Context string `protobuf:"string,4,opt,name=context" json:"context,omitempty"` + // 这个字段只被etcd用来传播一个唯一的标识符.理想情况下,它应该真正使用Context来代替.在ConfChangeV2中没有对应的字段存在. + ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"` // 节点变更的次数 +} + +// ConfChangeV2 可以传递多个 +type ConfChangeV2 struct { + Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"` + Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"` + Context string `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{5} +} + +func (m *ConfChangeV1) Reset() { *m = ConfChangeV1{} } +func (m *ConfChangeV1) String() string { return proto.CompactTextString(m) } +func (*ConfChangeV1) ProtoMessage() {} +func (*ConfChangeV1) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{6} +} + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. 
+type ConfChangeSingle struct { + Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"` +} + +func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} } +func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) } +func (*ConfChangeSingle) ProtoMessage() {} +func (*ConfChangeSingle) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{7} +} + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. 
+// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf + +func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} } +func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) } +func (*ConfChangeV2) ProtoMessage() {} +func (*ConfChangeV2) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{8} +} + +func init() { + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChangeV1)(nil), "raftpb.ConfChange") + proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle") + proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2") +} + +func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) } + +var fileDescriptor_b042552c306ae59b = []byte{ + // 1026 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xdb, 0x46, + 0x17, 0x25, 0x29, 0x5a, 0x3f, 
0x57, 0xb2, 0x3c, 0xbe, 0xf1, 0x17, 0x10, 0x86, 0xc1, 0xe8, 0x53, + 0x52, 0x44, 0x70, 0x11, 0xb7, 0xd0, 0xa2, 0x28, 0xba, 0xf3, 0x4f, 0x00, 0xab, 0xb0, 0xdc, 0x54, + 0x76, 0xbc, 0x28, 0x50, 0x08, 0x63, 0x71, 0x44, 0xb3, 0x15, 0x39, 0x04, 0x39, 0x72, 0xed, 0x4d, + 0x51, 0xf4, 0x09, 0xba, 0xec, 0x26, 0xdb, 0x3e, 0x40, 0x9f, 0xc2, 0x4b, 0x03, 0xdd, 0x74, 0x15, + 0x34, 0xf6, 0x8b, 0x14, 0x33, 0x1c, 0x4a, 0x94, 0x6c, 0x64, 0xd1, 0xdd, 0xcc, 0xb9, 0x67, 0xee, + 0x9c, 0x73, 0xef, 0xe5, 0x10, 0x20, 0xa1, 0x63, 0xb1, 0x13, 0x27, 0x5c, 0x70, 0x2c, 0xcb, 0x75, + 0x7c, 0xbe, 0xb9, 0xe1, 0x73, 0x9f, 0x2b, 0xe8, 0x33, 0xb9, 0xca, 0xa2, 0xed, 0x9f, 0x61, 0xe5, + 0x75, 0x24, 0x92, 0x6b, 0x74, 0xc0, 0x3e, 0x65, 0x49, 0xe8, 0x58, 0x2d, 0xb3, 0x63, 0xef, 0xd9, + 0x37, 0xef, 0x9f, 0x19, 0x03, 0x85, 0xe0, 0x26, 0xac, 0xf4, 0x22, 0x8f, 0x5d, 0x39, 0xa5, 0x42, + 0x28, 0x83, 0xf0, 0x53, 0xb0, 0x4f, 0xaf, 0x63, 0xe6, 0x98, 0x2d, 0xb3, 0xd3, 0xec, 0xae, 0xef, + 0x64, 0x77, 0xed, 0xa8, 0x94, 0x32, 0x30, 0x4b, 0x74, 0x1d, 0x33, 0x44, 0xb0, 0x0f, 0xa8, 0xa0, + 0x8e, 0xdd, 0x32, 0x3b, 0x8d, 0x81, 0x5a, 0xb7, 0x7f, 0x31, 0x81, 0x9c, 0x44, 0x34, 0x4e, 0x2f, + 0xb8, 0xe8, 0x33, 0x41, 0x3d, 0x2a, 0x28, 0x7e, 0x01, 0x30, 0xe2, 0xd1, 0x78, 0x98, 0x0a, 0x2a, + 0xb2, 0xdc, 0xf5, 0x79, 0xee, 0x7d, 0x1e, 0x8d, 0x4f, 0x64, 0x40, 0xe7, 0xae, 0x8d, 0x72, 0x40, + 0x2a, 0x0d, 0x94, 0xd2, 0xa2, 0x89, 0x0c, 0x92, 0xfe, 0x84, 0xf4, 0x57, 0x34, 0xa1, 0x90, 0xf6, + 0x77, 0x50, 0xcd, 0x15, 0x48, 0x89, 0x52, 0x81, 0xba, 0xb3, 0x31, 0x50, 0x6b, 0xfc, 0x0a, 0xaa, + 0xa1, 0x56, 0xa6, 0x12, 0xd7, 0xbb, 0x4e, 0xae, 0x65, 0x59, 0xb9, 0xce, 0x3b, 0xe3, 0xb7, 0xdf, + 0x95, 0xa0, 0xd2, 0x67, 0x69, 0x4a, 0x7d, 0x86, 0xaf, 0xc0, 0x16, 0xf3, 0x5a, 0x3d, 0xc9, 0x73, + 0xe8, 0x70, 0xb1, 0x5a, 0x92, 0x86, 0x1b, 0x60, 0x09, 0xbe, 0xe0, 0xc4, 0x12, 0x5c, 0xda, 0x18, + 0x27, 0x7c, 0xc9, 0x86, 0x44, 0x66, 0x06, 0xed, 0x65, 0x83, 0xe8, 0x42, 0x65, 0xc2, 0x7d, 0xd5, + 0xdd, 0x95, 0x42, 0x30, 0x07, 0xe7, 0x65, 0x2b, 0x3f, 0x2c, 0xdb, 
0x2b, 0xa8, 0xb0, 0x48, 0x24, + 0x01, 0x4b, 0x9d, 0x4a, 0xab, 0xd4, 0xa9, 0x77, 0x57, 0x17, 0x7a, 0x9c, 0xa7, 0xd2, 0x1c, 0xdc, + 0x82, 0xf2, 0x88, 0x87, 0x61, 0x20, 0x9c, 0x6a, 0x21, 0x97, 0xc6, 0xb0, 0x0b, 0xd5, 0x54, 0x57, + 0xcc, 0xa9, 0xa9, 0x4a, 0x92, 0xe5, 0x4a, 0xe6, 0x15, 0xcc, 0x79, 0x32, 0x63, 0xc2, 0x7e, 0x60, + 0x23, 0xe1, 0x40, 0xcb, 0xec, 0x54, 0xf3, 0x8c, 0x19, 0x86, 0x2f, 0x00, 0xb2, 0xd5, 0x61, 0x10, + 0x09, 0xa7, 0x5e, 0xb8, 0xb3, 0x80, 0xa3, 0x03, 0x95, 0x11, 0x8f, 0x04, 0xbb, 0x12, 0x4e, 0x43, + 0x35, 0x36, 0xdf, 0xb6, 0xbf, 0x87, 0xda, 0x21, 0x4d, 0xbc, 0x6c, 0x7c, 0xf2, 0x0a, 0x9a, 0x0f, + 0x2a, 0xe8, 0x80, 0x7d, 0xc9, 0x05, 0x5b, 0xfc, 0x38, 0x24, 0x52, 0x30, 0x5c, 0x7a, 0x68, 0xb8, + 0xfd, 0xa7, 0x09, 0xb5, 0xd9, 0xbc, 0xe2, 0x53, 0x28, 0xcb, 0x33, 0x49, 0xea, 0x98, 0xad, 0x52, + 0xc7, 0x1e, 0xe8, 0x1d, 0x6e, 0x42, 0x75, 0xc2, 0x68, 0x12, 0xc9, 0x88, 0xa5, 0x22, 0xb3, 0x3d, + 0xbe, 0x84, 0xb5, 0x8c, 0x35, 0xe4, 0x53, 0xe1, 0xf3, 0x20, 0xf2, 0x9d, 0x92, 0xa2, 0x34, 0x33, + 0xf8, 0x1b, 0x8d, 0xe2, 0x73, 0x58, 0xcd, 0x0f, 0x0d, 0x23, 0xe9, 0xd4, 0x56, 0xb4, 0x46, 0x0e, + 0x1e, 0xb3, 0x2b, 0x81, 0xcf, 0x01, 0xe8, 0x54, 0xf0, 0xe1, 0x84, 0xd1, 0x4b, 0xa6, 0x86, 0x21, + 0x2f, 0x68, 0x4d, 0xe2, 0x47, 0x12, 0x6e, 0xbf, 0x33, 0x01, 0xa4, 0xe8, 0xfd, 0x0b, 0x1a, 0xf9, + 0x0c, 0x3f, 0xd7, 0x63, 0x6b, 0xa9, 0xb1, 0x7d, 0x5a, 0xfc, 0x0c, 0x33, 0xc6, 0x83, 0xc9, 0x7d, + 0x09, 0x95, 0x88, 0x7b, 0x6c, 0x18, 0x78, 0xba, 0x28, 0x4d, 0x19, 0xbc, 0x7b, 0xff, 0xac, 0x7c, + 0xcc, 0x3d, 0xd6, 0x3b, 0x18, 0x94, 0x65, 0xb8, 0xe7, 0x15, 0xfb, 0x62, 0x2f, 0xf4, 0x05, 0x37, + 0xc1, 0x0a, 0x3c, 0xdd, 0x08, 0xd0, 0xa7, 0xad, 0xde, 0xc1, 0xc0, 0x0a, 0xbc, 0x76, 0x08, 0x64, + 0x7e, 0xf9, 0x49, 0x10, 0xf9, 0x93, 0xb9, 0x48, 0xf3, 0xbf, 0x88, 0xb4, 0x3e, 0x26, 0xb2, 0xfd, + 0x87, 0x09, 0x8d, 0x79, 0x9e, 0xb3, 0x2e, 0xee, 0x01, 0x88, 0x84, 0x46, 0x69, 0x20, 0x02, 0x1e, + 0xe9, 0x1b, 0xb7, 0x1e, 0xb9, 0x71, 0xc6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x84, 0xca, 0x48, + 0xb1, 
0xb2, 0x8e, 0x17, 0x9e, 0x94, 0x65, 0x6b, 0xf9, 0x17, 0xa6, 0xe9, 0xc5, 0x9a, 0x95, 0x16, + 0x6a, 0xb6, 0x7d, 0x08, 0xb5, 0xd9, 0xbb, 0x8b, 0x6b, 0x50, 0x57, 0x9b, 0x63, 0x9e, 0x84, 0x74, + 0x42, 0x0c, 0x7c, 0x02, 0x6b, 0x0a, 0x98, 0xe7, 0x27, 0x26, 0xfe, 0x0f, 0xd6, 0x97, 0xc0, 0xb3, + 0x2e, 0xb1, 0xb6, 0xff, 0xb2, 0xa0, 0x5e, 0x78, 0x96, 0x10, 0xa0, 0xdc, 0x4f, 0xfd, 0xc3, 0x69, + 0x4c, 0x0c, 0xac, 0x43, 0xa5, 0x9f, 0xfa, 0x7b, 0x8c, 0x0a, 0x62, 0xea, 0xcd, 0x9b, 0x84, 0xc7, + 0xc4, 0xd2, 0xac, 0xdd, 0x38, 0x26, 0x25, 0x6c, 0x02, 0x64, 0xeb, 0x01, 0x4b, 0x63, 0x62, 0x6b, + 0xe2, 0x19, 0x17, 0x8c, 0xac, 0x48, 0x6d, 0x7a, 0xa3, 0xa2, 0x65, 0x1d, 0x95, 0x4f, 0x00, 0xa9, + 0x20, 0x81, 0x86, 0xbc, 0x8c, 0xd1, 0x44, 0x9c, 0xcb, 0x5b, 0xaa, 0xb8, 0x01, 0xa4, 0x88, 0xa8, + 0x43, 0x35, 0x44, 0x68, 0xf6, 0x53, 0xff, 0x6d, 0x94, 0x30, 0x3a, 0xba, 0xa0, 0xe7, 0x13, 0x46, + 0x00, 0xd7, 0x61, 0x55, 0x27, 0x92, 0x5f, 0xdc, 0x34, 0x25, 0x75, 0x4d, 0xdb, 0xbf, 0x60, 0xa3, + 0x1f, 0xbf, 0x9d, 0xf2, 0x64, 0x1a, 0x92, 0x86, 0xb4, 0xdd, 0x4f, 0x7d, 0xd5, 0xa0, 0x31, 0x4b, + 0x8e, 0x18, 0xf5, 0x58, 0x42, 0x56, 0xf5, 0xe9, 0xd3, 0x20, 0x64, 0x7c, 0x2a, 0x8e, 0xf9, 0x4f, + 0xa4, 0xa9, 0xc5, 0x0c, 0x18, 0xf5, 0xd4, 0xff, 0x8e, 0xac, 0x69, 0x31, 0x33, 0x44, 0x89, 0x21, + 0xda, 0xef, 0x9b, 0x84, 0x29, 0x8b, 0xeb, 0xfa, 0x56, 0xbd, 0x57, 0x1c, 0xdc, 0xfe, 0xd5, 0x84, + 0x8d, 0xc7, 0xc6, 0x03, 0xb7, 0xc0, 0x79, 0x0c, 0xdf, 0x9d, 0x0a, 0x4e, 0x0c, 0xfc, 0x04, 0xfe, + 0xff, 0x58, 0xf4, 0x6b, 0x1e, 0x44, 0xa2, 0x17, 0xc6, 0x93, 0x60, 0x14, 0xc8, 0x56, 0x7c, 0x8c, + 0xf6, 0xfa, 0x4a, 0xd3, 0xac, 0xed, 0x6b, 0x68, 0x2e, 0x7e, 0x14, 0xb2, 0x18, 0x73, 0x64, 0xd7, + 0xf3, 0xe4, 0xf8, 0x13, 0x03, 0x9d, 0xa2, 0xd8, 0x01, 0x0b, 0xf9, 0x25, 0x53, 0x11, 0x73, 0x31, + 0xf2, 0x36, 0xf6, 0xa8, 0xc8, 0x22, 0xd6, 0xa2, 0x91, 0x5d, 0xcf, 0x3b, 0xca, 0xde, 0x1e, 0x15, + 0x2d, 0xed, 0xbd, 0xb8, 0xf9, 0xe0, 0x1a, 0xb7, 0x1f, 0x5c, 0xe3, 0xe6, 0xce, 0x35, 0x6f, 0xef, + 0x5c, 0xf3, 0x9f, 0x3b, 0xd7, 0xfc, 0xed, 
0xde, 0x35, 0x7e, 0xbf, 0x77, 0x8d, 0xdb, 0x7b, 0xd7, + 0xf8, 0xfb, 0xde, 0x35, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xee, 0xe3, 0x39, 0x8b, 0xbb, 0x08, + 0x00, 0x00, +} + +func (m *Entry) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *SnapshotMetadata) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Snapshot) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *Message) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *HardState) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +func (m *ConfState) Size() (n int) { + marshal, _ := json.Marshal(m) + return len(marshal) +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRaft = fmt.Errorf("proto: unexpected end of group") +) diff --git a/raft/raftpb/raft.proto b/raft/raftpb/raft.proto new file mode 100644 index 00000000000..1cf6e9bd59b --- /dev/null +++ b/raft/raftpb/raft.proto @@ -0,0 +1,190 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; +option (gogoproto.goproto_unkeyed_all) = false; +option (gogoproto.goproto_unrecognized_all) = false; +option (gogoproto.goproto_sizecache_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; // corresponds to pb.ConfChange + EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2 +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; //必须是64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; //必须是64-bit aligned for atomic operations + 
optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +// For description of different message types, see: +// https://pkg.go.dev/github.com/ls-2018/etcd_cn/raft#hdr-MessageType +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + // logTerm is generally used for appending Raft logs to followers. For example, + // (type=MsgApp,index=100,logTerm=5) means leader appends entries starting at + // index=101, and the term of entry at index 100 is 5. + // (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some + // entries from its leader as it already has an entry with term 5 at index 100. 
+ optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + optional uint64 commit = 8 [(gogoproto.nullable) = false]; + optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +enum ConfChangeTransition { + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto = 0; + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit = 1; + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit = 2; +} + +message ConfState { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + repeated uint64 voters = 1; + // The learners in the incoming config. + repeated uint64 learners = 2; + // The voters in the outgoing config. 
+ repeated uint64 voters_outgoing = 3; + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + repeated uint64 learners_next = 4; + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + optional bool auto_leave = 5 [(gogoproto.nullable) = false]; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; + ConfChangeAddLearnerNode = 3; +} + +message ConfChange { + optional ConfChangeType type = 2 [(gogoproto.nullable) = false]; + optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ]; + optional bytes context = 4; + + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ]; + //ID: 表示节点变更的消息id,这个意义不大 + //Type: ConfChangeAddNode或者ConfChangeRemoveNode + //NodeID: 变更节点的id +} + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +message ConfChangeSingle { + optional ConfChangeType type = 1 [(gogoproto.nullable) = false]; + optional uint64 node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"]; +} + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. 
Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. 
+// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +message ConfChangeV2 { + optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false]; + repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false]; + optional bytes context = 3; +} diff --git a/raft/rawnode.go b/raft/rawnode.go new file mode 100644 index 00000000000..9255ff3ecf4 --- /dev/null +++ b/raft/rawnode.go @@ -0,0 +1,223 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// ErrStepLocalMsg 当尝试步入一个本地raft信息时,会返回 +var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") + +// ErrStepPeerNotFound is returned when try to step a response message +// but there is no peer found in raft.prstrack for that localNode. +var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") + +// RawNode is a thread-unsafe RaftNodeInterFace. +// The methods of this struct correspond to the methods of RaftNodeInterFace and are described +// more fully there. +type RawNode struct { + raft *raft + prevSoftSt *SoftState + prevHardSt pb.HardState +} + +// NewRawNode 从给定的配置中实例化一个RawNode. +// 参见Bootstrap()来引导一个初始状态;它取代了这个方法以前的'peers'参数(具有相同的行为). 
+// 然而,建议应用程序不要调用Bootstrap,而是通过设置一个第一索引大于1的存储空间,并将所需的ConfState作为其InitialState来手动引导其状态. +func NewRawNode(config *Config) (*RawNode, error) { + r := newRaft(config) // ✅ + rn := &RawNode{ + raft: r, + } + + rn.prevSoftSt = r.softState() // 节点状态 + rn.prevHardSt = r.hardState() // 持久化存储状态 + return rn, nil +} + +// Tick 触发一次内部时钟逻辑 +func (rn *RawNode) Tick() { + rn.raft.tick() +} + +// TickQuiesced advances the internal logical clock by a single tick without +// performing any other state machine processing. It allows the caller to avoid +// periodic heartbeats and elections when all of the peers in a Raft group are +// known to be at the same state. Expected usage is to periodically invoke Tick +// or TickQuiesced depending on whether the group is "active" or "quiesced". +// +// WARNING: Be very careful about using this method as it subverts the Raft +// state machine. You should probably be using Tick instead. +func (rn *RawNode) TickQuiesced() { + rn.raft.electionElapsed++ +} + +// Campaign causes this RawNode to transition to candidate state. +func (rn *RawNode) Campaign() error { + return rn.raft.Step(pb.Message{Type: pb.MsgHup}) +} + +// Propose proposes data be appended to the raft log. +func (rn *RawNode) Propose(data []byte) error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgProp, + From: rn.raft.id, + Entries: []pb.Entry{ + {Data: data}, + }, + }) +} + +// ProposeConfChange proposes a config change. See (RaftNodeInterFace).ProposeConfChange for +// details. +func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error { + m, err := confChangeToMsg(cc) + if err != nil { + return err + } + return rn.raft.Step(m) +} + +// ApplyConfChange applies a config change to the local localNode. The app must call +// this when it applies a configuration change, except when it decides to reject +// the configuration change, in which case no call must take place. 
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { + cs := rn.raft.applyConfChange(cc.AsV2()) + return &cs +} + +// Step advances the state machine using the given message. +func (rn *RawNode) Step(m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + return ErrStepLocalMsg + } + if pr := rn.raft.prstrack.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { + return rn.raft.Step(m) + } + return ErrStepPeerNotFound +} + +// 计算状态变化;返回ready结构体 +func (rn *RawNode) readyWithoutAccept() Ready { + return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) // 计算状态变化 +} + +// 当已经commit的消息提交给上层应用后 被调用 +func (rn *RawNode) acceptReady(rd Ready) { + if rd.SoftState != nil { + rn.prevSoftSt = rd.SoftState + } + if len(rd.ReadStates) != 0 { + // 清除MsgReadIndex消息导致的数据存储 + rn.raft.readStates = nil + } + rn.raft.msgs = nil +} + +// HasReady 检查是否有ready消息未处理 与 Ready.containsUpdates().保持一致 +func (rn *RawNode) HasReady() bool { + r := rn.raft + if !r.softState().equal(rn.prevSoftSt) { + return true + } + if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { + return true + } + if r.raftLog.hasPendingSnapshot() { + return true + } + if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { + return true + } + if len(r.readStates) != 0 { + return true + } + return false +} + +// Advance 通知RawNode应用程序已经应用并保存了最后一个Ready结果的进度. +func (rn *RawNode) Advance(rd Ready) { + if !IsEmptyHardState(rd.HardState) { + rn.prevHardSt = rd.HardState + } + rn.raft.advance(rd) +} + +// Status returns the current status of the given group. This allocates, see +// BasicStatus and WithProgress for allocation-friendlier choices. +func (rn *RawNode) Status() Status { + status := getStatus(rn.raft) + return status +} + +// BasicStatus returns a BasicStatus. Notably this does not contain the +// Progress map; see WithProgress for an allocation-free way to inspect it. 
+func (rn *RawNode) BasicStatus() BasicStatus { + return getBasicStatus(rn.raft) +} + +// ProgressType indicates the type of replica a Progress corresponds to. +type ProgressType byte + +const ( + // ProgressTypePeer accompanies a Progress for a regular peer replica. + ProgressTypePeer ProgressType = iota + // ProgressTypeLearner accompanies a Progress for a learner replica. + ProgressTypeLearner +) + +// WithProgress is a helper to introspect the Progress for this localNode and its +// peers. +func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) { + rn.raft.prstrack.Visit(func(id uint64, pr *tracker.Progress) { + typ := ProgressTypePeer + if pr.IsLearner { + typ = ProgressTypeLearner + } + p := *pr + p.Inflights = nil + visitor(id, typ, p) + }) +} + +// ReportUnreachable reports the given localNode is not reachable for the last send. +func (rn *RawNode) ReportUnreachable(id uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) +} + +// ReportSnapshot reports the status of the sent snapshot. +func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) +} + +// ReadIndex requests a read state. The read state will be set in ready. +// Read State has a read index. Once the application advances further than the read +// index, any linearizable read requests issued before the read request can be +// processed safely. The read state will have the same rctx attached. 
+func (rn *RawNode) ReadIndex(rctx []byte) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) // ok +} diff --git a/raft/read_index.go b/raft/read_index.go new file mode 100644 index 00000000000..0eefb0cf247 --- /dev/null +++ b/raft/read_index.go @@ -0,0 +1,63 @@ +package raft + +import pb "github.com/ls-2018/etcd_cn/raft/raftpb" + +func releasePendingReadIndexMessages(r *raft) { + if !r.committedEntryInCurrentTerm() { + r.logger.Error("pending MsgReadIndex should be released only after first commit in current term") + return + } + + msgs := r.pendingReadIndexMessages + r.pendingReadIndexMessages = nil + + for _, m := range msgs { + sendMsgReadIndexResponse(r, m) + } +} + +// raft结构体中的readOnly作用是批量处理只读请求,只读请求有两种模式,分别是ReadOnlySafe和ReadOnlyLeaseBased +// ReadOnlySafe是ETCD作者推荐的模式,因为这种模式不受节点之间时钟差异和网络分区的影响 +// 线性一致性读用的就是ReadOnlySafe +func sendMsgReadIndexResponse(r *raft, m pb.Message) { + switch r.readOnly.option { + // 如果需要更多的地方投票,进行全面的广播. + case ReadOnlySafe: + // 该线性读模式,每次 Follower 进行读请求时,需要和 Leader 同步日志提交位点信息,而 Leader需要向过半的 Follower 发起证明自己是 Leader 的轻量的 RPC 请求, + // 相当于一个 Follower 读,至少需要 1 +(n/2)+ 1 次的 RPC 请求. + + // 清空readOnly中指定消息ID及之前的所有记录 + r.readOnly.addRequest(r.raftLog.committed, m) // 记录当前节点的raftLog.committed字段值,即已提交位置 + + // recvAck通知只读结构raft状态机已收到对附加只读请求上下文的心跳信号的确认. + r.readOnly.recvAck(r.id, m.Entries[0].Data) // 本机确认此消息 + // leader 节点向其他节点发起广播 + r.bcastHeartbeatWithCtx(m.Entries[0].Data) + + case ReadOnlyLeaseBased: + if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None { + r.send(resp) + } + } +} + +// responseToReadIndexReq 为' req '构造响应.如果' req '来自对等体本身,则返回一个空值. 
+func (r *raft) responseToReadIndexReq(req pb.Message, readIndex uint64) pb.Message { + // 通过from来判断该消息是否是follower节点转发到leader中的 + // 如果是客户端直接发到leader节点的消息,将MsgReadIndex消息中的已提交位置和消息id封装成ReadState实例,添加到readStates + // raft 模块也有一个 for-loop 的 goroutine,来读取该数组,并对MsgReadIndex进行响应 + if req.From == None || req.From == r.id { + r.readStates = append(r.readStates, ReadState{ + Index: readIndex, + RequestCtx: req.Entries[0].Data, + }) + return pb.Message{} + } + // 转发自follower + return pb.Message{ + Type: pb.MsgReadIndexResp, + To: req.From, + Index: readIndex, + Entries: req.Entries, + } +} diff --git a/raft/role.go b/raft/role.go new file mode 100644 index 00000000000..216e20391b7 --- /dev/null +++ b/raft/role.go @@ -0,0 +1,611 @@ +package raft + +import ( + "bytes" + "fmt" + + "github.com/ls-2018/etcd_cn/raft/quorum" + pb "github.com/ls-2018/etcd_cn/raft/raftpb" + "github.com/ls-2018/etcd_cn/raft/tracker" +) + +// Step 分流各种消息 +func (r *raft) Step(m pb.Message) error { + // 处理消息的期限,这可能会导致我们成为一名follower. 
+ switch { + case m.Term == 0: + // 本地消息 MsgHup、MsgProp、MsgReadindex + case m.Term > r.Term: // 投票或预投票请求 + // 消息的Term大于节点当前的Term + if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + // 如果收到的是投票类消息 + // 当context为campaignTransfer时表示强制要求进行竞选 + force := bytes.Equal(m.Context, []byte(campaignTransfer)) + // 是否在租约期以内 + inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout + if !force && inLease { + // 如果非强制,而且又在租约期以内,就不做任何处理 + // 非强制又在租约期内可以忽略选举消息,见论文的4.2.3,这是为了阻止已经离开集群的节点再次发起投票请求 + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] 忽略消息 %s from %x [logterm: %d, index: %d] at term %d: 租约没有过期 (remaining ticks: %d)", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed) + return nil + } + } + switch { + case m.Type == pb.MsgPreVote: + // 在应答一个prevote消息时不对任期term做修改 + case m.Type == pb.MsgPreVoteResp && !m.Reject: + // We send pre-vote requests with a term in our future. If the + // pre-vote is granted, we will increment our term when we get a + // quorum. If it is not, the term comes from the localNode that + // rejected our vote so we should become a follower at the new + // term. + default: + r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", + r.id, r.Term, m.Type, m.From, m.Term) + if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap { + r.becomeFollower(m.Term, m.From) + } else { + r.becomeFollower(m.Term, None) + } + } + + case m.Term < r.Term: + if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { + // We have received messages from a leader at a lower term. It is possible + // that these messages were simply delayed in the network, but this could + // also mean that this localNode has advanced its term number during a network + // partition, and it is now unable to either win an election or to rejoin + // the majority on the old term. 
If checkQuorum is false, this will be + // handled by incrementing term numbers in response to MsgVote with a + // higher term, but if checkQuorum is true we may not advance the term on + // MsgVote and must generate other messages to advance the term. The net + // result of these two features is to minimize the disruption caused by + // nodes that have been removed from the cluster's configuration: a + // removed localNode will send MsgVotes (or MsgPreVotes) which will be ignored, + // but it will not receive MsgApp or MsgHeartbeat, so it will not create + // disruptive term increases, by notifying leader of this localNode's activeness. + // The above comments also true for Pre-Vote + // + // When follower gets isolated, it soon starts an election ending + // up with a higher term than leader, although it won't receive enough + // votes to win the election. When it regains connectivity, this response + // with "pb.MsgAppResp" of higher term would force leader to step down. + // However, this disruption is inevitable to free this stuck localNode with + // fresh election. This can be prevented with Pre-Vote phase. + r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) + } else if m.Type == pb.MsgPreVote { + // Before Pre-Vote enable, there may have candidate with higher term, + // but less log. After update to Pre-Vote, the cluster may deadlock if + // we drop messages with a lower term. 
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
+ } else {
+ // ignore other cases
+ r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+ r.id, r.Term, m.Type, m.From, m.Term)
+ }
+ return nil
+ }
+
+ switch m.Type {
+ case pb.MsgHup: // 没有leader时会触发, 开始选举
+ if r.preVote { // PreVote 是否启用PreVote
+ r.hup(campaignPreElection)
+ } else {
+ r.hup(campaignElection)
+ }
+
+ case pb.MsgVote, pb.MsgPreVote:
+ // 当前节点在参与投票时,会综合下面几个条件决定是否投票(在Raft协议的介绍中也提到过).
+ // 1. 投票情况是已经投过了
+ // 2. 没投过并且没有leader
+ // 3. 预投票并且term大
+
+ // case leader 转移 ---> raft Timeout -> follower ---> all
+ // 如果转移m.From会默认填充leader id, ---> r.Vote == m.From =true ; r.Vote == None =false ; m.Type == pb.MsgPreVote=false canVote是False
+ canVote := r.Vote == m.From || (r.Vote == None && r.lead == None) || (m.Type == pb.MsgPreVote && m.Term > r.Term)
+ if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
+ if m.Type == pb.MsgVote {
+ // 只记录真实的投票. 
+ r.electionElapsed = 0 + r.Vote = m.From // 当前节点的选票投给了谁做我Leader + } + } else { + // 不满足上述投赞同票条件时,当前节点会返回拒绝票(响应消息中的Reject字段会设立成true) + r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] 拒绝来自投票请求 %s %x [logterm: %d, index: %d] 当前任期 %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true}) + } + + default: + err := r.step(r, m) // 如果当前节点是Follower状态,raft.step字段指向stepFollower()函数 + if err != nil { + return err + } + } + return nil +} + +type stepFunc func(r *raft, m pb.Message) error + +func stepLeader(r *raft, m pb.Message) error { + // 这些消息不会处理These message types do not require any progress for m.From. + switch m.Type { + case pb.MsgBeat: // leader专属 + r.bcastHeartbeat() + return nil + //--------------------其他消息处理---------------------- + case pb.MsgCheckQuorum: // leader专属 + // 将 leader 自己的 RecentActive 状态设置为 true + if pr := r.prstrack.Progress[r.id]; pr != nil { + pr.RecentActive = true + } + if !r.prstrack.QuorumActive() { + // 如果当前 leader 发现其不满足 quorum 的条件,则说明该 leader 有可能处于隔离状态,step down + r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) + r.becomeFollower(r.Term, None) + } + // Mark everyone (but ourselves) as inactive in preparation for the next + // CheckQuorum. 
+ r.prstrack.Visit(func(id uint64, pr *tracker.Progress) { + if id != r.id { + pr.RecentActive = false + } + }) + return nil + case pb.MsgProp: // leader、Candidate、follower专属 + if len(m.Entries) == 0 { + r.logger.Panicf("%x stepped empty MsgProp", r.id) + } + if r.prstrack.Progress[r.id] == nil { + // 判断当前节点是不是已经被从集群中移除了 + return ErrProposalDropped + } + if r.leadTransferee != None { + // 如果正在进行leader切换,拒绝写入 + r.logger.Debugf("%x [term %d] // 如果正在进行leader切换,拒绝写入 %x ", r.id, r.Term, r.leadTransferee) + return ErrProposalDropped + } + + for i := range m.Entries { // 判断是否有配置变更的日志,有的话做一些特殊处理 + e := &m.Entries[i] + var cc pb.ConfChangeI + if e.Type == pb.EntryConfChange { + var ccc pb.ConfChangeV1 + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } else if e.Type == pb.EntryConfChangeV2 { + var ccc pb.ConfChangeV2 + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } + if cc != nil { + alreadyPending := r.pendingConfIndex > r.raftLog.applied // 是否已经apply了该配置变更 + alreadyJoint := len(r.prstrack.Config.Voters[1]) > 0 // 判断第二个MajorityConfig:map[uint64]struct{} 有没有数据 + wantsLeaveJoint := len(cc.AsV2().Changes) == 0 // 节点个数 + // 首先切换到过渡形态,我们称之为联合共识; + // 一旦提交了联合共识,系统就会过渡到新的配置.联合共识结合了新旧配置. + var refused string + if alreadyPending { + refused = fmt.Sprintf("在索引%d处可能有未应用的conf变更(应用于%d).", r.pendingConfIndex, r.raftLog.applied) + } else if alreadyJoint && !wantsLeaveJoint { + refused = "必须先从联合配置中过渡出去" + } else if !alreadyJoint && wantsLeaveJoint { + refused = "不处于联合状态;拒绝空洞的改变" + } + // true, true + // false false + if refused != "" { // 忽略配置变更 + // 如果发现当前是在joint consensus过程中,拒绝变更,直接将message type 变成普通的entry. + // 处理完毕后,会等待将该消息分发. + r.logger.Infof("%x 忽略配置变更 %v %s: %s", r.id, cc, r.prstrack.Config, refused) + m.Entries[i] = pb.Entry{Type: pb.EntryNormal} + } else { + r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1 + } + } + } + // 将日志追加到raft unstable 中 + if !r.appendEntry(m.Entries...) 
{ // 类似于健康检查的消息,就会走这里 + return ErrProposalDropped + } + // 发送日志给集群其它节点 + r.bcastAppend() + return nil + case pb.MsgReadIndex: + // 表示当前集群只有一个节点,当前节点就是leader + if r.prstrack.IsSingleton() { + // 记录当前的commit index,称为ReadIndex; + resp := r.responseToReadIndexReq(m, r.raftLog.committed) + if resp.To != None { + r.send(resp) + } + return nil + } + + // 当leader在其任期内没有提交任何日志记录时,推迟只读请求. + if !r.committedEntryInCurrentTerm() { // 任期变更时,有数据没有committed + r.pendingReadIndexMessages = append(r.pendingReadIndexMessages, m) + return nil + } + + // 发送消息读取响应 + sendMsgReadIndexResponse(r, m) // case pb.MsgReadIndex: + return nil + } + // 根据消息的From字段获取对应的Progress实例,为后面的消息处理做准备 + pr := r.prstrack.Progress[m.From] + if pr == nil { + r.logger.Debugf("%x no progress available for %x", r.id, m.From) + return nil + } + switch m.Type { + case pb.MsgAppResp: + // Leader节点在向Follower广播日志后,就一直在等待follower的MsgAppResp消息,收到后还是会进到stepLeader函数. + // 更新对应Progress实例的RecentActive字段,从Leader节点的角度来看,MsgAppResp消息的发送节点还是存活的 + + pr.RecentActive = true + + if m.Reject { // MsgApp 消息被拒绝;如果收到的是reject消息,则根据follower反馈的index重新发送日志 + _ = r.handleAppendEntries // 含有拒绝的逻辑 + r.logger.Debugf("%x 收到 MsgAppResp(rejected, hint: (index %d, term %d)) from %x for index %d", r.id, m.RejectHint, m.LogTerm, m.From, m.Index) + // 发送的是 9 5 + nextProbeIdx := m.RejectHint // 拒绝之处的日志索引 6 + if m.LogTerm > 0 { // 拒绝之处的日志任期 2 + // example 1 + // idx 1 2 3 4 5 6 7 8 9 + // ----------------- + // term (L) 1 3 3 3 5 5 5 5 5 + // term (F) 1 1 1 1 2 2 + nextProbeIdx = r.raftLog.findConflictByTerm(m.RejectHint, m.LogTerm) // 下一次直接发送索引为1的消息 🐂 + } + // 通过MsgAppResp消息携带的信息及对应的Progress状态,重新设立其Next + if pr.MaybeDecrTo(m.Index, nextProbeIdx) { // leader是否降低对该节点索引记录 ---- > 降低索引数据 + r.logger.Debugf("%x回滚进度 节点:%x to [%s]", r.id, m.From, pr) + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() + } + r.sendAppend(m.From) + } + } else { + // 走到这说明 之前发送的MsgApp消息已经被对方的Follower节点接收(Entry记录被成功追加) + oldPaused := pr.IsPaused() + // m.Index: 
对应Follower节点收到的raftLog中最后一条Entry记录的索引, + if pr.MaybeUpdate(m.Index) { // 更新pr的进度 + + switch { + case pr.State == tracker.StateProbe: + // 一旦更新了pr状态,就不再进行探测 + pr.BecomeReplicate() + case pr.State == tracker.StateSnapshot && pr.Match >= pr.PendingSnapshot: + // 复制完快照 + r.logger.Debugf("%x 从需要的快照中恢复,恢复发送复制信息到 %x [%s]", r.id, m.From, pr) + pr.BecomeProbe() // 再探测一次 + pr.BecomeReplicate() // 正常发送日志 + case pr.State == tracker.StateReplicate: + pr.Inflights.FreeLE(m.Index) + } + // 如果进度有更新,判断并更新commitIndex + // 收到一个Follower节点的MsgAppResp消息之后,除了修改相应的Match和Next,还会尝试更新raftLog.committed,因为有些Entry记录可能在此次复制中被保存到了 + // 半数以上的节点中,raft.maybeCommit()方法在前面已经分析过了 + if r.maybeCommit() { + // committed index has progressed for the term, so it is safe + // to respond to pending read index requests + releasePendingReadIndexMessages(r) + // 向所有节点发送MsgApp消息,注意,此次MsgApp消息的Commit字段与上次MsgApp消息已经不同,raft.bcastAppend()方法前面已经讲过 + + r.bcastAppend() + } else if oldPaused { + // 之前是pause状态,现在可以任性地发消息了 + // 之前Leader节点暂停向该Follower节点发送消息,收到MsgAppResp消息后,在上述代码中已经重立了相应状态,所以可以继续发送MsgApp消息 + // If we were paused before, this localNode may be missing the + // latest commit index, so send it. + r.sendAppend(m.From) + } + // We've updated flow control information above, which may + // allow us to send multiple (size-limited) in-flight messages + // at once (such as when transitioning from probe to + // replicate, or when freeTo() covers multiple messages). If + // we have more entries to send, send as many messages as we + // can (without sending empty messages for the commit index) + // 循环发送所有剩余的日志给follower + for r.maybeSendAppend(m.From, false) { + } + // 是否正在进行leader转移 + if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { + r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) + r.sendTimeoutNow(m.From) + } + } + } + case pb.MsgHeartbeatResp: + pr.RecentActive = true + pr.ProbeSent = false + + // free one slot for the full inflights window to allow progress. 
+ if pr.State == tracker.StateReplicate && pr.Inflights.Full() { + pr.Inflights.FreeFirstOne() + } + if pr.Match < r.raftLog.lastIndex() { + r.sendAppend(m.From) + } + + if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 { + return nil + } + // 判断leader有没有收到大多数节点的确认 + // 也就是ReadIndex算法中,leader节点得到follower的确认,证明自己目前还是Leader + readIndexStates := r.readOnly.recvAck(m.From, m.Context) // 记录了每个节点对 m.Context 的响应 + xxx := r.prstrack.Voters.VoteResult(readIndexStates) + if xxx != quorum.VoteWon { + return nil + } + // 收到了响应节点超过半数,会清空readOnly中指定消息ID及之前的所有记录 + rss := r.readOnly.advance(m) // 响应的ReadIndex + // 返回follower的心跳回执 + for _, rs := range rss { + if resp := r.responseToReadIndexReq(rs.req, rs.index); resp.To != None { + r.send(resp) + } + } + case pb.MsgSnapStatus: + if pr.State != tracker.StateSnapshot { + return nil + } + // TODO(tbg): this code is very similar to the snapshot handling in + // MsgAppResp above. In fact, the code there is more correct than the + // code here and should likely be updated to match (or even better, the + // logic pulled into a newly created Progress state machine handler). + if !m.Reject { + pr.BecomeProbe() + r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } else { + // NB: the order here matters or we'll be probing erroneously from + // the snapshot index, but the snapshot never applied. + pr.PendingSnapshot = 0 + pr.BecomeProbe() + r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + } + // If snapshot finish, wait for the MsgAppResp from the remote localNode before sending + // out the next MsgApp. + // If snapshot failure, wait for a heartbeat interval before next try + pr.ProbeSent = true + case pb.MsgUnreachable: + // During optimistic replication, if the remote becomes unreachable, + // there is huge probability that a MsgApp is lost. 
+ if pr.State == tracker.StateReplicate {
+ pr.BecomeProbe()
+ }
+ r.logger.Debugf("%x 发送消息到 %x 失败 ,因为不可达[%s]", r.id, m.From, pr)
+ case pb.MsgTransferLeader:
+ // pr 当前leader对该节点状态的记录
+ // client ---> raft --- > leader
+ // client ---> raft --- > follower --- > leader
+ if pr.IsLearner {
+ r.logger.Debugf("%x 是learner.忽视转移领导", r.id)
+ return nil
+ }
+ leadTransferee := m.From
+ lastLeadTransferee := r.leadTransferee
+ if lastLeadTransferee != None {
+ if lastLeadTransferee == leadTransferee {
+ r.logger.Infof("%x [term %d] 正在转移leader给%x,忽略对同一个localNode的请求%x", r.id, r.Term, leadTransferee, leadTransferee)
+ return nil
+ }
+ r.abortLeaderTransfer() // 上一个leader转移没完成,又进行下一个
+ r.logger.Infof("%x [term %d] 取消先前的领导权移交 %x", r.id, r.Term, lastLeadTransferee)
+ }
+ if leadTransferee == r.id {
+ r.logger.Debugf("%x 已经是leader", r.id)
+ return nil
+ }
+ r.logger.Infof("%x [term %d] 开始进行leader转移to %x", r.id, r.Term, leadTransferee)
+ // 转移领导权应该在一个electionTimeout中完成,因此重置r.electionElapsed.
+ r.electionElapsed = 0
+ r.leadTransferee = leadTransferee
+ if pr.Match == r.raftLog.lastIndex() {
+ // leader转移 follower 发送申请投票消息,但是任期不会增加, context 是CampaignTransfer
+ r.sendTimeoutNow(leadTransferee) // 发送方,指定为了下任leader
+ r.logger.Infof("%x 立即发送MsgTimeoutNow到%x,因为%x已经有最新的日志", r.id, leadTransferee, leadTransferee)
+ } else {
+ r.sendAppend(leadTransferee) // 发送转移到哪个节点,用于加快该节点的日志进度
+ }
+ }
+ return nil
+}
+
+// stepCandidate 两个阶段都会调用 StateCandidate and StatePreCandidate;
+// 区别在于对投票请求的处理
+func stepCandidate(r *raft, m pb.Message) error {
+ // 根据当前节点的状态决定其能够处理的选举响应消息的类型
+ var myVoteRespType pb.MessageType
+ if r.state == StatePreCandidate {
+ myVoteRespType = pb.MsgPreVoteResp
+ } else {
+ myVoteRespType = pb.MsgVoteResp
+ }
+ switch m.Type {
+ case pb.MsgProp: // leader、Candidate、follower专属
+ r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+ return ErrProposalDropped
+ case pb.MsgApp:
+ r.becomeFollower(m.Term, m.From)
+ r.handleAppendEntries(m)
+ case 
pb.MsgHeartbeat: // √ + r.becomeFollower(m.Term, m.From) + r.handleHeartbeat(m) + case pb.MsgSnap: + r.becomeFollower(m.Term, m.From) + r.handleSnapshot(m) + case myVoteRespType: // ✅ + // 投票、预投票 + // 处理收到的选举响应消息,当前示例中处理的是MsgPreVoteResp消息 + gr, rj, res := r.poll(m.From, m.Type, !m.Reject) // 计算当前收到多少投票 + r.logger.Infof("%x 收到了 %d %s 同已投票 %d 拒绝投票", r.id, gr, m.Type, rj) + // 投票数、拒绝数 过半判定 + switch res { + case quorum.VoteWon: // 如果quorum 都选择了投票 + if r.state == StatePreCandidate { + r.campaign(campaignElection) // 预投票发起正式投票 + } else { + r.becomeLeader() // 当前节点切换成为Leader状态, 其中会重置每个节点对应的Next和Match两个索引, + r.bcastAppend() // 向集群中其他节点广播MsgApp消息 + } + case quorum.VoteLost: // 集票失败,转为 follower + r.becomeFollower(r.Term, None) // 注意,此时任期没有改变 + } + case pb.MsgTimeoutNow: + r.logger.Debugf("%x [term %d state %v] 忽略 MsgTimeoutNow 消息from %x", r.id, r.Term, r.state, m.From) + } + return nil +} + +// follower 的功能 +func stepFollower(r *raft, m pb.Message) error { + switch m.Type { + case pb.MsgProp: // leader、Candidate、follower专属 + if r.lead == None { + r.logger.Infof("%x 由于当前没有leader term %d; 拒绝提议", r.id, r.Term) + return ErrProposalDropped + } else if r.disableProposalForwarding { + r.logger.Infof("%x 禁止转发回leader %x at term %d; 拒绝提议", r.id, r.lead, r.Term) + return ErrProposalDropped + } + m.To = r.lead + r.send(m) + case pb.MsgApp: + // 配置信息也会走到这里 + // Leader节点处理完命令后,发送日志和持久化操作都是异步进行的,但是这不代表leader已经收到回复. + // Raft协议要求在返回leader成功的时候,日志一定已经提交了,所以Leader需要等待超过半数的Follower节点处理完日志并反馈,下面先看一下Follower的日志处理. + // 日志消息到达Follower后,也是由EtcdServer.Process()方法来处理,最终会进到Raft模块的stepFollower()函数中. 
+ // 重置心跳计数 + r.electionElapsed = 0 + // 设置Leader + r.lead = m.From + // 处理日志条目 + r.handleAppendEntries(m) + case pb.MsgHeartbeat: + r.electionElapsed = 0 + r.lead = m.From + r.handleHeartbeat(m) + case pb.MsgSnap: + r.electionElapsed = 0 + r.lead = m.From + r.handleSnapshot(m) + case pb.MsgTransferLeader: + if r.lead == None { + r.logger.Infof("%x no leader at term %d; 丢弃leader转移消息", r.id, r.Term) + return nil + } + m.To = r.lead + r.send(m) + case pb.MsgTimeoutNow: + r.logger.Infof("%x [term %d] 收到来自 %x(下任leader) 的MsgTimeoutNow,并开始选举获得领导.", r.id, r.Term, m.From) + // 即使r.preVote为真,领导层转移也不会使用pre-vote;我们知道我们不是在从一个分区恢复,所以不需要额外的往返. + r.hup(campaignTransfer) + + case pb.MsgReadIndex: // ✅ + if r.lead == None { + r.logger.Infof("%x 当前任期没有leader %d; 跳过读索引", r.id, r.Term) + return nil + } + m.To = r.lead + r.send(m) + case pb.MsgReadIndexResp: // ✅ + if len(m.Entries) != 1 { + r.logger.Errorf("%x 来自 %x的 MsgReadIndexResp 格式无效, 日志条数: %d", r.id, m.From, len(m.Entries)) + return nil + } + r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data}) + } + return nil +} + +// roleUp 是否可以被提升为leader. 
+func (r *raft) roleUp() bool {
+ pr := r.prstrack.Progress[r.id] // 是本节点raft的身份
+ // 节点不是learner 且 没有正在应用快照
+ return pr != nil && !pr.IsLearner && !r.raftLog.hasPendingSnapshot()
+}
+
+// 变成Follower 当前任期,当前leader
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+ r.step = stepFollower
+ r.reset(term)
+ r.tick = r.tickElection
+ r.lead = lead
+ r.state = StateFollower
+ r.logger.Infof("%x 成为Follower 在任期: %d", r.id, r.Term)
+}
+
+// 变成竞选者角色
+func (r *raft) becomeCandidate() {
+ if r.state == StateLeader {
+ panic("无效的转移 [leader -> candidate]")
+ }
+ r.step = stepCandidate
+ r.reset(r.Term + 1)
+ r.tick = r.tickElection
+ r.Vote = r.id // 当前节点的选票投给了谁做我Leader
+ r.state = StateCandidate
+ r.logger.Infof("%x 成为Candidate 在任期: %d", r.id, r.Term)
+}
+
+// 变成预竞选者角色
+func (r *raft) becomePreCandidate() {
+ if r.state == StateLeader {
+ panic("无效的转移 [leader -> pre-candidate]")
+ }
+ // 变成预竞选者更新step func和state,但绝对不能增加任期和投票
+ r.step = stepCandidate
+ r.prstrack.ResetVotes() // 清空接收到了哪些节点的投票
+ r.tick = r.tickElection
+ r.lead = None
+ r.state = StatePreCandidate
+ r.logger.Infof("%x 成为 pre-candidate在任期 %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+ if r.state == StateFollower {
+ panic("invalid transition [follower -> leader]")
+ }
+ r.step = stepLeader
+ r.reset(r.Term)
+ r.tick = r.tickHeartbeat
+ r.lead = r.id
+ r.state = StateLeader
+ // Followers enter replicate mode when they've been successfully probed
+ // (perhaps after having received a snapshot as a result). The leader is
+ // trivially in this state. Note that r.reset() has initialized this
+ // progress with the last index already.
+ r.prstrack.Progress[r.id].BecomeReplicate()
+
+ // Conservatively set the pendingConfIndex to the last index in the
+ // log. There may or may not be a pending config change, but it's
+ // safe to delay any future proposals until we commit all our
+ // pending log entries, and scanning the entire tail of the log
+ // could be expensive. 
+ r.pendingConfIndex = r.raftLog.lastIndex() + + emptyEnt := pb.Entry{Data: nil} + if !r.appendEntry(emptyEnt) { + // This won't happen because we just called reset() above. + r.logger.Panic("empty entry was dropped") + } + // As a special case, don't count the initial empty entry towards the + // uncommitted log quota. This is because we want to preserve the + // behavior of allowing one entry larger than quota if the current + // usage is zero. + r.reduceUncommittedSize([]pb.Entry{emptyEnt}) + r.logger.Infof("%x became leader at term %d", r.id, r.Term) +} diff --git a/raft/tracker/over_inflights.go b/raft/tracker/over_inflights.go new file mode 100644 index 00000000000..8d9c5721d8b --- /dev/null +++ b/raft/tracker/over_inflights.go @@ -0,0 +1,117 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// Inflights 在 Raft 中存储的是已发送给 Follower 的 MsgApp 消息,但没有收到 MsgAppResp 的消息 最大Index 值. +// 简单的说就是 Leader 发送一个消息给 Follower,Leader 在对应的 Follower 状态维护结构(progress)中, +// 将这个消息的 ID 记录在 inFlight 中, 当 Follower 收到消息之后,告知 Leader 收到了这个 ID 的消息,Leader 将从 inFlight 中删除, +// 表示 Follower 已经接收,否则如果 Follower 在指定时间内没有响应,Leader 会根据一定策略进行重发. +// 一批一批的发送 +type Inflights struct { + start int // 记录最旧的那个未被响应的消息,在buffer中的位置 + count int // 已发送,但未响应的消息总数 + size int // buffer的最大长度 + buffer []uint64 // 存储ID值 通过一个具有最大长度(size)的数组([]uint64)构造成一个环形数组. +} + +// NewInflights sets up an Inflights that allows up to 'size' inflight messages. 
+func NewInflights(size int) *Inflights { + return &Inflights{ + size: size, + } +} + +// Clone 返回一个与相同的Inflights但不共享buffer. +func (in *Inflights) Clone() *Inflights { + ins := *in + ins.buffer = append([]uint64(nil), in.buffer...) + return &ins +} + +// Add 记录待确认的日志索引, inflight每批发送的消息中 最新的日志索引 +func (in *Inflights) Add(inflight uint64) { + if in.Full() { // ok + panic("不能添加到一个已满的inflights") + } + next := in.start + in.count // 是环形数组,下一个要放的位置 + size := in.size + if next >= size { + next -= size + } // 回执 + if next >= len(in.buffer) { // 判断实际存储有没有足够空间 + in.grow() + } + in.buffer[next] = inflight // 放入数据 + in.count++ // 计数++ +} + +// grow 按需grow 首次>=size就不在执行 +func (in *Inflights) grow() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// FreeLE 释放日志索引小于to的日志 +func (in *Inflights) FreeLE(to uint64) { + // to传过来的是索引 + if in.count == 0 || to < in.buffer[in.start] { + // 窗口左侧 + return + } + + idx := in.start + var i int + // 从 start 开始,直到找到最大且小于 to 的元素位置 + for i = 0; i < in.count; i++ { // 当前有多少个条消息 + if to < in.buffer[idx] { // found the first large inflight + break + } + // 判断有没有越界 + size := in.size + if idx++; idx >= size { + idx -= size + } + } + in.count -= i // 释放的日志条数 + in.start = idx // 日志 跳转的位置 + if in.count == 0 { + // 如果没有fly日志了,就将start重置为0 + in.start = 0 + } +} + +// FreeFirstOne 释放第一条日志 +func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) } + +// Full OK +func (in *Inflights) Full() bool { + return in.count == in.size +} + +// Count 当前未确认的消息数 +func (in *Inflights) Count() int { return in.count } + +// reset 释放所有未确认的消息 +func (in *Inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/raft/tracker/over_progress.go b/raft/tracker/over_progress.go new file mode 100644 index 00000000000..c998288b55f --- /dev/null +++ b/raft/tracker/over_progress.go @@ -0,0 +1,227 
@@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" +) + +// Progress 在leader看来,Progress代表follower的进度.leader维护所有follower的进度,并根据follower的进度向其发送条目. +// NB(tbg).Progress基本上是一个状态机 +type Progress struct { + Match uint64 // 对应Follower节点当前己经成功复制的Entry记录的索引值. + Next uint64 // 对应Follower节点下一个待复制的Entry记录的索引值 就是还在飞行中或者还在路上的日志数量(Inflights) + // State 对应Follower节点的复制状态 + // 当处于StateProbe状态时,leader在每个心跳间隔内最多发送一条复制消息.它也会探测follower的实际进度. + // 当处于StateReplicate状态时,leader在发送复制消息后,乐观地增加next 索引.这是一个优化后的的状态,用于快速复制日志条目给follower. + // 当处于StateSnapshot状态时,leader应该已经发送了快照,并停止发送任何复制消息. + State StateType + PendingSnapshot uint64 // 表示Leader节点正在向目标节点发送快照数据.快照的索引值[最大的索引] + RecentActive bool // 从当前Leader节点的角度来看,该Progress实例对应的Follower节点是否存活.如果新一轮的选举,那么新的Leader默认为都是不活跃的. + ProbeSent bool // 探测状态时才有用,表示探测消息是否已经发送了,如果发送了就不会再发了,避免不必要的IO. + Inflights *Inflights // 维护着向该follower已发送,但未收到确认的消息索引 [环形队列] + IsLearner bool // 该节点是不是learner +} + +// MaybeUpdate 更新已收到、下次发送的 日志索引, n:上一次发送出去的最大日志索引号 +func (pr *Progress) MaybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n // 对应Follower节点当前己经成功复制的Entry记录的索引值 + updated = true + // 这个函数就是把ProbeSent设置为false,试问为什么在这个条件下才算是确认收到探测包? + // 这就要从探测消息说起了,raft可以把日志消息、心跳消息当做探测消息,此处是把日志消息 + // 当做探测消息的处理逻辑.新的日志肯定会让Match更新,只有收到了比Match更大的回复才 + // 能算是这个节点收到了新日志消息,其他的反馈都可以视为过时消息.比如Match=9,新的日志 + // 索引是10,只有收到了>=10的反馈才能确定节点收到了当做探测消息的日志. 
+ pr.ProbeAcked()
+ }
+ // 这会发生在什么时候?Next是Leader认为发送给Peer最大的日志索引了,Peer怎么可能会回复一个
+ // 比Next更大的日志索引呢?这个其实是在系统初始化的时候亦或是每轮选举完成后,新的Leader
+ // 还不知道Peer的接收的最大日志索引,所以此时的Next还是个初始值.
+ pr.Next = max(pr.Next, n+1) // 对应Follower节点下一个待复制的Entry记录的索引值
+ return updated
+}
+
+// OptimisticUpdate 记录下次日志发送的起始位置,n是已发送的最新日志索引
+func (pr *Progress) OptimisticUpdate(n uint64) { pr.Next = n + 1 }
+
+// MaybeDecrTo follower拒绝了rejected索引的日志, matchHint是应该重新调整的日志索引[leader记录的];
+// leader是否降低对该节点索引记录 9 6
+func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool {
+ if pr.State == StateReplicate {
+ if rejected <= pr.Match {
+ return false
+ }
+ // 根据前面对MsgApp消息发送过程的分析,处于ProgressStateReplicate状态时,发送MsgApp
+ // 消息的同时会直接调用Progress.optimisticUpdate()方法增加Next,这就使得Next可能会
+ // 比Match大很多,这里回退Next至Match位置,并在后面重新发送MsgApp消息进行尝试
+
+ // 在复制状态下Leader会发送多个日志信息给Peer再等待Peer的回复,例如:Match+1,Match+2,Match+3,Match+4,
+ // 此时如果Match+3丢了,那么Match+4肯定会被拒绝,此时match应该是Match+2,Next=last+1
+ // 应该更合理.但是从peer的角度看,如果收到了Match+2的日志就会给leader一次回复,这个
+ // 回复理论上是早于当前这个拒绝消息的,所以当Leader收到Match+4拒绝消息,此时的Match
+ // 已经更新到Match+2,如果Peer回复的消息也丢包了Match可能也没有更新.所以Match+1
+ // 大概率和last相同,少数情况可能last更好,但是用Match+1做可能更保险一点.
+
+ pr.Next = pr.Match + 1
+ return true
+ }
+ // 源码注释翻译:如果拒绝日志索引不是Next-1,肯定是陈旧消息这是因为非复制状态探测消息一次只
+ // 发送一条日志.这句话是什么意思呢,读者需要注意,代码执行到这里说明Progress不是复制状态,
+ // 应该是探测状态.为了效率考虑,Leader向Peer发送日志消息一次会带多条日志,比如一个日志消息
+ // 会带有10条日志.上面Match+1,Match+2,Match+3,Match+4的例子是为了理解方便假设每个
+ // 日志消息一条日志.真实的情况是Message[Match,Match+9],Message[Match+10,Match+15],
+ // 一个日志消息如果带有多条日志,Peer拒绝的是其中一条日志.此时用什么判断拒绝索引日志就在刚刚
+ // 发送的探测消息中呢?所以探测消息一次只发送一条日志就能做到了,因为这个日志的索引肯定是Next-1. 
+ + // 出现过时的MsgAppResp消息直接忽略 + // 只有拒绝比当前大,才会重新 一般都是当前记录了A给follower发送了A+1,[leader会将Next设为A+2]被拒绝了 此时 rejected=A+1 + if pr.Next-1 != rejected { + return false + } + + // idx 1 2 3 4 5 6 7 8 9 + // ----------------- + // term (L) 1 3 3 3 5 5 5 5 5 + // term (F) 1 1 1 1 2 2 + pr.Next = max(min(rejected, matchHint+1), 1) // 此时Next就设为2 // 根据Peer的反馈调整Next + pr.ProbeSent = false // Next重置完成,恢复消息发送,并在后面重新发送MsgApp消息 + return true +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +// ProbeAcked 当follower接受了append消息,标志着可以继续向该节点发送消息 +func (pr *Progress) ProbeAcked() { + pr.ProbeSent = false +} + +// IsPaused 返回发往该节点的消息是否被限流 +// 当一个节点拒绝了最近的MsgApps,目前正在等待快照,或者已经达到MaxInflightMsgs限制时,就会这样做. +// 在正常操作中,这是假的.一个被节流的节点将被减少联系的频率,直到它达到能够再次接受稳定的日志条目的状态. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case StateProbe: // 每个心跳间隔内最多发送一条复制消息,默认false + // 探测状态下如果已经发送了探测消息Progress就暂停了,意味着不能再发探测消息了,前一个消息 + // 还没回复呢,如果节点真的不活跃,发再多也没用. + return pr.ProbeSent + case StateReplicate: // 消息复制状态 + return pr.Inflights.Full() // 根据队列是否满,判断 + case StateSnapshot: + // 快照状态Progress就是暂停的,Peer正在复制Leader发送的快照,这个过程是一个相对较大 + // 而且重要的事情,因为所有的日志都是基于某一快照基础上的增量,所以快照不完成其他的都是 + // 徒劳. + return true + default: + panic("未知的状态") + } +} + +// ResetState 重置节点的跟踪状态 +func (pr *Progress) ResetState(state StateType) { + pr.ProbeSent = false // 表示探测消息是否已经发送了,如果发送了就不会再发了,避免不必要的IO. + pr.PendingSnapshot = 0 + pr.State = state + pr.Inflights.reset() +} + +// BecomeProbe 转变为StateProbe.下一步是重置为Match+1,或者,如果更大的话,重置为待定快照的索引. +// 恢复follower状态,以正常发送消息 +func (pr *Progress) BecomeProbe() { + // 变成探测状态,等发出去的消息响应了,再继续发消息 + if pr.State == StateSnapshot { // 当前状态是发送快照 + // 如果原始状态是快照,说明快照已经被Peer接收了,那么Next=pendingSnapshot+1, + // 意思就是从快照索引的下一个索引开始发送. 
+ pendingSnapshot := pr.PendingSnapshot + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { // follower 链接有问题 、网络有问题 + // 上面的逻辑是Peer接收完快照后再探测一次才能继续发日志,而这里的逻辑是Peer从复制状态转 + // 到探测状态,这在Peer拒绝了日志、日志消息丢失的情况会发生,此时Leader不知道从哪里开始, + // 倒不如从Match+1开始,因为Match是节点明确已经收到的. + pr.Next = pr.Match + 1 // match ---> next的数据丢弃, 下次重新发送 + } + pr.ResetState(StateProbe) +} + +func (pr *Progress) BecomeReplicate() { + // 除了复位一下状态就是调整Next,为什么Next也是Match+1?进入复制状态肯定是收到了探测消息的 + // 反馈,此时Match会被更新,那从Match+1也就理所当然了 + pr.ResetState(StateReplicate) + pr.Next = pr.Match + 1 +} + +// BecomeSnapshot 正在发送快照snapshoti 为快照的最新日志索引 +func (pr *Progress) BecomeSnapshot(snapshoti uint64) { + // 除了复位一下状态就是设置快照的索引,此处为什么不需要调整Next?因为这个状态无需在发日志给 + // peer,直到快照完成后才能继续 + pr.ResetState(StateSnapshot) + pr.PendingSnapshot = snapshoti +} + +func (pr *Progress) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next) + if pr.IsLearner { + fmt.Fprint(&buf, " learner") + } + if pr.IsPaused() { + fmt.Fprint(&buf, " paused") + } + if pr.PendingSnapshot > 0 { + fmt.Fprintf(&buf, " 发送中的快照=%d", pr.PendingSnapshot) + } + if !pr.RecentActive { + fmt.Fprintf(&buf, " 不活跃的") + } + if n := pr.Inflights.Count(); n > 0 { + fmt.Fprintf(&buf, " 未确认的消息=%d", n) + if pr.Inflights.Full() { + fmt.Fprint(&buf, "[full]") + } + } + return buf.String() +} + +type ProgressMap map[uint64]*Progress + +func (m ProgressMap) String() string { + ids := make([]uint64, 0, len(m)) + for k := range m { + ids = append(ids, k) + } + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + var buf strings.Builder + for _, id := range ids { + fmt.Fprintf(&buf, "%d: %s\n", id, m[id]) + } + return buf.String() +} diff --git a/raft/tracker/over_state.go b/raft/tracker/over_state.go new file mode 100644 index 00000000000..5ccb6f9c70c --- /dev/null +++ b/raft/tracker/over_state.go @@ -0,0 +1,39 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// StateType follower状态追踪 +type StateType uint64 + +const ( + // StateProbe 一般是系统选举完成后,Leader不知道所有Follower都是什么进度,所以需要发消息探测一下,从 + // Follower的回复消息获取进度.在还没有收到回消息前都还是探测状态.因为不确定Follower是否活跃, + // 所以发送太多的探测消息意义不大,只发送一个探测消息即可. + StateProbe StateType = iota // 探测 + // StateReplicate :当Peer回复探测消息后,消息中有该节点接收的最大日志索引,如果回复的最大索引大于Match, 【可能会出现日志冲突】 + // 以此索引更新Match,Progress就进入了复制状态,开启高速复制模式.复制制状态不同于 + // 探测状态,Leader会发送更多的日志消息来提升IO效率,就是上面提到的异步发送.这里就要引入 + // Inflight概念了,飞行中的日志,意思就是已经发送给Follower还没有被确认接收的日志数据, + StateReplicate // 复制 + StateSnapshot // 快照状态说明Follower正在复制Leader的快照 +) + +var prstmap = [...]string{ + "StateProbe", + "StateReplicate", + "StateSnapshot", +} + +func (st StateType) String() string { return prstmap[uint64(st)] } diff --git a/raft/tracker/tracker.go b/raft/tracker/tracker.go new file mode 100644 index 00000000000..d4e44404b38 --- /dev/null +++ b/raft/tracker/tracker.go @@ -0,0 +1,250 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" + + "github.com/ls-2018/etcd_cn/raft/quorum" + pb "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +type Config struct { + Voters quorum.JointConfig // 投票者,分为两个阶段 【新的配置、 旧的配置】或 【配置、 nil】 + // AutoLeave 如果配置是joint的,并且在可能的情况下,应该由Raft自动进行到传递配置的过渡,则为true. + // 如果为false,则该配置将被连接,直到应用程序手动启动转换. + AutoLeave bool + Learners map[uint64]struct{} // Learners 当前配置中的learner ID + // 当我们在联合共识转换过程中把voter变成learner时,我们不能在进入联合状态时直接增加learner. + // 这是因为这将违反voter和learner的交集是空的这一不变性.例如,假设一个voter被移除,并立即重新添加为learner + // (或者换句话说,它被降级). + // + // 最初,配置将是 + // voters: {1 2 3} + // learners: {} + // + // 而我们想降级3.进入联合配置,我们天真地认为 + // voters: {1 2} & {1 2 3} + // learners: {3} + // + // 但这违反了不变量(3既是投票者又是learner).相反,我们得到 + // voters: {1 2} & {1 2 3} + // learners: {} + // next_learners: {3} + // + // 其中3号现在还是纯粹的投票者,但我们记住了在过渡到最终配置时使其成为learner的意图. + // voters: {1 2} + // learners: {3} + // next_learners: {} + // + // 请注意,在添加一个不属于joint config中投票人的learner时,不使用next_learners. + // 在这种情况下,learners在进入联合配置时被立即添加,以便尽快赶上. 
+ LearnersNext map[uint64]struct{} +} + +func (c Config) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "voters=%s", c.Voters) + if c.Learners != nil { + fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String()) + } + if c.LearnersNext != nil { + fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String()) + } + if c.AutoLeave { + fmt.Fprintf(&buf, " autoleave") + } + return buf.String() +} + +func (c *Config) Clone() Config { + clone := func(m map[uint64]struct{}) map[uint64]struct{} { + if m == nil { + return nil + } + mm := make(map[uint64]struct{}, len(m)) + for k := range m { + mm[k] = struct{}{} + } + return mm + } + return Config{ + Voters: quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])}, + Learners: clone(c.Learners), + LearnersNext: clone(c.LearnersNext), + } +} + +// ProgressTracker 追踪配置以及节点信息. +type ProgressTracker struct { + Config + // leader需要缓存当前所有Follower的日志同步进度 + Progress ProgressMap // nodeID ---> nodeInfoMap + Votes map[uint64]bool // 记录接收到了哪些节点的投票 + MaxInflight int +} + +// MakeProgressTracker 初始化 +func MakeProgressTracker(maxInflight int) ProgressTracker { + p := ProgressTracker{ + MaxInflight: maxInflight, // 最大的处理中的消息数量 + Config: Config{ + Voters: quorum.JointConfig{ + quorum.MajorityConfig{}, // 只初始化了第一个 + nil, // 使用时初始化 + }, + Learners: nil, // 使用时初始化 + LearnersNext: nil, // 使用时初始化 + }, + Votes: map[uint64]bool{}, + Progress: map[uint64]*Progress{}, + } + return p +} + +// ConfState 返回一个代表active配置的ConfState. +func (p *ProgressTracker) ConfState() pb.ConfState { + return pb.ConfState{ + Voters: p.Voters[0].Slice(), + VotersOutgoing: p.Voters[1].Slice(), + Learners: quorum.MajorityConfig(p.Learners).Slice(), + LearnersNext: quorum.MajorityConfig(p.LearnersNext).Slice(), + AutoLeave: p.AutoLeave, + } +} + +// IsSingleton 集群中只有一个投票成员(领导者). 
+func (p *ProgressTracker) IsSingleton() bool { + return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0 +} + +// QuorumActive 如果从本地raft状态机的角度来看,该法定人数是活动的,则返回true.否则,它将返回false. +func (p *ProgressTracker) QuorumActive() bool { + votes := map[uint64]bool{} + p.Visit(func(id uint64, pr *Progress) { + if pr.IsLearner { + return + } + votes[id] = pr.RecentActive + }) + + return p.Voters.VoteResult(votes) == quorum.VoteWon +} + +type matchAckIndexer map[uint64]*Progress + +var _ quorum.AckedIndexer = matchAckIndexer(nil) + +// AckedIndex 返回指定ID的Peer接收的最大日志索引,就是Progress.Match. +func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) { + pr, ok := l[id] + if !ok { + return 0, false + } + return quorum.Index(pr.Match), true +} + +// Committed 根据投票成员已确认的 返回已提交的最大日志索引. +func (p *ProgressTracker) Committed() uint64 { + return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress))) +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// Visit 对所有跟踪的进度按稳定的顺序调用所提供的闭包. 
+func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) { + n := len(p.Progress) + var sl [7]uint64 + var ids []uint64 + if len(sl) >= n { + ids = sl[:n] + } else { + ids = make([]uint64, n) + } + for id := range p.Progress { + n-- + ids[n] = id + } + insertionSort(ids) + for _, id := range ids { + f(id, p.Progress[id]) + } +} + +// VoterNodes 返回一个经过排序的选民 +func (p *ProgressTracker) VoterNodes() []uint64 { + m := p.Voters.IDs() + nodes := make([]uint64, 0, len(m)) + for id := range m { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// LearnerNodes 返回所有的learners +func (p *ProgressTracker) LearnerNodes() []uint64 { + if len(p.Learners) == 0 { + return nil + } + nodes := make([]uint64, 0, len(p.Learners)) + for id := range p.Learners { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// ResetVotes 准备通过recordVote进行新一轮的计票工作. +func (p *ProgressTracker) ResetVotes() { + p.Votes = map[uint64]bool{} // 记录接收到了哪些节点的投票 +} + +// RecordVote id=true, 该节点给本节点投了票; id=false, 该节点没有给本节点投票 +func (p *ProgressTracker) RecordVote(id uint64, v bool) { + _, ok := p.Votes[id] // 记录接收到了哪些节点的投票 + if !ok { + p.Votes[id] = v // 记录接收到了哪些节点的投票 + } +} + +// TallyVotes 返回批准和拒绝的票数,以及是否知道选举结果. 
+func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) { + for id, pr := range p.Progress { + if pr.IsLearner { + continue + } + v, voted := p.Votes[id] // 记录接收到了哪些节点的投票 + if !voted { // 没有收到结果 + continue + } + if v { + granted++ + } else { + rejected++ + } + } + result := p.Voters.VoteResult(p.Votes) + return granted, rejected, result +} diff --git a/raftexample/Procfile b/raftexample/Procfile new file mode 100644 index 00000000000..6b2f7ccf03c --- /dev/null +++ b/raftexample/Procfile @@ -0,0 +1,4 @@ +# Use goreman to run `go get github.com/mattn/goreman` +raftexample1: ./raftexample --id 1 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 12380 +raftexample2: ./raftexample --id 2 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 22380 +raftexample3: ./raftexample --id 3 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 32380 diff --git a/raftexample/README.md b/raftexample/README.md new file mode 100644 index 00000000000..7250a5aecb2 --- /dev/null +++ b/raftexample/README.md @@ -0,0 +1,131 @@ +# raftexample + +raftexample is an example usage of etcd's [raft library](../../raft). It provides a simple REST API for a key-value +store cluster backed by the [Raft][raft] consensus algorithm. + +[raft]: http://raftconsensus.github.io/ + +## Getting Started + +### Building raftexample + +Clone `etcd` to `/src/go.etcd.io/etcd` + +```sh +export GOPATH= +cd /src/github.com/ls-2018/etcd_cn/contrib/raftexample +go build -o raftexample +``` + +### Running single node raftexample + +First start a single-member cluster of raftexample: + +```sh +raftexample --id 1 --cluster http://127.0.0.1:12379 --port 12380 +``` + +Each raftexample process maintains a single raft instance and a key-value server. 
The process's list of comma separated
+peers (--cluster), its raft ID index into the peer list (--id), and http key-value server port (--port) are passed
+through the command line.
+
+Next, store a value ("hello") to a key ("my-key"):
+
+```
+curl -L http://127.0.0.1:12380/my-key -XPUT -d hello
+```
+
+Finally, retrieve the stored key:
+
+```
+curl -L http://127.0.0.1:12380/my-key
+```
+
+### Running a local cluster
+
+First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
+
+The [Procfile script](./Procfile) will set up a local example cluster. Start it with:
+
+```sh
+goreman start
+```
+
+This will bring up three raftexample instances.
+
+Now it's possible to write a key-value pair to any member of the cluster and likewise retrieve it from any member.
+
+### Fault Tolerance
+
+To test cluster recovery, first start a cluster and write a value "foo":
+
+```sh
+goreman start
+curl -L http://127.0.0.1:12380/my-key -XPUT -d foo
+```
+
+Next, remove a node and replace the value with "bar" to check cluster availability:
+
+```sh
+goreman run stop raftexample2
+curl -L http://127.0.0.1:12380/my-key -XPUT -d bar
+curl -L http://127.0.0.1:32380/my-key
+```
+
+Finally, bring the node back up and verify it recovers with the updated value "bar":
+
+```sh
+goreman run start raftexample2
+curl -L http://127.0.0.1:22380/my-key
+```
+
+### Dynamic cluster reconfiguration
+
+Nodes can be added to or removed from a running cluster using requests to the REST API. 
+
+For example, suppose we have a 3-node cluster that was started with the commands:
+
+```sh
+raftexample --id 1 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 12380
+raftexample --id 2 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 22380
+raftexample --id 3 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 32380
+```
+
+A fourth node with ID 4 can be added by issuing a POST:
+
+```sh
+curl -L http://127.0.0.1:12380/4 -XPOST -d http://127.0.0.1:42379
+```
+
+Then the new node can be started as the others were, using the --join option:
+
+```sh
+raftexample --id 4 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379,http://127.0.0.1:42379 --port 42380 --join
+```
+
+The new node should join the cluster and be able to service key/value requests.
+
+We can remove a node using a DELETE request:
+
+```sh
+curl -L http://127.0.0.1:12380/3 -XDELETE
+```
+
+Node 3 should shut itself down once the cluster has processed this request.
+
+## Design
+
+The raftexample consists of three components: a raft-backed key-value store, a REST API server, and a raft consensus
+server based on etcd's raft implementation.
+
+The raft-backed key-value store is a key-value map that holds all committed key-values. The store bridges communication
+between the raft server and the REST server. Key-value updates are issued through the store to the raft server. The
+store updates its map once raft reports the updates are committed.
+
+The REST server exposes the current raft consensus by accessing the raft-backed key-value store. A GET command looks up
+a key in the store and returns the value, if any. A key-value PUT command issues an update proposal to the store.
+
+The raft server participates in consensus with its cluster peers. When the REST server submits a proposal, the raft
+server transmits the proposal to its peers. 
When raft reaches a consensus, the server publishes all committed updates +over a commit channel. For raftexample, this commit channel is consumed by the key-value store. + diff --git a/raftexample/httpapi.go b/raftexample/httpapi.go new file mode 100644 index 00000000000..ddb54f75d25 --- /dev/null +++ b/raftexample/httpapi.go @@ -0,0 +1,120 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "io/ioutil" + "log" + "net/http" + "strconv" + + "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +// Handler for a http based key-value store backed by raft +// 基于http的基于raft的键值存储的处理程序 +type httpKVAPI struct { + store *kvstore + confChangeC chan<- raftpb.ConfChangeV1 +} + +func (h *httpKVAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { + key := r.RequestURI + defer r.Body.Close() + switch { + case r.Method == "PUT": + v, err := ioutil.ReadAll(r.Body) + if err != nil { + log.Printf("无法读取put的数据 (%v)\n", err) + http.Error(w, "失败 on PUT", http.StatusBadRequest) + return + } + h.store.Propose(key, string(v)) // 👌🏻 + // 乐观——不用等待木筏上的ack.值还没有committed,因此后续的GET可以返回旧值 + w.WriteHeader(http.StatusNoContent) + case r.Method == "GET": + if v, ok := h.store.Lookup(key); ok { + w.Write([]byte(v)) + } else { + http.Error(w, "Failed to GET", http.StatusNotFound) + } + case r.Method == "POST": // 新增节点 + url, err := ioutil.ReadAll(r.Body) + if err != nil { + log.Printf("Failed to read on POST (%v)\n", err) + 
http.Error(w, "Failed on POST", http.StatusBadRequest) + return + } + nodeId, err := strconv.ParseUint(key[1:], 0, 64) + if err != nil { + log.Printf("Failed to convert ID for conf change (%v)\n", err) + http.Error(w, "Failed on POST", http.StatusBadRequest) + return + } + + cc := raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeAddNode, + NodeID: nodeId, + Context: string(url), + } + h.confChangeC <- cc // 新增节点 + + // As above, optimistic that raft will apply the conf change + w.WriteHeader(http.StatusNoContent) + case r.Method == "DELETE": // 删除节点 + nodeId, err := strconv.ParseUint(key[1:], 0, 64) + if err != nil { + log.Printf("Failed to convert ID for conf change (%v)\n", err) + http.Error(w, "Failed on DELETE", http.StatusBadRequest) + return + } + + cc := raftpb.ConfChangeV1{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: nodeId, + } + h.confChangeC <- cc // 删除节点 + + // As above, optimistic that raft will apply the conf change + w.WriteHeader(http.StatusNoContent) + default: + w.Header().Set("Allow", "PUT") + w.Header().Add("Allow", "GET") + w.Header().Add("Allow", "POST") + w.Header().Add("Allow", "DELETE") + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +// serveHttpKVAPI 启动一个带有GET/PUT API的键值etcd并监听. 
+func serveHttpKVAPI(kv *kvstore, port int, confChangeC chan<- raftpb.ConfChangeV1, errorC <-chan error) { + srv := http.Server{ + Addr: ":" + strconv.Itoa(port), + Handler: &httpKVAPI{ + store: kv, + confChangeC: confChangeC, // 主动触发的配置变更 + }, + } + go func() { + if err := srv.ListenAndServe(); err != nil { + log.Fatal(err) + } + }() + + // exit when raft goes down + if err, ok := <-errorC; ok { + log.Fatal(err) + } +} diff --git a/raftexample/kvstore.go b/raftexample/kvstore.go new file mode 100644 index 00000000000..39e3db3268c --- /dev/null +++ b/raftexample/kvstore.go @@ -0,0 +1,137 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "encoding/json" + "log" + "sync" + + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +// 基于raft的k,v存储 +type kvstore struct { + proposeC chan<- string // 提议通知channel + mu sync.RWMutex + kvStore map[string]string // 当前提交的键值对 + snapshotter *snap.Snapshotter // 快照管理器 +} + +type kv struct { + Key string `json:"key"` + Val string `json:"val"` +} + +func newKVStore(snapshotter *snap.Snapshotter, proposeC chan<- string, commitC <-chan *commit, errorC <-chan error) *kvstore { + s := &kvstore{ + proposeC: proposeC, + kvStore: make(map[string]string), + snapshotter: snapshotter, + } + snapshot, err := s.loadSnapshot() // 加载最新的快照 + if err != nil { + log.Panic(err) + } + if snapshot != nil { + log.Printf("开始加载快照 at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index) + if err := s.recoverFromSnapshot(snapshot.Data); err != nil { + log.Panic(err) + } + } + // 从raft读取提交到kvStore映射,直到错误 + go s.readCommits(commitC, errorC) + return s +} + +func (s *kvstore) Lookup(key string) (string, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + v, ok := s.kvStore[key] + return v, ok +} + +// Propose 处理客户端提交的的数据 put k,v +func (s *kvstore) Propose(k string, v string) { + marshal, _ := json.Marshal(kv{k, v}) + s.proposeC <- string(marshal) +} + +// 客户端读取raft已经committed的数据 +func (s *kvstore) readCommits(commitC <-chan *commit, errorC <-chan error) { + for commit := range commitC { + if commit == nil { + // 信号加载快照 + snapshot, err := s.loadSnapshot() + if err != nil { + log.Panic(err) + } + if snapshot != nil { + log.Printf("正在加载快照at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index) + if err := s.recoverFromSnapshot(snapshot.Data); err != nil { + log.Panic(err) + } + } + continue + } + + for _, data := range commit.data { + var dataKv kv + err := json.Unmarshal([]byte(data), &dataKv) + if err != nil { + log.Fatalf("raftexample:不能解析数据(%v)", err) + } + + s.mu.Lock() + 
s.kvStore[dataKv.Key] = dataKv.Val + s.mu.Unlock() + } + close(commit.applyDoneC) + } + if err, ok := <-errorC; ok { + log.Fatal(err) + } +} + +// 生成一个快照 +func (s *kvstore) genSnapshot() ([]byte, error) { + s.mu.RLock() + defer s.mu.RUnlock() + return json.Marshal(s.kvStore) +} + +func (s *kvstore) loadSnapshot() (*raftpb.Snapshot, error) { + snapshot, err := s.snapshotter.Load() + if err == snap.ErrNoSnapshot { + return nil, nil + } + if err != nil { + return nil, err + } + return snapshot, nil +} + +// 恢复快照数据,启动时 +func (s *kvstore) recoverFromSnapshot(snapshot []byte) error { + var store map[string]string + if err := json.Unmarshal(snapshot, &store); err != nil { + return err + } + s.mu.Lock() + defer s.mu.Unlock() + s.kvStore = store + return nil +} diff --git a/raftexample/listener.go b/raftexample/listener.go new file mode 100644 index 00000000000..9ad08fb0111 --- /dev/null +++ b/raftexample/listener.go @@ -0,0 +1,58 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "errors" + "net" + "time" +) + +// stoppableListener 在接受的连接上设置TCP keep-alive 超时,并等待stopc消息 +type stoppableListener struct { + *net.TCPListener + stopc <-chan struct{} +} + +func newStoppableListener(addr string, stopc <-chan struct{}) (*stoppableListener, error) { + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + return &stoppableListener{ln.(*net.TCPListener), stopc}, nil +} + +func (ln stoppableListener) Accept() (c net.Conn, err error) { + connc := make(chan *net.TCPConn, 1) + errc := make(chan error, 1) + go func() { + tc, err := ln.AcceptTCP() + if err != nil { + errc <- err + return + } + connc <- tc + }() + select { + case <-ln.stopc: + return nil, errors.New("etcd stopped") + case err := <-errc: + return nil, err + case tc := <-connc: + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil + } +} diff --git a/raftexample/main.go b/raftexample/main.go new file mode 100644 index 00000000000..695a20496d7 --- /dev/null +++ b/raftexample/main.go @@ -0,0 +1,51 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "flag" + "strings" + + "github.com/ls-2018/etcd_cn/raft/raftpb" +) + +func main() { + cluster := flag.String("cluster", "http://127.0.0.1:9021", "comma separated cluster peers") + id := flag.Int("id", 1, "node ID") + kvport := flag.Int("port", 9121, "http 服务器 key-value etcd port") + join := flag.Bool("join", false, "join an existing cluster") + flag.Parse() + + proposeC := make(chan string) // 提议通道, ---->放入raft状态机,返回错误 + defer close(proposeC) + triggerConfChangeC := make(chan raftpb.ConfChangeV1) // 配置通道 + defer close(triggerConfChangeC) + + // Raft为来自HTTP API的propose提供了commitC + var kvs *kvstore + getSnapshot := func() ([]byte, error) { return kvs.genSnapshot() } + commitC, errorC, snapshotterReady := newRaftNode(*id, strings.Split(*cluster, ","), *join, getSnapshot, proposeC, triggerConfChangeC) + + kvs = newKVStore(<-snapshotterReady, proposeC, commitC, errorC) + + // kv http处理程序将propose 更新到raft上 + serveHttpKVAPI(kvs, *kvport, triggerConfChangeC, errorC) +} + +/* + ---> proposeC ---> +app <--- commitC <--- raft + <--- errorC <--- +*/ diff --git a/raftexample/raft.go b/raftexample/raft.go new file mode 100644 index 00000000000..330e619df7b --- /dev/null +++ b/raftexample/raft.go @@ -0,0 +1,505 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "fmt" + "log" + "net/http" + "net/url" + "os" + "strconv" + "time" + + "github.com/ls-2018/etcd_cn/raft" + + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats" + "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/raft/raftpb" + + "go.uber.org/zap" +) + +// 一个批次一个批次交给上层应用来处理 +type commit struct { + data []string + applyDoneC chan<- struct{} +} + +// raftNode.transport.Handler() --> +// 基于raft的k,v存储 +type raftNode struct { + proposeC <-chan string // 它用来接收 client 发送的写请求提案消息; + triggerConfChangeC <-chan raftpb.ConfChangeV1 // 它用来接收集群配置变化消息; + commitC chan<- *commit // 它用来输出 Raft 共识模块已提交的日志条目消息. (k,v) + errorC chan<- error // 返回raft回话的错误 + id int // 本机ID + peers []string // 每个节点的通信地址 + join bool // 是否加入一个存在的集群 + waldir string // wal存储路径 + snapdir string // 存储快照的路径 + getSnapshot func() ([]byte, error) + confState raftpb.ConfState + snapshotIndex uint64 + appliedIndex uint64 + node raft.RaftNodeInterFace // 用于提交/错误通道的raft + raftStorage *raft.MemoryStorage + wal *wal.WAL // wal 日志管理 + snapshotter *snap.Snapshotter // 快照管理者 + snapshotterReady chan *snap.Snapshotter // 快照管理者就绪的信号 + snapCount uint64 + transport *rafthttp.Transport // 负责 raft 节点之间的网络通信服务 + stopc chan struct{} // signals proposal channel closed + httpstopc chan struct{} // signals http etcd to shutdown + httpdonec chan struct{} // signals http etcd shutdown complete + logger *zap.Logger +} + +func (rc *raftNode) saveSnap(snap raftpb.Snapshot) error { + walSnap := walpb.Snapshot{ + Index: snap.Metadata.Index, + Term: snap.Metadata.Term, + ConfState: &snap.Metadata.ConfState, + } + // 在把快照写到wal之前保存快照文件.这使得快照文件有可能成为孤儿,但可以防止一个WAL快照条目没有相应的快照文件. 
+ if err := rc.snapshotter.SaveSnap(snap); err != nil { + return err + } + if err := rc.wal.SaveSnapshot(walSnap); err != nil { // 写一条快照记录【 索引、任期、配置】 + return err + } + return rc.wal.ReleaseLockTo(snap.Metadata.Index) +} + +var snapshotCatchUpEntriesN uint64 = 10000 + +// 判断是否应该创建快照,每次apply就会调用 +func (rc *raftNode) maybeTriggerSnapshot(applyDoneC <-chan struct{}) { + if rc.appliedIndex-rc.snapshotIndex <= rc.snapCount { + // 日志条数 阈值 + return + } + if applyDoneC != nil { + select { + case <-applyDoneC: // 等待所有提交的条目被应用 + case <-rc.stopc: // (或etcd被关闭) + return + } + } + log.Printf("开始打快照 [applied index: %d | last snapshot index: %d]", rc.appliedIndex, rc.snapshotIndex) + data, err := rc.getSnapshot() + if err != nil { + log.Panic(err) + } + snap, err := rc.raftStorage.CreateSnapshot(rc.appliedIndex, &rc.confState, data) + if err != nil { + panic(err) + } + if err := rc.saveSnap(snap); err != nil { + panic(err) + } + + compactIndex := uint64(1) + if rc.appliedIndex > snapshotCatchUpEntriesN { + compactIndex = rc.appliedIndex - snapshotCatchUpEntriesN + } + if err := rc.raftStorage.Compact(compactIndex); err != nil { + panic(err) + } + + log.Printf("压缩 [0%d]的日志索引 ", compactIndex) + rc.snapshotIndex = rc.appliedIndex +} + +// 会单独启动一个后台 goroutine来负责上层模块 传递给 etcd-raft 模块的数据, +func (rc *raftNode) serveChannels() { + // 这里是获取快照数据和快照的元数据 + snap, err := rc.raftStorage.Snapshot() + if err != nil { + panic(err) + } + rc.confState = snap.Metadata.ConfState + rc.snapshotIndex = snap.Metadata.Index + rc.appliedIndex = snap.Metadata.Index + + defer rc.wal.Close() + + // 创建一个每隔 lOOms 触发一次的定时器,那么在逻辑上,lOOms 即是 etcd-raft 组件的最小时间单位 + // 该定时器每触发一次,则逻辑时钟推进一次 + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + // 单独启 动一个 goroutine 负责将 proposeC、 confChangeC 远远上接收到的数据传递给 etcd-raft 组件进行处理 + go func() { + confChangeCount := uint64(0) + for rc.proposeC != nil && rc.triggerConfChangeC != nil { // 必须设置 + select { + case prop, ok := <-rc.proposeC: + if !ok { // 关闭了 + 
rc.proposeC = nil // 发生异常将proposeC置空 + } else { + rc.node.Propose(context.TODO(), []byte(prop)) // 阻塞直到消息被处理 + } + case cc, ok := <-rc.triggerConfChangeC: // 收到上层应用通过 confChangeC远远传递过来的数据 + if !ok { + rc.triggerConfChangeC = nil // 如果发生异常将confChangeC置空 + } else { + confChangeCount++ + cc.ID = confChangeCount + rc.node.ProposeConfChange(context.TODO(), cc) + } + } + } + // 关闭 stopc 通道,触发 rafeNode.stop() 方法的调用 + close(rc.stopc) + }() + + // 处理 etcd-raft 模块返回给上层模块的数据及其他相关的操作 + for { + select { + case <-ticker.C: + // 上述 ticker 定时器触发一次 + rc.node.Tick() + case rd := <-rc.node.Ready(): // demo + // 将当前 etcd raft 组件的状态信息,以及待持久化的 Entry 记录先记录到 WAL 日志文件中, + // 即使之后宕机,这些信息也可以在节点下次启动时,通过前面回放 WAL 日志的方式进行恢复 + rc.wal.Save(rd.HardState, rd.Entries) + // 检测到 etcd-raft 组件生成了新的快照数据 + if !raft.IsEmptySnap(rd.Snapshot) { + rc.saveSnap(rd.Snapshot) // 将新的快照数据写入快照文件中 + rc.raftStorage.ApplySnapshot(rd.Snapshot) // 从持久化的内存存储中恢复出快照 + rc.publishSnapshot(rd.Snapshot) + } + // TODO, 以下两行不懂 + rc.raftStorage.Append(rd.Entries) // 将待持久化的 Entry 记录追加到 raftStorage 中完成持久化 + rc.transport.Send(rd.Messages) // 将待发送的消息发送到指定节点 + // 将已提交、待应用的 Entry 记录应用到上层应用的状态机中,并返回等待处理完成的channel + applyDoneC, ok := rc.publishEntries(rc.entriesToApply(rd.CommittedEntries)) + if !ok { + rc.stop() + return + } + // 随着节点的运行, WAL 日志量和 raftLog.storage 中的 Entry 记录会不断增加 + // 所以节点每处理 10000 条(默认值) Entry 记录,就会触发一次创建快照的过程, + // 同时 WAL 会释放一些日志文件的句柄,raftLog.storage 也会压缩其保存的 Entry 记录 + rc.maybeTriggerSnapshot(applyDoneC) + // 上层应用处理完该 Ready 实例,通知 etcd-raft 纽件准备返回下一个 Ready 实例 + rc.node.Advance() + + case err := <-rc.transport.ErrorC: + rc.writeError(err) + return + + case <-rc.stopc: + rc.stop() + return + } + } +} + +// OK +func (rc *raftNode) serveRaft() { + url, err := url.Parse(rc.peers[rc.id-1]) + if err != nil { + log.Fatalf("raftexample: 剖析URL失败 (%v)", err) + } + + ln, err := newStoppableListener(url.Host, rc.httpstopc) // etcd 的通信节点 + if err != nil { + log.Fatalf("raftexample:监听rafthttp失败 (%v)", err) + } + + err = 
(&http.Server{Handler: rc.transport.Handler()}).Serve(ln) + select { + case <-rc.httpstopc: + default: + log.Fatalf("raftexample: 启动失败rafthttp (%v)", err) + } + close(rc.httpdonec) +} + +func (rc *raftNode) Process(ctx context.Context, m raftpb.Message) error { + return rc.node.Step(ctx, m) +} + +func (rc *raftNode) IsIDRemoved(id uint64) bool { return false } +func (rc *raftNode) ReportUnreachable(id uint64) {} +func (rc *raftNode) ReportSnapshot(id uint64, status raft.SnapshotStatus) {} + +// 主要完成了raftNode的初始化 +// 使用上层模块传入的配置信息来创建raftNode实例,同时创建commitC 通道和errorC通道返回给上层模块使用 +// 上层的应用通过这几个channel就能和raftNode进行交互 +func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error), + proposeC <-chan string, + triggerConfChangeC <-chan raftpb.ConfChangeV1, +) (<-chan *commit, <-chan error, <-chan *snap.Snapshotter) { + // channel,主要传输Entry记录 + // raftNode会将etcd-raft模块返回的待apply Entry封装在 Ready实例中然后 写入commitC通道, + // 另一方面,kvstore会从commitC通道中读取这些待应用的 Entry 记录井保存其中的键值对信息. + commitC := make(chan *commit) + errorC := make(chan error) + + rc := &raftNode{ + proposeC: proposeC, + triggerConfChangeC: triggerConfChangeC, // 主动触发的 + commitC: commitC, + errorC: errorC, + id: id, + peers: peers, + join: join, + // 初始化存放 WAL 日志和 Snapshot 文件的的目录 + waldir: fmt.Sprintf("./raftexample/db/raftexample-%d", id), + snapdir: fmt.Sprintf("./raftexample/db/raftexample-%d-snap", id), + getSnapshot: getSnapshot, + snapCount: 10000, + stopc: make(chan struct{}), + httpstopc: make(chan struct{}), + httpdonec: make(chan struct{}), + logger: zap.NewExample(), + snapshotterReady: make(chan *snap.Snapshotter, 1), + // 其余结构在WAL重放后填充 + } + // 启动一个goroutine,完成剩余的初始化工作 + go rc.startRaft() + return commitC, errorC, rc.snapshotterReady +} + +// stop closes http, closes all channels, and stops raft. 
+func (rc *raftNode) stop() { + rc.stopHTTP() + close(rc.commitC) + close(rc.errorC) + rc.node.Stop() +} + +func (rc *raftNode) stopHTTP() { + rc.transport.Stop() + close(rc.httpstopc) + <-rc.httpdonec +} + +func (rc *raftNode) loadSnapshot() *raftpb.Snapshot { + if wal.Exist(rc.waldir) { + walSnaps, err := wal.ValidSnapshotEntries(rc.logger, rc.waldir) // wal 记录过的快照信息 + if err != nil { + log.Fatalf("raftexample: list快照失败 (%v)", err) + } + snapshot, err := rc.snapshotter.LoadNewestAvailable(walSnaps) // 获取任期、索引一致的快照 + if err != nil && err != snap.ErrNoSnapshot { + log.Fatalf("raftexample: error loading snapshot (%v)", err) + } + return snapshot + } + return &raftpb.Snapshot{} +} + +// 接收到了快照 +func (rc *raftNode) publishSnapshot(snapshotToSave raftpb.Snapshot) { + if raft.IsEmptySnap(snapshotToSave) { + return + } + + log.Printf("publishing snapshot at index %d", rc.snapshotIndex) + defer log.Printf("finished publishing snapshot at index %d", rc.snapshotIndex) + + if snapshotToSave.Metadata.Index <= rc.appliedIndex { + log.Fatalf("snapshot index [%d] should > progress.appliedIndex [%d]", snapshotToSave.Metadata.Index, rc.appliedIndex) + } + rc.commitC <- nil // 通知应用加载快照 + rc.confState = snapshotToSave.Metadata.ConfState + rc.snapshotIndex = snapshotToSave.Metadata.Index + rc.appliedIndex = snapshotToSave.Metadata.Index +} + +// replayWAL 重放wal日志 +func (rc *raftNode) replayWAL() *wal.WAL { + log.Printf("重放 WAL of member %d", rc.id) + snapshot := rc.loadSnapshot() // 获取最新的快照 + w := rc.openWAL(snapshot) + _, st, ents, err := w.ReadAll() // 自快照以后的wal日志记录 + if err != nil { + log.Fatalf("raftexample: 读取wal失败 (%v)", err) + } + rc.raftStorage = raft.NewMemoryStorage() + if snapshot != nil { + rc.raftStorage.ApplySnapshot(*snapshot) // 从持久化的内存存储中恢复出快照 + } + rc.raftStorage.SetHardState(st) // 从持久化的内存存储中恢复出状态 + rc.raftStorage.Append(ents) // 从持久化的内存存储中恢复出日志 + return w +} + +func (rc *raftNode) startRaft() { + if !fileutil.Exist(rc.snapdir) { + if err := os.Mkdir(rc.snapdir, 
0o750); err != nil { + log.Fatalf("raftexample: 无法创建快照目录 (%v)", err) + } + } + rc.snapshotter = snap.New(zap.NewExample(), rc.snapdir) + // 创建 WAL 实例,然后加载快照并回放 WAL 日志 + oldwal := wal.Exist(rc.waldir) + // raftNode.replayWAL() 方法首先会读取快照数据, + // 在快照数据中记录了该快照包含的最后一条Entry记录的 Term 值 和 索引值. + // 然后根据 Term 值 和 索引值确定读取 WAL 日志文件的位置, 并进行日志记录的读取. + rc.wal = rc.replayWAL() // 读取wal日志文件 + rc.snapshotterReady <- rc.snapshotter + + rpeers := make([]raft.Peer, len(rc.peers)) + for i := range rpeers { + rpeers[i] = raft.Peer{ID: uint64(i + 1)} + } + // 创建 raft.Config 实例 + c := &raft.Config{ + ID: uint64(rc.id), + ElectionTick: 10, // 返回选举权检查对应多少次tick触发次数 + HeartbeatTick: 1, // 返回心跳检查对应多少次tick触发次数 + Storage: rc.raftStorage, + MaxSizePerMsg: 1024 * 1024, + MaxInflightMsgs: 256, + MaxUncommittedEntriesSize: 1 << 30, + } + // 初始化底层的 etcd-raft 模块,这里会根据 WAL 日志的回放情况, + // 判断当前节点是首次启动还是重新启动 + if oldwal || rc.join { + rc.node = raft.RestartNode(c) // 有节点的信息 + } else { + rc.node = raft.StartNode(c, rpeers) + } + // 创建 Transport 实例并启动,他负责 raft 节点之间的网络通信服务 + rc.transport = &rafthttp.Transport{ + Logger: rc.logger, + ID: types.ID(rc.id), + ClusterID: 0x1000, // 集群标识符 + Raft: rc, + ServerStats: stats.NewServerStats("", ""), + LeaderStats: stats.NewLeaderStats(zap.NewExample(), strconv.Itoa(rc.id)), + ErrorC: make(chan error), + } + // 启动网络服务相关组件 + rc.transport.Start() + // 建立与集群中其他各个节点的连接 + for i := range rc.peers { + if i+1 != rc.id { + rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]}) + } + } + // 启动一个goroutine,其中会监听当前节点与集群中其他节点之间的网络连接 + go rc.serveRaft() + // 启动后台 goroutine 处理上层应用与底层 etcd-raft 模块的交互 + go rc.serveChannels() +} + +// 向上层应用返回err +func (rc *raftNode) writeError(err error) { + rc.stopHTTP() + close(rc.commitC) + rc.errorC <- err + close(rc.errorC) + rc.node.Stop() +} + +// openWAL 返回一个准备读取的WAL. 
+func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot) *wal.WAL { + if !wal.Exist(rc.waldir) { + if err := os.Mkdir(rc.waldir, 0o750); err != nil { + log.Fatalf("raftexample: 创建wal目录失败 (%v)", err) + } + w, err := wal.Create(zap.NewExample(), rc.waldir, nil) + if err != nil { + log.Fatalf("raftexample: create wal error (%v)", err) + } + w.Close() + } + + walsnap := walpb.Snapshot{} + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + log.Printf("加载 WAL at term %d and index %d", walsnap.Term, walsnap.Index) + w, err := wal.Open(zap.NewExample(), rc.waldir, walsnap) + if err != nil { + log.Fatalf("raftexample:加载wal失败(%v)", err) + } + + return w +} + +// 将raft已经commit的消息,过滤 +func (rc *raftNode) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) { + if len(ents) == 0 { + return ents + } + firstIdx := ents[0].Index + if firstIdx > rc.appliedIndex+1 { + log.Fatalf("第一条提交的日志索引[%d] 应该小于等于 <= progress.appliedIndex[%d]+1", firstIdx, rc.appliedIndex) + } + if rc.appliedIndex-firstIdx+1 < uint64(len(ents)) { + nents = ents[rc.appliedIndex-firstIdx+1:] + } + return nents +} + +// publishEntries 将已提交的log序列发往commit通道,并返回应用是否已经apply完的通道 +func (rc *raftNode) publishEntries(ents []raftpb.Entry) (<-chan struct{}, bool) { + if len(ents) == 0 { + return nil, true + } + + data := make([]string, 0, len(ents)) + for i := range ents { + switch ents[i].Type { + case raftpb.EntryNormal: + if len(ents[i].Data) == 0 { + break + } + s := string(ents[i].Data) + data = append(data, s) + case raftpb.EntryConfChange: + var cc raftpb.ConfChangeV1 + cc.Unmarshal(ents[i].Data) + rc.confState = *rc.node.ApplyConfChange(cc) // 变更后的 + switch cc.Type { + case raftpb.ConfChangeAddNode: + if len(cc.Context) > 0 { + rc.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)}) + } + case raftpb.ConfChangeRemoveNode: + if cc.NodeID == uint64(rc.id) { + log.Println("我已经被移出集群了!关闭.") + return nil, false + } + 
rc.transport.RemovePeer(types.ID(cc.NodeID)) + } + } + } + var applyDoneC chan struct{} + if len(data) > 0 { + applyDoneC = make(chan struct{}, 1) + select { + case rc.commitC <- &commit{data, applyDoneC}: + case <-rc.stopc: + return nil, false + } + } + // 提交后,更新appliedIndex + rc.appliedIndex = ents[len(ents)-1].Index + return applyDoneC, true +} diff --git a/raftexample/req.py b/raftexample/req.py new file mode 100644 index 00000000000..d4276485a57 --- /dev/null +++ b/raftexample/req.py @@ -0,0 +1,7 @@ +import json + +import requests + +for i in range(100000): + print(requests.put('http://127.0.0.1:9121/a%s' % i, data=json.dumps({"a": i})).text) + print(i) diff --git a/read.md b/read.md new file mode 100644 index 00000000000..ee465002c67 --- /dev/null +++ b/read.md @@ -0,0 +1,36 @@ +``` +在一个任期内,一个Raft 节点最多只能为一个候选人投票,按照先到先得的原则,投给最早来拉选票的候选人(注意:下文的"安全性"针对投票添加了一个额外的限制) +``` + +#### append + +1) 客户端向Leader 发送写请求. +2) Leader 将写请求解析成操作指令追加到本地日志文件中. +3) Leader 为每个Follower 广播AppendEntries RPC . +4) Follower 通过一致性检查,选择从哪个位置开始追加Leader 的日志条目. +5) 一旦日志项commit成功, Leader 就将该日志条目对应的指令应用(apply) 到本地状态机,并向客户端返回操作结果. +6) Leader后续通过AppendEntries RPC 将已经成功(在大多数节点上)提交的日志项告知Follower . +7) Follower 收到提交的日志项之后,将其应用至本地状态机. 
+ +``` Goland +type Storage interface { + InitialState() (pb.HardState, pb.ConfState, error) + Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + Term(i uint64) (uint64, error) + LastIndex() (uint64, error) // 返回最后一条数据的索引 + FirstIndex() (uint64, error) // 返回第一条数据的索引 + Snapshot() (pb.Snapshot, error) // 反回最近的快照数据 +} +var _ Storage = &MemoryStorage{} + + +type raftLog struct { + storage Storage // stable + unstable unstable + committed uint64 + applied uint64 + logger Logger + maxNextEntsSize uint64 +} + +``` diff --git a/req.py b/req.py new file mode 100644 index 00000000000..bafd0f3018f --- /dev/null +++ b/req.py @@ -0,0 +1,5 @@ +import os + +for i in range(600): + print(i) + os.system(" ~/.gopath/bin/etcdctl put a%s b%s" % (i, i)) diff --git a/scripts/README b/scripts/README deleted file mode 100644 index 2139feb7c0d..00000000000 --- a/scripts/README +++ /dev/null @@ -1 +0,0 @@ -scripts for etcd development \ No newline at end of file diff --git a/scripts/backup.sh b/scripts/backup.sh new file mode 100644 index 00000000000..d59bebc61a5 --- /dev/null +++ b/scripts/backup.sh @@ -0,0 +1,6 @@ + +date_time=`date +%Y%m%d` +rm -rf ./etcd_backup/ +etcdutl backup --data-dir ../default.etcd --backup-dir ./etcd_backup/"${date_time}" + +find ./etcd_backup/ -ctime +7 -exec rm -r {} \; diff --git a/scripts/build-binary.sh b/scripts/build-binary.sh deleted file mode 100755 index 6cd70e1273a..00000000000 --- a/scripts/build-binary.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env bash - -set -e - -source ./scripts/test_lib.sh - -VER=$1 -REPOSITORY="${REPOSITORY:-git@github.com:etcd-io/etcd.git}" - -if [ -z "$1" ]; then - echo "Usage: ${0} VERSION" >> /dev/stderr - exit 255 -fi - -set -u - -function setup_env { - local ver=${1} - local proj=${2} - - if [ ! 
-d "${proj}" ]; then - run git clone "${REPOSITORY}" - fi - - pushd "${proj}" >/dev/null - run git fetch --all - run git checkout "${ver}" - popd >/dev/null -} - - -function package { - local target=${1} - local srcdir="${2}/bin" - - local ccdir="${srcdir}/${GOOS}_${GOARCH}" - if [ -d "${ccdir}" ]; then - srcdir="${ccdir}" - fi - local ext="" - if [ "${GOOS}" == "windows" ]; then - ext=".exe" - fi - for bin in etcd etcdctl etcdutl; do - cp "${srcdir}/${bin}" "${target}/${bin}${ext}" - done - - cp etcd/README.md "${target}"/README.md - cp etcd/etcdctl/README.md "${target}"/README-etcdctl.md - cp etcd/etcdctl/READMEv2.md "${target}"/READMEv2-etcdctl.md - cp etcd/etcdutl/README.md "${target}"/README-etcdutl.md - - cp -R etcd/Documentation "${target}"/Documentation -} - -function main { - local proj="etcd" - - mkdir -p release - cd release - setup_env "${VER}" "${proj}" - - tarcmd=tar - if [[ $(go env GOOS) == "darwin" ]]; then - echo "Please use linux machine for release builds." - exit 1 - fi - - for os in darwin windows linux; do - export GOOS=${os} - TARGET_ARCHS=("amd64") - - if [ ${GOOS} == "linux" ]; then - TARGET_ARCHS+=("arm64") - TARGET_ARCHS+=("ppc64le") - TARGET_ARCHS+=("s390x") - fi - - if [ ${GOOS} == "darwin" ]; then - TARGET_ARCHS+=("arm64") - fi - - for TARGET_ARCH in "${TARGET_ARCHS[@]}"; do - export GOARCH=${TARGET_ARCH} - - pushd etcd >/dev/null - GO_LDFLAGS="-s -w" ./scripts/build.sh - popd >/dev/null - - TARGET="etcd-${VER}-${GOOS}-${GOARCH}" - mkdir "${TARGET}" - package "${TARGET}" "${proj}" - - if [ ${GOOS} == "linux" ]; then - ${tarcmd} cfz "${TARGET}.tar.gz" "${TARGET}" - echo "Wrote release/${TARGET}.tar.gz" - else - zip -qr "${TARGET}.zip" "${TARGET}" - echo "Wrote release/${TARGET}.zip" - fi - done - done -} - -main diff --git a/scripts/build-docker.sh b/scripts/build-docker.sh deleted file mode 100755 index a255dc2cb15..00000000000 --- a/scripts/build-docker.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -set -e - -if [ "$#" -ne 
1 ]; then - echo "Usage: $0 VERSION" >&2 - exit 1 -fi - -ARCH=$(go env GOARCH) -VERSION="${1}-${ARCH}" -DOCKERFILE="Dockerfile-release.${ARCH}" - -if [ -z "${BINARYDIR}" ]; then - RELEASE="etcd-${1}"-$(go env GOOS)-$(go env GOARCH) - BINARYDIR="${RELEASE}" - TARFILE="${RELEASE}.tar.gz" - TARURL="https://github.com/etcd-io/etcd/releases/download/${1}/${TARFILE}" - if ! curl -f -L -o "${TARFILE}" "${TARURL}" ; then - echo "Failed to download ${TARURL}." - exit 1 - fi - tar -zvxf "${TARFILE}" -fi - -BINARYDIR=${BINARYDIR:-.} -BUILDDIR=${BUILDDIR:-.} - -IMAGEDIR=${BUILDDIR}/image-docker - -mkdir -p "${IMAGEDIR}"/var/etcd -mkdir -p "${IMAGEDIR}"/var/lib/etcd -cp "${BINARYDIR}"/etcd "${BINARYDIR}"/etcdctl "${BINARYDIR}"/etcdutl "${IMAGEDIR}" - -cat ./"${DOCKERFILE}" > "${IMAGEDIR}"/Dockerfile - -if [ -z "$TAG" ]; then - docker build -t "gcr.io/etcd-development/etcd:${VERSION}" "${IMAGEDIR}" - docker build -t "quay.io/coreos/etcd:${VERSION}" "${IMAGEDIR}" -else - docker build -t "${TAG}:${VERSION}" "${IMAGEDIR}" -fi diff --git a/scripts/build-release.sh b/scripts/build-release.sh deleted file mode 100755 index e90fb3e138f..00000000000 --- a/scripts/build-release.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# -# Build all release binaries and images to directory ./release. -# Run from repository root. -# -set -e - -source ./scripts/test_lib.sh - -VERSION=$1 -if [ -z "${VERSION}" ]; then - echo "Usage: ${0} VERSION" >> /dev/stderr - exit 255 -fi - -if ! command -v docker >/dev/null; then - echo "cannot find docker" - exit 1 -fi - -ETCD_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. - -pushd "${ETCD_ROOT}" >/dev/null - log_callout "Building etcd binary..." - ./scripts/build-binary.sh "${VERSION}" - - for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do - log_callout "Building ${TARGET_ARCH} docker image..." 
- GOOS=linux GOARCH=${TARGET_ARCH} BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} BUILDDIR=release ./scripts/build-docker.sh "${VERSION}" - done -popd >/dev/null diff --git a/scripts/build.sh b/scripts/build.sh deleted file mode 100755 index 4a588e4de3f..00000000000 --- a/scripts/build.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -# This scripts build the etcd binaries -# To build the tools, run `build_tools.sh` - -source ./scripts/test_lib.sh -source ./scripts/build_lib.sh - -# only build when called directly, not sourced -if echo "$0" | grep -E "build(.sh)?$" >/dev/null; then - run_build etcd_build -fi diff --git a/scripts/build_lib.sh b/scripts/build_lib.sh deleted file mode 100755 index 9c297a191e5..00000000000 --- a/scripts/build_lib.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bash - -source ./scripts/test_lib.sh - -GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound") -VERSION_SYMBOL="${ROOT_MODULE}/api/v3/version.GitSHA" - -# Set GO_LDFLAGS="-s" for building without symbols for debugging. -# shellcheck disable=SC2206 -GO_LDFLAGS=(${GO_LDFLAGS} "-X=${VERSION_SYMBOL}=${GIT_SHA}") -GO_BUILD_ENV=("CGO_ENABLED=0" "GO_BUILD_FLAGS=${GO_BUILD_FLAGS}" "GOOS=${GOOS}" "GOARCH=${GOARCH}") - -etcd_build() { - out="bin" - if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi - - run rm -f "${out}/etcd" - ( - cd ./server - # Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK - # shellcheck disable=SC2086 - run env "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \ - -trimpath \ - -installsuffix=cgo \ - "-ldflags=${GO_LDFLAGS[*]}" \ - -o="../${out}/etcd" . || return 2 - ) || return 2 - - run rm -f "${out}/etcdutl" - # shellcheck disable=SC2086 - ( - cd ./etcdutl - run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \ - -trimpath \ - -installsuffix=cgo \ - "-ldflags=${GO_LDFLAGS[*]}" \ - -o="../${out}/etcdutl" . 
|| return 2 - ) || return 2 - - run rm -f "${out}/etcdctl" - # shellcheck disable=SC2086 - ( - cd ./etcdctl - run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \ - -trimpath \ - -installsuffix=cgo \ - "-ldflags=${GO_LDFLAGS[*]}" \ - -o="../${out}/etcdctl" . || return 2 - ) || return 2 - # Verify whether symbol we overwrote exists - # For cross-compiling we cannot run: ${out}/etcd --version | grep -q "Git SHA: ${GIT_SHA}" - - # We need symbols to do this check: - if [[ "${GO_LDFLAGS[*]}" != *"-s"* ]]; then - go tool nm "${out}/etcd" | grep "${VERSION_SYMBOL}" > /dev/null - if [[ "${PIPESTATUS[*]}" != "0 0" ]]; then - log_error "FAIL: Symbol ${VERSION_SYMBOL} not found in binary: ${out}/etcd" - return 2 - fi - fi -} - -tools_build() { - out="bin" - if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi - tools_path="tools/benchmark - tools/etcd-dump-db - tools/etcd-dump-logs - tools/local-tester/bridge" - for tool in ${tools_path} - do - echo "Building" "'${tool}'"... 
- run rm -f "${out}/${tool}" - # shellcheck disable=SC2086 - run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} \ - -trimpath \ - -installsuffix=cgo \ - "-ldflags=${GO_LDFLAGS[*]}" \ - -o="${out}/${tool}" "./${tool}" || return 2 - done - tests_build "${@}" -} - -tests_build() { - out=${BINDIR:-./bin} - out=$(readlink -f "$out") - out="${out}/functional/cmd" - mkdir -p "${out}" - BINDIR="${out}" run ./tests/functional/build.sh || return 2 -} - -run_build() { - echo Running "$1" - if $1; then - log_success "SUCCESS: $1 (GOARCH=${GOARCH})" - else - log_error "FAIL: $1 (GOARCH=${GOARCH})" - exit 2 - fi -} diff --git a/scripts/build_tools.sh b/scripts/build_tools.sh deleted file mode 100755 index 48fa9d6faeb..00000000000 --- a/scripts/build_tools.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -source ./scripts/test_lib.sh -source ./scripts/build_lib.sh - -run_build tools_build diff --git a/scripts/codecov_upload.sh b/scripts/codecov_upload.sh deleted file mode 100755 index 8ee1a65afb8..00000000000 --- a/scripts/codecov_upload.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -# Script used to collect and upload test coverage (mostly by travis). -# Usage ./test_coverage_upload.sh [log_file] - -set -o pipefail - -LOG_FILE=${1:-test-coverage.log} - -# We collect the coverage -COVERDIR=covdir PASSES='build build_cov cov' ./scripts/test.sh 2>&1 | tee "${LOG_FILE}" -test_success="$?" - -# We try to upload whatever we have: -bash <(curl -s https://codecov.io/bash) -f ./covdir/all.coverprofile -cF all || exit 2 - -# Expose the original status of the test coverage execution. 
-exit ${test_success} diff --git a/scripts/etcd_version_annotations.txt b/scripts/etcd_version_annotations.txt deleted file mode 100644 index 3e5d23f94d8..00000000000 --- a/scripts/etcd_version_annotations.txt +++ /dev/null @@ -1,465 +0,0 @@ -authpb.Permission: "" -authpb.Permission.READ: "" -authpb.Permission.READWRITE: "" -authpb.Permission.Type: "" -authpb.Permission.WRITE: "" -authpb.Permission.key: "" -authpb.Permission.permType: "" -authpb.Permission.range_end: "" -authpb.Role: "" -authpb.Role.keyPermission: "" -authpb.Role.name: "" -authpb.User: "" -authpb.User.name: "" -authpb.User.options: "" -authpb.User.password: "" -authpb.User.roles: "" -authpb.UserAddOptions: "" -authpb.UserAddOptions.no_password: "" -etcdserverpb.AlarmMember: "3.0" -etcdserverpb.AlarmMember.alarm: "" -etcdserverpb.AlarmMember.memberID: "" -etcdserverpb.AlarmRequest: "3.0" -etcdserverpb.AlarmRequest.ACTIVATE: "" -etcdserverpb.AlarmRequest.AlarmAction: "3.0" -etcdserverpb.AlarmRequest.DEACTIVATE: "" -etcdserverpb.AlarmRequest.GET: "" -etcdserverpb.AlarmRequest.action: "" -etcdserverpb.AlarmRequest.alarm: "" -etcdserverpb.AlarmRequest.memberID: "" -etcdserverpb.AlarmResponse: "3.0" -etcdserverpb.AlarmResponse.alarms: "" -etcdserverpb.AlarmResponse.header: "" -etcdserverpb.AlarmType: "3.0" -etcdserverpb.AuthDisableRequest: "3.0" -etcdserverpb.AuthDisableResponse: "3.0" -etcdserverpb.AuthDisableResponse.header: "" -etcdserverpb.AuthEnableRequest: "3.0" -etcdserverpb.AuthEnableResponse: "3.0" -etcdserverpb.AuthEnableResponse.header: "" -etcdserverpb.AuthRoleAddRequest: "3.0" -etcdserverpb.AuthRoleAddRequest.name: "" -etcdserverpb.AuthRoleAddResponse: "3.0" -etcdserverpb.AuthRoleAddResponse.header: "" -etcdserverpb.AuthRoleDeleteRequest: "3.0" -etcdserverpb.AuthRoleDeleteRequest.role: "" -etcdserverpb.AuthRoleDeleteResponse: "3.0" -etcdserverpb.AuthRoleDeleteResponse.header: "" -etcdserverpb.AuthRoleGetRequest: "3.0" -etcdserverpb.AuthRoleGetRequest.role: "" 
-etcdserverpb.AuthRoleGetResponse: "" -etcdserverpb.AuthRoleGetResponse.header: "3.0" -etcdserverpb.AuthRoleGetResponse.perm: "3.0" -etcdserverpb.AuthRoleGrantPermissionRequest: "3.0" -etcdserverpb.AuthRoleGrantPermissionRequest.name: "" -etcdserverpb.AuthRoleGrantPermissionRequest.perm: "" -etcdserverpb.AuthRoleGrantPermissionResponse: "3.0" -etcdserverpb.AuthRoleGrantPermissionResponse.header: "" -etcdserverpb.AuthRoleListRequest: "3.0" -etcdserverpb.AuthRoleListResponse: "3.0" -etcdserverpb.AuthRoleListResponse.header: "" -etcdserverpb.AuthRoleListResponse.roles: "" -etcdserverpb.AuthRoleRevokePermissionRequest: "3.0" -etcdserverpb.AuthRoleRevokePermissionRequest.key: "" -etcdserverpb.AuthRoleRevokePermissionRequest.range_end: "" -etcdserverpb.AuthRoleRevokePermissionRequest.role: "" -etcdserverpb.AuthRoleRevokePermissionResponse: "3.0" -etcdserverpb.AuthRoleRevokePermissionResponse.header: "" -etcdserverpb.AuthStatusRequest: "3.5" -etcdserverpb.AuthStatusResponse: "3.5" -etcdserverpb.AuthStatusResponse.authRevision: "" -etcdserverpb.AuthStatusResponse.enabled: "" -etcdserverpb.AuthStatusResponse.header: "" -etcdserverpb.AuthUserAddRequest: "3.0" -etcdserverpb.AuthUserAddRequest.hashedPassword: "3.5" -etcdserverpb.AuthUserAddRequest.name: "" -etcdserverpb.AuthUserAddRequest.options: "3.4" -etcdserverpb.AuthUserAddRequest.password: "" -etcdserverpb.AuthUserAddResponse: "3.0" -etcdserverpb.AuthUserAddResponse.header: "" -etcdserverpb.AuthUserChangePasswordRequest: "3.0" -etcdserverpb.AuthUserChangePasswordRequest.hashedPassword: "3.5" -etcdserverpb.AuthUserChangePasswordRequest.name: "" -etcdserverpb.AuthUserChangePasswordRequest.password: "" -etcdserverpb.AuthUserChangePasswordResponse: "3.0" -etcdserverpb.AuthUserChangePasswordResponse.header: "" -etcdserverpb.AuthUserDeleteRequest: "3.0" -etcdserverpb.AuthUserDeleteRequest.name: "" -etcdserverpb.AuthUserDeleteResponse: "3.0" -etcdserverpb.AuthUserDeleteResponse.header: "" -etcdserverpb.AuthUserGetRequest: "3.0" 
-etcdserverpb.AuthUserGetRequest.name: "" -etcdserverpb.AuthUserGetResponse: "3.0" -etcdserverpb.AuthUserGetResponse.header: "" -etcdserverpb.AuthUserGetResponse.roles: "" -etcdserverpb.AuthUserGrantRoleRequest: "3.0" -etcdserverpb.AuthUserGrantRoleRequest.role: "" -etcdserverpb.AuthUserGrantRoleRequest.user: "" -etcdserverpb.AuthUserGrantRoleResponse: "3.0" -etcdserverpb.AuthUserGrantRoleResponse.header: "" -etcdserverpb.AuthUserListRequest: "3.0" -etcdserverpb.AuthUserListResponse: "3.0" -etcdserverpb.AuthUserListResponse.header: "" -etcdserverpb.AuthUserListResponse.users: "" -etcdserverpb.AuthUserRevokeRoleRequest: "3.0" -etcdserverpb.AuthUserRevokeRoleRequest.name: "" -etcdserverpb.AuthUserRevokeRoleRequest.role: "" -etcdserverpb.AuthUserRevokeRoleResponse: "3.0" -etcdserverpb.AuthUserRevokeRoleResponse.header: "" -etcdserverpb.AuthenticateRequest: "3.0" -etcdserverpb.AuthenticateRequest.name: "" -etcdserverpb.AuthenticateRequest.password: "" -etcdserverpb.AuthenticateResponse: "3.0" -etcdserverpb.AuthenticateResponse.header: "" -etcdserverpb.AuthenticateResponse.token: "" -etcdserverpb.CORRUPT: "3.3" -etcdserverpb.CompactionRequest: "3.0" -etcdserverpb.CompactionRequest.physical: "" -etcdserverpb.CompactionRequest.revision: "" -etcdserverpb.CompactionResponse: "3.0" -etcdserverpb.CompactionResponse.header: "" -etcdserverpb.Compare: "3.0" -etcdserverpb.Compare.CREATE: "" -etcdserverpb.Compare.CompareResult: "3.0" -etcdserverpb.Compare.CompareTarget: "3.0" -etcdserverpb.Compare.EQUAL: "" -etcdserverpb.Compare.GREATER: "" -etcdserverpb.Compare.LEASE: "3.3" -etcdserverpb.Compare.LESS: "" -etcdserverpb.Compare.MOD: "" -etcdserverpb.Compare.NOT_EQUAL: "3.1" -etcdserverpb.Compare.VALUE: "" -etcdserverpb.Compare.VERSION: "" -etcdserverpb.Compare.create_revision: "" -etcdserverpb.Compare.key: "" -etcdserverpb.Compare.lease: "3.3" -etcdserverpb.Compare.mod_revision: "" -etcdserverpb.Compare.range_end: "3.3" -etcdserverpb.Compare.result: "" -etcdserverpb.Compare.target: 
"" -etcdserverpb.Compare.value: "" -etcdserverpb.Compare.version: "" -etcdserverpb.DefragmentRequest: "3.0" -etcdserverpb.DefragmentResponse: "3.0" -etcdserverpb.DefragmentResponse.header: "" -etcdserverpb.DeleteRangeRequest: "3.0" -etcdserverpb.DeleteRangeRequest.key: "" -etcdserverpb.DeleteRangeRequest.prev_kv: "3.1" -etcdserverpb.DeleteRangeRequest.range_end: "" -etcdserverpb.DeleteRangeResponse: "3.0" -etcdserverpb.DeleteRangeResponse.deleted: "" -etcdserverpb.DeleteRangeResponse.header: "" -etcdserverpb.DeleteRangeResponse.prev_kvs: "3.1" -etcdserverpb.DowngradeRequest: "3.5" -etcdserverpb.DowngradeRequest.CANCEL: "" -etcdserverpb.DowngradeRequest.DowngradeAction: "3.5" -etcdserverpb.DowngradeRequest.ENABLE: "" -etcdserverpb.DowngradeRequest.VALIDATE: "" -etcdserverpb.DowngradeRequest.action: "" -etcdserverpb.DowngradeRequest.version: "" -etcdserverpb.DowngradeResponse: "3.5" -etcdserverpb.DowngradeResponse.header: "" -etcdserverpb.DowngradeResponse.version: "" -etcdserverpb.EmptyResponse: "" -etcdserverpb.HashKVRequest: "3.3" -etcdserverpb.HashKVRequest.revision: "" -etcdserverpb.HashKVResponse: "3.3" -etcdserverpb.HashKVResponse.compact_revision: "" -etcdserverpb.HashKVResponse.hash: "" -etcdserverpb.HashKVResponse.hash_revision: "3.6" -etcdserverpb.HashKVResponse.header: "" -etcdserverpb.HashRequest: "3.0" -etcdserverpb.HashResponse: "3.0" -etcdserverpb.HashResponse.hash: "" -etcdserverpb.HashResponse.header: "" -etcdserverpb.InternalAuthenticateRequest: "3.0" -etcdserverpb.InternalAuthenticateRequest.name: "" -etcdserverpb.InternalAuthenticateRequest.password: "" -etcdserverpb.InternalAuthenticateRequest.simple_token: "" -etcdserverpb.InternalRaftRequest: "3.0" -etcdserverpb.InternalRaftRequest.ID: "" -etcdserverpb.InternalRaftRequest.alarm: "" -etcdserverpb.InternalRaftRequest.auth_disable: "" -etcdserverpb.InternalRaftRequest.auth_enable: "" -etcdserverpb.InternalRaftRequest.auth_role_add: "" -etcdserverpb.InternalRaftRequest.auth_role_delete: "" 
-etcdserverpb.InternalRaftRequest.auth_role_get: "" -etcdserverpb.InternalRaftRequest.auth_role_grant_permission: "" -etcdserverpb.InternalRaftRequest.auth_role_list: "" -etcdserverpb.InternalRaftRequest.auth_role_revoke_permission: "" -etcdserverpb.InternalRaftRequest.auth_status: "3.5" -etcdserverpb.InternalRaftRequest.auth_user_add: "" -etcdserverpb.InternalRaftRequest.auth_user_change_password: "" -etcdserverpb.InternalRaftRequest.auth_user_delete: "" -etcdserverpb.InternalRaftRequest.auth_user_get: "" -etcdserverpb.InternalRaftRequest.auth_user_grant_role: "" -etcdserverpb.InternalRaftRequest.auth_user_list: "" -etcdserverpb.InternalRaftRequest.auth_user_revoke_role: "" -etcdserverpb.InternalRaftRequest.authenticate: "" -etcdserverpb.InternalRaftRequest.cluster_member_attr_set: "3.5" -etcdserverpb.InternalRaftRequest.cluster_version_set: "3.5" -etcdserverpb.InternalRaftRequest.compaction: "" -etcdserverpb.InternalRaftRequest.delete_range: "" -etcdserverpb.InternalRaftRequest.downgrade_info_set: "3.5" -etcdserverpb.InternalRaftRequest.header: "" -etcdserverpb.InternalRaftRequest.lease_checkpoint: "3.4" -etcdserverpb.InternalRaftRequest.lease_grant: "" -etcdserverpb.InternalRaftRequest.lease_revoke: "" -etcdserverpb.InternalRaftRequest.put: "" -etcdserverpb.InternalRaftRequest.range: "" -etcdserverpb.InternalRaftRequest.txn: "" -etcdserverpb.InternalRaftRequest.v2: "" -etcdserverpb.LeaseCheckpoint: "3.4" -etcdserverpb.LeaseCheckpoint.ID: "" -etcdserverpb.LeaseCheckpoint.remaining_TTL: "" -etcdserverpb.LeaseCheckpointRequest: "3.4" -etcdserverpb.LeaseCheckpointRequest.checkpoints: "" -etcdserverpb.LeaseCheckpointResponse: "3.4" -etcdserverpb.LeaseCheckpointResponse.header: "" -etcdserverpb.LeaseGrantRequest: "3.0" -etcdserverpb.LeaseGrantRequest.ID: "" -etcdserverpb.LeaseGrantRequest.TTL: "" -etcdserverpb.LeaseGrantResponse: "3.0" -etcdserverpb.LeaseGrantResponse.ID: "" -etcdserverpb.LeaseGrantResponse.TTL: "" -etcdserverpb.LeaseGrantResponse.error: "" 
-etcdserverpb.LeaseGrantResponse.header: "" -etcdserverpb.LeaseKeepAliveRequest: "3.0" -etcdserverpb.LeaseKeepAliveRequest.ID: "" -etcdserverpb.LeaseKeepAliveResponse: "3.0" -etcdserverpb.LeaseKeepAliveResponse.ID: "" -etcdserverpb.LeaseKeepAliveResponse.TTL: "" -etcdserverpb.LeaseKeepAliveResponse.header: "" -etcdserverpb.LeaseLeasesRequest: "3.3" -etcdserverpb.LeaseLeasesResponse: "3.3" -etcdserverpb.LeaseLeasesResponse.header: "" -etcdserverpb.LeaseLeasesResponse.leases: "" -etcdserverpb.LeaseRevokeRequest: "3.0" -etcdserverpb.LeaseRevokeRequest.ID: "" -etcdserverpb.LeaseRevokeResponse: "3.0" -etcdserverpb.LeaseRevokeResponse.header: "" -etcdserverpb.LeaseStatus: "3.3" -etcdserverpb.LeaseStatus.ID: "" -etcdserverpb.LeaseTimeToLiveRequest: "3.1" -etcdserverpb.LeaseTimeToLiveRequest.ID: "" -etcdserverpb.LeaseTimeToLiveRequest.keys: "" -etcdserverpb.LeaseTimeToLiveResponse: "3.1" -etcdserverpb.LeaseTimeToLiveResponse.ID: "" -etcdserverpb.LeaseTimeToLiveResponse.TTL: "" -etcdserverpb.LeaseTimeToLiveResponse.grantedTTL: "" -etcdserverpb.LeaseTimeToLiveResponse.header: "" -etcdserverpb.LeaseTimeToLiveResponse.keys: "" -etcdserverpb.Member: "3.0" -etcdserverpb.Member.ID: "" -etcdserverpb.Member.clientURLs: "" -etcdserverpb.Member.isLearner: "3.4" -etcdserverpb.Member.name: "" -etcdserverpb.Member.peerURLs: "" -etcdserverpb.MemberAddRequest: "3.0" -etcdserverpb.MemberAddRequest.isLearner: "3.4" -etcdserverpb.MemberAddRequest.peerURLs: "" -etcdserverpb.MemberAddResponse: "3.0" -etcdserverpb.MemberAddResponse.header: "" -etcdserverpb.MemberAddResponse.member: "" -etcdserverpb.MemberAddResponse.members: "" -etcdserverpb.MemberListRequest: "3.0" -etcdserverpb.MemberListRequest.linearizable: "3.5" -etcdserverpb.MemberListResponse: "3.0" -etcdserverpb.MemberListResponse.header: "" -etcdserverpb.MemberListResponse.members: "" -etcdserverpb.MemberPromoteRequest: "3.4" -etcdserverpb.MemberPromoteRequest.ID: "" -etcdserverpb.MemberPromoteResponse: "3.4" 
-etcdserverpb.MemberPromoteResponse.header: "" -etcdserverpb.MemberPromoteResponse.members: "" -etcdserverpb.MemberRemoveRequest: "3.0" -etcdserverpb.MemberRemoveRequest.ID: "" -etcdserverpb.MemberRemoveResponse: "3.0" -etcdserverpb.MemberRemoveResponse.header: "" -etcdserverpb.MemberRemoveResponse.members: "" -etcdserverpb.MemberUpdateRequest: "3.0" -etcdserverpb.MemberUpdateRequest.ID: "" -etcdserverpb.MemberUpdateRequest.peerURLs: "" -etcdserverpb.MemberUpdateResponse: "3.0" -etcdserverpb.MemberUpdateResponse.header: "" -etcdserverpb.MemberUpdateResponse.members: "3.1" -etcdserverpb.Metadata: "" -etcdserverpb.Metadata.ClusterID: "" -etcdserverpb.Metadata.NodeID: "" -etcdserverpb.MoveLeaderRequest: "3.3" -etcdserverpb.MoveLeaderRequest.targetID: "" -etcdserverpb.MoveLeaderResponse: "3.3" -etcdserverpb.MoveLeaderResponse.header: "" -etcdserverpb.NONE: "" -etcdserverpb.NOSPACE: "" -etcdserverpb.PutRequest: "3.0" -etcdserverpb.PutRequest.ignore_lease: "3.2" -etcdserverpb.PutRequest.ignore_value: "3.2" -etcdserverpb.PutRequest.key: "" -etcdserverpb.PutRequest.lease: "" -etcdserverpb.PutRequest.prev_kv: "3.1" -etcdserverpb.PutRequest.value: "" -etcdserverpb.PutResponse: "3.0" -etcdserverpb.PutResponse.header: "" -etcdserverpb.PutResponse.prev_kv: "3.1" -etcdserverpb.RangeRequest: "3.0" -etcdserverpb.RangeRequest.ASCEND: "" -etcdserverpb.RangeRequest.CREATE: "" -etcdserverpb.RangeRequest.DESCEND: "" -etcdserverpb.RangeRequest.KEY: "" -etcdserverpb.RangeRequest.MOD: "" -etcdserverpb.RangeRequest.NONE: "" -etcdserverpb.RangeRequest.SortOrder: "3.0" -etcdserverpb.RangeRequest.SortTarget: "3.0" -etcdserverpb.RangeRequest.VALUE: "" -etcdserverpb.RangeRequest.VERSION: "" -etcdserverpb.RangeRequest.count_only: "" -etcdserverpb.RangeRequest.key: "" -etcdserverpb.RangeRequest.keys_only: "" -etcdserverpb.RangeRequest.limit: "" -etcdserverpb.RangeRequest.max_create_revision: "3.1" -etcdserverpb.RangeRequest.max_mod_revision: "3.1" -etcdserverpb.RangeRequest.min_create_revision: 
"3.1" -etcdserverpb.RangeRequest.min_mod_revision: "3.1" -etcdserverpb.RangeRequest.range_end: "" -etcdserverpb.RangeRequest.revision: "" -etcdserverpb.RangeRequest.serializable: "" -etcdserverpb.RangeRequest.sort_order: "" -etcdserverpb.RangeRequest.sort_target: "" -etcdserverpb.RangeResponse: "3.0" -etcdserverpb.RangeResponse.count: "" -etcdserverpb.RangeResponse.header: "" -etcdserverpb.RangeResponse.kvs: "" -etcdserverpb.RangeResponse.more: "" -etcdserverpb.Request: "" -etcdserverpb.Request.Dir: "" -etcdserverpb.Request.Expiration: "" -etcdserverpb.Request.ID: "" -etcdserverpb.Request.Method: "" -etcdserverpb.Request.Path: "" -etcdserverpb.Request.PrevExist: "" -etcdserverpb.Request.PrevIndex: "" -etcdserverpb.Request.PrevValue: "" -etcdserverpb.Request.Quorum: "" -etcdserverpb.Request.Recursive: "" -etcdserverpb.Request.Refresh: "" -etcdserverpb.Request.Since: "" -etcdserverpb.Request.Sorted: "" -etcdserverpb.Request.Stream: "" -etcdserverpb.Request.Time: "" -etcdserverpb.Request.Val: "" -etcdserverpb.Request.Wait: "" -etcdserverpb.RequestHeader: "3.0" -etcdserverpb.RequestHeader.ID: "" -etcdserverpb.RequestHeader.auth_revision: "3.1" -etcdserverpb.RequestHeader.username: "" -etcdserverpb.RequestOp: "3.0" -etcdserverpb.RequestOp.request_delete_range: "" -etcdserverpb.RequestOp.request_put: "" -etcdserverpb.RequestOp.request_range: "" -etcdserverpb.RequestOp.request_txn: "3.3" -etcdserverpb.ResponseHeader: "3.0" -etcdserverpb.ResponseHeader.cluster_id: "" -etcdserverpb.ResponseHeader.member_id: "" -etcdserverpb.ResponseHeader.raft_term: "" -etcdserverpb.ResponseHeader.revision: "" -etcdserverpb.ResponseOp: "3.0" -etcdserverpb.ResponseOp.response_delete_range: "" -etcdserverpb.ResponseOp.response_put: "" -etcdserverpb.ResponseOp.response_range: "" -etcdserverpb.ResponseOp.response_txn: "3.3" -etcdserverpb.SnapshotRequest: "3.3" -etcdserverpb.SnapshotResponse: "3.3" -etcdserverpb.SnapshotResponse.blob: "" -etcdserverpb.SnapshotResponse.header: "" 
-etcdserverpb.SnapshotResponse.remaining_bytes: "" -etcdserverpb.SnapshotResponse.version: "3.6" -etcdserverpb.StatusRequest: "3.0" -etcdserverpb.StatusResponse: "3.0" -etcdserverpb.StatusResponse.dbSize: "" -etcdserverpb.StatusResponse.dbSizeInUse: "3.4" -etcdserverpb.StatusResponse.errors: "3.4" -etcdserverpb.StatusResponse.header: "" -etcdserverpb.StatusResponse.isLearner: "3.4" -etcdserverpb.StatusResponse.leader: "" -etcdserverpb.StatusResponse.raftAppliedIndex: "3.4" -etcdserverpb.StatusResponse.raftIndex: "" -etcdserverpb.StatusResponse.raftTerm: "" -etcdserverpb.StatusResponse.storageVersion: "3.6" -etcdserverpb.StatusResponse.version: "" -etcdserverpb.TxnRequest: "3.0" -etcdserverpb.TxnRequest.compare: "" -etcdserverpb.TxnRequest.failure: "" -etcdserverpb.TxnRequest.success: "" -etcdserverpb.TxnResponse: "3.0" -etcdserverpb.TxnResponse.header: "" -etcdserverpb.TxnResponse.responses: "" -etcdserverpb.TxnResponse.succeeded: "" -etcdserverpb.WatchCancelRequest: "3.1" -etcdserverpb.WatchCancelRequest.watch_id: "3.1" -etcdserverpb.WatchCreateRequest: "3.0" -etcdserverpb.WatchCreateRequest.FilterType: "3.1" -etcdserverpb.WatchCreateRequest.NODELETE: "" -etcdserverpb.WatchCreateRequest.NOPUT: "" -etcdserverpb.WatchCreateRequest.filters: "3.1" -etcdserverpb.WatchCreateRequest.fragment: "3.4" -etcdserverpb.WatchCreateRequest.key: "" -etcdserverpb.WatchCreateRequest.prev_kv: "3.1" -etcdserverpb.WatchCreateRequest.progress_notify: "" -etcdserverpb.WatchCreateRequest.range_end: "" -etcdserverpb.WatchCreateRequest.start_revision: "" -etcdserverpb.WatchCreateRequest.watch_id: "3.4" -etcdserverpb.WatchProgressRequest: "3.4" -etcdserverpb.WatchRequest: "3.0" -etcdserverpb.WatchRequest.cancel_request: "" -etcdserverpb.WatchRequest.create_request: "" -etcdserverpb.WatchRequest.progress_request: "3.4" -etcdserverpb.WatchResponse: "3.0" -etcdserverpb.WatchResponse.cancel_reason: "3.4" -etcdserverpb.WatchResponse.canceled: "" -etcdserverpb.WatchResponse.compact_revision: "" 
-etcdserverpb.WatchResponse.created: "" -etcdserverpb.WatchResponse.events: "" -etcdserverpb.WatchResponse.fragment: "3.4" -etcdserverpb.WatchResponse.header: "" -etcdserverpb.WatchResponse.watch_id: "" -membershippb.Attributes: "3.5" -membershippb.Attributes.client_urls: "" -membershippb.Attributes.name: "" -membershippb.ClusterMemberAttrSetRequest: "3.5" -membershippb.ClusterMemberAttrSetRequest.member_ID: "" -membershippb.ClusterMemberAttrSetRequest.member_attributes: "" -membershippb.ClusterVersionSetRequest: "3.5" -membershippb.ClusterVersionSetRequest.ver: "" -membershippb.DowngradeInfoSetRequest: "3.5" -membershippb.DowngradeInfoSetRequest.enabled: "" -membershippb.DowngradeInfoSetRequest.ver: "" -membershippb.Member: "3.5" -membershippb.Member.ID: "" -membershippb.Member.member_attributes: "" -membershippb.Member.raft_attributes: "" -membershippb.RaftAttributes: "3.5" -membershippb.RaftAttributes.is_learner: "" -membershippb.RaftAttributes.peer_urls: "" -mvccpb.Event: "" -mvccpb.Event.DELETE: "" -mvccpb.Event.EventType: "" -mvccpb.Event.PUT: "" -mvccpb.Event.kv: "" -mvccpb.Event.prev_kv: "" -mvccpb.Event.type: "" -mvccpb.KeyValue: "" -mvccpb.KeyValue.create_revision: "" -mvccpb.KeyValue.key: "" -mvccpb.KeyValue.lease: "" -mvccpb.KeyValue.mod_revision: "" -mvccpb.KeyValue.value: "" -mvccpb.KeyValue.version: "" -walpb.Record: "" -walpb.Record.crc: "" -walpb.Record.data: "" -walpb.Record.type: "" -walpb.Snapshot: "" -walpb.Snapshot.conf_state: "" -walpb.Snapshot.index: "" -walpb.Snapshot.term: "" diff --git a/scripts/fix.sh b/scripts/fix.sh deleted file mode 100755 index d0fc89262fa..00000000000 --- a/scripts/fix.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -set -e - -# Top level problems with modules can lead to test_lib being not functional -go mod tidy - -source ./scripts/test_lib.sh -source ./scripts/updatebom.sh - -ROOTDIR=$(pwd) - -# To fix according to newer version of go: -# go get golang.org/dl/gotip -# gotip download -# GO_CMD="gotip" 
-GO_CMD="go" - -function mod_tidy_fix { - run rm ./go.sum - run ${GO_CMD} mod tidy || return 2 -} - -function bash_ws_fix { - TAB=$'\t' - - log_callout "Fixing whitespaces in the bash scripts" - # Makes sure all bash scripts do use ' ' (double space) for indention. - log_cmd "find ./ -name '*.sh' -print0 | xargs -0 sed -i.bak 's|${TAB}| |g'" - find ./ -name '*.sh' -print0 | xargs -0 sed -i.bak "s|${TAB}| |g" - find ./ -name '*.sh.bak' -print0 | xargs -0 rm -} - -function go_imports_fix { - GOFILES=$(run ${GO_CMD} list --f "{{with \$d:=.}}{{range .GoFiles}}{{\$d.Dir}}/{{.}}{{\"\n\"}}{{end}}{{end}}" ./...) - TESTGOFILES=$(run ${GO_CMD} list --f "{{with \$d:=.}}{{range .TestGoFiles}}{{\$d.Dir}}/{{.}}{{\"\n\"}}{{end}}{{end}}" ./...) - cd "${ROOTDIR}/tools/mod" - echo "${GOFILES}" "${TESTGOFILES}" | grep -v '.gw.go' | grep -v '.pb.go' | xargs -n 100 go run golang.org/x/tools/cmd/goimports -w -local go.etcd.io -} - -log_callout -e "\\nFixing etcd code for you...\n" - -run_for_modules mod_tidy_fix || exit 2 -run_for_modules run ${GO_CMD} fmt || exit 2 -run_for_module tests bom_fix || exit 2 -log_callout "Fixing goimports..." 
-run_for_modules go_imports_fix || exit 2 -bash_ws_fix || exit 2 - - -log_success -e "\\nSUCCESS: etcd code is fixed :)" diff --git a/scripts/fuzzing.sh b/scripts/fuzzing.sh deleted file mode 100755 index 5e2a9b42f19..00000000000 --- a/scripts/fuzzing.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -set -e -source ./scripts/test_lib.sh - -GO_CMD="go" -fuzz_time=${FUZZ_TIME:-"300s"} -target_path=${TARGET_PATH:-"./server/etcdserver/api/v3rpc"} -TARGETS="FuzzTxnRangeRequest FuzzTxnPutRequest FuzzTxnDeleteRangeRequest" - - -for target in ${TARGETS}; do - log_callout -e "\\nExecuting fuzzing with target ${target} in $target_path with a timeout of $fuzz_time\\n" - run pushd "${target_path}" - $GO_CMD test -fuzz "${target}" -fuzztime "${fuzz_time}" - run popd - log_success -e "\\COMPLETED: fuzzing with target $target in $target_path \\n" -done - diff --git a/scripts/genproto.sh b/scripts/genproto.sh index 834e1313f41..77ba4a68d0e 100755 --- a/scripts/genproto.sh +++ b/scripts/genproto.sh @@ -4,7 +4,6 @@ # Run from repository root directory named etcd. # set -e -shopt -s globstar if ! [[ "$0" =~ scripts/genproto.sh ]]; then echo "must be run from repository root" @@ -23,7 +22,6 @@ GRPC_GATEWAY_BIN=$(tool_get_bin github.com/grpc-ecosystem/grpc-gateway/protoc-ge SWAGGER_BIN=$(tool_get_bin github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger) GOGOPROTO_ROOT="$(tool_pkg_dir github.com/gogo/protobuf/proto)/.." GRPC_GATEWAY_ROOT="$(tool_pkg_dir github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway)/.." -RAFT_ROOT="$(tool_pkg_dir go.etcd.io/raft/v3/raftpb)/.." 
echo echo "Resolved binary and packages versions:" @@ -32,26 +30,24 @@ echo " - protoc-gen-grpc-gateway: ${GRPC_GATEWAY_BIN}" echo " - swagger: ${SWAGGER_BIN}" echo " - gogoproto-root: ${GOGOPROTO_ROOT}" echo " - grpc-gateway-root: ${GRPC_GATEWAY_ROOT}" -echo " - raft-root: ${RAFT_ROOT}" GOGOPROTO_PATH="${GOGOPROTO_ROOT}:${GOGOPROTO_ROOT}/protobuf" # directories containing protos to be built -DIRS="./server/storage/wal/walpb ./api/etcdserverpb ./server/etcdserver/api/snap/snappb ./api/mvccpb ./server/lease/leasepb ./api/authpb ./server/etcdserver/api/v3lock/v3lockpb ./server/etcdserver/api/v3election/v3electionpb ./api/membershippb ./tests/functional ./api/versionpb" +DIRS="./server/wal/walpb ./api/etcdserverpb ./server/etcdserver/api/snap/snappb ./raft/raftpb ./api/mvccpb ./server/lease/leasepb ./api/authpb ./server/etcdserver/api/v3lock/v3lockpb ./server/etcdserver/api/v3election/v3electionpb ./api/membershippb" log_callout -e "\\nRunning gofast (gogo) proto generation..." for dir in ${DIRS}; do run pushd "${dir}" - run protoc --gofast_out=plugins=grpc:. -I=".:${GOGOPROTO_PATH}:${ETCD_ROOT_DIR}/..:${RAFT_ROOT}:${ETCD_ROOT_DIR}:${GRPC_GATEWAY_ROOT}/third_party/googleapis" \ - --plugin="${GOFAST_BIN}" ./**/*.proto + run protoc --gofast_out=plugins=grpc:. 
-I=".:${GOGOPROTO_PATH}:${ETCD_ROOT_DIR}/..:${ETCD_ROOT_DIR}:${GRPC_GATEWAY_ROOT}/third_party/googleapis" \ + --plugin="${GOFAST_BIN}" ./*.proto - run sed -i.bak -E 's|"etcd/api/|"go.etcd.io/etcd/api/v3/|g' ./**/*.pb.go - run sed -i.bak -E 's|"raftpb"|"go.etcd.io/raft/v3/raftpb"|g' ./**/*.pb.go - run sed -i.bak -E 's|"google/protobuf"|"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"|g' ./**/*.pb.go + run sed -i.bak -E 's|"etcd/api/|"github.com/ls-2018/etcd_cn/offical/api/v3/|g' ./*.pb.go + run sed -i.bak -E 's|"raft/raftpb"|"github.com/ls-2018/etcd_cn/raft/raftpb"|g' ./*.pb.go - rm -f ./**/*.bak - run gofmt -s -w ./**/*.pb.go - run_go_tool "golang.org/x/tools/cmd/goimports" -w ./**/*.pb.go + rm -f ./*.bak + run gofmt -s -w ./*.pb.go + run goimports -w ./*.pb.go run popd done @@ -59,13 +55,12 @@ log_callout -e "\\nRunning swagger & grpc_gateway proto generation..." # remove old swagger files so it's obvious whether the files fail to generate rm -rf Documentation/dev-guide/apispec/swagger/*json -for pb in api/etcdserverpb/rpc server/etcdserver/api/v3lock/v3lockpb/v3lock server/etcdserver/api/v3election/v3electionpb/v3election; do +for pb in api/etcdserverpb/rpc etcd/etcdserver/api/v3lock/v3lockpb/v3lock etcd/etcdserver/api/v3election/v3electionpb/v3election; do log_callout "grpc & swagger for: ${pb}.proto" run protoc -I. \ -I"${GRPC_GATEWAY_ROOT}"/third_party/googleapis \ -I"${GOGOPROTO_PATH}" \ -I"${ETCD_ROOT_DIR}/.." \ - -I"${RAFT_ROOT}" \ --grpc-gateway_out=logtostderr=true,paths=source_relative:. \ --swagger_out=logtostderr=true:./Documentation/dev-guide/apispec/swagger/. 
\ --plugin="${SWAGGER_BIN}" --plugin="${GRPC_GATEWAY_BIN}" \ @@ -76,13 +71,13 @@ for pb in api/etcdserverpb/rpc server/etcdserver/api/v3lock/v3lockpb/v3lock serv gwfile="${pb}.pb.gw.go" run sed -i -E "s#package $pkg#package gw#g" "${gwfile}" - run sed -i -E "s#import \\(#import \\(\"go.etcd.io/etcd/${pkgpath}\"#g" "${gwfile}" + run sed -i -E "s#import \\(#import \\(\"github.com/ls-2018/etcd_cn/${pkgpath}\"#g" "${gwfile}" run sed -i -E "s#([ (])([a-zA-Z0-9_]*(Client|Server|Request)([^(]|$))#\\1${pkg}.\\2#g" "${gwfile}" run sed -i -E "s# (New[a-zA-Z0-9_]*Client\\()# ${pkg}.\\1#g" "${gwfile}" - run sed -i -E "s|go.etcd.io/etcd|go.etcd.io/etcd/v3|g" "${gwfile}" - run sed -i -E "s|go.etcd.io/etcd/v3/api|go.etcd.io/etcd/api/v3|g" "${gwfile}" - run sed -i -E "s|go.etcd.io/etcd/v3/server|go.etcd.io/etcd/server/v3|g" "${gwfile}" - + run sed -i -E "s|go.etcd.io/etcd|github.com/ls-2018|g" "${gwfile}" + run sed -i -E "s|github.com/ls-2018/etcd_cn/api|github.com/ls-2018/etcd_cn/offical/api/v3|g" "${gwfile}" + run sed -i -E "s|github.com/ls-2018/etcd_cn/server|github.com/ls-2018/etcd_cn/server/v3|g" "${gwfile}" + run go fmt "${gwfile}" gwdir="${pkgpath}/gw/" @@ -97,46 +92,23 @@ done log_callout -e "\\nRunning swagger ..." run_go_tool github.com/hexfusion/schwag -input=Documentation/dev-guide/apispec/swagger/rpc.swagger.json - if [ "$1" != "--skip-protodoc" ]; then log_callout "protodoc is auto-generating grpc API reference documentation..." 
- # API reference - API_REFERENCE_FILE="Documentation/dev-guide/api_reference_v3.md" - run rm -rf ${API_REFERENCE_FILE} + run rm -rf Documentation/dev-guide/api_reference_v3.md run_go_tool go.etcd.io/protodoc --directories="api/etcdserverpb=service_message,api/mvccpb=service_message,server/lease/leasepb=service_message,api/authpb=service_message" \ - --output="${API_REFERENCE_FILE}" \ + --title="etcd API Reference" \ + --output="Documentation/dev-guide/api_reference_v3.md" \ --message-only-from-this-file="api/etcdserverpb/rpc.proto" \ - --disclaimer="--- -title: API reference ---- - -This API reference is autogenerated from the named \`.proto\` files." || exit 2 + --disclaimer="This is a generated documentation. Please read the proto files for more." || exit 2 - # remove the first 3 lines of the doc as an empty --title adds '### ' to the top of the file. - run sed -i -e 1,3d ${API_REFERENCE_FILE} - - # API reference: concurrency - API_REFERENCE_CONCURRENCY_FILE="Documentation/dev-guide/api_concurrency_reference_v3.md" - run rm -rf ${API_REFERENCE_CONCURRENCY_FILE} + run rm -rf Documentation/dev-guide/api_concurrency_reference_v3.md run_go_tool go.etcd.io/protodoc --directories="server/etcdserver/api/v3lock/v3lockpb=service_message,server/etcdserver/api/v3election/v3electionpb=service_message,api/mvccpb=service_message" \ - --output="${API_REFERENCE_CONCURRENCY_FILE}" \ - --disclaimer="--- -title: \"API reference: concurrency\" ---- - -This API reference is autogenerated from the named \`.proto\` files." || exit 2 - - # remove the first 3 lines of the doc as an empty --title adds '### ' to the top of the file. - run sed -i -e 1,3d ${API_REFERENCE_CONCURRENCY_FILE} + --title="etcd concurrency API Reference" \ + --output="Documentation/dev-guide/api_concurrency_reference_v3.md" \ + --disclaimer="This is a generated documentation. Please read the proto files for more." || exit 2 log_success "protodoc is finished." 
- log_warning -e "\\nThe API references have NOT been automatically published on the website." - log_success -e "\\nTo publish the API references, copy the following files" - log_success " - ${API_REFERENCE_FILE}" - log_success " - ${API_REFERENCE_CONCURRENCY_FILE}" - log_success "to the etcd-io/website repo under the /content/en/docs/next/dev-guide/ folder." - log_success "(https://github.com/etcd-io/website/tree/main/content/en/docs/next/dev-guide)" else log_warning "skipping grpc API reference document auto-generation..." fi diff --git a/scripts/install-marker.sh b/scripts/install-marker.sh deleted file mode 100755 index 3da67bb10e7..00000000000 --- a/scripts/install-marker.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -set -e - -ARCH=$1 - -if [ -z "$1" ]; then - echo "Usage: ${0} [amd64 or darwin], defaulting to 'amd64'" >> /dev/stderr - ARCH=amd64 -fi - -MARKER_URL="https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-unknown-linux-gnu" -if [ "${ARCH}" == "darwin" ]; then - MARKER_URL="https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-apple-darwin" -fi - -echo "Installing marker" -curl -L "${MARKER_URL}" -o "${GOPATH}"/bin/marker -chmod 755 "${GOPATH}"/bin/marker - -"${GOPATH}"/bin/marker --version diff --git a/scripts/measure-test-flakiness.sh b/scripts/measure-test-flakiness.sh deleted file mode 100755 index 84f9782ddad..00000000000 --- a/scripts/measure-test-flakiness.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -set -o pipefail - -if [[ -z ${GITHUB_TOKEN} ]] -then - echo "Please set the \$GITHUB_TOKEN environment variable for the script to work" - exit 1 -fi - -temp_dir=$(mktemp -d) - -trap '{ rm -rf -- "${temp_dir}"; }' EXIT - -json_file="${temp_dir}/commit-and-check-data.json" - -curl --fail --show-error --silent -H "Authorization: token ${GITHUB_TOKEN}" \ - -X POST \ - -d '{ - "query": "query { repository(owner: \"etcd-io\", name: \"etcd\") { defaultBranchRef { target { ... 
on Commit { history(first: 100) { edges { node { ... on Commit { commitUrl statusCheckRollup { state } } } } } } } } } }" - }' \ - https://api.github.com/graphql | jq . > "${json_file}" - -failure_percentage=$(jq '.data.repository.defaultBranchRef.target.history.edges | reduce .[] as $item (0; if $item.node.statusCheckRollup.state == "FAILURE" then (. + 1) else . end)' "${json_file}") - -echo "Commit status failure percentage is - ${failure_percentage} %" diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100755 index 7eddc29ab92..00000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -source ./scripts/test_lib.sh -source ./scripts/release_mod.sh - -DRY_RUN=${DRY_RUN:-true} - -# Following preparation steps help with the release process: - -# If you use password-protected gpg key, make sure the password is managed -# by agent: -# -# % gpg-connect-agent reloadagent /bye -# % gpg -s --default-key [git-email]@google.com -o /dev/null -s /dev/null -# -# Refresh your google credentials: -# % gcloud auth login -# or -# % gcloud auth activate-service-account --key-file=gcp-key-etcd-development.json -# -# Make sure gcloud-docker plugin is configured: -# % gcloud auth configure-docker - - -help() { - echo "$(basename "$0") [version]" - echo "Release etcd using the same approach as the etcd-release-runbook (https://goo.gl/Gxwysq)" - echo "" - echo "WARNING: This does not perform the 'Add API capabilities', 'Performance testing' " - echo " or 'Documentation' steps. These steps must be performed manually BEFORE running this tool." - echo "" - echo "WARNING: This script does not sign releases, publish releases to github or sent announcement" - echo " emails. These steps must be performed manually AFTER running this tool." - echo "" - echo " args:" - echo " version: version of etcd to release, e.g. 
'v3.2.18'" - echo " flags:" - echo " --no-upload: skip gs://etcd binary artifact uploads." - echo " --no-docker-push: skip docker image pushes." - echo " --in-place: build binaries using current branch." - echo "" - echo "One can perform a (dry-run) test release from any (uncommitted) branch using:" - echo " DRY_RUN=true REPOSITORY=\`pwd\` BRANCH='local-branch-name' ./scripts/release 3.5.0-foobar.2" -} - -main() { - VERSION=$1 - if [[ ! "${VERSION}" =~ [0-9]+.[0-9]+.[0-9]+ ]]; then - log_error "Expected 'version' param of the form '..' but got '${VERSION}'" - exit 1 - fi - RELEASE_VERSION="v${VERSION}" - MINOR_VERSION=$(echo "${VERSION}" | cut -d. -f 1-2) - - if [ "${IN_PLACE}" == 1 ]; then - # Trigger release in current branch - REPOSITORY=$(pwd) - BRANCH=$(git rev-parse --abbrev-ref HEAD) - else - REPOSITORY=${REPOSITORY:-"https://github.com/etcd-io/etcd.git"} - BRANCH=${BRANCH:-"release-${MINOR_VERSION}"} - fi - - log_warning "DRY_RUN=${DRY_RUN}" - log_callout "RELEASE_VERSION=${RELEASE_VERSION}" - log_callout "MINOR_VERSION=${MINOR_VERSION}" - log_callout "BRANCH=${BRANCH}" - log_callout "REPOSITORY=${REPOSITORY}" - log_callout "" - - # Required to enable 'docker manifest ...' - export DOCKER_CLI_EXPERIMENTAL=enabled - - if ! command -v docker >/dev/null; then - log_error "cannot find docker" - exit 1 - fi - - # Expected umask for etcd release artifacts - umask 022 - - # Set up release directory. - local reldir="/tmp/etcd-release-${VERSION}" - log_callout "Preparing temporary directory: ${reldir}" - if [ ! -d "${reldir}/etcd" ] && [ "${IN_PLACE}" == 0 ]; then - mkdir -p "${reldir}" - cd "${reldir}" - run git clone "${REPOSITORY}" --branch "${BRANCH}" - run cd "${reldir}/etcd" || exit 2 - run git checkout "${BRANCH}" || exit 2 - run git pull origin - - git_assert_branch_in_sync || exit 2 - fi - - # mark local directory as root for test_lib scripts executions - set_root_dir - - # If a release version tag already exists, use it. 
- local remote_tag_exists - remote_tag_exists=$(run git ls-remote origin "refs/tags/${RELEASE_VERSION}" | grep -c "${RELEASE_VERSION}" || true) - - if [ "${remote_tag_exists}" -gt 0 ]; then - log_callout "Release version tag exists on remote. Checking out refs/tags/${RELEASE_VERSION}" - git checkout -q "tags/${RELEASE_VERSION}" - fi - - # Check go version. - log_callout "Check go version" - local go_version current_go_version - go_version="go$(grep go-version .github/workflows/build.yaml | awk '{print $2}' | tr -d '"')" - current_go_version=$(go version | awk '{ print $3 }') - if [[ "${current_go_version}" != "${go_version}" ]]; then - log_error "Current go version is ${current_go_version}, but etcd ${RELEASE_VERSION} requires ${go_version} (see .github/workflows/build.yaml)." - exit 1 - fi - - # If the release tag does not already exist remotely, create it. - log_callout "Create tag if not present" - if [ "${remote_tag_exists}" -eq 0 ]; then - # Bump version/version.go to release version. - local source_version - source_version=$(grep -E "\s+Version\s*=" api/version/version.go | sed -e "s/.*\"\(.*\)\".*/\1/g") - if [[ "${source_version}" != "${VERSION}" ]]; then - source_minor_version=$(echo "${source_version}" | cut -d. -f 1-2) - if [[ "${source_minor_version}" != "${MINOR_VERSION}" ]]; then - log_error "Wrong etcd minor version in api/version/version.go. Expected ${MINOR_VERSION} but got ${source_minor_version}. Aborting." 
- exit 1 - fi - log_callout "Updating modules definitions" - TARGET_VERSION="v${VERSION}" update_versions_cmd - - log_callout "Updating version from ${source_version} to ${VERSION} in api/version/version.go" - sed -i "s/${source_version}/${VERSION}/g" api/version/version.go - fi - - - log_callout "Building etcd and checking --version output" - run ./scripts/build.sh - local etcd_version - etcd_version=$(bin/etcd --version | grep "etcd Version" | awk '{ print $3 }') - if [[ "${etcd_version}" != "${VERSION}" ]]; then - log_error "Wrong etcd version in version/version.go. Expected ${etcd_version} but got ${VERSION}. Aborting." - exit 1 - fi - - if [[ -n $(git status -s) ]]; then - log_callout "Committing mods & api/version/version.go update." - run git add api/version/version.go - # shellcheck disable=SC2038,SC2046 - run git add $(find . -name go.mod ! -path './release/*'| xargs) - run git diff --staged | cat - run git commit -m "version: bump up to ${VERSION}" - run git diff --staged | cat - fi - - # Push the version change if it's not already been pushed. - if [ "${DRY_RUN}" != "true" ] && [ "$(git rev-list --count "origin/${BRANCH}..${BRANCH}")" -gt 0 ]; then - read -p "Push version bump up to ${VERSION} to '$(git remote get-url origin)' [y/N]? " -r confirm - [[ "${confirm,,}" == "y" ]] || exit 1 - maybe_run git push - fi - - # Tag release. - if [ "$(git tag --list | grep -c "${RELEASE_VERSION}")" -gt 0 ]; then - log_callout "Skipping tag step. git tag ${RELEASE_VERSION} already exists." - else - log_callout "Tagging release..." - REMOTE_REPO="origin" push_mod_tags_cmd - fi - - if [ "${IN_PLACE}" == 0 ]; then - # Tried with `local branch=$(git branch -a --contains tags/"${RELEASE_VERSION}")` - # so as to work with both current branch and main/release-3.X. 
- # But got error below on current branch mode, - # Error: Git tag v3.6.99 should be on branch '* (HEAD detached at pull/14860/merge)' but is on '* (HEAD detached from pull/14860/merge)' - # - # Verify the version tag is on the right branch - # shellcheck disable=SC2155 - local branch=$(git for-each-ref --contains "${RELEASE_VERSION}" --format="%(refname)" 'refs/heads' | cut -d '/' -f 3) - if [ "${branch}" != "${BRANCH}" ]; then - log_error "Error: Git tag ${RELEASE_VERSION} should be on branch '${BRANCH}' but is on '${branch}'" - exit 1 - fi - fi - fi - - log_callout "Verify the latest commit has the version tag" - # Verify the latest commit has the version tag - # shellcheck disable=SC2155 - local tag="$(git describe --exact-match HEAD)" - if [ "${tag}" != "${RELEASE_VERSION}" ]; then - log_error "Error: Expected HEAD to be tagged with ${RELEASE_VERSION}, but 'git describe --exact-match HEAD' reported: ${tag}" - exit 1 - fi - - log_callout "Verify the work space is clean" - # Verify the clean working tree - # shellcheck disable=SC2155 - local diff="$(git diff HEAD --stat)" - if [[ "${diff}" != '' ]]; then - log_error "Error: Expected clean working tree, but 'git diff --stat' reported: ${diff}" - exit 1 - fi - - # Build release. - # TODO: check the release directory for all required build artifacts. - if [ -d release ]; then - log_warning "Skipping release build step. /release directory already exists." - else - log_callout "Building release..." - REPOSITORY=$(pwd) ./scripts/build-release.sh "${RELEASE_VERSION}" - fi - - # Sanity checks. 
- "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcd" --version | grep -q "etcd Version: ${VERSION}" || true - "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcdctl" version | grep -q "etcdctl version: ${VERSION}" || true - "./release/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcdutl" version | grep -q "etcdutl version: ${VERSION}" || true - - # Generate SHA256SUMS - log_callout "Generating sha256sums of release artifacts." - pushd ./release - # shellcheck disable=SC2010 - ls . | grep -E '\.tar.gz$|\.zip$' | xargs shasum -a 256 > ./SHA256SUMS - popd - if [ -s ./release/SHA256SUMS ]; then - cat ./release/SHA256SUMS - else - log_error "sha256sums is not valid. Aborting." - exit 1 - fi - - # Upload artifacts. - if [ "${DRY_RUN}" == "true" ] || [ "${NO_UPLOAD}" == 1 ]; then - log_callout "Skipping artifact upload to gs://etcd. --no-upload flat is set." - else - read -p "Upload etcd ${RELEASE_VERSION} release artifacts to gs://etcd [y/N]? " -r confirm - [[ "${confirm,,}" == "y" ]] || exit 1 - maybe_run gsutil -m cp ./release/SHA256SUMS "gs://etcd/${RELEASE_VERSION}/" - maybe_run gsutil -m cp ./release/*.zip "gs://etcd/${RELEASE_VERSION}/" - maybe_run gsutil -m cp ./release/*.tar.gz "gs://etcd/${RELEASE_VERSION}/" - maybe_run gsutil -m acl ch -u allUsers:R -r "gs://etcd/${RELEASE_VERSION}/" - fi - - # Push images. - if [ "${DRY_RUN}" == "true" ] || [ "${NO_DOCKER_PUSH}" == 1 ]; then - log_callout "Skipping docker push. --no-docker-push flat is set." - else - read -p "Publish etcd ${RELEASE_VERSION} docker images to quay.io [y/N]? 
" -r confirm - [[ "${confirm,,}" == "y" ]] || exit 1 - # shellcheck disable=SC2034 - for i in {1..5}; do - docker login quay.io && break - log_warning "login failed, retrying" - done - - for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do - log_callout "Pushing container images to quay.io ${RELEASE_VERSION}-${TARGET_ARCH}" - maybe_run docker push "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" - log_callout "Pushing container images to gcr.io ${RELEASE_VERSION}-${TARGET_ARCH}" - maybe_run docker push "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" - done - - log_callout "Creating manifest-list (multi-image)..." - - for TARGET_ARCH in "amd64" "arm64" "ppc64le" "s390x"; do - maybe_run docker manifest create --amend "quay.io/coreos/etcd:${RELEASE_VERSION}" "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" - maybe_run docker manifest annotate "quay.io/coreos/etcd:${RELEASE_VERSION}" "quay.io/coreos/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" --arch "${TARGET_ARCH}" - - maybe_run docker manifest create --amend "gcr.io/etcd-development/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" - maybe_run docker manifest annotate "gcr.io/etcd-development/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}-${TARGET_ARCH}" --arch "${TARGET_ARCH}" - done - - log_callout "Pushing container manifest list to quay.io ${RELEASE_VERSION}" - maybe_run docker manifest push "quay.io/coreos/etcd:${RELEASE_VERSION}" - - log_callout "Pushing container manifest list to gcr.io ${RELEASE_VERSION}" - maybe_run docker manifest push "gcr.io/etcd-development/etcd:${RELEASE_VERSION}" - - log_callout "Setting permissions using gsutil..." 
- maybe_run gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com - fi - - ### Release validation - mkdir -p downloads - - # Check image versions - for IMAGE in "quay.io/coreos/etcd:${RELEASE_VERSION}" "gcr.io/etcd-development/etcd:${RELEASE_VERSION}"; do - if [ "${DRY_RUN}" == "true" ] || [ "${NO_DOCKER_PUSH}" == 1 ]; then - IMAGE="${IMAGE}-amd64" - fi - # shellcheck disable=SC2155 - local image_version=$(docker run --rm "${IMAGE}" etcd --version | grep "etcd Version" | awk -F: '{print $2}' | tr -d '[:space:]') - if [ "${image_version}" != "${VERSION}" ]; then - log_error "Check failed: etcd --version output for ${IMAGE} is incorrect: ${image_version}" - exit 1 - fi - done - - # Check gsutil binary versions - # shellcheck disable=SC2155 - local BINARY_TGZ="etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64.tar.gz" - if [ "${DRY_RUN}" == "true" ] || [ "${NO_UPLOAD}" == 1 ]; then - cp "./release/${BINARY_TGZ}" downloads - else - gsutil cp "gs://etcd/${RELEASE_VERSION}/${BINARY_TGZ}" downloads - fi - tar -zx -C downloads -f "downloads/${BINARY_TGZ}" - # shellcheck disable=SC2155 - local binary_version=$("./downloads/etcd-${RELEASE_VERSION}-$(go env GOOS)-amd64/etcd" --version | grep "etcd Version" | awk -F: '{print $2}' | tr -d '[:space:]') - if [ "${binary_version}" != "${VERSION}" ]; then - log_error "Check failed: etcd --version output for ${BINARY_TGZ} from gs://etcd/${RELEASE_VERSION} is incorrect: ${binary_version}" - exit 1 - fi - - # TODO: signing process - log_warning "" - log_warning "WARNING: The release has not been signed and published to github. This must be done manually." - log_warning "" - log_success "Success." 
- exit 0 -} - -POSITIONAL=() -NO_UPLOAD=0 -NO_DOCKER_PUSH=0 -IN_PLACE=0 - -while test $# -gt 0; do - case "$1" in - -h|--help) - shift - help - exit 0 - ;; - --in-place) - IN_PLACE=1 - shift - ;; - --no-upload) - NO_UPLOAD=1 - shift - ;; - --no-docker-push) - NO_DOCKER_PUSH=1 - shift - ;; - *) - POSITIONAL+=("$1") # save it in an array for later - shift # past argument - ;; - esac -done -set -- "${POSITIONAL[@]}" # restore positional parameters - -if [[ ! $# -eq 1 ]]; then - help - exit 1 -fi - -# Note that we shouldn't upload artifacts in --in-place mode, so it -# must be called with DRY_RUN=true -if [ "${DRY_RUN}" != "true" ] && [ "${IN_PLACE}" == 1 ]; then - log_error "--in-place should only be called with DRY_RUN=true" - exit 1 -fi - -main "$1" diff --git a/scripts/release_mod.sh b/scripts/release_mod.sh deleted file mode 100755 index b0e9d253efc..00000000000 --- a/scripts/release_mod.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env bash - -# Examples: - -# Edit go.mod files such that all etcd modules are pointing on given version: -# -# % DRY_RUN=false TARGET_VERSION="v3.5.13" ./scripts/release_mod.sh update_versions - -# Tag latest commit with current version number for all the modules and push upstream: -# -# % DRY_RUN=false REMOTE_REPO="origin" ./scripts/release_mod.sh push_mod_tags - -set -e - -source ./scripts/test_lib.sh - -DRY_RUN=${DRY_RUN:-true} - -# _cmd prints help message -function _cmd() { - log_error "Command required: ${0} [cmd]" - log_info "Available commands:" - log_info " - update_versions - Updates all cross-module versions to \${TARGET_VERSION} in the local client." - log_info " - push_mod_tags - Tags HEAD with all modules versions tags and pushes it to \${REMOTE_REPO}." -} - -# update_module_version [v2version] [v3version] -# Updates versions of cross-references in all internal references in current module. 
-function update_module_version() { - local v3version="${1}" - local v2version="${2}" - local modules - run go mod tidy - modules=$(run go list -f '{{if not .Main}}{{if not .Indirect}}{{.Path}}{{end}}{{end}}' -m all) - - v3deps=$(echo "${modules}" | grep -E "${ROOT_MODULE}/.*/v3") - for dep in ${v3deps}; do - run go mod edit -require "${dep}@${v3version}" - done - - v2deps=$(echo "${modules}" | grep -E "${ROOT_MODULE}/.*/v2") - for dep in ${v2deps}; do - run go mod edit -require "${dep}@${v2version}" - done - - run go mod tidy -} - -function mod_tidy_fix { - run rm ./go.sum - run go mod tidy || return 2 -} - -# Updates all cross-module versions to ${TARGET_VERSION} in local client. -function update_versions_cmd() { - assert_no_git_modifications || return 2 - - if [ -z "${TARGET_VERSION}" ]; then - log_error "TARGET_VERSION environment variable not set. Set it to e.g. v3.5.10-alpha.0" - return 2 - fi - - local v3version="${TARGET_VERSION}" - local v2version - # converts e.g. v3.5.0-alpha.0 --> v2.305.0-alpha.0 - # shellcheck disable=SC2001 - v2version="$(echo "${TARGET_VERSION}" | sed 's|^v3.\([0-9]*\).|v2.30\1.|g')" - - log_info "DRY_RUN : ${DRY_RUN}" - log_info "TARGET_VERSION: ${TARGET_VERSION}" - log_info "" - log_info "v3version: ${v3version}" - log_info "v2version: ${v2version}" - - run_for_modules update_module_version "${v3version}" "${v2version}" - run_for_modules mod_tidy_fix || exit 2 -} - -function get_gpg_key { - gitemail=$(git config --get user.email) - keyid=$(run gpg --list-keys --with-colons "${gitemail}" | awk -F: '/^pub:/ { print $5 }') - if [[ -z "${keyid}" ]]; then - log_error "Failed to load gpg key. Is gpg set up correctly for etcd releases?" 
- return 2 - fi - echo "$keyid" -} - -function push_mod_tags_cmd { - assert_no_git_modifications || return 2 - - if [ -z "${REMOTE_REPO}" ]; then - log_error "REMOTE_REPO environment variable not set" - return 2 - fi - log_info "REMOTE_REPO: ${REMOTE_REPO}" - - # Any module ccan be used for this - local main_version - main_version=$(go list -f '{{.Version}}' -m "${ROOT_MODULE}/api/v3") - local tags=() - - keyid=$(get_gpg_key) || return 2 - - for module in $(modules); do - local version - version=$(go list -f '{{.Version}}' -m "${module}") - local path - path=$(go list -f '{{.Path}}' -m "${module}") - local subdir="${path//${ROOT_MODULE}\//}" - local tag - if [ -z "${version}" ]; then - tag="${main_version}" - version="${main_version}" - else - tag="${subdir///v[23]/}/${version}" - fi - - log_info "Tags for: ${module} version:${version} tag:${tag}" - # The sleep is ugly hack that guarantees that 'git describe' will - # consider main-module's tag as the latest. - run sleep 2 - run git tag --local-user "${keyid}" --sign "${tag}" --message "${version}" - tags=("${tags[@]}" "${tag}") - done - maybe_run git push -f "${REMOTE_REPO}" "${tags[@]}" -} - -# only release_mod when called directly, not sourced -if echo "$0" | grep -E "release_mod.sh$" >/dev/null; then - "${1}_cmd" - - if "${DRY_RUN}"; then - log_info - log_warning "WARNING: It was a DRY_RUN. No files were modified." 
- fi -fi diff --git a/scripts/test.sh b/scripts/test.sh deleted file mode 100755 index 9042cce2f54..00000000000 --- a/scripts/test.sh +++ /dev/null @@ -1,705 +0,0 @@ -#!/usr/bin/env bash -# -# Run all etcd tests -# ./scripts/test.sh -# ./scripts/test.sh -v -# -# -# Run specified test pass -# -# $ PASSES=unit ./scripts/test.sh -# $ PASSES=integration ./scripts/test.sh -# -# -# Run tests for one package -# Each pass has different default timeout, if you just run tests in one package or 1 test case then you can set TIMEOUT -# flag for different expectation -# -# $ PASSES=unit PKG=./wal TIMEOUT=1m ./scripts/test.sh -# $ PASSES=integration PKG=./clientv3 TIMEOUT=1m ./scripts/test.sh -# -# Run specified unit tests in one package -# To run all the tests with prefix of "TestNew", set "TESTCASE=TestNew "; -# to run only "TestNew", set "TESTCASE="\bTestNew\b"" -# -# $ PASSES=unit PKG=./wal TESTCASE=TestNew TIMEOUT=1m ./scripts/test.sh -# $ PASSES=unit PKG=./wal TESTCASE="\bTestNew\b" TIMEOUT=1m ./scripts/test.sh -# $ PASSES=integration PKG=./client/integration TESTCASE="\bTestV2NoRetryEOF\b" TIMEOUT=1m ./scripts/test.sh -# -# -# Run code coverage -# COVERDIR must either be a absolute path or a relative path to the etcd root -# $ COVERDIR=coverage PASSES="build build_cov cov" ./scripts/test.sh -# $ go tool cover -html ./coverage/cover.out -set -e - -# Consider command as failed when any component of the pipe fails: -# https://stackoverflow.com/questions/1221833/pipe-output-and-capture-exit-status-in-bash -set -o pipefail - -# The test script is not supposed to make any changes to the files -# e.g. add/update missing dependencies. Such divergences should be -# detected and trigger a failure that needs explicit developer's action. 
-export GOFLAGS=-mod=readonly -export ETCD_VERIFY=all - -source ./scripts/test_lib.sh -source ./scripts/build_lib.sh - -if [ -n "${OUTPUT_FILE}" ]; then - log_callout "Dumping output to: ${OUTPUT_FILE}" - exec > >(tee -a "${OUTPUT_FILE}") 2>&1 -fi - -PASSES=${PASSES:-"gofmt bom dep build unit"} -PKG=${PKG:-} -SHELLCHECK_VERSION=${SHELLCHECK_VERSION:-"v0.8.0"} - -if [ -z "$GOARCH" ]; then - GOARCH=$(go env GOARCH); -fi - -# determine whether target supports race detection -if [ -z "${RACE}" ] ; then - if [ "$GOARCH" == "amd64" ]; then - RACE="--race" - else - RACE="--race=false" - fi -else - RACE="--race=${RACE:-true}" -fi - -# This options make sense for cases where SUT (System Under Test) is compiled by test. -COMMON_TEST_FLAGS=("${RACE}") -if [[ -n "${CPU}" ]]; then - COMMON_TEST_FLAGS+=("--cpu=${CPU}") -fi - -log_callout "Running with ${COMMON_TEST_FLAGS[*]}" - -RUN_ARG=() -if [ -n "${TESTCASE}" ]; then - RUN_ARG=("-run=${TESTCASE}") -fi - -function build_pass { - log_callout "Building etcd" - run_for_modules run go build "${@}" || return 2 - GO_BUILD_FLAGS="-v" etcd_build "${@}" - GO_BUILD_FLAGS="-v" tools_build "${@}" -} - -################# REGULAR TESTS ################################################ - -# run_unit_tests [pkgs] runs unit tests for a current module and givesn set of [pkgs] -function run_unit_tests { - local pkgs="${1:-./...}" - shift 1 - # shellcheck disable=SC2086 - GOLANG_TEST_SHORT=true go_test "${pkgs}" "parallel" : -short -timeout="${TIMEOUT:-3m}" "${COMMON_TEST_FLAGS[@]}" "${RUN_ARG[@]}" "$@" -} - -function unit_pass { - run_for_modules run_unit_tests "$@" -} - -function integration_extra { - if [ -z "${PKG}" ] ; then - run_for_module "tests" go_test "./integration/v2store/..." "keep_going" : -timeout="${TIMEOUT:-5m}" "${RUN_ARG[@]}" "${COMMON_TEST_FLAGS[@]}" "$@" || return $? - else - log_warning "integration_extra ignored when PKG is specified" - fi -} - -function integration_pass { - run_for_module "tests" go_test "./integration/..." 
"parallel" : -timeout="${TIMEOUT:-15m}" "${COMMON_TEST_FLAGS[@]}" "${RUN_ARG[@]}" -p=2 "$@" || return $? - run_for_module "tests" go_test "./common/..." "parallel" : --tags=integration -timeout="${TIMEOUT:-15m}" "${COMMON_TEST_FLAGS[@]}" -p=2 "${RUN_ARG[@]}" "$@" || return $? - integration_extra "$@" -} - -function e2e_pass { - # e2e tests are running pre-build binary. Settings like --race,-cover,-cpu does not have any impact. - run_for_module "tests" go_test "./e2e/..." "keep_going" : -timeout="${TIMEOUT:-30m}" "${RUN_ARG[@]}" "$@" - run_for_module "tests" go_test "./common/..." "keep_going" : --tags=e2e -timeout="${TIMEOUT:-30m}" "${RUN_ARG[@]}" "$@" -} - -function linearizability_pass { - # e2e tests are running pre-build binary. Settings like --race,-cover,-cpu does not have any impact. - run_for_module "tests" go_test "./linearizability/..." "keep_going" : -timeout="${TIMEOUT:-30m}" "${RUN_ARG[@]}" "$@" -} - -function integration_e2e_pass { - run_pass "integration" "${@}" - run_pass "e2e" "${@}" -} - -# generic_checker [cmd...] -# executes given command in the current module, and clearly fails if it -# failed or returned output. -function generic_checker { - local cmd=("$@") - if ! output=$("${cmd[@]}"); then - echo "${output}" - log_error -e "FAIL: '${cmd[*]}' checking failed (!=0 return code)" - return 255 - fi - if [ -n "${output}" ]; then - echo "${output}" - log_error -e "FAIL: '${cmd[*]}' checking failed (printed output)" - return 255 - fi -} - -function killall_functional_test { - log_callout "Killing all etcd-agent and etcd processes..." - killall -9 etcd-agent - # When functional test is successful, the etcd processes have already been - # stopped by the agent, so we should ignore the error in this case. 
- killall -9 etcd || true -} - -function functional_pass { - run ./tests/functional/build.sh || exit 1 - - # Clean up any data and logs from previous runs - rm -rf /tmp/etcd-functional-* /tmp/etcd-functional-*.backup - - # TODO: These ports should be dynamically allocated instead of hard-coded. - for a in 1 2 3; do - ./bin/etcd-agent --network tcp --address 127.0.0.1:${a}9027 < /dev/null & - done - - for a in 1 2 3; do - log_callout "Waiting for 'etcd-agent' on ${a}9027..." - while ! nc -z localhost ${a}9027; do - sleep 1 - done - done - - trap killall_functional_test 0 - - log_callout "functional test START!" - run ./bin/etcd-tester --config ./tests/functional/functional.yaml -test.v && log_success "'etcd-tester' succeeded" - local etcd_tester_exit_code=$? - - if [[ "${etcd_tester_exit_code}" -ne "0" ]]; then - log_error "ETCD_TESTER_EXIT_CODE:" ${etcd_tester_exit_code} - - log_error -e "\\nFAILED! 'tail -100 /tmp/etcd-functional-1/etcd.log'" - tail -100 /tmp/etcd-functional-1/etcd.log - - log_error -e "\\nFAILED! 'tail -100 /tmp/etcd-functional-2/etcd.log'" - tail -100 /tmp/etcd-functional-2/etcd.log - - log_error -e "\\nFAILED! 'tail -100 /tmp/etcd-functional-3/etcd.log'" - tail -100 /tmp/etcd-functional-3/etcd.log - - log_error "--- FAIL: exit code" ${etcd_tester_exit_code} - exit ${etcd_tester_exit_code} - fi - - log_success "functional test PASS!" -} - -function grpcproxy_pass { - run_pass "grpcproxy_integration" "${@}" - run_pass "grpcproxy_e2e" "${@}" -} - -function grpcproxy_integration_pass { - run_for_module "tests" go_test "./integration/..." "fail_fast" : \ - -timeout=30m -tags cluster_proxy "${COMMON_TEST_FLAGS[@]}" "$@" -} - -function grpcproxy_e2e_pass { - run_for_module "tests" go_test "./e2e" "fail_fast" : \ - -timeout=30m -tags cluster_proxy "${COMMON_TEST_FLAGS[@]}" "$@" -} - -################# COVERAGE ##################################################### - -# Builds artifacts used by tests/e2e in coverage mode. 
-function build_cov_pass { - run_for_module "server" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcd_test" - run_for_module "etcdctl" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcdctl_test" - run_for_module "etcdutl" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcdutl_test" -} - -# pkg_to_coverflag [prefix] [pkgs] -# produces name of .coverprofile file to be used for tests of this package -function pkg_to_coverprofileflag { - local prefix="${1}" - local pkgs="${2}" - local pkgs_normalized - prefix_normalized=$(echo "${prefix}" | tr "./ " "__+") - if [ "${pkgs}" == "./..." ]; then - pkgs_normalized="all" - else - pkgs_normalized=$(echo "${pkgs}" | tr "./ " "__+") - fi - mkdir -p "${coverdir}/${prefix_normalized}" - echo -n "-coverprofile=${coverdir}/${prefix_normalized}/${pkgs_normalized}.coverprofile" -} - -function not_test_packages { - for m in $(modules); do - if [[ $m =~ .*/etcd/tests/v3 ]]; then continue; fi - if [[ $m =~ .*/etcd/v3 ]]; then continue; fi - echo "${m}/..." - done -} - -# split_dir [dir] [num] -function split_dir { - local d="${1}" - local num="${2}" - local i=0 - for f in "${d}/"*; do - local g=$(( i % num )) - mkdir -p "${d}_${g}" - mv "${f}" "${d}_${g}/" - (( i++ )) - done -} - -function split_dir_pass { - split_dir ./covdir/integration 4 -} - - -# merge_cov_files [coverdir] [outfile] -# merges all coverprofile files into a single file in the given directory. -function merge_cov_files { - local coverdir="${1}" - local cover_out_file="${2}" - log_callout "Merging coverage results in: ${coverdir}" - # gocovmerge requires not-empty test to start with: - echo "mode: set" > "${cover_out_file}" - - local i=0 - local count - count=$(find "${coverdir}"/*.coverprofile | wc -l) - for f in "${coverdir}"/*.coverprofile; do - # print once per 20 files - if ! 
(( "${i}" % 20 )); then - log_callout "${i} of ${count}: Merging file: ${f}" - fi - run_go_tool "github.com/gyuho/gocovmerge" "${f}" "${cover_out_file}" > "${coverdir}/cover.tmp" 2>/dev/null - if [ -s "${coverdir}"/cover.tmp ]; then - mv "${coverdir}/cover.tmp" "${cover_out_file}" - fi - (( i++ )) - done -} - -# merge_cov [coverdir] -function merge_cov { - log_callout "[$(date)] Merging coverage files ..." - coverdir="${1}" - for d in "${coverdir}"/*/; do - d=${d%*/} # remove the trailing "/" - merge_cov_files "${d}" "${d}.coverprofile" & - done - wait - merge_cov_files "${coverdir}" "${coverdir}/all.coverprofile" -} - -function cov_pass { - # shellcheck disable=SC2153 - if [ -z "$COVERDIR" ]; then - log_error "COVERDIR undeclared" - return 255 - fi - - if [ ! -f "bin/etcd_test" ]; then - log_error "etcd_test binary not found. Call: PASSES='build_cov' ./scripts/test.sh" - return 255 - fi - - local coverdir - coverdir=$(readlink -f "${COVERDIR}") - mkdir -p "${coverdir}" - find "${coverdir}" -print0 -name '*.coverprofile' | xargs -0 rm - - local covpkgs - covpkgs=$(not_test_packages) - local coverpkg_comma - coverpkg_comma=$(echo "${covpkgs[@]}" | xargs | tr ' ' ',') - local gocov_build_flags=("-covermode=set" "-coverpkg=$coverpkg_comma") - - local failed="" - - log_callout "[$(date)] Collecting coverage from unit tests ..." - for m in $(module_dirs); do - GOLANG_TEST_SHORT=true run_for_module "${m}" go_test "./..." "parallel" "pkg_to_coverprofileflag unit_${m}" -short -timeout=30m \ - "${gocov_build_flags[@]}" "$@" || failed="$failed unit" - done - - log_callout "[$(date)] Collecting coverage from integration tests ..." - run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration" \ - -timeout=30m "${gocov_build_flags[@]}" "$@" || failed="$failed integration" - # integration-store-v2 - run_for_module "tests" go_test "./integration/v2store/..." 
"keep_going" "pkg_to_coverprofileflag store_v2" \ - -timeout=5m "${gocov_build_flags[@]}" "$@" || failed="$failed integration_v2" - # integration_cluster_proxy - run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration_cluster_proxy" \ - -tags cluster_proxy -timeout=30m "${gocov_build_flags[@]}" || failed="$failed integration_cluster_proxy" - - log_callout "[$(date)] Collecting coverage from e2e tests ..." - # We don't pass 'gocov_build_flags' nor 'pkg_to_coverprofileflag' here, - # as the coverage is collected from the ./bin/etcd_test & ./bin/etcdctl_test internally spawned. - mkdir -p "${coverdir}/e2e" - COVERDIR="${coverdir}/e2e" run_for_module "tests" go_test "./e2e/..." "keep_going" : -tags=cov -timeout 30m "$@" || failed="$failed tests_e2e" - split_dir "${coverdir}/e2e" 10 - - log_callout "[$(date)] Collecting coverage from e2e tests with proxy ..." - mkdir -p "${coverdir}/e2e_proxy" - COVERDIR="${coverdir}/e2e_proxy" run_for_module "tests" go_test "./e2e/..." 
"keep_going" : -tags="cov cluster_proxy" -timeout 30m "$@" || failed="$failed tests_e2e_proxy" - split_dir "${coverdir}/e2e_proxy" 10 - - local cover_out_file="${coverdir}/all.coverprofile" - merge_cov "${coverdir}" - - # strip out generated files (using GNU-style sed) - sed --in-place -E "/[.]pb[.](gw[.])?go/d" "${cover_out_file}" || true - - sed --in-place -E "s|go.etcd.io/etcd/api/v3/|api/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/client/v3/|client/v3/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/client/v2/|client/v2/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/client/pkg/v3|client/pkg/v3/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/etcdctl/v3/|etcdctl/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/etcdutl/v3/|etcdutl/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/pkg/v3/|pkg/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/server/v3/|server/|g" "${cover_out_file}" || true - - # held failures to generate the full coverage file, now fail - if [ -n "$failed" ]; then - for f in $failed; do - log_error "--- FAIL:" "$f" - done - log_warning "Despite failures, you can see partial report:" - log_warning " go tool cover -html ${cover_out_file}" - return 255 - fi - - log_success "done :) [see report: go tool cover -html ${cover_out_file}]" -} - -######### Code formatting checkers ############################################# - -function shellcheck_pass { - SHELLCHECK=shellcheck - if ! 
tool_exists "shellcheck" "https://github.com/koalaman/shellcheck#installing"; then - log_callout "Installing shellcheck $SHELLCHECK_VERSION" - wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar -xJv -C /tmp/ --strip-components=1 - mkdir -p ./bin - mv /tmp/shellcheck ./bin/ - SHELLCHECK=./bin/shellcheck - fi - generic_checker run ${SHELLCHECK} -fgcc scripts/*.sh -} - -function shellws_pass { - TAB=$'\t' - log_callout "Ensuring no tab-based indention in shell scripts" - local files - files=$(find ./ -name '*.sh' -print0 | xargs -0 ) - # shellcheck disable=SC2206 - files=( ${files[@]} "./scripts/build-binary.sh" "./scripts/build-docker.sh" "./scripts/release.sh" ) - log_cmd "grep -E -n $'^ *${TAB}' ${files[*]}" - # shellcheck disable=SC2086 - if grep -E -n $'^ *${TAB}' "${files[@]}" | sed $'s|${TAB}|[\\\\tab]|g'; then - log_error "FAIL: found tab-based indention in bash scripts. Use ' ' (double space)." - local files_with_tabs - files_with_tabs=$(grep -E -l $'^ *\\t' "${files[@]}") - log_warning "Try: sed -i 's|\\t| |g' $files_with_tabs" - return 1 - else - log_success "SUCCESS: no tabulators found." - return 0 - fi -} - -function markdown_you_find_eschew_you { - local find_you_cmd="find . -name \\*.md ! -path '*/vendor/*' ! -path './Documentation/*' ! -path './gopath.proto/*' ! 
-path './release/*' -exec grep -E --color '[Yy]ou[r]?[ '\\''.,;]' {} + || true" - run eval "${find_you_cmd}" -} - -function markdown_you_pass { - generic_checker markdown_you_find_eschew_you -} - -function markdown_marker_pass { - # TODO: check other markdown files when marker handles headers with '[]' - if tool_exists "marker" "https://crates.io/crates/marker"; then - generic_checker run marker --skip-http --root ./Documentation 2>&1 - fi -} - -function govet_pass { - run_for_modules generic_checker run go vet -} - -function govet_shadow_pass { - local shadow - shadow=$(tool_get_bin "golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow") - run_for_modules generic_checker run go vet -all -vettool="${shadow}" -} - -function unparam_pass { - run_for_modules generic_checker run_go_tool "mvdan.cc/unparam" -} - -function staticcheck_pass { - run_for_modules generic_checker run_go_tool "honnef.co/go/tools/cmd/staticcheck" -} - -function revive_pass { - run_for_modules generic_checker run_go_tool "github.com/mgechev/revive" -config "${ETCD_ROOT_DIR}/tests/revive.toml" -exclude "vendor/..." -exclude "out/..." 
-} - -function unconvert_pass { - run_for_modules generic_checker run_go_tool "github.com/mdempsky/unconvert" unconvert -v -} - -function ineffassign_per_package { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - run_go_tool github.com/gordonklaus/ineffassign "${gofiles[@]}" -} - -function ineffassign_pass { - run_for_modules generic_checker ineffassign_per_package -} - -function nakedret_pass { - run_for_modules generic_checker run_go_tool "github.com/alexkohler/nakedret" -} - -function license_header_per_module { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - run_go_tool "github.com/google/addlicense" --check "${gofiles[@]}" -} - -function license_header_pass { - run_for_modules generic_checker license_header_per_module -} - -function receiver_name_for_package { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - - recvs=$(grep 'func ([^*]' "${gofiles[@]}" | tr ':' ' ' | \ - awk ' { print $2" "$3" "$4" "$1 }' | sed "s/[a-zA-Z\\.]*go//g" | sort | uniq | \ - grep -Ev "(Descriptor|Proto|_)" | awk ' { print $3" "$4 } ' | sort | uniq -c | grep -v ' 1 ' | awk ' { print $2 } ') - if [ -n "${recvs}" ]; then - # shellcheck disable=SC2206 - recvs=($recvs) - for recv in "${recvs[@]}"; do - log_error "Mismatched receiver for $recv..." 
- grep "$recv" "${gofiles[@]}" | grep 'func (' - done - return 255 - fi -} - -function receiver_name_pass { - run_for_modules receiver_name_for_package -} - -# goword_for_package package -# checks spelling and comments in the 'package' in the current module -# -function goword_for_package { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - - local gowordRes - - # spellchecking can be enabled with GOBINARGS="--tags=spell" - # but it requires heavy dependencies installation, like: - # apt-get install libaspell-dev libhunspell-dev hunspell-en-us aspell-en - - # only check for broke exported godocs - if gowordRes=$(run_go_tool "github.com/chzchzchz/goword" -use-spell=false "${gofiles[@]}" | grep godoc-export | sort); then - log_error -e "goword checking failed:\\n${gowordRes}" - return 255 - fi - if [ -n "$gowordRes" ]; then - log_error -e "goword checking returned output:\\n${gowordRes}" - return 255 - fi -} - - -function goword_pass { - run_for_modules goword_for_package || return 255 -} - -function go_fmt_for_package { - # We utilize 'go fmt' to find all files suitable for formatting, - # but reuse full power gofmt to perform just RO check. - go fmt -n "$1" | sed 's| -w | -d |g' | sh -} - -function gofmt_pass { - run_for_modules generic_checker go_fmt_for_package -} - -function bom_pass { - log_callout "Checking bill of materials..." - # https://github.com/golang/go/commit/7c388cc89c76bc7167287fb488afcaf5a4aa12bf - # shellcheck disable=SC2207 - modules=($(modules_exp)) - - # Internally license-bill-of-materials tends to modify go.sum - run cp go.sum go.sum.tmp || return 2 - run cp go.mod go.mod.tmp || return 2 - - output=$(GOFLAGS=-mod=mod run_go_tool github.com/coreos/license-bill-of-materials \ - --override-file ./bill-of-materials.override.json \ - "${modules[@]}") - code="$?" 
- - run cp go.sum.tmp go.sum || return 2 - run cp go.mod.tmp go.mod || return 2 - - if [ "${code}" -ne 0 ] ; then - log_error -e "license-bill-of-materials (code: ${code}) failed with:\\n${output}" - return 255 - else - echo "${output}" > "bom-now.json.tmp" - fi - if ! diff ./bill-of-materials.json bom-now.json.tmp; then - log_error "modularized licenses do not match given bill of materials" - return 255 - fi - rm bom-now.json.tmp -} - -######## VARIOUS CHECKERS ###################################################### - -function dump_deps_of_module() { - local module - if ! module=$(run go list -m); then - return 255 - fi - run go list -f "{{if not .Indirect}}{{if .Version}}{{.Path}},{{.Version}},${module}{{end}}{{end}}" -m all -} - -# Checks whether dependencies are consistent across modules -function dep_pass { - local all_dependencies - all_dependencies=$(run_for_modules dump_deps_of_module | sort) || return 2 - - local duplicates - duplicates=$(echo "${all_dependencies}" | cut -d ',' -f 1,2 | sort | uniq | cut -d ',' -f 1 | sort | uniq -d) || return 2 - - for dup in ${duplicates}; do - log_error "FAIL: inconsistent versions for depencency: ${dup}" - echo "${all_dependencies}" | grep "${dup}" | sed "s|\\([^,]*\\),\\([^,]*\\),\\([^,]*\\)| - \\1@\\2 from: \\3|g" - done - if [[ -n "${duplicates}" ]]; then - log_error "FAIL: inconsistent dependencies" - return 2 - else - log_success "SUCCESS: dependencies are consistent across modules" - fi -} - -function release_pass { - rm -f ./bin/etcd-last-release - # to grab latest patch release; bump this up for every minor release - UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.5.*" | head -1 | cut -d- -f1) - if [ -n "$MANUAL_VER" ]; then - # in case, we need to test against different version - UPGRADE_VER=$MANUAL_VER - fi - if [[ -z ${UPGRADE_VER} ]]; then - UPGRADE_VER="v3.5.0" - log_warning "fallback to" ${UPGRADE_VER} - fi - - local file="etcd-$UPGRADE_VER-linux-$GOARCH.tar.gz" - log_callout "Downloading $file" - - 
set +e - curl --fail -L "https://github.com/etcd-io/etcd/releases/download/$UPGRADE_VER/$file" -o "/tmp/$file" - local result=$? - set -e - case $result in - 0) ;; - *) log_error "--- FAIL:" ${result} - return $result - ;; - esac - - tar xzvf "/tmp/$file" -C /tmp/ --strip-components=1 - mkdir -p ./bin - mv /tmp/etcd ./bin/etcd-last-release -} - -function mod_tidy_for_module { - # Watch for upstream solution: https://github.com/golang/go/issues/27005 - local tmpModDir - tmpModDir=$(mktemp -d -t 'tmpModDir.XXXXXX') - run cp "./go.mod" "${tmpModDir}" || return 2 - - # Guarantees keeping go.sum minimal - # If this is causing too much problems, we should - # stop controlling go.sum at all. - rm go.sum - run go mod tidy || return 2 - - set +e - local tmpFileGoModInSync - diff -C 5 "${tmpModDir}/go.mod" "./go.mod" - tmpFileGoModInSync="$?" - - # Bring back initial state - mv "${tmpModDir}/go.mod" "./go.mod" - - if [ "${tmpFileGoModInSync}" -ne 0 ]; then - log_error "${PWD}/go.mod is not in sync with 'go mod tidy'" - return 255 - fi -} - -function mod_tidy_pass { - run_for_modules mod_tidy_for_module -} - -function proto_annotations_pass { - "${ETCD_ROOT_DIR}/scripts/verify_proto_annotations.sh" -} - -function genproto_pass { - "${ETCD_ROOT_DIR}/scripts/verify_genproto.sh" -} - -########### MAIN ############################################################### - -function run_pass { - local pass="${1}" - shift 1 - log_callout -e "\\n'${pass}' started at $(date)" - if "${pass}_pass" "$@" ; then - log_success "'${pass}' completed at $(date)" - else - log_error "FAIL: '${pass}' failed at $(date)" - exit 255 - fi -} - -log_callout "Starting at: $(date)" -for pass in $PASSES; do - run_pass "${pass}" "${@}" -done - -log_success "SUCCESS" diff --git a/scripts/test_lib.sh b/scripts/test_lib.sh deleted file mode 100644 index 1199b411087..00000000000 --- a/scripts/test_lib.sh +++ /dev/null @@ -1,434 +0,0 @@ -#!/usr/bin/env bash - -ROOT_MODULE="go.etcd.io/etcd" - -if [[ "$(go list)" != 
"${ROOT_MODULE}/v3" ]]; then - echo "must be run from '${ROOT_MODULE}/v3' module directory" - exit 255 -fi - -function set_root_dir { - ETCD_ROOT_DIR=$(go list -f '{{.Dir}}' "${ROOT_MODULE}/v3") -} - -set_root_dir - -#### Convenient IO methods ##### - -COLOR_RED='\033[0;31m' -COLOR_ORANGE='\033[0;33m' -COLOR_GREEN='\033[0;32m' -COLOR_LIGHTCYAN='\033[0;36m' -COLOR_BLUE='\033[0;94m' -COLOR_MAGENTA='\033[95m' -COLOR_BOLD='\033[1m' -COLOR_NONE='\033[0m' # No Color - - -function log_error { - >&2 echo -n -e "${COLOR_BOLD}${COLOR_RED}" - >&2 echo "$@" - >&2 echo -n -e "${COLOR_NONE}" -} - -function log_warning { - >&2 echo -n -e "${COLOR_ORANGE}" - >&2 echo "$@" - >&2 echo -n -e "${COLOR_NONE}" -} - -function log_callout { - >&2 echo -n -e "${COLOR_LIGHTCYAN}" - >&2 echo "$@" - >&2 echo -n -e "${COLOR_NONE}" -} - -function log_cmd { - >&2 echo -n -e "${COLOR_BLUE}" - >&2 echo "$@" - >&2 echo -n -e "${COLOR_NONE}" -} - -function log_success { - >&2 echo -n -e "${COLOR_GREEN}" - >&2 echo "$@" - >&2 echo -n -e "${COLOR_NONE}" -} - -function log_info { - >&2 echo -n -e "${COLOR_NONE}" - >&2 echo "$@" - >&2 echo -n -e "${COLOR_NONE}" -} - -# From http://stackoverflow.com/a/12498485 -function relativePath { - # both $1 and $2 are absolute paths beginning with / - # returns relative path to $2 from $1 - local source=$1 - local target=$2 - - local commonPart=$source - local result="" - - while [[ "${target#"$commonPart"}" == "${target}" ]]; do - # no match, means that candidate common part is not correct - # go up one level (reduce common part) - commonPart="$(dirname "$commonPart")" - # and record that we went back, with correct / handling - if [[ -z $result ]]; then - result=".." 
- else - result="../$result" - fi - done - - if [[ $commonPart == "/" ]]; then - # special case for root (no common path) - result="$result/" - fi - - # since we now have identified the common part, - # compute the non-common part - local forwardPart="${target#"$commonPart"}" - - # and now stick all parts together - if [[ -n $result ]] && [[ -n $forwardPart ]]; then - result="$result$forwardPart" - elif [[ -n $forwardPart ]]; then - # extra slash removal - result="${forwardPart:1}" - fi - - echo "$result" -} - -#### Discovery of files/packages within a go module ##### - -# go_srcs_in_module [package] -# returns list of all not-generated go sources in the current (dir) module. -function go_srcs_in_module { - go list -f "{{with \$c:=.}}{{range \$f:=\$c.GoFiles }}{{\$c.Dir}}/{{\$f}}{{\"\n\"}}{{end}}{{range \$f:=\$c.TestGoFiles }}{{\$c.Dir}}/{{\$f}}{{\"\n\"}}{{end}}{{range \$f:=\$c.XTestGoFiles }}{{\$c.Dir}}/{{\$f}}{{\"\n\"}}{{end}}{{end}}" ./... | grep -vE "(\\.pb\\.go|\\.pb\\.gw.go)" -} - -# pkgs_in_module [optional:package_pattern] -# returns list of all packages in the current (dir) module. -# if the package_pattern is given, its being resolved. -function pkgs_in_module { - go list -mod=mod "${1:-./...}"; -} - -# Prints subdirectory (from the repo root) for the current module. -function module_subdir { - relativePath "${ETCD_ROOT_DIR}" "${PWD}" -} - -#### Running actions against multiple modules #### - -# run [command...] - runs given command, printing it first and -# again if it failed (in RED). Use to wrap important test commands -# that user might want to re-execute to shorten the feedback loop when fixing -# the test. -function run { - local rpath - local command - rpath=$(module_subdir) - # Quoting all components as the commands are fully copy-parsable: - command=("${@}") - command=("${command[@]@Q}") - if [[ "${rpath}" != "." 
&& "${rpath}" != "" ]]; then - repro="(cd ${rpath} && ${command[*]})" - else - repro="${command[*]}" - fi - - log_cmd "% ${repro}" - "${@}" 2> >(while read -r line; do echo -e "${COLOR_NONE}stderr: ${COLOR_MAGENTA}${line}${COLOR_NONE}">&2; done) - local error_code=$? - if [ ${error_code} -ne 0 ]; then - log_error -e "FAIL: (code:${error_code}):\\n % ${repro}" - return ${error_code} - fi -} - -# run_for_module [module] [cmd] -# executes given command in the given module for given pkgs. -# module_name - "." (in future: tests, client, server) -# cmd - cmd to be executed - that takes package as last argument -function run_for_module { - local module=${1:-"."} - shift 1 - ( - cd "${ETCD_ROOT_DIR}/${module}" && "$@" - ) -} - -function module_dirs() { - echo "api pkg client/pkg client/v2 client/v3 server etcdutl etcdctl tests ." -} - -# maybe_run [cmd...] runs given command depending on the DRY_RUN flag. -function maybe_run() { - if ${DRY_RUN}; then - log_warning -e "# DRY_RUN:\\n % ${*}" - else - run "${@}" - fi -} - -function modules() { - modules=( - "${ROOT_MODULE}/api/v3" - "${ROOT_MODULE}/pkg/v3" - "${ROOT_MODULE}/client/pkg/v3" - "${ROOT_MODULE}/client/v2" - "${ROOT_MODULE}/client/v3" - "${ROOT_MODULE}/server/v3" - "${ROOT_MODULE}/etcdutl/v3" - "${ROOT_MODULE}/etcdctl/v3" - "${ROOT_MODULE}/tests/v3" - "${ROOT_MODULE}/v3") - echo "${modules[@]}" -} - -function modules_exp() { - for m in $(modules); do - echo -n "${m}/... " - done -} - -# run_for_modules [cmd] -# run given command across all modules and packages -# (unless the set is limited using ${PKG} or / ${USERMOD}) -function run_for_modules { - local pkg="${PKG:-./...}" - if [ -z "${USERMOD:-}" ]; then - for m in $(module_dirs); do - run_for_module "${m}" "$@" "${pkg}" || return "$?" - done - else - run_for_module "${USERMOD}" "$@" "${pkg}" || return "$?" 
- fi -} - -junitFilenamePrefix() { - if [[ -z "${JUNIT_REPORT_DIR}" ]]; then - echo "" - return - fi - mkdir -p "${JUNIT_REPORT_DIR}" - DATE=$( date +%s | base64 | head -c 15 ) - echo "${JUNIT_REPORT_DIR}/junit_$DATE" -} - -function produce_junit_xmlreport { - local -r junit_filename_prefix=$1 - if [[ -z "${junit_filename_prefix}" ]]; then - return - fi - - local junit_xml_filename - junit_xml_filename="${junit_filename_prefix}.xml" - - # Ensure that gotestsum is run without cross-compiling - run_go_tool gotest.tools/gotestsum --junitfile "${junit_xml_filename}" --raw-command cat "${junit_filename_prefix}"*.stdout || exit 1 - if [ "${VERBOSE}" != "1" ]; then - rm "${junit_filename_prefix}"*.stdout - fi - - log_callout "Saved JUnit XML test report to ${junit_xml_filename}" -} - - -#### Running go test ######## - -# go_test [packages] [mode] [flags_for_package_func] [$@] -# [mode] supports 3 states: -# - "parallel": fastest as concurrently processes multiple packages, but silent -# till the last package. See: https://github.com/golang/go/issues/2731 -# - "keep_going" : executes tests package by package, but postpones reporting error to the last -# - "fail_fast" : executes tests packages 1 by 1, exits on the first failure. -# -# [flags_for_package_func] is a name of function that takes list of packages as parameter -# and computes additional flags to the go_test commands. -# Use 'true' or ':' if you dont need additional arguments. -# -# depends on the VERBOSE top-level variable. -# -# Example: -# go_test "./..." "keep_going" ":" --short -# -# The function returns != 0 code in case of test failure. -function go_test { - local packages="${1}" - local mode="${2}" - local flags_for_package_func="${3}" - local junit_filename_prefix - - shift 3 - - local goTestFlags="" - local goTestEnv="" - - ##### Create a junit-style XML test report in this directory if set. 
##### - JUNIT_REPORT_DIR=${JUNIT_REPORT_DIR:-} - - # If JUNIT_REPORT_DIR is unset, and ARTIFACTS is set, then have them match. - if [[ -z "${JUNIT_REPORT_DIR:-}" && -n "${ARTIFACTS:-}" ]]; then - export JUNIT_REPORT_DIR="${ARTIFACTS}" - fi - - # Used to filter verbose test output. - go_test_grep_pattern=".*" - - if [[ -n "${JUNIT_REPORT_DIR}" ]] ; then - goTestFlags+="-v " - goTestFlags+="-json " - # Show only summary lines by matching lines like "status package/test" - go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+" - fi - - junit_filename_prefix=$(junitFilenamePrefix) - - if [ "${VERBOSE}" == "1" ]; then - goTestFlags="-v" - fi - - # Expanding patterns (like ./...) into list of packages - - local unpacked_packages=("${packages}") - if [ "${mode}" != "parallel" ]; then - # shellcheck disable=SC2207 - # shellcheck disable=SC2086 - if ! unpacked_packages=($(go list ${packages})); then - log_error "Cannot resolve packages: ${packages}" - return 255 - fi - fi - - local failures="" - - # execution of tests against packages: - for pkg in "${unpacked_packages[@]}"; do - local additional_flags - # shellcheck disable=SC2086 - additional_flags=$(${flags_for_package_func} ${pkg}) - - # shellcheck disable=SC2206 - local cmd=( go test ${goTestFlags} ${additional_flags} "$@" ${pkg} ) - - # shellcheck disable=SC2086 - if ! 
run env ${goTestEnv} ETCD_VERIFY="${ETCD_VERIFY}" "${cmd[@]}" | tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} | grep --binary-files=text "${go_test_grep_pattern}" ; then - if [ "${mode}" != "keep_going" ]; then - produce_junit_xmlreport "${junit_filename_prefix}" - return 2 - else - failures=("${failures[@]}" "${pkg}") - fi - fi - produce_junit_xmlreport "${junit_filename_prefix}" - done - - if [ -n "${failures[*]}" ] ; then - log_error -e "ERROR: Tests for following packages failed:\\n ${failures[*]}" - return 2 - fi -} - -#### Other #### - -# tool_exists [tool] [instruction] -# Checks whether given [tool] is installed. In case of failure, -# prints a warning with installation [instruction] and returns !=0 code. -# -# WARNING: This depend on "any" version of the 'binary' that might be tricky -# from hermetic build perspective. For go binaries prefer 'tool_go_run' -function tool_exists { - local tool="${1}" - local instruction="${2}" - if ! command -v "${tool}" >/dev/null; then - log_warning "Tool: '${tool}' not found on PATH. ${instruction}" - return 255 - fi -} - -# tool_get_bin [tool] - returns absolute path to a tool binary (or returns error) -function tool_get_bin { - local tool="$1" - local pkg_part="$1" - if [[ "$tool" == *"@"* ]]; then - pkg_part=$(echo "${tool}" | cut -d'@' -f1) - # shellcheck disable=SC2086 - run go install ${GOBINARGS:-} "${tool}" || return 2 - else - # shellcheck disable=SC2086 - run_for_module ./tools/mod run go install ${GOBINARGS:-} "${tool}" || return 2 - fi - - # remove the version suffix, such as removing "/v3" from "go.etcd.io/etcd/v3". - local cmd_base_name - cmd_base_name=$(basename "${pkg_part}") - if [[ ${cmd_base_name} =~ ^v[0-9]*$ ]]; then - pkg_part=$(dirname "${pkg_part}") - fi - - run_for_module ./tools/mod go list -f '{{.Target}}' "${pkg_part}" -} - -# tool_pkg_dir [pkg] - returns absolute path to a directory that stores given pkg. -# The pkg versions must be defined in ./tools/mod directory. 
-function tool_pkg_dir {
-  run_for_module ./tools/mod run go list -f '{{.Dir}}' "${1}"
-}
-
-# run_go_tool [tool]
-function run_go_tool {
-  local cmdbin
-  if ! cmdbin=$(GOARCH="" tool_get_bin "${1}"); then
-    log_warning "Failed to install tool '${1}'"
-    return 2
-  fi
-  shift 1
-  GOARCH="" run "${cmdbin}" "$@" || return 2
-}
-
-# assert_no_git_modifications fails if there are any uncommitted changes.
-function assert_no_git_modifications {
-  log_callout "Making sure everything is committed."
-  if ! git diff --cached --exit-code; then
-    log_error "Found staged but uncommitted changes. Do commit/stash your changes first."
-    return 2
-  fi
-  if ! git diff --exit-code; then
-    log_error "Found unstaged and uncommitted changes. Do commit/stash your changes first."
-    return 2
-  fi
-}
-
-# makes sure that the current branch is in sync with the origin branch:
-# - no uncommitted nor unstaged changes
-# - no differing commits in relation to the origin/$branch
-function git_assert_branch_in_sync {
-  local branch
-  # TODO: When git 2.22 is popular, change to:
-  # branch=$(git branch --show-current)
-  branch=$(run git rev-parse --abbrev-ref HEAD)
-  log_callout "Verify the current branch '${branch}' is clean"
-  if [[ $(run git status --porcelain --untracked-files=no) ]]; then
-    log_error "The workspace in '$(pwd)' for branch: ${branch} has uncommitted changes"
-    log_error "Consider cleaning up / renaming this directory or (cd $(pwd) && git reset --hard)"
-    return 2
-  fi
-  log_callout "Verify the current branch '${branch}' is in sync with the 'origin/${branch}'"
-  if [ -n "${branch}" ]; then
-    ref_local=$(run git rev-parse "${branch}")
-    ref_origin=$(run git rev-parse "origin/${branch}")
-    if [ "x${ref_local}" != "x${ref_origin}" ]; then
-      log_error "In workspace '$(pwd)' the branch: ${branch} diverges from the origin."
- log_error "Consider cleaning up / renaming this directory or (cd $(pwd) && git reset --hard origin/${branch})" - return 2 - fi - else - log_warning "Cannot verify consistency with the origin, as git is on detached branch." - fi -} diff --git a/scripts/update_dep.sh b/scripts/update_dep.sh deleted file mode 100755 index e0c79b4a3de..00000000000 --- a/scripts/update_dep.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Usage: -# ./scripts/update_dep.sh module version -# or ./scripts/update_dep.sh module -# e.g. -# ./scripts/update_dep.sh github.com/golang/groupcache -# ./scripts/update_dep.sh github.com/soheilhy/cmux v0.1.5 -# -# Updates version of given dependency in all the modules that depend on the mod. - -source ./scripts/test_lib.sh - -mod="$1" -ver="$2" - -function maybe_update_module { - run go mod tidy - - deps=$(go list -f '{{if not .Indirect}}{{if .Version}}{{.Path}},{{.Version}}{{end}}{{end}}' -m all) - if [[ "$deps" == *"${mod}"* ]]; then - if [ -z "${ver}" ]; then - run go get "${mod}" - else - run go get "${mod}@${ver}" - fi - fi - } - -go mod tidy -run_for_modules maybe_update_module diff --git a/scripts/update_proto_annotations.sh b/scripts/update_proto_annotations.sh deleted file mode 100755 index 75089e77222..00000000000 --- a/scripts/update_proto_annotations.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -# Updates etcd_version_annotations.txt based on state of annotations in proto files. -# Developers can run this script to avoid manually updating etcd_version_annotations.txt. -# Before running this script please ensure that fields/messages that you added are annotated with next etcd version. 
- -set -o errexit -set -o nounset -set -o pipefail - -tmpfile=$(mktemp) -go run ./tools/proto-annotations/main.go --annotation etcd_version > "${tmpfile}" -mv "${tmpfile}" ./scripts/etcd_version_annotations.txt diff --git a/scripts/updatebom.sh b/scripts/updatebom.sh deleted file mode 100755 index e879aace934..00000000000 --- a/scripts/updatebom.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -set -e -source ./scripts/test_lib.sh - -function bom_fixlet { - log_callout "generating bill-of-materials.json" - - cp go.mod go.mod.tmp - cp go.sum go.sum.tmp - - local modules - # shellcheck disable=SC2207 - modules=($(modules_exp)) - - if GOFLAGS=-mod=mod run_go_tool "github.com/coreos/license-bill-of-materials" \ - --override-file ./bill-of-materials.override.json \ - "${modules[@]}" > ./bill-of-materials.json.tmp; then - cp ./bill-of-materials.json.tmp ./bill-of-materials.json - log_success "bom refreshed" - else - log_error "FAIL: bom refreshing failed" - mv go.mod.tmp go.mod - mv go.sum.tmp go.sum - return 2 - fi - mv go.mod.tmp go.mod - mv go.sum.tmp go.sum -} - -function bom_fix { - # We regenerate bom from the tests directory, as it's a module - # that depends on all other modules, so we can generate comprehensive content. - # TODO: Migrate to root module, when root module depends on everything (including server & tests). - run_for_module "." bom_fixlet -} - -# only build when called directly, not sourced -if [[ "$0" =~ updatebom.sh$ ]]; then - bom_fix -fi diff --git a/scripts/verify_genproto.sh b/scripts/verify_genproto.sh deleted file mode 100755 index a66875657cf..00000000000 --- a/scripts/verify_genproto.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -# This scripts is automatically run by CI to prevent pull requests missing running genproto.sh -# after changing *.proto file. - -set -o errexit -set -o nounset -set -o pipefail - -tmpWorkDir=$(mktemp -d -t 'twd.XXXXXX') -mkdir "$tmpWorkDir/etcd" -tmpWorkDir="$tmpWorkDir/etcd" -cp -r . 
"$tmpWorkDir"
-pushd "$tmpWorkDir"
-git add -A
-git commit -m init || true # may fail because there is nothing to commit
-./scripts/genproto.sh
-diff=$(git diff --numstat | awk '{print $3}')
-popd
-if [ -z "$diff" ]; then
-  echo "PASSED genproto-verification!"
-  exit 0
-fi
-echo "Failed genproto-verification!" >&2
-printf "* Found changed files:\n%s\n" "$diff" >&2
-echo "* Please rerun genproto.sh after changing *.proto file" >&2
-echo "* Run ./scripts/genproto.sh" >&2
-exit 1
diff --git a/scripts/verify_proto_annotations.sh b/scripts/verify_proto_annotations.sh
deleted file mode 100755
index 17da593baab..00000000000
--- a/scripts/verify_proto_annotations.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-# Verifies proto annotations to ensure all new proto fields and messages are annotated by comparing it with etcd_version_annotations.txt file.
-# This script is automatically run by CI to prevent pull requests from missing a proto annotation.
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-tmpfile=$(mktemp)
-go run ./tools/proto-annotations/main.go --annotation=etcd_version > "${tmpfile}"
-if diff -u ./scripts/etcd_version_annotations.txt "${tmpfile}"; then
-  echo "PASSED proto-annotations verification!"
-  exit 0
-fi
-echo "Failed proto-annotations-verification!"
>&2 -echo "If you are adding new proto fields/messages that will be included in raft log:" >&2 -echo "* Please add etcd_version annotation in *.proto file with next etcd version" >&2 -echo "* Run ./scripts/genproto.sh" >&2 -echo "* Run ./scripts/update_proto_annotations.sh" >&2 -exit 1 diff --git a/security/FUZZING_AUDIT_2022.PDF b/security/FUZZING_AUDIT_2022.PDF deleted file mode 100644 index 695ce764ee0..00000000000 Binary files a/security/FUZZING_AUDIT_2022.PDF and /dev/null differ diff --git a/security/README.md b/security/README.md deleted file mode 100644 index 1bedf550bfd..00000000000 --- a/security/README.md +++ /dev/null @@ -1,48 +0,0 @@ -## Security Announcements - -Join the [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev) group for emails about security and major announcements. - -## Report a Vulnerability - -We’re extremely grateful for security researchers and users that report vulnerabilities to the etcd Open Source Community. All reports are thoroughly investigated by a dedicated committee of community volunteers called [Product Security Committee](security-release-process.md#product-security-committee). - -To make a report, please email the private [security@etcd.io](mailto:security@etcd.io) list with the security details and the details expected for [all etcd bug reports](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/reporting_bugs.md). - -### When Should I Report a Vulnerability? - -- When discovered a potential security vulnerability in etcd -- When unsure how a vulnerability affects etcd -- When discovered a vulnerability in another project that etcd depends on - -### When Should I NOT Report a Vulnerability? - -- Need help tuning etcd for security -- Need help applying security related updates -- When an issue is not security related - -## Security Vulnerability Response - -Each report is acknowledged and analyzed by Product Security Committee members within 3 working days. 
This will set off the [Security Release Process](security-release-process.md). - -Any vulnerability information shared with Product Security Committee stays within etcd project and will not be disseminated to other projects unless it is necessary to get the issue fixed. - -As the security issue moves from triage, to identified fix, to release planning we will keep the reporter updated. - -## Public Disclosure Timing - -A public disclosure date is negotiated by the etcd Product Security Committee and the bug reporter. We prefer to fully disclose the bug as soon as possible once user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. As a basic default, we expect report date to disclosure date to be on the order of 7 days. The etcd Product Security Committee holds the final say when setting a disclosure date. - -## Security Audit - -A third party security audit was performed by Trail of Bits, find the full report [here](SECURITY_AUDIT.pdf). -A third party fuzzing audit was performed by Ada Logics, find the full report [here](FUZZING_AUDIT_2022.pdf). - -## Private Distributor List - -This list provides actionable information regarding etcd security to multiple distributors. Members of the list may not use the information for anything other than fixing the issue for respective distribution's users. If you continue to leak information and break the policy outlined here, you will be removed from the list. - -### Request to Join - -New membership requests are sent to security@etcd.io. - -File an issue [here](https://github.com/etcd-io/etcd/issues/new?template=distributors-application.md), filling in the criteria template. 
diff --git a/security/SECURITY_AUDIT.pdf b/security/SECURITY_AUDIT.pdf
deleted file mode 100644
index edd829c4892..00000000000
Binary files a/security/SECURITY_AUDIT.pdf and /dev/null differ
diff --git a/security/email-templates.md b/security/email-templates.md
deleted file mode 100644
index b2854b763be..00000000000
--- a/security/email-templates.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# etcd Security Process Email Templates
-
-This is a collection of email templates to handle various situations the security team encounters.
-
-## Upcoming security release
-
-```
-Subject: Upcoming security release of etcd $VERSION
-To: etcd-dev@googlegroups.com
-Cc: security@etcd.io
-
-Hello etcd Community,
-
-The etcd Product Security Committee and maintainers would like to announce the forthcoming release
-of etcd $VERSION.
-
-This release will be made available on the $ORDINALDAY of $MONTH $YEAR at
-$PDTHOUR PDT ($GMTHOUR GMT). This release will fix $NUMDEFECTS security
-defect(s). The highest rated security defect is considered $SEVERITY severity.
-
-No further details or patches will be made available in advance of the release.
-
-**Thanks**
-
-Thanks to $REPORTER, $DEVELOPERS, and the $RELEASELEADS for the coordination in making this release.
-
-Thanks,
-
-$PERSON on behalf of the etcd Product Security Committee and maintainers
-```
-
-## Security Fix Announcement
-
-```
-Subject: Security release of etcd $VERSION is now available
-To: etcd-dev@googlegroups.com
-Cc: security@etcd.io
-
-Hello etcd Community,
-
-The Product Security Committee and maintainers would like to announce the availability of etcd $VERSION.
-This addresses the following CVE(s):
-
-* CVE-YEAR-ABCDEF (CVSS score $CVSS): $CVESUMMARY
-...
-
-Upgrading to $VERSION is encouraged to fix these issues.
-
-**Am I vulnerable?**
-
-Run `etcd --version` and if it indicates a base version of $OLDVERSION or
-older that means it is a vulnerable version.
- - - -**How do I mitigate the vulnerability?** - - - -**How do I upgrade?** - -Follow the upgrade instructions at https://etcd.io/docs - -**Vulnerability Details** - - - -***CVE-YEAR-ABCDEF*** - -$CVESUMMARY - -This issue is filed as $CVE. We have rated it as [$CVSSSTRING]($CVSSURL) -($CVSS, $SEVERITY) [See the GitHub issue for more details]($GITHUBISSUEURL) - -**Thanks** - -Thanks to $REPORTER, $DEVELOPERS, and the $RELEASELEADS for the -coordination in making this release. - -Thanks, - -$PERSON on behalf of the etcd Product Security Committee and maintainers -``` diff --git a/security/security-release-process.md b/security/security-release-process.md deleted file mode 100644 index 79342d5c743..00000000000 --- a/security/security-release-process.md +++ /dev/null @@ -1,114 +0,0 @@ -# Security Release Process - -etcd is a growing community of volunteers, users, and vendors. The etcd community has adopted this security disclosures and response policy to ensure we responsibly handle critical issues. - -## Product Security Committee (PSC) - -Security vulnerabilities should be handled quickly and sometimes privately. The primary goal of this process is to reduce the total time users are vulnerable to publicly known exploits. - -The PSC is responsible for organizing the entire response including internal communication and external disclosure but will need help from relevant developers and release leads to successfully run this process. - -The PSC consists of the following: - -- Maintainers -- Volunteer members as described in the [Product Security Committee Membership](#Product-Security-Committee-Membership) - -The PSC members will share various tasks as listed below: - -- Triage: make sure the people who should be in "the know" (aka notified) are notified, also responds to issues that are not actually issues and let the etcd maintainers know that. This person is the escalation path for a bug if it is one. -- Infra: make sure we can test the fixes appropriately. 
-- Disclosure: handles public messaging around the bug. Documentation on how to upgrade. Changelog. Explaining to public the severity. notifications of bugs sent to mailing lists etc. Requests CVEs. -- Release: Create new release addressing a security fix. - -### Contacting the Product Security Committee - -Contact the team by sending email to [security@etcd.io](mailto:security@etcd.io) - -### Product Security Committee Membership - -#### Joining - -New potential members to the PSC can express their interest to the PSC members. These individuals can be nominated by PSC members or etcd maintainers. - -If representation changes due to job shifts then PSC members are encouraged to grow the team or replace themselves through mentoring new members. - -##### Product Security Committee Lazy Consensus Selection - -Selection of new members will be done by lazy consensus amongst members for adding new people with fallback on majority vote. - -#### Stepping Down - -Members may step down at any time and propose a replacement from existing active contributors of etcd. - -#### Responsibilities - -- Members must remain active and responsive. -- Members taking an extended leave of two weeks or more should coordinate with other members to ensure the role is adequately staffed during the leave. -- Members going on leave for 1-3 months may identify a temporary replacement. -- Members of a role should remove any other members that have not communicated a leave of absence and either cannot be reached for more than 1 month or are not fulfilling their documented responsibilities for more than 1 month. This may be done through a super-majority vote of members. - -## Disclosures - -### Private Disclosure Processes - -The etcd Community asks that all suspected vulnerabilities be privately and responsibly disclosed as explained in the [README](README.md). 
- -### Public Disclosure Processes - -If anyone knows of a publicly disclosed security vulnerability please IMMEDIATELY email [security@etcd.io](mailto:security@etcd.io) to inform the PSC about the vulnerability so they may start the patch, release, and communication process. - -If possible the PSC will ask the person making the public report if the issue can be handled via a private disclosure process. If the reporter denies the PSC will move swiftly with the fix and release process. In extreme cases GitHub can be asked to delete the issue but this generally isn't necessary and is unlikely to make a public disclosure less damaging. - -## Patch, Release, and Public Communication - -For each vulnerability, the PSC members will coordinate to create the fix and release, and sending email to the rest of the community. - -All of the timelines below are suggestions and assume a Private Disclosure. -The PSC drives the schedule using their best judgment based on severity, -development time, and release work. If the PSC is dealing with -a Public Disclosure all timelines become ASAP. If the fix relies on another -upstream project's disclosure timeline, that will adjust the process as well. -We will work with the upstream project to fit their timeline and best protect -etcd users. - -### Fix Team Organization - -These steps should be completed within the first 24 hours of Disclosure. - -- The PSC will work quickly to identify relevant engineers from the affected projects and packages and CC those engineers into the disclosure thread. These selected developers are the Fix Team. A best guess is to invite all maintainers. - -### Fix Development Process - -These steps should be completed within the 1-7 days of Disclosure. - -- The PSC and the Fix Team will create a [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS Calculator](https://www.first.org/cvss/calculator/3.0) to determine the effect and severity of the bug. 
The PSC makes the final call on the calculated risk; it is better to move quickly than make the perfect assessment. -- The PSC will request a [CVE](https://cveform.mitre.org/). -- The Fix Team will notify the PSC that work on the fix branch is complete once there are LGTMs on all commits from one or more maintainers. - -If the CVSS score is under ~4.0 -([a low severity score](https://www.first.org/cvss/specification-document#i5)) or the assessed risk is low the Fix Team can decide to slow the release process down in the face of holidays, developer bandwidth, etc. - -Note: CVSS is convenient but imperfect. Ultimately, the PSC has discretion on classifying the severity of a vulnerability. - -The severity of the bug and related handling decisions must be discussed on the security@etcd.io mailing list. - -### Fix Disclosure Process - -With the Fix Development underway, the PSC needs to come up with an overall communication plan for the wider community. This Disclosure process should begin after the Fix Team has developed a Fix or mitigation so that a realistic timeline can be communicated to users. - -**Fix Release Day** (Completed within 1-21 days of Disclosure) - -- The PSC will cherry-pick the patches onto the main branch and all relevant release branches. The Fix Team will `lgtm` and `approve`. -- The etcd maintainers will merge these PRs as quickly as possible. -- The PSC will ensure all the binaries are built, publicly available, and functional. -- The PSC will announce the new releases, the CVE number, severity, and impact, and the location of the binaries to get wide distribution and user action. As much as possible this announcement should be actionable, and include any mitigating steps users can take prior to upgrading to a fixed version. The recommended target time is 4pm UTC on a non-Friday weekday. This means the announcement will be seen morning Pacific, early evening Europe, and late evening Asia. 
The announcement will be sent via the following channels: - - etcd-dev@googlegroups.com - - [Kubernetes announcement slack channel](https://kubernetes.slack.com/messages/C9T0QMNG4) - - [etcd slack channel](https://kubernetes.slack.com/messages/C3HD8ARJ5) - -## Retrospective - -These steps should be completed 1-3 days after the Release Date. The retrospective process [should be blameless](https://landing.google.com/sre/book/chapters/postmortem-culture.html). - -- The PSC will send a retrospective of the process to etcd-dev@googlegroups.com including details on everyone involved, the timeline of the process, links to relevant PRs that introduced the issue, if relevant, and any critiques of the response and release process. -- The PSC and Fix Team are also encouraged to send their own feedback on the process to etcd-dev@googlegroups.com. Honest critique is the only way we are going to get good at this as a community. diff --git a/server/LICENSE b/server/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/server/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/server/auth/jwt.go b/server/auth/jwt.go deleted file mode 100644 index 82648e41ece..00000000000 --- a/server/auth/jwt.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "context" - "crypto/ecdsa" - "crypto/rsa" - "errors" - "time" - - jwt "github.com/golang-jwt/jwt/v4" - "go.uber.org/zap" -) - -type tokenJWT struct { - lg *zap.Logger - signMethod jwt.SigningMethod - key interface{} - ttl time.Duration - verifyOnly bool -} - -func (t *tokenJWT) enable() {} -func (t *tokenJWT) disable() {} -func (t *tokenJWT) invalidateUser(string) {} -func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } - -func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { - // rev isn't used in JWT, it is only used in simple token - var ( - username string - revision uint64 - ) - - parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - if token.Method.Alg() != t.signMethod.Alg() { - return nil, errors.New("invalid signing method") - } - switch k := t.key.(type) { - case *rsa.PrivateKey: - return &k.PublicKey, nil - case *ecdsa.PrivateKey: - return &k.PublicKey, nil - default: - return t.key, nil - } - }) - - if err != nil { - t.lg.Warn( - "failed to parse a JWT token", - zap.Error(err), - ) - return nil, false - } - - claims, ok := parsed.Claims.(jwt.MapClaims) - if !parsed.Valid || !ok { - t.lg.Warn("failed to obtain claims from a JWT token") - return nil, false - } - - username = claims["username"].(string) - revision = uint64(claims["revision"].(float64)) - - return &AuthInfo{Username: username, Revision: revision}, true -} - -func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { - if t.verifyOnly { - return "", ErrVerifyOnly - } - - // Future work: let a jwt token include permission information would be useful for - // permission checking in proxy side. 
- tk := jwt.NewWithClaims(t.signMethod, - jwt.MapClaims{ - "username": username, - "revision": revision, - "exp": time.Now().Add(t.ttl).Unix(), - }) - - token, err := tk.SignedString(t.key) - if err != nil { - t.lg.Debug( - "failed to sign a JWT token", - zap.String("user-name", username), - zap.Uint64("revision", revision), - zap.Error(err), - ) - return "", err - } - - t.lg.Debug( - "created/assigned a new JWT token", - zap.String("user-name", username), - zap.Uint64("revision", revision), - zap.String("token", token), - ) - return token, err -} - -func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, error) { - if lg == nil { - lg = zap.NewNop() - } - var err error - var opts jwtOptions - err = opts.ParseWithDefaults(optMap) - if err != nil { - lg.Error("problem loading JWT options", zap.Error(err)) - return nil, ErrInvalidAuthOpts - } - - var keys = make([]string, 0, len(optMap)) - for k := range optMap { - if !knownOptions[k] { - keys = append(keys, k) - } - } - if len(keys) > 0 { - lg.Warn("unknown JWT options", zap.Strings("keys", keys)) - } - - key, err := opts.Key() - if err != nil { - return nil, err - } - - t := &tokenJWT{ - lg: lg, - ttl: opts.TTL, - signMethod: opts.SignMethod, - key: key, - } - - switch t.signMethod.(type) { - case *jwt.SigningMethodECDSA: - if _, ok := t.key.(*ecdsa.PublicKey); ok { - t.verifyOnly = true - } - case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: - if _, ok := t.key.(*rsa.PublicKey); ok { - t.verifyOnly = true - } - } - - return t, nil -} diff --git a/server/auth/jwt_test.go b/server/auth/jwt_test.go deleted file mode 100644 index a3983cc5a56..00000000000 --- a/server/auth/jwt_test.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "context" - "fmt" - "testing" - - "go.uber.org/zap" -) - -const ( - jwtRSAPubKey = "../../tests/fixtures/server.crt" - jwtRSAPrivKey = "../../tests/fixtures/server.key.insecure" - - jwtECPubKey = "../../tests/fixtures/server-ecdsa.crt" - jwtECPrivKey = "../../tests/fixtures/server-ecdsa.key.insecure" -) - -func TestJWTInfo(t *testing.T) { - optsMap := map[string]map[string]string{ - "RSA-priv": { - "priv-key": jwtRSAPrivKey, - "sign-method": "RS256", - "ttl": "1h", - }, - "RSA": { - "pub-key": jwtRSAPubKey, - "priv-key": jwtRSAPrivKey, - "sign-method": "RS256", - }, - "RSAPSS-priv": { - "priv-key": jwtRSAPrivKey, - "sign-method": "PS256", - }, - "RSAPSS": { - "pub-key": jwtRSAPubKey, - "priv-key": jwtRSAPrivKey, - "sign-method": "PS256", - }, - "ECDSA-priv": { - "priv-key": jwtECPrivKey, - "sign-method": "ES256", - }, - "ECDSA": { - "pub-key": jwtECPubKey, - "priv-key": jwtECPrivKey, - "sign-method": "ES256", - }, - "HMAC": { - "priv-key": jwtECPrivKey, // any file, raw bytes used as shared secret - "sign-method": "HS256", - }, - } - - for k, opts := range optsMap { - t.Run(k, func(tt *testing.T) { - testJWTInfo(tt, opts) - }) - } -} - -func testJWTInfo(t *testing.T, opts map[string]string) { - lg := zap.NewNop() - jwt, err := newTokenProviderJWT(lg, opts) - if err != nil { - t.Fatal(err) - } - - ctx := context.TODO() - - token, aerr := jwt.assign(ctx, "abc", 123) - if aerr != nil { - t.Fatalf("%#v", aerr) - } - ai, ok := jwt.info(ctx, token, 123) - if !ok { - t.Fatalf("failed to authenticate with token %s", token) - } 
- if ai.Revision != 123 { - t.Fatalf("expected revision 123, got %d", ai.Revision) - } - ai, ok = jwt.info(ctx, "aaa", 120) - if ok || ai != nil { - t.Fatalf("expected aaa to fail to authenticate, got %+v", ai) - } - - // test verify-only provider - if opts["pub-key"] != "" && opts["priv-key"] != "" { - t.Run("verify-only", func(t *testing.T) { - newOpts := make(map[string]string, len(opts)) - for k, v := range opts { - newOpts[k] = v - } - delete(newOpts, "priv-key") - verify, err := newTokenProviderJWT(lg, newOpts) - if err != nil { - t.Fatal(err) - } - - ai, ok := verify.info(ctx, token, 123) - if !ok { - t.Fatalf("failed to authenticate with token %s", token) - } - if ai.Revision != 123 { - t.Fatalf("expected revision 123, got %d", ai.Revision) - } - ai, ok = verify.info(ctx, "aaa", 120) - if ok || ai != nil { - t.Fatalf("expected aaa to fail to authenticate, got %+v", ai) - } - - _, aerr := verify.assign(ctx, "abc", 123) - if aerr != ErrVerifyOnly { - t.Fatalf("unexpected error when attempting to sign with public key: %v", aerr) - } - - }) - } -} - -func TestJWTBad(t *testing.T) { - - var badCases = map[string]map[string]string{ - "no options": {}, - "invalid method": { - "sign-method": "invalid", - }, - "rsa no key": { - "sign-method": "RS256", - }, - "invalid ttl": { - "sign-method": "RS256", - "ttl": "forever", - }, - "rsa invalid public key": { - "sign-method": "RS256", - "pub-key": jwtRSAPrivKey, - "priv-key": jwtRSAPrivKey, - }, - "rsa invalid private key": { - "sign-method": "RS256", - "pub-key": jwtRSAPubKey, - "priv-key": jwtRSAPubKey, - }, - "hmac no key": { - "sign-method": "HS256", - }, - "hmac pub key": { - "sign-method": "HS256", - "pub-key": jwtRSAPubKey, - }, - "missing public key file": { - "sign-method": "HS256", - "pub-key": "missing-file", - }, - "missing private key file": { - "sign-method": "HS256", - "priv-key": "missing-file", - }, - "ecdsa no key": { - "sign-method": "ES256", - }, - "ecdsa invalid public key": { - "sign-method": 
"ES256", - "pub-key": jwtECPrivKey, - "priv-key": jwtECPrivKey, - }, - "ecdsa invalid private key": { - "sign-method": "ES256", - "pub-key": jwtECPubKey, - "priv-key": jwtECPubKey, - }, - } - - lg := zap.NewNop() - - for k, v := range badCases { - t.Run(k, func(t *testing.T) { - _, err := newTokenProviderJWT(lg, v) - if err == nil { - t.Errorf("expected error for options %v", v) - } - }) - } -} - -// testJWTOpts is useful for passing to NewTokenProvider which requires a string. -func testJWTOpts() string { - return fmt.Sprintf("%s,pub-key=%s,priv-key=%s,sign-method=RS256", tokenTypeJWT, jwtRSAPubKey, jwtRSAPrivKey) -} diff --git a/server/auth/main_test.go b/server/auth/main_test.go deleted file mode 100644 index 30ff6fb9a36..00000000000 --- a/server/auth/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package auth - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/server/auth/metrics.go b/server/auth/metrics.go deleted file mode 100644 index f7ce2792022..00000000000 --- a/server/auth/metrics.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - currentAuthRevision = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "auth", - Name: "revision", - Help: "The current revision of auth store.", - }, - func() float64 { - reportCurrentAuthRevMu.RLock() - defer reportCurrentAuthRevMu.RUnlock() - return reportCurrentAuthRev() - }, - ) - // overridden by auth store initialization - reportCurrentAuthRevMu sync.RWMutex - reportCurrentAuthRev = func() float64 { return 0 } -) - -func init() { - prometheus.MustRegister(currentAuthRevision) -} diff --git a/server/auth/options.go b/server/auth/options.go deleted file mode 100644 index 7bc635b0f07..00000000000 --- a/server/auth/options.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "crypto/ecdsa" - "crypto/rsa" - "fmt" - "os" - "time" - - jwt "github.com/golang-jwt/jwt/v4" -) - -const ( - optSignMethod = "sign-method" - optPublicKey = "pub-key" - optPrivateKey = "priv-key" - optTTL = "ttl" -) - -var knownOptions = map[string]bool{ - optSignMethod: true, - optPublicKey: true, - optPrivateKey: true, - optTTL: true, -} - -var ( - // DefaultTTL will be used when a 'ttl' is not specified - DefaultTTL = 5 * time.Minute -) - -type jwtOptions struct { - SignMethod jwt.SigningMethod - PublicKey []byte - PrivateKey []byte - TTL time.Duration -} - -// ParseWithDefaults will load options from the specified map or set defaults where appropriate -func (opts *jwtOptions) ParseWithDefaults(optMap map[string]string) error { - if opts.TTL == 0 && optMap[optTTL] == "" { - opts.TTL = DefaultTTL - } - - return opts.Parse(optMap) -} - -// Parse will load options from the specified map -func (opts *jwtOptions) Parse(optMap map[string]string) error { - var err error - if ttl := optMap[optTTL]; ttl != "" { - opts.TTL, err = time.ParseDuration(ttl) - if err != nil { - return err - } - } - - if file := optMap[optPublicKey]; file != "" { - opts.PublicKey, err = os.ReadFile(file) - if err != nil { - return err - } - } - - if file := optMap[optPrivateKey]; file != "" { - opts.PrivateKey, err = os.ReadFile(file) - if err != nil { - return err - } - } - - // signing method is a required field - method := optMap[optSignMethod] - opts.SignMethod = jwt.GetSigningMethod(method) - if opts.SignMethod == nil { - return ErrInvalidAuthMethod - } - - return nil -} - -// Key will parse and return the appropriately typed key for the selected signature method -func (opts *jwtOptions) Key() (interface{}, error) { - switch opts.SignMethod.(type) { - case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: - return opts.rsaKey() - case *jwt.SigningMethodECDSA: - return opts.ecKey() - case *jwt.SigningMethodHMAC: - return opts.hmacKey() - default: - return nil, 
fmt.Errorf("unsupported signing method: %T", opts.SignMethod) - } -} - -func (opts *jwtOptions) hmacKey() (interface{}, error) { - if len(opts.PrivateKey) == 0 { - return nil, ErrMissingKey - } - return opts.PrivateKey, nil -} - -func (opts *jwtOptions) rsaKey() (interface{}, error) { - var ( - priv *rsa.PrivateKey - pub *rsa.PublicKey - err error - ) - - if len(opts.PrivateKey) > 0 { - priv, err = jwt.ParseRSAPrivateKeyFromPEM(opts.PrivateKey) - if err != nil { - return nil, err - } - } - - if len(opts.PublicKey) > 0 { - pub, err = jwt.ParseRSAPublicKeyFromPEM(opts.PublicKey) - if err != nil { - return nil, err - } - } - - if priv == nil { - if pub == nil { - // Neither key given - return nil, ErrMissingKey - } - // Public key only, can verify tokens - return pub, nil - } - - // both keys provided, make sure they match - if pub != nil && !pub.Equal(priv.Public()) { - return nil, ErrKeyMismatch - } - - return priv, nil -} - -func (opts *jwtOptions) ecKey() (interface{}, error) { - var ( - priv *ecdsa.PrivateKey - pub *ecdsa.PublicKey - err error - ) - - if len(opts.PrivateKey) > 0 { - priv, err = jwt.ParseECPrivateKeyFromPEM(opts.PrivateKey) - if err != nil { - return nil, err - } - } - - if len(opts.PublicKey) > 0 { - pub, err = jwt.ParseECPublicKeyFromPEM(opts.PublicKey) - if err != nil { - return nil, err - } - } - - if priv == nil { - if pub == nil { - // Neither key given - return nil, ErrMissingKey - } - // Public key only, can verify tokens - return pub, nil - } - - // both keys provided, make sure they match - if pub != nil && !pub.Equal(priv.Public()) { - return nil, ErrKeyMismatch - } - - return priv, nil -} diff --git a/server/auth/range_perm_cache.go b/server/auth/range_perm_cache.go deleted file mode 100644 index 0d639c413a6..00000000000 --- a/server/auth/range_perm_cache.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/pkg/v3/adt" -) - -func getMergedPerms(tx AuthReadTx, userName string) *unifiedRangePermissions { - user := tx.UnsafeGetUser(userName) - if user == nil { - return nil - } - - readPerms := adt.NewIntervalTree() - writePerms := adt.NewIntervalTree() - - for _, roleName := range user.Roles { - role := tx.UnsafeGetRole(roleName) - if role == nil { - continue - } - - for _, perm := range role.KeyPermission { - var ivl adt.Interval - var rangeEnd []byte - - if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { - rangeEnd = perm.RangeEnd - } - - if len(perm.RangeEnd) != 0 { - ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) - } else { - ivl = adt.NewBytesAffinePoint(perm.Key) - } - - switch perm.PermType { - case authpb.READWRITE: - readPerms.Insert(ivl, struct{}{}) - writePerms.Insert(ivl, struct{}{}) - - case authpb.READ: - readPerms.Insert(ivl, struct{}{}) - - case authpb.WRITE: - writePerms.Insert(ivl, struct{}{}) - } - } - } - - return &unifiedRangePermissions{ - readPerms: readPerms, - writePerms: writePerms, - } -} - -func checkKeyInterval( - lg *zap.Logger, - cachedPerms *unifiedRangePermissions, - key, rangeEnd []byte, - permtyp authpb.Permission_Type) bool { - if len(rangeEnd) == 1 && rangeEnd[0] == 0 { - rangeEnd = nil - } - - ivl := adt.NewBytesAffineInterval(key, rangeEnd) - switch permtyp { - case authpb.READ: - return cachedPerms.readPerms.Contains(ivl) - case authpb.WRITE: - return 
cachedPerms.writePerms.Contains(ivl) - default: - lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) - } - return false -} - -func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { - pt := adt.NewBytesAffinePoint(key) - switch permtyp { - case authpb.READ: - return cachedPerms.readPerms.Intersects(pt) - case authpb.WRITE: - return cachedPerms.writePerms.Intersects(pt) - default: - lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) - } - return false -} - -func (as *authStore) isRangeOpPermitted(userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - // assumption: tx is Lock()ed - as.rangePermCacheMu.RLock() - defer as.rangePermCacheMu.RUnlock() - - rangePerm, ok := as.rangePermCache[userName] - if !ok { - as.lg.Error( - "user doesn't exist", - zap.String("user-name", userName), - ) - return false - } - - if len(rangeEnd) == 0 { - return checkKeyPoint(as.lg, rangePerm, key, permtyp) - } - - return checkKeyInterval(as.lg, rangePerm, key, rangeEnd, permtyp) -} - -func (as *authStore) refreshRangePermCache(tx AuthReadTx) { - // Note that every authentication configuration update calls this method and it invalidates the entire - // rangePermCache and reconstruct it based on information of users and roles stored in the backend. - // This can be a costly operation. 
- as.rangePermCacheMu.Lock() - defer as.rangePermCacheMu.Unlock() - - as.lg.Debug("Refreshing rangePermCache") - - as.rangePermCache = make(map[string]*unifiedRangePermissions) - - users := tx.UnsafeGetAllUsers() - for _, user := range users { - userName := string(user.Name) - perms := getMergedPerms(tx, userName) - if perms == nil { - as.lg.Error( - "failed to create a merged permission", - zap.String("user-name", userName), - ) - continue - } - as.rangePermCache[userName] = perms - } -} - -type unifiedRangePermissions struct { - readPerms adt.IntervalTree - writePerms adt.IntervalTree -} diff --git a/server/auth/range_perm_cache_test.go b/server/auth/range_perm_cache_test.go deleted file mode 100644 index ef26f55843f..00000000000 --- a/server/auth/range_perm_cache_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/pkg/v3/adt" -) - -func TestRangePermission(t *testing.T) { - tests := []struct { - perms []adt.Interval - begin []byte - end []byte - want bool - }{ - { - []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("c")), adt.NewBytesAffineInterval([]byte("x"), []byte("z"))}, - []byte("a"), []byte("z"), - false, - }, - { - []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("f")), adt.NewBytesAffineInterval([]byte("c"), []byte("d")), adt.NewBytesAffineInterval([]byte("f"), []byte("z"))}, - []byte("a"), []byte("z"), - true, - }, - { - []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte("f"))}, - []byte("a"), []byte("f"), - true, - }, - } - - for i, tt := range tests { - readPerms := adt.NewIntervalTree() - for _, p := range tt.perms { - readPerms.Insert(p, struct{}{}) - } - - result := checkKeyInterval(zaptest.NewLogger(t), &unifiedRangePermissions{readPerms: readPerms}, tt.begin, tt.end, authpb.READ) - if result != tt.want { - t.Errorf("#%d: result=%t, want=%t", i, result, tt.want) - } - } -} - -func TestKeyPermission(t *testing.T) { - tests := []struct { - perms []adt.Interval - key []byte - want bool - }{ - { - []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("c")), adt.NewBytesAffineInterval([]byte("x"), []byte("z"))}, - []byte("f"), - false, - }, - { - []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("f")), adt.NewBytesAffineInterval([]byte("c"), []byte("d")), adt.NewBytesAffineInterval([]byte("f"), []byte("z"))}, - []byte("b"), - true, - }, - { - []adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte("f"))}, - []byte("d"), - true, - }, - { - 
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte("f"))}, - []byte("f"), - false, - }, - } - - for i, tt := range tests { - readPerms := adt.NewIntervalTree() - for _, p := range tt.perms { - readPerms.Insert(p, struct{}{}) - } - - result := checkKeyPoint(zaptest.NewLogger(t), &unifiedRangePermissions{readPerms: readPerms}, tt.key, authpb.READ) - if result != tt.want { - t.Errorf("#%d: result=%t, want=%t", i, result, tt.want) - } - } -} diff --git a/server/auth/simple_token_test.go b/server/auth/simple_token_test.go deleted file mode 100644 index 13db76efe4a..00000000000 --- a/server/auth/simple_token_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "context" - "testing" - - "go.uber.org/zap/zaptest" -) - -// TestSimpleTokenDisabled ensures that TokenProviderSimple behaves correctly when -// disabled. 
-func TestSimpleTokenDisabled(t *testing.T) { - initialState := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault) - - explicitlyDisabled := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault) - explicitlyDisabled.enable() - explicitlyDisabled.disable() - - for _, tp := range []*tokenSimple{initialState, explicitlyDisabled} { - ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - token, err := tp.assign(ctx, "user1", 0) - if err != nil { - t.Fatal(err) - } - authInfo, ok := tp.info(ctx, token, 0) - if ok { - t.Errorf("expected (true, \"user1\") got (%t, %s)", ok, authInfo.Username) - } - - tp.invalidateUser("user1") // should be no-op - } -} - -// TestSimpleTokenAssign ensures that TokenProviderSimple can correctly assign a -// token, look it up with info, and invalidate it by user. -func TestSimpleTokenAssign(t *testing.T) { - tp := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault) - tp.enable() - defer tp.disable() - ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - token, err := tp.assign(ctx, "user1", 0) - if err != nil { - t.Fatal(err) - } - authInfo, ok := tp.info(ctx, token, 0) - if !ok || authInfo.Username != "user1" { - t.Errorf("expected (true, \"token2\") got (%t, %s)", ok, authInfo.Username) - } - - tp.invalidateUser("user1") - - _, ok = tp.info(context.TODO(), token, 0) - if ok { - t.Errorf("expected ok == false after user is invalidated") - } -} diff --git a/server/auth/store.go b/server/auth/store.go deleted file mode 100644 index 40262c76d62..00000000000 --- a/server/auth/store.go +++ /dev/null @@ -1,1214 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "sort" - "strings" - "sync" - "sync/atomic" - "time" - - "go.etcd.io/etcd/api/v3/authpb" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -var ( - authEnabled = []byte{1} - authDisabled = []byte{0} - - rootPerm = authpb.Permission{PermType: authpb.READWRITE, Key: []byte{}, RangeEnd: []byte{0}} - - ErrRootUserNotExist = errors.New("auth: root user does not exist") - ErrRootRoleNotExist = errors.New("auth: root user does not have root role") - ErrUserAlreadyExist = errors.New("auth: user already exists") - ErrUserEmpty = errors.New("auth: user name is empty") - ErrUserNotFound = errors.New("auth: user not found") - ErrRoleAlreadyExist = errors.New("auth: role already exists") - ErrRoleNotFound = errors.New("auth: role not found") - ErrRoleEmpty = errors.New("auth: role name is empty") - ErrPermissionNotGiven = errors.New("auth: permission not given") - ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") - ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user") - ErrPermissionDenied = errors.New("auth: permission denied") - ErrRoleNotGranted = errors.New("auth: role is not granted to the user") - ErrPermissionNotGranted = errors.New("auth: permission is not 
granted to the role") - ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") - ErrAuthOldRevision = errors.New("auth: revision in header is old") - ErrInvalidAuthToken = errors.New("auth: invalid auth token") - ErrInvalidAuthOpts = errors.New("auth: invalid auth options") - ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") - ErrInvalidAuthMethod = errors.New("auth: invalid auth signature method") - ErrMissingKey = errors.New("auth: missing key data") - ErrKeyMismatch = errors.New("auth: public and private keys don't match") - ErrVerifyOnly = errors.New("auth: token signing attempted with verify-only key") -) - -const ( - rootUser = "root" - rootRole = "root" - - tokenTypeSimple = "simple" - tokenTypeJWT = "jwt" -) - -type AuthInfo struct { - Username string - Revision uint64 -} - -// AuthenticateParamIndex is used for a key of context in the parameters of Authenticate() -type AuthenticateParamIndex struct{} - -// AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate() -type AuthenticateParamSimpleTokenPrefix struct{} - -// AuthStore defines auth storage interface. -type AuthStore interface { - // AuthEnable turns on the authentication feature - AuthEnable() error - - // AuthDisable turns off the authentication feature - AuthDisable() - - // IsAuthEnabled returns true if the authentication feature is enabled. 
- IsAuthEnabled() bool - - // Authenticate does authentication based on given user name and password - Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) - - // Recover recovers the state of auth store from the given backend - Recover(be AuthBackend) - - // UserAdd adds a new user - UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - - // UserDelete deletes a user - UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user - UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to the user - UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - - // UserGet gets the detailed information of a users - UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - - // UserRevokeRole revokes a role of a user - UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role - RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role - RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - - // RoleGet gets the detailed information of a role - RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - - // RoleRevokePermission gets the detailed information of a role - RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - - // RoleDelete gets the detailed information of a role - RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - - // UserList gets a list of all users - UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - - // RoleList gets a list of all roles - RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - - // 
IsPutPermitted checks put permission of the user - IsPutPermitted(authInfo *AuthInfo, key []byte) error - - // IsRangePermitted checks range permission of the user - IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsDeleteRangePermitted checks delete-range permission of the user - IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsAdminPermitted checks admin permission of the user - IsAdminPermitted(authInfo *AuthInfo) error - - // GenTokenPrefix produces a random string in a case of simple token - // in a case of JWT, it produces an empty string - GenTokenPrefix() (string, error) - - // Revision gets current revision of authStore - Revision() uint64 - - // CheckPassword checks a given pair of username and password is correct - CheckPassword(username, password string) (uint64, error) - - // Close does cleanup of AuthStore - Close() error - - // AuthInfoFromCtx gets AuthInfo from gRPC's context - AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) - - // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context - AuthInfoFromTLS(ctx context.Context) *AuthInfo - - // WithRoot generates and installs a token that can be used as a root credential - WithRoot(ctx context.Context) context.Context - - // HasRole checks that user has role - HasRole(user, role string) bool - - // BcryptCost gets strength of hashing bcrypted auth password - BcryptCost() int -} - -type TokenProvider interface { - info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) - assign(ctx context.Context, username string, revision uint64) (string, error) - enable() - disable() - - invalidateUser(string) - genTokenPrefix() (string, error) -} - -type AuthBackend interface { - CreateAuthBuckets() - ForceCommit() - ReadTx() AuthReadTx - BatchTx() AuthBatchTx - - GetUser(string) *authpb.User - GetAllUsers() []*authpb.User - GetRole(string) *authpb.Role - GetAllRoles() []*authpb.Role -} - -type AuthBatchTx interface { - 
AuthReadTx - UnsafeSaveAuthEnabled(enabled bool) - UnsafeSaveAuthRevision(rev uint64) - UnsafePutUser(*authpb.User) - UnsafeDeleteUser(string) - UnsafePutRole(*authpb.Role) - UnsafeDeleteRole(string) -} - -type AuthReadTx interface { - UnsafeReadAuthEnabled() bool - UnsafeReadAuthRevision() uint64 - UnsafeGetUser(string) *authpb.User - UnsafeGetRole(string) *authpb.Role - UnsafeGetAllUsers() []*authpb.User - UnsafeGetAllRoles() []*authpb.Role - Lock() - Unlock() -} - -type authStore struct { - // atomic operations; need 64-bit align, or 32-bit tests will crash - revision uint64 - - lg *zap.Logger - be AuthBackend - enabled bool - enabledMu sync.RWMutex - - // rangePermCache needs to be protected by rangePermCacheMu - // rangePermCacheMu needs to be write locked only in initialization phase or configuration changes - // Hot paths like Range(), needs to acquire read lock for improving performance - // - // Note that BatchTx and ReadTx cannot be a mutex for rangePermCache because they are independent resources - // see also: https://github.com/etcd-io/etcd/pull/13920#discussion_r849114855 - rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - rangePermCacheMu sync.RWMutex - - tokenProvider TokenProvider - bcryptCost int // the algorithm cost / strength for hashing auth passwords -} - -func (as *authStore) AuthEnable() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if as.enabled { - as.lg.Info("authentication is already enabled; ignored auth enable request") - return nil - } - tx := as.be.BatchTx() - tx.Lock() - defer func() { - tx.Unlock() - as.be.ForceCommit() - }() - - u := tx.UnsafeGetUser(rootUser) - if u == nil { - return ErrRootUserNotExist - } - - if !hasRootRole(u) { - return ErrRootRoleNotExist - } - - tx.UnsafeSaveAuthEnabled(true) - as.enabled = true - as.tokenProvider.enable() - - as.refreshRangePermCache(tx) - - as.setRevision(tx.UnsafeReadAuthRevision()) - - as.lg.Info("enabled authentication") - 
return nil -} - -func (as *authStore) AuthDisable() { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return - } - b := as.be - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeSaveAuthEnabled(false) - as.commitRevision(tx) - tx.Unlock() - - b.ForceCommit() - - as.enabled = false - as.tokenProvider.disable() - - as.lg.Info("disabled authentication") -} - -func (as *authStore) Close() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return nil - } - as.tokenProvider.disable() - return nil -} - -func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { - if !as.IsAuthEnabled() { - return nil, ErrAuthNotEnabled - } - user := as.be.GetUser(username) - if user == nil { - return nil, ErrAuthFailed - } - - if user.Options != nil && user.Options.NoPassword { - return nil, ErrAuthFailed - } - - // Password checking is already performed in the API layer, so we don't need to check for now. - // Staleness of password can be detected with OCC in the API layer, too. - - token, err := as.tokenProvider.assign(ctx, username, as.Revision()) - if err != nil { - return nil, err - } - - as.lg.Debug( - "authenticated a user", - zap.String("user-name", username), - zap.String("token", token), - ) - return &pb.AuthenticateResponse{Token: token}, nil -} - -func (as *authStore) CheckPassword(username, password string) (uint64, error) { - if !as.IsAuthEnabled() { - return 0, ErrAuthNotEnabled - } - - var user *authpb.User - // CompareHashAndPassword is very expensive, so we use closures - // to avoid putting it in the critical section of the tx lock. 
- revision, err := func() (uint64, error) { - tx := as.be.ReadTx() - tx.Lock() - defer tx.Unlock() - - user = tx.UnsafeGetUser(username) - if user == nil { - return 0, ErrAuthFailed - } - - if user.Options != nil && user.Options.NoPassword { - return 0, ErrNoPasswordUser - } - - return tx.UnsafeReadAuthRevision(), nil - }() - if err != nil { - return 0, err - } - - if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { - as.lg.Info("invalid password", zap.String("user-name", username)) - return 0, ErrAuthFailed - } - return revision, nil -} - -func (as *authStore) Recover(be AuthBackend) { - as.be = be - tx := be.ReadTx() - tx.Lock() - - enabled := tx.UnsafeReadAuthEnabled() - as.setRevision(tx.UnsafeReadAuthRevision()) - as.refreshRangePermCache(tx) - - tx.Unlock() - - as.enabledMu.Lock() - as.enabled = enabled - if enabled { - as.tokenProvider.enable() - } - as.enabledMu.Unlock() -} - -func (as *authStore) selectPassword(password string, hashedPassword string) ([]byte, error) { - if password != "" && hashedPassword == "" { - // This path is for processing log entries created by etcd whose version is older than 3.5 - return bcrypt.GenerateFromPassword([]byte(password), as.bcryptCost) - } - return base64.StdEncoding.DecodeString(hashedPassword) -} - -func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - if len(r.Name) == 0 { - return nil, ErrUserEmpty - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := tx.UnsafeGetUser(r.Name) - if user != nil { - return nil, ErrUserAlreadyExist - } - - options := r.Options - if options == nil { - options = &authpb.UserAddOptions{ - NoPassword: false, - } - } - - var password []byte - var err error - - if !options.NoPassword { - password, err = as.selectPassword(r.Password, r.HashedPassword) - if err != nil { - return nil, ErrNoPasswordUser - } - } - - newUser := &authpb.User{ - Name: []byte(r.Name), - Password: password, - Options: options, - } - 
tx.UnsafePutUser(newUser) - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.lg.Info("added a user", zap.String("user-name", r.Name)) - return &pb.AuthUserAddResponse{}, nil -} - -func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - if as.enabled && r.Name == rootUser { - as.lg.Error("cannot delete 'root' user", zap.String("user-name", r.Name)) - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := tx.UnsafeGetUser(r.Name) - if user == nil { - return nil, ErrUserNotFound - } - tx.UnsafeDeleteUser(r.Name) - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.tokenProvider.invalidateUser(r.Name) - - as.lg.Info( - "deleted a user", - zap.String("user-name", r.Name), - zap.Strings("user-roles", user.Roles), - ) - return &pb.AuthUserDeleteResponse{}, nil -} - -func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := tx.UnsafeGetUser(r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - var password []byte - var err error - - if !user.Options.NoPassword { - password, err = as.selectPassword(r.Password, r.HashedPassword) - if err != nil { - return nil, ErrNoPasswordUser - } - } - - updatedUser := &authpb.User{ - Name: []byte(r.Name), - Roles: user.Roles, - Password: password, - Options: user.Options, - } - tx.UnsafePutUser(updatedUser) - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.tokenProvider.invalidateUser(r.Name) - - as.lg.Info( - "changed a password of a user", - zap.String("user-name", r.Name), - zap.Strings("user-roles", user.Roles), - ) - return &pb.AuthUserChangePasswordResponse{}, nil -} - -func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := 
tx.UnsafeGetUser(r.User) - if user == nil { - return nil, ErrUserNotFound - } - - if r.Role != rootRole { - role := tx.UnsafeGetRole(r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - } - - idx := sort.SearchStrings(user.Roles, r.Role) - if idx < len(user.Roles) && user.Roles[idx] == r.Role { - as.lg.Warn( - "ignored grant role request to a user", - zap.String("user-name", r.User), - zap.Strings("user-roles", user.Roles), - zap.String("duplicate-role-name", r.Role), - ) - return &pb.AuthUserGrantRoleResponse{}, nil - } - - user.Roles = append(user.Roles, r.Role) - sort.Strings(user.Roles) - - tx.UnsafePutUser(user) - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.lg.Info( - "granted a role to a user", - zap.String("user-name", r.User), - zap.Strings("user-roles", user.Roles), - zap.String("added-role-name", r.Role), - ) - return &pb.AuthUserGrantRoleResponse{}, nil -} - -func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - user := as.be.GetUser(r.Name) - - if user == nil { - return nil, ErrUserNotFound - } - - var resp pb.AuthUserGetResponse - resp.Roles = append(resp.Roles, user.Roles...) 
- return &resp, nil -} - -func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - users := as.be.GetAllUsers() - - resp := &pb.AuthUserListResponse{Users: make([]string, len(users))} - for i := range users { - resp.Users[i] = string(users[i].Name) - } - return resp, nil -} - -func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - if as.enabled && r.Name == rootUser && r.Role == rootRole { - as.lg.Error( - "'root' user cannot revoke 'root' role", - zap.String("user-name", r.Name), - zap.String("role-name", r.Role), - ) - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := tx.UnsafeGetUser(r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - Options: user.Options, - } - - for _, role := range user.Roles { - if role != r.Role { - updatedUser.Roles = append(updatedUser.Roles, role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - return nil, ErrRoleNotGranted - } - - tx.UnsafePutUser(updatedUser) - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.lg.Info( - "revoked a role from a user", - zap.String("user-name", r.Name), - zap.Strings("old-user-roles", user.Roles), - zap.Strings("new-user-roles", updatedUser.Roles), - zap.String("revoked-role-name", r.Role), - ) - return &pb.AuthUserRevokeRoleResponse{}, nil -} - -func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - var resp pb.AuthRoleGetResponse - - role := as.be.GetRole(r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - if rootRole == string(role.Name) { - resp.Perm = append(resp.Perm, &rootPerm) - } else { - resp.Perm = append(resp.Perm, role.KeyPermission...) 
- } - return &resp, nil -} - -func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - roles := as.be.GetAllRoles() - - resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))} - for i := range roles { - resp.Roles[i] = string(roles[i].Name) - } - return resp, nil -} - -func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := tx.UnsafeGetRole(r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - updatedRole := &authpb.Role{ - Name: role.Name, - } - - for _, perm := range role.KeyPermission { - if !bytes.Equal(perm.Key, r.Key) || !bytes.Equal(perm.RangeEnd, r.RangeEnd) { - updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) - } - } - - if len(role.KeyPermission) == len(updatedRole.KeyPermission) { - return nil, ErrPermissionNotGranted - } - - tx.UnsafePutRole(updatedRole) - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.lg.Info( - "revoked a permission on range", - zap.String("role-name", r.Role), - zap.String("key", string(r.Key)), - zap.String("range-end", string(r.RangeEnd)), - ) - return &pb.AuthRoleRevokePermissionResponse{}, nil -} - -func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - if as.enabled && r.Role == rootRole { - as.lg.Error("cannot delete 'root' role", zap.String("role-name", r.Role)) - return nil, ErrInvalidAuthMgmt - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := tx.UnsafeGetRole(r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - tx.UnsafeDeleteRole(r.Role) - - users := tx.UnsafeGetAllUsers() - for _, user := range users { - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - Options: user.Options, - } - - for _, role := range user.Roles { - if role != r.Role { - updatedUser.Roles = append(updatedUser.Roles, 
role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - continue - } - - tx.UnsafePutUser(updatedUser) - - } - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.lg.Info("deleted a role", zap.String("role-name", r.Role)) - return &pb.AuthRoleDeleteResponse{}, nil -} - -func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - if len(r.Name) == 0 { - return nil, ErrRoleEmpty - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := tx.UnsafeGetRole(r.Name) - if role != nil { - return nil, ErrRoleAlreadyExist - } - - newRole := &authpb.Role{ - Name: []byte(r.Name), - } - - tx.UnsafePutRole(newRole) - - as.commitRevision(tx) - - as.lg.Info("created a role", zap.String("role-name", r.Name)) - return &pb.AuthRoleAddResponse{}, nil -} - -func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { - return as.tokenProvider.info(ctx, token, as.Revision()) -} - -type permSlice []*authpb.Permission - -func (perms permSlice) Len() int { - return len(perms) -} - -func (perms permSlice) Less(i, j int) bool { - return bytes.Compare(perms[i].Key, perms[j].Key) < 0 -} - -func (perms permSlice) Swap(i, j int) { - perms[i], perms[j] = perms[j], perms[i] -} - -func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - if r.Perm == nil { - return nil, ErrPermissionNotGiven - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := tx.UnsafeGetRole(r.Name) - if role == nil { - return nil, ErrRoleNotFound - } - - idx := sort.Search(len(role.KeyPermission), func(i int) bool { - return bytes.Compare(role.KeyPermission[i].Key, r.Perm.Key) >= 0 - }) - - if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { - // update existing permission - role.KeyPermission[idx].PermType = r.Perm.PermType - } else { - 
// append new permission to the role - newPerm := &authpb.Permission{ - Key: r.Perm.Key, - RangeEnd: r.Perm.RangeEnd, - PermType: r.Perm.PermType, - } - - role.KeyPermission = append(role.KeyPermission, newPerm) - sort.Sort(permSlice(role.KeyPermission)) - } - - tx.UnsafePutRole(role) - - as.commitRevision(tx) - as.refreshRangePermCache(tx) - - as.lg.Info( - "granted/updated a permission to a user", - zap.String("user-name", r.Name), - zap.String("permission-name", authpb.Permission_Type_name[int32(r.Perm.PermType)]), - zap.ByteString("key", r.Perm.Key), - zap.ByteString("range-end", r.Perm.RangeEnd), - ) - return &pb.AuthRoleGrantPermissionResponse{}, nil -} - -func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { - // TODO(mitake): this function would be costly so we need a caching mechanism - if !as.IsAuthEnabled() { - return nil - } - - // only gets rev == 0 when passed AuthInfo{}; no user given - if revision == 0 { - return ErrUserEmpty - } - rev := as.Revision() - if revision < rev { - as.lg.Warn("request auth revision is less than current node auth revision", - zap.Uint64("current node auth revision", rev), - zap.Uint64("request auth revision", revision), - zap.ByteString("request key", key), - zap.Error(ErrAuthOldRevision)) - return ErrAuthOldRevision - } - - tx := as.be.ReadTx() - tx.Lock() - defer tx.Unlock() - - user := tx.UnsafeGetUser(userName) - if user == nil { - as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName)) - return ErrPermissionDenied - } - - // root role should have permission on all ranges - if hasRootRole(user) { - return nil - } - - if as.isRangeOpPermitted(userName, key, rangeEnd, permTyp) { - return nil - } - - return ErrPermissionDenied -} - -func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE) -} - -func (as 
*authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) -} - -func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE) -} - -func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { - if !as.IsAuthEnabled() { - return nil - } - if authInfo == nil || authInfo.Username == "" { - return ErrUserEmpty - } - - tx := as.be.ReadTx() - tx.Lock() - defer tx.Unlock() - u := tx.UnsafeGetUser(authInfo.Username) - - if u == nil { - return ErrUserNotFound - } - - if !hasRootRole(u) { - return ErrPermissionDenied - } - - return nil -} - -func (as *authStore) IsAuthEnabled() bool { - as.enabledMu.RLock() - defer as.enabledMu.RUnlock() - return as.enabled -} - -// NewAuthStore creates a new AuthStore. -func NewAuthStore(lg *zap.Logger, be AuthBackend, tp TokenProvider, bcryptCost int) *authStore { - if lg == nil { - lg = zap.NewNop() - } - - if bcryptCost < bcrypt.MinCost || bcryptCost > bcrypt.MaxCost { - lg.Warn( - "use default bcrypt cost instead of the invalid given cost", - zap.Int("min-cost", bcrypt.MinCost), - zap.Int("max-cost", bcrypt.MaxCost), - zap.Int("default-cost", bcrypt.DefaultCost), - zap.Int("given-cost", bcryptCost), - ) - bcryptCost = bcrypt.DefaultCost - } - - be.CreateAuthBuckets() - tx := be.BatchTx() - // We should call LockOutsideApply here, but the txPostLockHoos isn't set - // to EtcdServer yet, so it's OK. 
- tx.Lock() - enabled := tx.UnsafeReadAuthEnabled() - as := &authStore{ - revision: tx.UnsafeReadAuthRevision(), - lg: lg, - be: be, - enabled: enabled, - rangePermCache: make(map[string]*unifiedRangePermissions), - tokenProvider: tp, - bcryptCost: bcryptCost, - } - - if enabled { - as.tokenProvider.enable() - } - - if as.Revision() == 0 { - as.commitRevision(tx) - } - - as.setupMetricsReporter() - - as.refreshRangePermCache(tx) - - tx.Unlock() - be.ForceCommit() - - return as -} - -func hasRootRole(u *authpb.User) bool { - // u.Roles is sorted in UserGrantRole(), so we can use binary search. - idx := sort.SearchStrings(u.Roles, rootRole) - return idx != len(u.Roles) && u.Roles[idx] == rootRole -} - -func (as *authStore) commitRevision(tx AuthBatchTx) { - atomic.AddUint64(&as.revision, 1) - tx.UnsafeSaveAuthRevision(as.Revision()) -} - -func (as *authStore) setRevision(rev uint64) { - atomic.StoreUint64(&as.revision, rev) -} - -func (as *authStore) Revision() uint64 { - return atomic.LoadUint64(&as.revision) -} - -func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) { - peer, ok := peer.FromContext(ctx) - if !ok || peer == nil || peer.AuthInfo == nil { - return nil - } - - tlsInfo := peer.AuthInfo.(credentials.TLSInfo) - for _, chains := range tlsInfo.State.VerifiedChains { - if len(chains) < 1 { - continue - } - ai = &AuthInfo{ - Username: chains[0].Subject.CommonName, - Revision: as.Revision(), - } - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil - } - - // gRPC-gateway proxy request to etcd server includes Grpcgateway-Accept - // header. The proxy uses etcd client server certificate. If the certificate - // has a CommonName we should never use this for authentication. 
- if gw := md["grpcgateway-accept"]; len(gw) > 0 { - as.lg.Warn( - "ignoring common name in gRPC-gateway proxy request", - zap.String("common-name", ai.Username), - zap.String("user-name", ai.Username), - zap.Uint64("revision", ai.Revision), - ) - return nil - } - as.lg.Debug( - "found command name", - zap.String("common-name", ai.Username), - zap.String("user-name", ai.Username), - zap.Uint64("revision", ai.Revision), - ) - break - } - return ai -} - -func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - if !as.IsAuthEnabled() { - return nil, nil - } - - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, nil - } - - //TODO(mitake|hexfusion) review unifying key names - ts, ok := md[rpctypes.TokenFieldNameGRPC] - if !ok { - ts, ok = md[rpctypes.TokenFieldNameSwagger] - } - if !ok { - return nil, nil - } - - token := ts[0] - authInfo, uok := as.authInfoFromToken(ctx, token) - if !uok { - as.lg.Warn("invalid auth token", zap.String("token", token)) - return nil, ErrInvalidAuthToken - } - - return authInfo, nil -} - -func (as *authStore) GenTokenPrefix() (string, error) { - return as.tokenProvider.genTokenPrefix() -} - -func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, error) { - opts := strings.Split(optstr, ",") - tokenType := opts[0] - - typeSpecificOpts := make(map[string]string) - for i := 1; i < len(opts); i++ { - pair := strings.Split(opts[i], "=") - - if len(pair) != 2 { - if lg != nil { - lg.Error("invalid token option", zap.String("option", optstr)) - } - return "", nil, ErrInvalidAuthOpts - } - - if _, ok := typeSpecificOpts[pair[0]]; ok { - if lg != nil { - lg.Error( - "invalid token option", - zap.String("option", optstr), - zap.String("duplicate-parameter", pair[0]), - ) - } - return "", nil, ErrInvalidAuthOpts - } - - typeSpecificOpts[pair[0]] = pair[1] - } - - return tokenType, typeSpecificOpts, nil - -} - -// NewTokenProvider creates a new token provider. 
-func NewTokenProvider( - lg *zap.Logger, - tokenOpts string, - indexWaiter func(uint64) <-chan struct{}, - TokenTTL time.Duration) (TokenProvider, error) { - tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - switch tokenType { - case tokenTypeSimple: - if lg != nil { - lg.Warn("simple token is not cryptographically signed") - } - return newTokenProviderSimple(lg, indexWaiter, TokenTTL), nil - - case tokenTypeJWT: - return newTokenProviderJWT(lg, typeSpecificOpts) - - case "": - return newTokenProviderNop() - - default: - if lg != nil { - lg.Warn( - "unknown token type", - zap.String("type", tokenType), - zap.Error(ErrInvalidAuthOpts), - ) - } - return nil, ErrInvalidAuthOpts - } -} - -func (as *authStore) WithRoot(ctx context.Context) context.Context { - if !as.IsAuthEnabled() { - return ctx - } - - var ctxForAssign context.Context - if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil { - ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0)) - prefix, err := ts.genTokenPrefix() - if err != nil { - as.lg.Error( - "failed to generate prefix of internally used token", - zap.Error(err), - ) - return ctx - } - ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix) - } else { - ctxForAssign = ctx - } - - token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision()) - if err != nil { - // this must not happen - as.lg.Error( - "failed to assign token for lease revoking", - zap.Error(err), - ) - return ctx - } - - mdMap := map[string]string{ - rpctypes.TokenFieldNameGRPC: token, - } - tokenMD := metadata.New(mdMap) - - // use "mdIncomingKey{}" since it's called from local etcdserver - return metadata.NewIncomingContext(ctx, tokenMD) -} - -func (as *authStore) HasRole(user, role string) bool { - tx := as.be.BatchTx() - tx.Lock() - u := tx.UnsafeGetUser(user) - tx.Unlock() - - if u == nil { - as.lg.Warn( - "'has-role' requested for 
non-existing user", - zap.String("user-name", user), - zap.String("role-name", role), - ) - return false - } - - for _, r := range u.Roles { - if role == r { - return true - } - } - return false -} - -func (as *authStore) BcryptCost() int { - return as.bcryptCost -} - -func (as *authStore) setupMetricsReporter() { - reportCurrentAuthRevMu.Lock() - reportCurrentAuthRev = func() float64 { - return float64(as.Revision()) - } - reportCurrentAuthRevMu.Unlock() -} diff --git a/server/auth/store_mock_test.go b/server/auth/store_mock_test.go deleted file mode 100644 index a8d7fcf81ac..00000000000 --- a/server/auth/store_mock_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import "go.etcd.io/etcd/api/v3/authpb" - -type backendMock struct { - users map[string]*authpb.User - roles map[string]*authpb.Role - enabled bool - revision uint64 -} - -func newBackendMock() *backendMock { - return &backendMock{ - users: make(map[string]*authpb.User), - roles: make(map[string]*authpb.Role), - } -} - -func (b *backendMock) CreateAuthBuckets() { -} - -func (b *backendMock) ForceCommit() { -} - -func (b *backendMock) ReadTx() AuthReadTx { - return &txMock{be: b} -} - -func (b *backendMock) BatchTx() AuthBatchTx { - return &txMock{be: b} -} - -func (b *backendMock) GetUser(s string) *authpb.User { - return b.users[s] -} - -func (b *backendMock) GetAllUsers() []*authpb.User { - return b.BatchTx().UnsafeGetAllUsers() -} - -func (b *backendMock) GetRole(s string) *authpb.Role { - return b.roles[s] -} - -func (b *backendMock) GetAllRoles() []*authpb.Role { - return b.BatchTx().UnsafeGetAllRoles() -} - -var _ AuthBackend = (*backendMock)(nil) - -type txMock struct { - be *backendMock -} - -var _ AuthBatchTx = (*txMock)(nil) - -func (t txMock) UnsafeReadAuthEnabled() bool { - return t.be.enabled -} - -func (t txMock) UnsafeReadAuthRevision() uint64 { - return t.be.revision -} - -func (t txMock) UnsafeGetUser(s string) *authpb.User { - return t.be.users[s] -} - -func (t txMock) UnsafeGetRole(s string) *authpb.Role { - return t.be.roles[s] -} - -func (t txMock) UnsafeGetAllUsers() []*authpb.User { - var users []*authpb.User - for _, u := range t.be.users { - users = append(users, u) - } - return users -} - -func (t txMock) UnsafeGetAllRoles() []*authpb.Role { - var roles []*authpb.Role - for _, r := range t.be.roles { - roles = append(roles, r) - } - return roles -} - -func (t txMock) Lock() { -} - -func (t txMock) Unlock() { -} - -func (t txMock) UnsafeSaveAuthEnabled(enabled bool) { - t.be.enabled = enabled -} - -func (t txMock) UnsafeSaveAuthRevision(rev uint64) { - t.be.revision = rev -} - -func (t txMock) UnsafePutUser(user 
*authpb.User) { - t.be.users[string(user.Name)] = user -} - -func (t txMock) UnsafeDeleteUser(s string) { - delete(t.be.users, s) -} - -func (t txMock) UnsafePutRole(role *authpb.Role) { - t.be.roles[string(role.Name)] = role -} - -func (t txMock) UnsafeDeleteRole(s string) { - delete(t.be.roles, s) -} diff --git a/server/auth/store_test.go b/server/auth/store_test.go deleted file mode 100644 index 0287c2e53d7..00000000000 --- a/server/auth/store_test.go +++ /dev/null @@ -1,1073 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "context" - "encoding/base64" - "fmt" - "strings" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "golang.org/x/crypto/bcrypt" - "google.golang.org/grpc/metadata" - - "go.etcd.io/etcd/api/v3/authpb" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/pkg/v3/adt" -) - -func dummyIndexWaiter(index uint64) <-chan struct{} { - ch := make(chan struct{}, 1) - go func() { - ch <- struct{}{} - }() - return ch -} - -// TestNewAuthStoreRevision ensures newly auth store -// keeps the old revision when there are no changes. 
-func TestNewAuthStoreRevision(t *testing.T) { - tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) - if err != nil { - t.Fatal(err) - } - be := newBackendMock() - as := NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost) - err = enableAuthAndCreateRoot(as) - if err != nil { - t.Fatal(err) - } - old := as.Revision() - as.Close() - - // no changes to commit - as = NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost) - defer as.Close() - new := as.Revision() - - if old != new { - t.Fatalf("expected revision %d, got %d", old, new) - } -} - -// TestNewAuthStoreBcryptCost ensures that NewAuthStore uses default when given bcrypt-cost is invalid -func TestNewAuthStoreBcryptCost(t *testing.T) { - tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) - if err != nil { - t.Fatal(err) - } - - invalidCosts := [2]int{bcrypt.MinCost - 1, bcrypt.MaxCost + 1} - for _, invalidCost := range invalidCosts { - as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, invalidCost) - defer as.Close() - if as.BcryptCost() != bcrypt.DefaultCost { - t.Fatalf("expected DefaultCost when bcryptcost is invalid") - } - } -} - -func encodePassword(s string) string { - hashedPassword, _ := bcrypt.GenerateFromPassword([]byte(s), bcrypt.MinCost) - return base64.StdEncoding.EncodeToString(hashedPassword) -} - -func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) { - tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) - if err != nil { - t.Fatal(err) - } - as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) - err = enableAuthAndCreateRoot(as) - if err != nil { - t.Fatal(err) - } - - // adds a new role - _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test"}) - if err != nil { - t.Fatal(err) - } - - ua := &pb.AuthUserAddRequest{Name: "foo", 
HashedPassword: encodePassword("bar"), Options: &authpb.UserAddOptions{NoPassword: false}} - _, err = as.UserAdd(ua) // add a non-existing user - if err != nil { - t.Fatal(err) - } - - tearDown := func(_ *testing.T) { - as.Close() - } - return as, tearDown -} - -func enableAuthAndCreateRoot(as *authStore) error { - _, err := as.UserAdd(&pb.AuthUserAddRequest{Name: "root", HashedPassword: encodePassword("root"), Options: &authpb.UserAddOptions{NoPassword: false}}) - if err != nil { - return err - } - - _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"}) - if err != nil { - return err - } - - _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"}) - if err != nil { - return err - } - - return as.AuthEnable() -} - -func TestUserAdd(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - const userName = "foo" - ua := &pb.AuthUserAddRequest{Name: userName, Options: &authpb.UserAddOptions{NoPassword: false}} - _, err := as.UserAdd(ua) // add an existing user - if err == nil { - t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err) - } - if err != ErrUserAlreadyExist { - t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err) - } - - ua = &pb.AuthUserAddRequest{Name: "", Options: &authpb.UserAddOptions{NoPassword: false}} - _, err = as.UserAdd(ua) // add a user with empty name - if err != ErrUserEmpty { - t.Fatal(err) - } - - if _, ok := as.rangePermCache[userName]; !ok { - t.Fatalf("user %s should be added but it doesn't exist in rangePermCache", userName) - - } -} - -func TestRecover(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer as.Close() - defer tearDown(t) - - as.enabled = false - as.Recover(as.be) - - if !as.IsAuthEnabled() { - t.Fatalf("expected auth enabled got disabled") - } -} - -func TestRecoverWithEmptyRangePermCache(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer as.Close() - defer tearDown(t) - - as.enabled = false - as.rangePermCache = 
map[string]*unifiedRangePermissions{} - as.Recover(as.be) - - if !as.IsAuthEnabled() { - t.Fatalf("expected auth enabled got disabled") - } - - if len(as.rangePermCache) != 2 { - t.Fatalf("rangePermCache should have permission information for 2 users (\"root\" and \"foo\"), but has %d information", len(as.rangePermCache)) - } - if _, ok := as.rangePermCache["root"]; !ok { - t.Fatal("user \"root\" should be created by setupAuthStore() but doesn't exist in rangePermCache") - } - if _, ok := as.rangePermCache["foo"]; !ok { - t.Fatal("user \"foo\" should be created by setupAuthStore() but doesn't exist in rangePermCache") - } -} - -func TestCheckPassword(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // auth a non-existing user - _, err := as.CheckPassword("foo-test", "bar") - if err == nil { - t.Fatalf("expected %v, got %v", ErrAuthFailed, err) - } - if err != ErrAuthFailed { - t.Fatalf("expected %v, got %v", ErrAuthFailed, err) - } - - // auth an existing user with correct password - _, err = as.CheckPassword("foo", "bar") - if err != nil { - t.Fatal(err) - } - - // auth an existing user but with wrong password - _, err = as.CheckPassword("foo", "") - if err == nil { - t.Fatalf("expected %v, got %v", ErrAuthFailed, err) - } - if err != ErrAuthFailed { - t.Fatalf("expected %v, got %v", ErrAuthFailed, err) - } -} - -func TestUserDelete(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // delete an existing user - const userName = "foo" - ud := &pb.AuthUserDeleteRequest{Name: userName} - _, err := as.UserDelete(ud) - if err != nil { - t.Fatal(err) - } - - // delete a non-existing user - _, err = as.UserDelete(ud) - if err == nil { - t.Fatalf("expected %v, got %v", ErrUserNotFound, err) - } - if err != ErrUserNotFound { - t.Fatalf("expected %v, got %v", ErrUserNotFound, err) - } - - if _, ok := as.rangePermCache[userName]; ok { - t.Fatalf("user %s should be deleted but it exists in rangePermCache", userName) - - } -} 
- -func TestUserDeleteAndPermCache(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // delete an existing user - const deletedUserName = "foo" - ud := &pb.AuthUserDeleteRequest{Name: deletedUserName} - _, err := as.UserDelete(ud) - if err != nil { - t.Fatal(err) - } - - // delete a non-existing user - _, err = as.UserDelete(ud) - if err != ErrUserNotFound { - t.Fatalf("expected %v, got %v", ErrUserNotFound, err) - } - - if _, ok := as.rangePermCache[deletedUserName]; ok { - t.Fatalf("user %s should be deleted but it exists in rangePermCache", deletedUserName) - } - - // add a new user - const newUser = "bar" - ua := &pb.AuthUserAddRequest{Name: newUser, HashedPassword: encodePassword("pwd1"), Options: &authpb.UserAddOptions{NoPassword: false}} - _, err = as.UserAdd(ua) - if err != nil { - t.Fatal(err) - } - - if _, ok := as.rangePermCache[newUser]; !ok { - t.Fatalf("user %s should exist but it doesn't exist in rangePermCache", deletedUserName) - - } -} - -func TestUserChangePassword(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - ctx1 := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - _, err := as.Authenticate(ctx1, "foo", "bar") - if err != nil { - t.Fatal(err) - } - - _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo", HashedPassword: encodePassword("baz")}) - if err != nil { - t.Fatal(err) - } - - ctx2 := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - _, err = as.Authenticate(ctx2, "foo", "baz") - if err != nil { - t.Fatal(err) - } - - // change a non-existing user - _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo-test", HashedPassword: encodePassword("bar")}) - if err == nil { - t.Fatalf("expected %v, got %v", ErrUserNotFound, err) - } - if err != ErrUserNotFound { - 
t.Fatalf("expected %v, got %v", ErrUserNotFound, err) - } -} - -func TestRoleAdd(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // adds a new role - _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - // add a role with empty name - _, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: ""}) - if err != ErrRoleEmpty { - t.Fatal(err) - } -} - -func TestUserGrant(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // grants a role to the user - _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"}) - if err != nil { - t.Fatal(err) - } - - // grants a role to a non-existing user - _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo-test", Role: "role-test"}) - if err == nil { - t.Errorf("expected %v, got %v", ErrUserNotFound, err) - } - if err != ErrUserNotFound { - t.Errorf("expected %v, got %v", ErrUserNotFound, err) - } -} - -func TestHasRole(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // grants a role to the user - _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"}) - if err != nil { - t.Fatal(err) - } - - // checks role reflects correctly - hr := as.HasRole("foo", "role-test") - if !hr { - t.Fatal("expected role granted, got false") - } - - // checks non existent role - hr = as.HasRole("foo", "non-existent-role") - if hr { - t.Fatal("expected role not found, got true") - } - - // checks non existent user - hr = as.HasRole("nouser", "role-test") - if hr { - t.Fatal("expected user not found got true") - } -} - -func TestIsOpPermitted(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // add new role - _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - perm := &authpb.Permission{ - PermType: authpb.WRITE, - Key: []byte("Keys"), - RangeEnd: []byte("RangeEnd"), - } - - _, 
err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ - Name: "role-test-1", - Perm: perm, - }) - if err != nil { - t.Fatal(err) - } - - // grants a role to the user - _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - // check permission reflected to user - - err = as.isOpPermitted("foo", as.Revision(), perm.Key, perm.RangeEnd, perm.PermType) - if err != nil { - t.Fatal(err) - } -} - -func TestGetUser(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"}) - if err != nil { - t.Fatal(err) - } - - u, err := as.UserGet(&pb.AuthUserGetRequest{Name: "foo"}) - if err != nil { - t.Fatal(err) - } - if u == nil { - t.Fatal("expect user not nil, got nil") - } - expected := []string{"role-test"} - - assert.Equal(t, expected, u.Roles) - - // check non existent user - _, err = as.UserGet(&pb.AuthUserGetRequest{Name: "nouser"}) - if err == nil { - t.Errorf("expected %v, got %v", ErrUserNotFound, err) - } -} - -func TestListUsers(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - ua := &pb.AuthUserAddRequest{Name: "user1", HashedPassword: encodePassword("pwd1"), Options: &authpb.UserAddOptions{NoPassword: false}} - _, err := as.UserAdd(ua) // add a non-existing user - if err != nil { - t.Fatal(err) - } - - ul, err := as.UserList(&pb.AuthUserListRequest{}) - if err != nil { - t.Fatal(err) - } - if !contains(ul.Users, "root") { - t.Errorf("expected %v in %v", "root", ul.Users) - } - if !contains(ul.Users, "user1") { - t.Errorf("expected %v in %v", "user1", ul.Users) - } -} - -func TestRoleGrantPermission(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - perm := &authpb.Permission{ - PermType: authpb.WRITE, - Key: []byte("Keys"), 
- RangeEnd: []byte("RangeEnd"), - } - _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ - Name: "role-test-1", - Perm: perm, - }) - - if err != nil { - t.Error(err) - } - - r, err := as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, perm, r.Perm[0]) - - // trying to grant nil permissions returns an error (and doesn't change the actual permissions!) - _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ - Name: "role-test-1", - }) - - if err != ErrPermissionNotGiven { - t.Error(err) - } - - r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, perm, r.Perm[0]) -} - -func TestRootRoleGrantPermission(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - perm := &authpb.Permission{ - PermType: authpb.WRITE, - Key: []byte("Keys"), - RangeEnd: []byte("RangeEnd"), - } - _, err := as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ - Name: "root", - Perm: perm, - }) - - if err != nil { - t.Error(err) - } - - r, err := as.RoleGet(&pb.AuthRoleGetRequest{Role: "root"}) - if err != nil { - t.Fatal(err) - } - - //whatever grant permission to root, it always return root permission. 
- expectPerm := &authpb.Permission{ - PermType: authpb.READWRITE, - Key: []byte{}, - RangeEnd: []byte{0}, - } - - assert.Equal(t, expectPerm, r.Perm[0]) -} - -func TestRoleRevokePermission(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - perm := &authpb.Permission{ - PermType: authpb.WRITE, - Key: []byte("Keys"), - RangeEnd: []byte("RangeEnd"), - } - _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ - Name: "role-test-1", - Perm: perm, - }) - - if err != nil { - t.Fatal(err) - } - - _, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - _, err = as.RoleRevokePermission(&pb.AuthRoleRevokePermissionRequest{ - Role: "role-test-1", - Key: []byte("Keys"), - RangeEnd: []byte("RangeEnd"), - }) - if err != nil { - t.Fatal(err) - } - - var r *pb.AuthRoleGetResponse - r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - if len(r.Perm) != 0 { - t.Errorf("expected %v, got %v", 0, len(r.Perm)) - } -} - -func TestUserRevokePermission(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - const userName = "foo" - _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test"}) - if err != nil { - t.Fatal(err) - } - - _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - perm := &authpb.Permission{ - PermType: authpb.WRITE, - Key: []byte("WriteKeyBegin"), - RangeEnd: []byte("WriteKeyEnd"), - } - _, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{ - Name: "role-test-1", - Perm: perm, - }) - if err != nil { - t.Fatal(err) - } - - if _, ok := as.rangePermCache[userName]; !ok { - 
t.Fatalf("User %s should have its entry in rangePermCache", userName) - } - unifiedPerm := as.rangePermCache[userName] - pt1 := adt.NewBytesAffinePoint([]byte("WriteKeyBegin")) - if !unifiedPerm.writePerms.Contains(pt1) { - t.Fatal("rangePermCache should contain WriteKeyBegin") - } - pt2 := adt.NewBytesAffinePoint([]byte("OutOfRange")) - if unifiedPerm.writePerms.Contains(pt2) { - t.Fatal("rangePermCache should not contain OutOfRange") - } - - u, err := as.UserGet(&pb.AuthUserGetRequest{Name: userName}) - if err != nil { - t.Fatal(err) - } - - expected := []string{"role-test", "role-test-1"} - - assert.Equal(t, expected, u.Roles) - - _, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: userName, Role: "role-test-1"}) - if err != nil { - t.Fatal(err) - } - - u, err = as.UserGet(&pb.AuthUserGetRequest{Name: userName}) - if err != nil { - t.Fatal(err) - } - - expected = []string{"role-test"} - - assert.Equal(t, expected, u.Roles) -} - -func TestRoleDelete(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - _, err := as.RoleDelete(&pb.AuthRoleDeleteRequest{Role: "role-test"}) - if err != nil { - t.Fatal(err) - } - rl, err := as.RoleList(&pb.AuthRoleListRequest{}) - if err != nil { - t.Fatal(err) - } - expected := []string{"root"} - - assert.Equal(t, expected, rl.Roles) -} - -func TestAuthInfoFromCtx(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - ctx := context.Background() - ai, err := as.AuthInfoFromCtx(ctx) - if err != nil && ai != nil { - t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) - } - - // as if it came from RPC - ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"})) - ai, err = as.AuthInfoFromCtx(ctx) - if err != nil && ai != nil { - t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) - } - - ctx = context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") 
- resp, err := as.Authenticate(ctx, "foo", "bar") - if err != nil { - t.Error(err) - } - - ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "Invalid Token"})) - _, err = as.AuthInfoFromCtx(ctx) - if err != ErrInvalidAuthToken { - t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) - } - - ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "Invalid.Token"})) - _, err = as.AuthInfoFromCtx(ctx) - if err != ErrInvalidAuthToken { - t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) - } - - ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: resp.Token})) - ai, err = as.AuthInfoFromCtx(ctx) - if err != nil { - t.Error(err) - } - if ai.Username != "foo" { - t.Errorf("expected %v, got %v", "foo", ai.Username) - } -} - -func TestAuthDisable(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - as.AuthDisable() - ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - _, err := as.Authenticate(ctx, "foo", "bar") - if err != ErrAuthNotEnabled { - t.Errorf("expected %v, got %v", ErrAuthNotEnabled, err) - } - - // Disabling disabled auth to make sure it can return safely if store is already disabled. 
- as.AuthDisable() - _, err = as.Authenticate(ctx, "foo", "bar") - if err != ErrAuthNotEnabled { - t.Errorf("expected %v, got %v", ErrAuthNotEnabled, err) - } -} - -func TestIsAuthEnabled(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // enable authentication to test the first possible condition - as.AuthEnable() - - status := as.IsAuthEnabled() - ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - _, _ = as.Authenticate(ctx, "foo", "bar") - if status != true { - t.Errorf("expected %v, got %v", true, false) - } - - // Disabling disabled auth to test the other condition that can be return - as.AuthDisable() - - status = as.IsAuthEnabled() - _, _ = as.Authenticate(ctx, "foo", "bar") - if status != false { - t.Errorf("expected %v, got %v", false, true) - } -} - -// TestAuthInfoFromCtxRace ensures that access to authStore.revision is thread-safe. -func TestAuthInfoFromCtxRace(t *testing.T) { - tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) - if err != nil { - t.Fatal(err) - } - as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) - defer as.Close() - - donec := make(chan struct{}) - go func() { - defer close(donec) - ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{rpctypes.TokenFieldNameGRPC: "test"})) - as.AuthInfoFromCtx(ctx) - }() - as.UserAdd(&pb.AuthUserAddRequest{Name: "test", Options: &authpb.UserAddOptions{NoPassword: false}}) - <-donec -} - -func TestIsAdminPermitted(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - err := as.IsAdminPermitted(&AuthInfo{Username: "root", Revision: 1}) - if err != nil { - t.Errorf("expected nil, got %v", err) - } - - // invalid user - err = as.IsAdminPermitted(&AuthInfo{Username: "rooti", Revision: 1}) - if err != ErrUserNotFound { - t.Errorf("expected %v, 
got %v", ErrUserNotFound, err) - } - - // empty user - err = as.IsAdminPermitted(&AuthInfo{Username: "", Revision: 1}) - if err != ErrUserEmpty { - t.Errorf("expected %v, got %v", ErrUserEmpty, err) - } - - // non-admin user - err = as.IsAdminPermitted(&AuthInfo{Username: "foo", Revision: 1}) - if err != ErrPermissionDenied { - t.Errorf("expected %v, got %v", ErrPermissionDenied, err) - } - - // disabled auth should return nil - as.AuthDisable() - err = as.IsAdminPermitted(&AuthInfo{Username: "root", Revision: 1}) - if err != nil { - t.Errorf("expected nil, got %v", err) - } -} - -func TestRecoverFromSnapshot(t *testing.T) { - as, teardown := setupAuthStore(t) - defer teardown(t) - - ua := &pb.AuthUserAddRequest{Name: "foo", Options: &authpb.UserAddOptions{NoPassword: false}} - _, err := as.UserAdd(ua) // add an existing user - if err == nil { - t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err) - } - if err != ErrUserAlreadyExist { - t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err) - } - - ua = &pb.AuthUserAddRequest{Name: "", Options: &authpb.UserAddOptions{NoPassword: false}} - _, err = as.UserAdd(ua) // add a user with empty name - if err != ErrUserEmpty { - t.Fatal(err) - } - - as.Close() - - tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) - if err != nil { - t.Fatal(err) - } - as2 := NewAuthStore(zaptest.NewLogger(t), as.be, tp, bcrypt.MinCost) - defer as2.Close() - - if !as2.IsAuthEnabled() { - t.Fatal("recovering authStore from existing backend failed") - } - - ul, err := as.UserList(&pb.AuthUserListRequest{}) - if err != nil { - t.Fatal(err) - } - if !contains(ul.Users, "root") { - t.Errorf("expected %v in %v", "root", ul.Users) - } -} - -func contains(array []string, str string) bool { - for _, s := range array { - if s == str { - return true - } - } - return false -} - -func TestHammerSimpleAuthenticate(t *testing.T) { - // set TTL values low to try to trigger races - oldTTL, 
oldTTLRes := simpleTokenTTLDefault, simpleTokenTTLResolution - defer func() { - simpleTokenTTLDefault = oldTTL - simpleTokenTTLResolution = oldTTLRes - }() - simpleTokenTTLDefault = 10 * time.Millisecond - simpleTokenTTLResolution = simpleTokenTTLDefault - users := make(map[string]struct{}) - - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - // create lots of users - for i := 0; i < 50; i++ { - u := fmt.Sprintf("user-%d", i) - ua := &pb.AuthUserAddRequest{Name: u, HashedPassword: encodePassword("123"), Options: &authpb.UserAddOptions{NoPassword: false}} - if _, err := as.UserAdd(ua); err != nil { - t.Fatal(err) - } - users[u] = struct{}{} - } - - // hammer on authenticate with lots of users - for i := 0; i < 10; i++ { - var wg sync.WaitGroup - wg.Add(len(users)) - for u := range users { - go func(user string) { - defer wg.Done() - token := fmt.Sprintf("%s(%d)", user, i) - ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, token) - if _, err := as.Authenticate(ctx, user, "123"); err != nil { - t.Error(err) - } - if _, err := as.AuthInfoFromCtx(ctx); err != nil { - t.Error(err) - } - }(u) - } - time.Sleep(time.Millisecond) - wg.Wait() - } -} - -// TestRolesOrder tests authpb.User.Roles is sorted -func TestRolesOrder(t *testing.T) { - tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault) - defer tp.disable() - if err != nil { - t.Fatal(err) - } - as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) - defer as.Close() - err = enableAuthAndCreateRoot(as) - if err != nil { - t.Fatal(err) - } - - username := "user" - _, err = as.UserAdd(&pb.AuthUserAddRequest{Name: username, HashedPassword: encodePassword("pass"), Options: &authpb.UserAddOptions{NoPassword: false}}) - if err != nil { - t.Fatal(err) - } - - roles := []string{"role1", "role2", "abc", "xyz", "role3"} - for _, role := range roles { - _, 
err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: role}) - if err != nil { - t.Fatal(err) - } - - _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: username, Role: role}) - if err != nil { - t.Fatal(err) - } - } - - user, err := as.UserGet(&pb.AuthUserGetRequest{Name: username}) - if err != nil { - t.Fatal(err) - } - - for i := 1; i < len(user.Roles); i++ { - if strings.Compare(user.Roles[i-1], user.Roles[i]) != -1 { - t.Errorf("User.Roles isn't sorted (%s vs %s)", user.Roles[i-1], user.Roles[i]) - } - } -} - -func TestAuthInfoFromCtxWithRootSimple(t *testing.T) { - testAuthInfoFromCtxWithRoot(t, tokenTypeSimple) -} - -func TestAuthInfoFromCtxWithRootJWT(t *testing.T) { - opts := testJWTOpts() - testAuthInfoFromCtxWithRoot(t, opts) -} - -// testAuthInfoFromCtxWithRoot ensures "WithRoot" properly embeds token in the context. -func testAuthInfoFromCtxWithRoot(t *testing.T, opts string) { - tp, err := NewTokenProvider(zaptest.NewLogger(t), opts, dummyIndexWaiter, simpleTokenTTLDefault) - if err != nil { - t.Fatal(err) - } - as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost) - defer as.Close() - - if err = enableAuthAndCreateRoot(as); err != nil { - t.Fatal(err) - } - - ctx := context.Background() - ctx = as.WithRoot(ctx) - - ai, aerr := as.AuthInfoFromCtx(ctx) - if aerr != nil { - t.Error(err) - } - if ai == nil { - t.Error("expected non-nil *AuthInfo") - } - if ai.Username != "root" { - t.Errorf("expected user name 'root', got %+v", ai) - } -} - -func TestUserNoPasswordAdd(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - username := "usernopass" - ua := &pb.AuthUserAddRequest{Name: username, Options: &authpb.UserAddOptions{NoPassword: true}} - _, err := as.UserAdd(ua) - if err != nil { - t.Fatal(err) - } - - ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - _, err = as.Authenticate(ctx, username, "") - if err != 
ErrAuthFailed { - t.Fatalf("expected %v, got %v", ErrAuthFailed, err) - } -} - -func TestUserAddWithOldLog(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - ua := &pb.AuthUserAddRequest{Name: "bar", Password: "baz", Options: &authpb.UserAddOptions{NoPassword: false}} - _, err := as.UserAdd(ua) - if err != nil { - t.Fatal(err) - } -} - -func TestUserChangePasswordWithOldLog(t *testing.T) { - as, tearDown := setupAuthStore(t) - defer tearDown(t) - - ctx1 := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - _, err := as.Authenticate(ctx1, "foo", "bar") - if err != nil { - t.Fatal(err) - } - - _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo", Password: "baz"}) - if err != nil { - t.Fatal(err) - } - - ctx2 := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(2)), AuthenticateParamSimpleTokenPrefix{}, "dummy") - _, err = as.Authenticate(ctx2, "foo", "baz") - if err != nil { - t.Fatal(err) - } - - // change a non-existing user - _, err = as.UserChangePassword(&pb.AuthUserChangePasswordRequest{Name: "foo-test", HashedPassword: encodePassword("bar")}) - if err == nil { - t.Fatalf("expected %v, got %v", ErrUserNotFound, err) - } - if err != ErrUserNotFound { - t.Fatalf("expected %v, got %v", ErrUserNotFound, err) - } -} diff --git a/server/config/config.go b/server/config/config.go deleted file mode 100644 index 48de650b8a1..00000000000 --- a/server/config/config.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "context" - "fmt" - "path/filepath" - "sort" - "strings" - "time" - - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/netutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" - "go.etcd.io/etcd/server/v3/storage/datadir" - - bolt "go.etcd.io/bbolt" -) - -// ServerConfig holds the configuration of etcd as taken from the command line or discovery. -type ServerConfig struct { - Name string - - DiscoveryURL string - DiscoveryProxy string - DiscoveryCfg v3discovery.DiscoveryConfig - - ClientURLs types.URLs - PeerURLs types.URLs - DataDir string - // DedicatedWALDir config will make the etcd to write the WAL to the WALDir - // rather than the dataDir/member/wal. - DedicatedWALDir string - - SnapshotCount uint64 - - // SnapshotCatchUpEntries is the number of entries for a slow follower - // to catch-up after compacting the raft storage entries. - // We expect the follower has a millisecond level latency with the leader. - // The max throughput is around 10K. Keep a 5K entries is enough for helping - // follower to catch up. - SnapshotCatchUpEntries uint64 - - MaxSnapFiles uint - MaxWALFiles uint - - // BackendBatchInterval is the maximum time before commit the backend transaction. - BackendBatchInterval time.Duration - // BackendBatchLimit is the maximum operations before commit the backend transaction. 
- BackendBatchLimit int - - // BackendFreelistType is the type of the backend boltdb freelist. - BackendFreelistType bolt.FreelistType - - InitialPeerURLsMap types.URLsMap - InitialClusterToken string - NewCluster bool - PeerTLSInfo transport.TLSInfo - - CORS map[string]struct{} - - // HostWhitelist lists acceptable hostnames from client requests. - // If server is insecure (no TLS), server only accepts requests - // whose Host header value exists in this white list. - HostWhitelist map[string]struct{} - - TickMs uint - ElectionTicks int - - // WaitClusterReadyTimeout is the maximum time to wait for the - // cluster to be ready on startup before serving client requests. - WaitClusterReadyTimeout time.Duration - - // InitialElectionTickAdvance is true, then local member fast-forwards - // election ticks to speed up "initial" leader election trigger. This - // benefits the case of larger election ticks. For instance, cross - // datacenter deployment may require longer election timeout of 10-second. - // If true, local node does not need wait up to 10-second. Instead, - // forwards its election ticks to 8-second, and have only 2-second left - // before leader election. - // - // Major assumptions are that: - // - cluster has no active leader thus advancing ticks enables faster - // leader election, or - // - cluster already has an established leader, and rejoining follower - // is likely to receive heartbeats from the leader after tick advance - // and before election timeout. - // - // However, when network from leader to rejoining follower is congested, - // and the follower does not receive leader heartbeat within left election - // ticks, disruptive election has to happen thus affecting cluster - // availabilities. - // - // Disabling this would slow down initial bootstrap process for cross - // datacenter deployments. Make your own tradeoffs by configuring - // --initial-election-tick-advance at the cost of slow initial bootstrap. 
- // - // If single-node, it advances ticks regardless. - // - // See https://github.com/etcd-io/etcd/issues/9333 for more detail. - InitialElectionTickAdvance bool - - BootstrapTimeout time.Duration - - AutoCompactionRetention time.Duration - AutoCompactionMode string - CompactionBatchLimit int - CompactionSleepInterval time.Duration - QuotaBackendBytes int64 - MaxTxnOps uint - - // MaxRequestBytes is the maximum request size to send over raft. - MaxRequestBytes uint - - // MaxConcurrentStreams specifies the maximum number of concurrent - // streams that each client can open at a time. - MaxConcurrentStreams uint32 - - WarningApplyDuration time.Duration - WarningUnaryRequestDuration time.Duration - - StrictReconfigCheck bool - - // ClientCertAuthEnabled is true when cert has been signed by the client CA. - ClientCertAuthEnabled bool - - AuthToken string - BcryptCost uint - TokenTTL uint - - // InitialCorruptCheck is true to check data corruption on boot - // before serving any peer/client traffic. - InitialCorruptCheck bool - CorruptCheckTime time.Duration - CompactHashCheckEnabled bool - CompactHashCheckTime time.Duration - - // PreVote is true to enable Raft Pre-Vote. - PreVote bool - - // SocketOpts are socket options passed to listener config. - SocketOpts transport.SocketOpts - - // Logger logs server-side operations. - Logger *zap.Logger - - ForceNewCluster bool - - // EnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change. - EnableLeaseCheckpoint bool - // LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints. - LeaseCheckpointInterval time.Duration - // LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. 
- LeaseCheckpointPersist bool - - EnableGRPCGateway bool - - // ExperimentalEnableDistributedTracing enables distributed tracing using OpenTelemetry protocol. - ExperimentalEnableDistributedTracing bool - // ExperimentalTracerOptions are options for OpenTelemetry gRPC interceptor. - ExperimentalTracerOptions []otelgrpc.Option - - WatchProgressNotifyInterval time.Duration - - // UnsafeNoFsync disables all uses of fsync. - // Setting this is unsafe and will cause data loss. - UnsafeNoFsync bool `json:"unsafe-no-fsync"` - - DowngradeCheckTime time.Duration - - // ExperimentalMemoryMlock enables mlocking of etcd owned memory pages. - // The setting improves etcd tail latency in environments were: - // - memory pressure might lead to swapping pages to disk - // - disk latency might be unstable - // Currently all etcd memory gets mlocked, but in future the flag can - // be refined to mlock in-use area of bbolt only. - ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"` - - // ExperimentalTxnModeWriteWithSharedBuffer enable write transaction to use - // a shared buffer in its readonly check operations. - ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"` - - // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to - // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect. - ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` - - // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership. - ExperimentalMaxLearners int `json:"experimental-max-learners"` - - // V2Deprecation defines a phase of v2store deprecation process. 
- V2Deprecation V2DeprecationEnum `json:"v2-deprecation"` -} - -// VerifyBootstrap sanity-checks the initial config for bootstrap case -// and returns an error for things that should never happen. -func (c *ServerConfig) VerifyBootstrap() error { - if err := c.hasLocalMember(); err != nil { - return err - } - if err := c.advertiseMatchesCluster(); err != nil { - return err - } - if CheckDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" { - return fmt.Errorf("initial cluster unset and no discovery URL found") - } - return nil -} - -// VerifyJoinExisting sanity-checks the initial config for join existing cluster -// case and returns an error for things that should never happen. -func (c *ServerConfig) VerifyJoinExisting() error { - // The member has announced its peer urls to the cluster before starting; no need to - // set the configuration again. - if err := c.hasLocalMember(); err != nil { - return err - } - if CheckDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.DiscoveryURL != "" { - return fmt.Errorf("discovery URL should not be set when joining existing initial cluster") - } - return nil -} - -// hasLocalMember checks that the cluster at least contains the local server. -func (c *ServerConfig) hasLocalMember() error { - if urls := c.InitialPeerURLsMap[c.Name]; urls == nil { - return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name) - } - return nil -} - -// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list. 
-func (c *ServerConfig) advertiseMatchesCluster() error { - urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice() - urls.Sort() - sort.Strings(apurls) - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice()) - if ok { - return nil - } - - initMap, apMap := make(map[string]struct{}), make(map[string]struct{}) - for _, url := range c.PeerURLs { - apMap[url.String()] = struct{}{} - } - for _, url := range c.InitialPeerURLsMap[c.Name] { - initMap[url.String()] = struct{}{} - } - - var missing []string - for url := range initMap { - if _, ok := apMap[url]; !ok { - missing = append(missing, url) - } - } - if len(missing) > 0 { - for i := range missing { - missing[i] = c.Name + "=" + missing[i] - } - mstr := strings.Join(missing, ",") - apStr := strings.Join(apurls, ",") - return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err) - } - - for url := range apMap { - if _, ok := initMap[url]; !ok { - missing = append(missing, url) - } - } - if len(missing) > 0 { - mstr := strings.Join(missing, ",") - umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs}) - return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String()) - } - - // resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed - apStr := strings.Join(apurls, ",") - umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs}) - return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err) -} - -func (c *ServerConfig) MemberDir() string { return datadir.ToMemberDir(c.DataDir) } - -func (c *ServerConfig) WALDir() string { - if c.DedicatedWALDir != "" { - return c.DedicatedWALDir - } - return datadir.ToWalDir(c.DataDir) -} - -func (c *ServerConfig) SnapDir() string { return 
filepath.Join(c.MemberDir(), "snap") } - -func (c *ServerConfig) ShouldDiscover() bool { - return c.DiscoveryURL != "" || len(c.DiscoveryCfg.Endpoints) > 0 -} - -// ReqTimeout returns timeout for request to finish. -func (c *ServerConfig) ReqTimeout() time.Duration { - // 5s for queue waiting, computation and disk IO delay - // + 2 * election timeout for possible leader election - return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond -} - -func (c *ServerConfig) ElectionTimeout() time.Duration { - return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond -} - -func (c *ServerConfig) PeerDialTimeout() time.Duration { - // 1s for queue wait and election timeout - return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond -} - -func CheckDuplicateURL(urlsmap types.URLsMap) bool { - um := make(map[string]bool) - for _, urls := range urlsmap { - for _, url := range urls { - u := url.String() - if um[u] { - return true - } - um[u] = true - } - } - return false -} - -func (c *ServerConfig) BootstrapTimeoutEffective() time.Duration { - if c.BootstrapTimeout != 0 { - return c.BootstrapTimeout - } - return time.Second -} - -func (c *ServerConfig) BackendPath() string { return datadir.ToBackendFileName(c.DataDir) } diff --git a/server/config/config_test.go b/server/config/config_test.go deleted file mode 100644 index 069dc9e1315..00000000000 --- a/server/config/config_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "net/url" - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/types" -) - -func mustNewURLs(t *testing.T, urls []string) []url.URL { - if len(urls) == 0 { - return nil - } - u, err := types.NewURLs(urls) - if err != nil { - t.Fatalf("error creating new URLs from %q: %v", urls, err) - } - return u -} - -func TestConfigVerifyBootstrapWithoutClusterAndDiscoveryURLFail(t *testing.T) { - c := &ServerConfig{ - Name: "node1", - DiscoveryURL: "", - InitialPeerURLsMap: types.URLsMap{}, - Logger: zaptest.NewLogger(t), - } - if err := c.VerifyBootstrap(); err == nil { - t.Errorf("err = nil, want not nil") - } -} - -func TestConfigVerifyExistingWithDiscoveryURLFail(t *testing.T) { - cluster, err := types.NewURLsMap("node1=http://127.0.0.1:2380") - if err != nil { - t.Fatalf("NewCluster error: %v", err) - } - c := &ServerConfig{ - Name: "node1", - DiscoveryURL: "http://127.0.0.1:2379/abcdefg", - PeerURLs: mustNewURLs(t, []string{"http://127.0.0.1:2380"}), - InitialPeerURLsMap: cluster, - NewCluster: false, - Logger: zaptest.NewLogger(t), - } - if err := c.VerifyJoinExisting(); err == nil { - t.Errorf("err = nil, want not nil") - } -} - -func TestConfigVerifyLocalMember(t *testing.T) { - tests := []struct { - clusterSetting string - apurls []string - strict bool - shouldError bool - }{ - { - // Node must exist in cluster - "", - nil, - true, - - true, - }, - { - // Initial cluster set - "node1=http://localhost:7001,node2=http://localhost:7002", - []string{"http://localhost:7001"}, - true, - - false, - }, - { - // Default initial cluster - "node1=http://localhost:2380,node1=http://localhost:7001", - []string{"http://localhost:2380", "http://localhost:7001"}, - true, - - false, - }, - { - // Advertised peer URLs must match those in cluster-state - "node1=http://localhost:7001", - []string{"http://localhost:12345"}, - 
true, - - true, - }, - { - // Advertised peer URLs must match those in cluster-state - "node1=http://localhost:2380,node1=http://localhost:12345", - []string{"http://localhost:12345"}, - true, - - true, - }, - { - // Advertised peer URLs must match those in cluster-state - "node1=http://localhost:12345", - []string{"http://localhost:2380", "http://localhost:12345"}, - true, - - true, - }, - { - // Advertised peer URLs must match those in cluster-state - "node1=http://localhost:2380", - []string{}, - true, - - true, - }, - { - // do not care about the urls if strict is not set - "node1=http://localhost:2380", - []string{}, - false, - - false, - }, - } - - for i, tt := range tests { - cluster, err := types.NewURLsMap(tt.clusterSetting) - if err != nil { - t.Fatalf("#%d: Got unexpected error: %v", i, err) - } - cfg := ServerConfig{ - Name: "node1", - InitialPeerURLsMap: cluster, - Logger: zaptest.NewLogger(t), - } - if tt.apurls != nil { - cfg.PeerURLs = mustNewURLs(t, tt.apurls) - } - if err = cfg.hasLocalMember(); err == nil && tt.strict { - err = cfg.advertiseMatchesCluster() - } - if (err == nil) && tt.shouldError { - t.Errorf("#%d: Got no error where one was expected", i) - } - if (err != nil) && !tt.shouldError { - t.Errorf("#%d: Got unexpected error: %v", i, err) - } - } -} - -func TestSnapDir(t *testing.T) { - tests := map[string]string{ - "/": "/member/snap", - "/var/lib/etc": "/var/lib/etc/member/snap", - } - for dd, w := range tests { - cfg := ServerConfig{ - DataDir: dd, - Logger: zaptest.NewLogger(t), - } - if g := cfg.SnapDir(); g != w { - t.Errorf("DataDir=%q: SnapDir()=%q, want=%q", dd, g, w) - } - } -} - -func TestWALDir(t *testing.T) { - tests := map[string]string{ - "/": "/member/wal", - "/var/lib/etc": "/var/lib/etc/member/wal", - } - for dd, w := range tests { - cfg := ServerConfig{ - DataDir: dd, - Logger: zaptest.NewLogger(t), - } - if g := cfg.WALDir(); g != w { - t.Errorf("DataDir=%q: WALDir()=%q, want=%q", dd, g, w) - } - } -} - -func 
TestShouldDiscover(t *testing.T) { - tests := map[string]bool{ - "": false, - "foo": true, - "http://discovery.etcd.io/asdf": true, - } - for durl, w := range tests { - cfg := ServerConfig{ - DiscoveryURL: durl, - Logger: zaptest.NewLogger(t), - } - if g := cfg.ShouldDiscover(); g != w { - t.Errorf("durl=%q: ShouldDiscover()=%t, want=%t", durl, g, w) - } - } -} diff --git a/server/config/v2_deprecation_test.go b/server/config/v2_deprecation_test.go deleted file mode 100644 index c8d911d6076..00000000000 --- a/server/config/v2_deprecation_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import "testing" - -func TestV2DeprecationEnum_IsAtLeast(t *testing.T) { - tests := []struct { - e V2DeprecationEnum - v2d V2DeprecationEnum - want bool - }{ - {V2_DEPR_0_NOT_YET, V2_DEPR_0_NOT_YET, true}, - {V2_DEPR_0_NOT_YET, V2_DEPR_1_WRITE_ONLY_DROP, false}, - {V2_DEPR_0_NOT_YET, V2_DEPR_2_GONE, false}, - {V2_DEPR_2_GONE, V2_DEPR_1_WRITE_ONLY_DROP, true}, - {V2_DEPR_2_GONE, V2_DEPR_0_NOT_YET, true}, - {V2_DEPR_2_GONE, V2_DEPR_2_GONE, true}, - {V2_DEPR_1_WRITE_ONLY, V2_DEPR_1_WRITE_ONLY_DROP, false}, - {V2_DEPR_1_WRITE_ONLY_DROP, V2_DEPR_1_WRITE_ONLY, true}, - } - for _, tt := range tests { - t.Run(string(tt.e)+" >= "+string(tt.v2d), func(t *testing.T) { - if got := tt.e.IsAtLeast(tt.v2d); got != tt.want { - t.Errorf("IsAtLeast() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/server/embed/auth_test.go b/server/embed/auth_test.go deleted file mode 100644 index a09e618f66c..00000000000 --- a/server/embed/auth_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "context" - "testing" - - "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" -) - -func TestEnableAuth(t *testing.T) { - tdir := t.TempDir() - cfg := NewConfig() - cfg.Dir = tdir - e, err := StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - defer e.Close() - client := v3client.New(e.Server) - defer client.Close() - - _, err = client.RoleAdd(context.TODO(), "root") - if err != nil { - t.Fatal(err) - } - _, err = client.UserAdd(context.TODO(), "root", "root") - if err != nil { - t.Fatal(err) - } - _, err = client.UserGrantRole(context.TODO(), "root", "root") - if err != nil { - t.Fatal(err) - } - _, err = client.AuthEnable(context.TODO()) - if err != nil { - t.Fatal(err) - } -} diff --git a/server/embed/config.go b/server/embed/config.go deleted file mode 100644 index 75bcbc34152..00000000000 --- a/server/embed/config.go +++ /dev/null @@ -1,1111 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "crypto/tls" - "errors" - "fmt" - "math" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/srv" - "go.etcd.io/etcd/client/pkg/v3/tlsutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/flags" - "go.etcd.io/etcd/pkg/v3/netutil" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" - - "go.uber.org/multierr" - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" - "google.golang.org/grpc" - "sigs.k8s.io/yaml" - - bolt "go.etcd.io/bbolt" -) - -const ( - ClusterStateFlagNew = "new" - ClusterStateFlagExisting = "existing" - - DefaultName = "default" - DefaultMaxSnapshots = 5 - DefaultMaxWALs = 5 - DefaultMaxTxnOps = uint(128) - DefaultWarningApplyDuration = 100 * time.Millisecond - DefaultWarningUnaryRequestDuration = 300 * time.Millisecond - DefaultMaxRequestBytes = 1.5 * 1024 * 1024 - DefaultMaxConcurrentStreams = math.MaxUint32 - DefaultGRPCKeepAliveMinTime = 5 * time.Second - DefaultGRPCKeepAliveInterval = 2 * time.Hour - DefaultGRPCKeepAliveTimeout = 20 * time.Second - DefaultDowngradeCheckTime = 5 * time.Second - DefaultWaitClusterReadyTimeout = 5 * time.Second - - DefaultDiscoveryDialTimeout = 2 * time.Second - DefaultDiscoveryRequestTimeOut = 5 * time.Second - DefaultDiscoveryKeepAliveTime = 2 * time.Second - DefaultDiscoveryKeepAliveTimeOut = 6 * time.Second - - DefaultListenPeerURLs = "http://localhost:2380" - DefaultListenClientURLs = "http://localhost:2379" - - DefaultLogOutput = "default" - JournalLogOutput = "systemd/journal" - StdErrLogOutput = "stderr" - StdOutLogOutput = "stdout" - - // DefaultLogRotationConfig is the 
default configuration used for log rotation. - // Log rotation is disabled by default. - // MaxSize = 100 // MB - // MaxAge = 0 // days (no limit) - // MaxBackups = 0 // no limit - // LocalTime = false // use computers local time, UTC by default - // Compress = false // compress the rotated log in gzip format - DefaultLogRotationConfig = `{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}` - - // ExperimentalDistributedTracingAddress is the default collector address. - ExperimentalDistributedTracingAddress = "localhost:4317" - // ExperimentalDistributedTracingServiceName is the default etcd service name. - ExperimentalDistributedTracingServiceName = "etcd" - - // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag. - // It's enabled by default. - DefaultStrictReconfigCheck = true - - // maxElectionMs specifies the maximum value of election timeout. - // More details are listed on etcd.io/docs > version > tuning/#time-parameters - maxElectionMs = 50000 - // backend freelist map type - freelistArrayType = "array" -) - -var ( - ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " + - "Choose one of \"initial-cluster\", \"discovery\", \"discovery-endpoints\" or \"discovery-srv\"") - ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") - ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined") - - DefaultInitialAdvertisePeerURLs = "http://localhost:2380" - DefaultAdvertiseClientURLs = "http://localhost:2379" - - defaultHostname string - defaultHostStatus error - - // indirection for testing - getCluster = srv.GetCluster -) - -var ( - // CompactorModePeriodic is periodic compaction mode - // for "Config.AutoCompactionMode" field. 
- // If "AutoCompactionMode" is CompactorModePeriodic and - // "AutoCompactionRetention" is "1h", it automatically compacts - // compacts storage every hour. - CompactorModePeriodic = v3compactor.ModePeriodic - - // CompactorModeRevision is revision-based compaction mode - // for "Config.AutoCompactionMode" field. - // If "AutoCompactionMode" is CompactorModeRevision and - // "AutoCompactionRetention" is "1000", it compacts log on - // revision 5000 when the current revision is 6000. - // This runs every 5-minute if enough of logs have proceeded. - CompactorModeRevision = v3compactor.ModeRevision -) - -func init() { - defaultHostname, defaultHostStatus = netutil.GetDefaultHost() -} - -// Config holds the arguments for configuring an etcd server. -type Config struct { - Name string `json:"name"` - Dir string `json:"data-dir"` - WalDir string `json:"wal-dir"` - - SnapshotCount uint64 `json:"snapshot-count"` - - // SnapshotCatchUpEntries is the number of entries for a slow follower - // to catch-up after compacting the raft storage entries. - // We expect the follower has a millisecond level latency with the leader. - // The max throughput is around 10K. Keep a 5K entries is enough for helping - // follower to catch up. - SnapshotCatchUpEntries uint64 `json:"experimental-snapshot-catch-up-entries"` - - MaxSnapFiles uint `json:"max-snapshots"` - MaxWalFiles uint `json:"max-wals"` - - // TickMs is the number of milliseconds between heartbeat ticks. - // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1). - // make ticks a cluster wide configuration. - TickMs uint `json:"heartbeat-interval"` - ElectionMs uint `json:"election-timeout"` - - // InitialElectionTickAdvance is true, then local member fast-forwards - // election ticks to speed up "initial" leader election trigger. This - // benefits the case of larger election ticks. For instance, cross - // datacenter deployment may require longer election timeout of 10-second. 
- // If true, local node does not need wait up to 10-second. Instead, - // forwards its election ticks to 8-second, and have only 2-second left - // before leader election. - // - // Major assumptions are that: - // - cluster has no active leader thus advancing ticks enables faster - // leader election, or - // - cluster already has an established leader, and rejoining follower - // is likely to receive heartbeats from the leader after tick advance - // and before election timeout. - // - // However, when network from leader to rejoining follower is congested, - // and the follower does not receive leader heartbeat within left election - // ticks, disruptive election has to happen thus affecting cluster - // availabilities. - // - // Disabling this would slow down initial bootstrap process for cross - // datacenter deployments. Make your own tradeoffs by configuring - // --initial-election-tick-advance at the cost of slow initial bootstrap. - // - // If single-node, it advances ticks regardless. - // - // See https://github.com/etcd-io/etcd/issues/9333 for more detail. - InitialElectionTickAdvance bool `json:"initial-election-tick-advance"` - - // BackendBatchInterval is the maximum time before commit the backend transaction. - BackendBatchInterval time.Duration `json:"backend-batch-interval"` - // BackendBatchLimit is the maximum operations before commit the backend transaction. - BackendBatchLimit int `json:"backend-batch-limit"` - // BackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types). - BackendFreelistType string `json:"backend-bbolt-freelist-type"` - QuotaBackendBytes int64 `json:"quota-backend-bytes"` - MaxTxnOps uint `json:"max-txn-ops"` - MaxRequestBytes uint `json:"max-request-bytes"` - - // MaxConcurrentStreams specifies the maximum number of concurrent - // streams that each client can open at a time. 
- MaxConcurrentStreams uint32 `json:"max-concurrent-streams"` - - LPUrls, LCUrls []url.URL - APUrls, ACUrls []url.URL - ClientTLSInfo transport.TLSInfo - ClientAutoTLS bool - PeerTLSInfo transport.TLSInfo - PeerAutoTLS bool - // SelfSignedCertValidity specifies the validity period of the client and peer certificates - // that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS, - // the unit is year, and the default is 1 - SelfSignedCertValidity uint `json:"self-signed-cert-validity"` - - // CipherSuites is a list of supported TLS cipher suites between - // client/server and peers. If empty, Go auto-populates the list. - // Note that cipher suites are prioritized in the given order. - CipherSuites []string `json:"cipher-suites"` - - // TlsMinVersion is the minimum accepted TLS version between client/server and peers. - TlsMinVersion string `json:"tls-min-version"` - // TlsMaxVersion is the maximum accepted TLS version between client/server and peers. - TlsMaxVersion string `json:"tls-max-version"` - - ClusterState string `json:"initial-cluster-state"` - DNSCluster string `json:"discovery-srv"` - DNSClusterServiceName string `json:"discovery-srv-name"` - Dproxy string `json:"discovery-proxy"` - - Durl string `json:"discovery"` - DiscoveryCfg v3discovery.DiscoveryConfig `json:"discovery-config"` - - InitialCluster string `json:"initial-cluster"` - InitialClusterToken string `json:"initial-cluster-token"` - StrictReconfigCheck bool `json:"strict-reconfig-check"` - ExperimentalWaitClusterReadyTimeout time.Duration `json:"wait-cluster-ready-timeout"` - - // AutoCompactionMode is either 'periodic' or 'revision'. - AutoCompactionMode string `json:"auto-compaction-mode"` - // AutoCompactionRetention is either duration string with time unit - // (e.g. '5m' for 5-minute), or revision unit (e.g. '5000'). - // If no time unit is provided and compaction mode is 'periodic', - // the unit defaults to hour. For example, '5' translates into 5-hour. 
- AutoCompactionRetention string `json:"auto-compaction-retention"` - - // GRPCKeepAliveMinTime is the minimum interval that a client should - // wait before pinging server. When client pings "too fast", server - // sends goaway and closes the connection (errors: too_many_pings, - // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens. - // Server expects client pings only when there is any active streams - // (PermitWithoutStream is set false). - GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"` - // GRPCKeepAliveInterval is the frequency of server-to-client ping - // to check if a connection is alive. Close a non-responsive connection - // after an additional duration of Timeout. 0 to disable. - GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"` - // GRPCKeepAliveTimeout is the additional duration of wait - // before closing a non-responsive connection. 0 to disable. - GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` - - // SocketOpts are socket options passed to listener config. - SocketOpts transport.SocketOpts `json:"socket-options"` - - // PreVote is true to enable Raft Pre-Vote. - // If enabled, Raft runs an additional election phase - // to check whether it would get enough votes to win - // an election, thus minimizing disruptions. - PreVote bool `json:"pre-vote"` - - CORS map[string]struct{} - - // HostWhitelist lists acceptable hostnames from HTTP client requests. - // Client origin policy protects against "DNS Rebinding" attacks - // to insecure etcd servers. That is, any website can simply create - // an authorized DNS name, and direct DNS to "localhost" (or any - // other address). Then, all HTTP endpoints of etcd server listening - // on "localhost" becomes accessible, thus vulnerable to DNS rebinding - // attacks. See "CVE-2018-5702" for more detail. - // - // 1. If client connection is secure via HTTPS, allow any hostnames. - // 2. 
If client connection is not secure and "HostWhitelist" is not empty, - // only allow HTTP requests whose Host field is listed in whitelist. - // - // Note that the client origin policy is enforced whether authentication - // is enabled or not, for tighter controls. - // - // By default, "HostWhitelist" is "*", which allows any hostnames. - // Note that when specifying hostnames, loopback addresses are not added - // automatically. To allow loopback interfaces, leave it empty or set it "*", - // or add them to whitelist manually (e.g. "localhost", "127.0.0.1", etc.). - // - // CVE-2018-5702 reference: - // - https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2 - // - https://github.com/transmission/transmission/pull/468 - // - https://github.com/etcd-io/etcd/issues/9353 - HostWhitelist map[string]struct{} - - // UserHandlers is for registering users handlers and only used for - // embedding etcd into other applications. - // The map key is the route path for the handler, and - // you must ensure it can't be conflicted with etcd's. - UserHandlers map[string]http.Handler `json:"-"` - // ServiceRegister is for registering users' gRPC services. 
A simple usage example: - // cfg := embed.NewConfig() - // cfg.ServerRegister = func(s *grpc.Server) { - // pb.RegisterFooServer(s, &fooServer{}) - // pb.RegisterBarServer(s, &barServer{}) - // } - // embed.StartEtcd(cfg) - ServiceRegister func(*grpc.Server) `json:"-"` - - AuthToken string `json:"auth-token"` - BcryptCost uint `json:"bcrypt-cost"` - - // AuthTokenTTL in seconds of the simple token - AuthTokenTTL uint `json:"auth-token-ttl"` - - ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"` - ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"` - ExperimentalCompactHashCheckEnabled bool `json:"experimental-compact-hash-check-enabled"` - ExperimentalCompactHashCheckTime time.Duration `json:"experimental-compact-hash-check-time"` - - // ExperimentalEnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change. - ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` - // ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. - // Requires experimental-enable-lease-checkpoint to be enabled. - // Deprecated in v3.6. - // TODO: Delete in v3.7 - ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"` - ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"` - // ExperimentalCompactionSleepInterval is the sleep interval between every etcd compaction loop. 
- ExperimentalCompactionSleepInterval time.Duration `json:"experimental-compaction-sleep-interval"` - ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"` - // ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying request - // takes more time than this value. - ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"` - // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to - // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect. - ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` - // WarningUnaryRequestDuration is the time duration after which a warning is generated if applying - // unary request takes more time than this value. - WarningUnaryRequestDuration time.Duration `json:"warning-unary-request-duration"` - // ExperimentalWarningUnaryRequestDuration is deprecated, please use WarningUnaryRequestDuration instead. - ExperimentalWarningUnaryRequestDuration time.Duration `json:"experimental-warning-unary-request-duration"` - // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership. - ExperimentalMaxLearners int `json:"experimental-max-learners"` - - // ForceNewCluster starts a new cluster even if previously started; unsafe. - ForceNewCluster bool `json:"force-new-cluster"` - - EnablePprof bool `json:"enable-pprof"` - Metrics string `json:"metrics"` - ListenMetricsUrls []url.URL - ListenMetricsUrlsJSON string `json:"listen-metrics-urls"` - - // ExperimentalEnableDistributedTracing indicates if experimental tracing using OpenTelemetry is enabled. 
- ExperimentalEnableDistributedTracing bool `json:"experimental-enable-distributed-tracing"` - // ExperimentalDistributedTracingAddress is the address of the OpenTelemetry Collector. - // Can only be set if ExperimentalEnableDistributedTracing is true. - ExperimentalDistributedTracingAddress string `json:"experimental-distributed-tracing-address"` - // ExperimentalDistributedTracingServiceName is the name of the service. - // Can only be used if ExperimentalEnableDistributedTracing is true. - ExperimentalDistributedTracingServiceName string `json:"experimental-distributed-tracing-service-name"` - // ExperimentalDistributedTracingServiceInstanceID is the ID key of the service. - // This ID must be unique, as helps to distinguish instances of the same service - // that exist at the same time. - // Can only be used if ExperimentalEnableDistributedTracing is true. - ExperimentalDistributedTracingServiceInstanceID string `json:"experimental-distributed-tracing-instance-id"` - // ExperimentalDistributedTracingSamplingRatePerMillion is the number of samples to collect per million spans. - // Defaults to 0. - ExperimentalDistributedTracingSamplingRatePerMillion int `json:"experimental-distributed-tracing-sampling-rate"` - - // Logger is logger options: currently only supports "zap". - // "capnslog" is removed in v3.5. - Logger string `json:"logger"` - // LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'. - LogLevel string `json:"log-level"` - // LogFormat set log encoding. Only supports json, console. Default is 'json'. - LogFormat string `json:"log-format"` - // LogOutputs is either: - // - "default" as os.Stderr, - // - "stderr" as os.Stderr, - // - "stdout" as os.Stdout, - // - file path to append server logs to. - // It can be multiple when "Logger" is zap. - LogOutputs []string `json:"log-outputs"` - // EnableLogRotation enables log rotation of a single LogOutputs file target. 
- EnableLogRotation bool `json:"enable-log-rotation"` - // LogRotationConfigJSON is a passthrough allowing a log rotation JSON config to be passed directly. - LogRotationConfigJSON string `json:"log-rotation-config-json"` - // ZapLoggerBuilder is used to build the zap logger. - ZapLoggerBuilder func(*Config) error - - // logger logs server-side operations. The default is nil, - // and "setupLogging" must be called before starting server. - // Do not set logger directly. - loggerMu *sync.RWMutex - logger *zap.Logger - // EnableGRPCGateway enables grpc gateway. - // The gateway translates a RESTful HTTP API into gRPC. - EnableGRPCGateway bool `json:"enable-grpc-gateway"` - - // UnsafeNoFsync disables all uses of fsync. - // Setting this is unsafe and will cause data loss. - UnsafeNoFsync bool `json:"unsafe-no-fsync"` - - ExperimentalDowngradeCheckTime time.Duration `json:"experimental-downgrade-check-time"` - - // ExperimentalMemoryMlock enables mlocking of etcd owned memory pages. - // The setting improves etcd tail latency in environments were: - // - memory pressure might lead to swapping pages to disk - // - disk latency might be unstable - // Currently all etcd memory gets mlocked, but in future the flag can - // be refined to mlock in-use area of bbolt only. - ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"` - - // ExperimentalTxnModeWriteWithSharedBuffer enables write transaction to use a shared buffer in its readonly check operations. 
- ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"` - - // V2Deprecation describes phase of API & Storage V2 support - V2Deprecation config.V2DeprecationEnum `json:"v2-deprecation"` -} - -// configYAML holds the config suitable for yaml parsing -type configYAML struct { - Config - configJSON -} - -// configJSON has file options that are translated into Config options -type configJSON struct { - LPUrlsJSON string `json:"listen-peer-urls"` - LCUrlsJSON string `json:"listen-client-urls"` - APUrlsJSON string `json:"initial-advertise-peer-urls"` - ACUrlsJSON string `json:"advertise-client-urls"` - - CORSJSON string `json:"cors"` - HostWhitelistJSON string `json:"host-whitelist"` - - ClientSecurityJSON securityConfig `json:"client-transport-security"` - PeerSecurityJSON securityConfig `json:"peer-transport-security"` -} - -type securityConfig struct { - CertFile string `json:"cert-file"` - KeyFile string `json:"key-file"` - ClientCertFile string `json:"client-cert-file"` - ClientKeyFile string `json:"client-key-file"` - CertAuth bool `json:"client-cert-auth"` - TrustedCAFile string `json:"trusted-ca-file"` - AutoTLS bool `json:"auto-tls"` -} - -// NewConfig creates a new Config populated with default values. 
-func NewConfig() *Config { - lpurl, _ := url.Parse(DefaultListenPeerURLs) - apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs) - lcurl, _ := url.Parse(DefaultListenClientURLs) - acurl, _ := url.Parse(DefaultAdvertiseClientURLs) - cfg := &Config{ - MaxSnapFiles: DefaultMaxSnapshots, - MaxWalFiles: DefaultMaxWALs, - - Name: DefaultName, - - SnapshotCount: etcdserver.DefaultSnapshotCount, - SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries, - - MaxTxnOps: DefaultMaxTxnOps, - MaxRequestBytes: DefaultMaxRequestBytes, - MaxConcurrentStreams: DefaultMaxConcurrentStreams, - ExperimentalWarningApplyDuration: DefaultWarningApplyDuration, - - GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, - GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, - GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, - - SocketOpts: transport.SocketOpts{ - ReusePort: false, - ReuseAddress: false, - }, - - TickMs: 100, - ElectionMs: 1000, - InitialElectionTickAdvance: true, - - LPUrls: []url.URL{*lpurl}, - LCUrls: []url.URL{*lcurl}, - APUrls: []url.URL{*apurl}, - ACUrls: []url.URL{*acurl}, - - ClusterState: ClusterStateFlagNew, - InitialClusterToken: "etcd-cluster", - ExperimentalWaitClusterReadyTimeout: DefaultWaitClusterReadyTimeout, - - StrictReconfigCheck: DefaultStrictReconfigCheck, - Metrics: "basic", - - CORS: map[string]struct{}{"*": {}}, - HostWhitelist: map[string]struct{}{"*": {}}, - - AuthToken: "simple", - BcryptCost: uint(bcrypt.DefaultCost), - AuthTokenTTL: 300, - - PreVote: true, - - loggerMu: new(sync.RWMutex), - logger: nil, - Logger: "zap", - LogOutputs: []string{DefaultLogOutput}, - LogLevel: logutil.DefaultLogLevel, - EnableLogRotation: false, - LogRotationConfigJSON: DefaultLogRotationConfig, - EnableGRPCGateway: true, - - ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime, - ExperimentalMemoryMlock: false, - ExperimentalTxnModeWriteWithSharedBuffer: true, - ExperimentalMaxLearners: membership.DefaultMaxLearners, - - 
ExperimentalCompactHashCheckEnabled: false, - ExperimentalCompactHashCheckTime: time.Minute, - - V2Deprecation: config.V2_DEPR_DEFAULT, - - DiscoveryCfg: v3discovery.DiscoveryConfig{ - ConfigSpec: clientv3.ConfigSpec{ - DialTimeout: DefaultDiscoveryDialTimeout, - RequestTimeout: DefaultDiscoveryRequestTimeOut, - KeepAliveTime: DefaultDiscoveryKeepAliveTime, - KeepAliveTimeout: DefaultDiscoveryKeepAliveTimeOut, - - Secure: &clientv3.SecureConfig{}, - Auth: &clientv3.AuthConfig{}, - }, - }, - } - cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) - return cfg -} - -func ConfigFromFile(path string) (*Config, error) { - cfg := &configYAML{Config: *NewConfig()} - if err := cfg.configFromFile(path); err != nil { - return nil, err - } - return &cfg.Config, nil -} - -func (cfg *configYAML) configFromFile(path string) error { - b, err := os.ReadFile(path) - if err != nil { - return err - } - - defaultInitialCluster := cfg.InitialCluster - - err = yaml.Unmarshal(b, cfg) - if err != nil { - return err - } - - if cfg.LPUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ",")) - if err != nil { - fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err) - os.Exit(1) - } - cfg.LPUrls = u - } - - if cfg.LCUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ",")) - if err != nil { - fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err) - os.Exit(1) - } - cfg.LCUrls = u - } - - if cfg.APUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ",")) - if err != nil { - fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err) - os.Exit(1) - } - cfg.APUrls = u - } - - if cfg.ACUrlsJSON != "" { - u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ",")) - if err != nil { - fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err) - os.Exit(1) - } - cfg.ACUrls = u - } - - if cfg.ListenMetricsUrlsJSON != "" 
{ - u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ",")) - if err != nil { - fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err) - os.Exit(1) - } - cfg.ListenMetricsUrls = u - } - - if cfg.CORSJSON != "" { - uv := flags.NewUniqueURLsWithExceptions(cfg.CORSJSON, "*") - cfg.CORS = uv.Values - } - - if cfg.HostWhitelistJSON != "" { - uv := flags.NewUniqueStringsValue(cfg.HostWhitelistJSON) - cfg.HostWhitelist = uv.Values - } - - // If a discovery or discovery-endpoints flag is set, clear default initial cluster set by InitialClusterFromName - if (cfg.Durl != "" || cfg.DNSCluster != "" || len(cfg.DiscoveryCfg.Endpoints) > 0) && cfg.InitialCluster == defaultInitialCluster { - cfg.InitialCluster = "" - } - if cfg.ClusterState == "" { - cfg.ClusterState = ClusterStateFlagNew - } - - copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) { - tls.CertFile = ysc.CertFile - tls.KeyFile = ysc.KeyFile - tls.ClientCertFile = ysc.ClientCertFile - tls.ClientKeyFile = ysc.ClientKeyFile - tls.ClientCertAuth = ysc.CertAuth - tls.TrustedCAFile = ysc.TrustedCAFile - } - copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON) - copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON) - cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS - cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS - if cfg.SelfSignedCertValidity == 0 { - cfg.SelfSignedCertValidity = 1 - } - return cfg.Validate() -} - -func updateCipherSuites(tls *transport.TLSInfo, ss []string) error { - if len(tls.CipherSuites) > 0 && len(ss) > 0 { - return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss) - } - if len(ss) > 0 { - cs, err := tlsutil.GetCipherSuites(ss) - if err != nil { - return err - } - tls.CipherSuites = cs - } - return nil -} - -func updateMinMaxVersions(info *transport.TLSInfo, min, max string) { - // Validate() has been called to check the user input, so it should never fail. 
- var err error - if info.MinVersion, err = tlsutil.GetTLSVersion(min); err != nil { - panic(err) - } - if info.MaxVersion, err = tlsutil.GetTLSVersion(max); err != nil { - panic(err) - } -} - -// Validate ensures that '*embed.Config' fields are properly configured. -func (cfg *Config) Validate() error { - if err := cfg.setupLogging(); err != nil { - return err - } - if err := checkBindURLs(cfg.LPUrls); err != nil { - return err - } - if err := checkBindURLs(cfg.LCUrls); err != nil { - return err - } - if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil { - return err - } - if err := checkHostURLs(cfg.APUrls); err != nil { - addrs := cfg.getAPURLs() - return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) - } - if err := checkHostURLs(cfg.ACUrls); err != nil { - addrs := cfg.getACURLs() - return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) - } - // Check if conflicting flags are passed. - nSet := 0 - for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != "", len(cfg.DiscoveryCfg.Endpoints) > 0} { - if v { - nSet++ - } - } - - if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting { - return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState) - } - - if nSet > 1 { - return ErrConflictBootstrapFlags - } - - // Check if both v2 discovery and v3 discovery flags are passed. 
- v2discoveryFlagsExist := cfg.Dproxy != "" - v3discoveryFlagsExist := len(cfg.DiscoveryCfg.Endpoints) > 0 || - cfg.DiscoveryCfg.Token != "" || - cfg.DiscoveryCfg.Secure.Cert != "" || - cfg.DiscoveryCfg.Secure.Key != "" || - cfg.DiscoveryCfg.Secure.Cacert != "" || - cfg.DiscoveryCfg.Auth.Username != "" || - cfg.DiscoveryCfg.Auth.Password != "" - - if v2discoveryFlagsExist && v3discoveryFlagsExist { - return errors.New("both v2 discovery settings (discovery, discovery-proxy) " + - "and v3 discovery settings (discovery-token, discovery-endpoints, discovery-cert, " + - "discovery-key, discovery-cacert, discovery-user, discovery-password) are set") - } - - // If one of `discovery-token` and `discovery-endpoints` is provided, - // then the other one must be provided as well. - if (cfg.DiscoveryCfg.Token != "") != (len(cfg.DiscoveryCfg.Endpoints) > 0) { - return errors.New("both --discovery-token and --discovery-endpoints must be set") - } - - if cfg.TickMs == 0 { - return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs) - } - if cfg.ElectionMs == 0 { - return fmt.Errorf("--election-timeout must be >0 (set to %dms)", cfg.ElectionMs) - } - if 5*cfg.TickMs > cfg.ElectionMs { - return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs) - } - if cfg.ElectionMs > maxElectionMs { - return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs) - } - - // check this last since proxying in etcdmain may make this OK - if cfg.LCUrls != nil && cfg.ACUrls == nil { - return ErrUnsetAdvertiseClientURLsFlag - } - - switch cfg.AutoCompactionMode { - case "": - case CompactorModeRevision, CompactorModePeriodic: - default: - return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode) - } - - // Validate distributed tracing configuration but only if enabled. 
- if cfg.ExperimentalEnableDistributedTracing { - if err := validateTracingConfig(cfg.ExperimentalDistributedTracingSamplingRatePerMillion); err != nil { - return fmt.Errorf("distributed tracing configurition is not valid: (%v)", err) - } - } - - if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint { - cfg.logger.Warn("Detected that checkpointing is enabled without persistence. Consider enabling experimental-enable-lease-checkpoint-persist") - } - - if cfg.ExperimentalEnableLeaseCheckpointPersist && !cfg.ExperimentalEnableLeaseCheckpoint { - return fmt.Errorf("setting experimental-enable-lease-checkpoint-persist requires experimental-enable-lease-checkpoint") - } - - if cfg.ExperimentalCompactHashCheckTime <= 0 { - return fmt.Errorf("--experimental-compact-hash-check-time must be >0 (set to %v)", cfg.ExperimentalCompactHashCheckTime) - } - - // If `--name` isn't configured, then multiple members may have the same "default" name. - // When adding a new member with the "default" name as well, etcd may regards its peerURL - // as one additional peerURL of the existing member which has the same "default" name, - // because each member can have multiple client or peer URLs. - // Please refer to https://github.com/etcd-io/etcd/issues/13757 - if cfg.Name == DefaultName { - cfg.logger.Warn( - "it isn't recommended to use default name, please set a value for --name. "+ - "Note that etcd might run into issue when multiple members have the same default name", - zap.String("name", cfg.Name)) - } - - minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion) - if err != nil { - return err - } - maxVersion, err := tlsutil.GetTLSVersion(cfg.TlsMaxVersion) - if err != nil { - return err - } - - // maxVersion == 0 means that Go selects the highest available version. 
- if maxVersion != 0 && minVersion > maxVersion { - return fmt.Errorf("min version (%s) is greater than max version (%s)", cfg.TlsMinVersion, cfg.TlsMaxVersion) - } - - // Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently. - if minVersion == tls.VersionTLS13 && len(cfg.CipherSuites) > 0 { - return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled") - } - - return nil -} - -// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery. -func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) { - token = cfg.InitialClusterToken - switch { - case cfg.Durl != "": - urlsmap = types.URLsMap{} - // If using v2 discovery, generate a temporary cluster based on - // self's advertised peer URLs - urlsmap[cfg.Name] = cfg.APUrls - token = cfg.Durl - - case len(cfg.DiscoveryCfg.Endpoints) > 0: - urlsmap = types.URLsMap{} - // If using v3 discovery, generate a temporary cluster based on - // self's advertised peer URLs - urlsmap[cfg.Name] = cfg.APUrls - token = cfg.DiscoveryCfg.Token - - case cfg.DNSCluster != "": - clusterStrs, cerr := cfg.GetDNSClusterNames() - lg := cfg.logger - if cerr != nil { - lg.Warn("failed to resolve during SRV discovery", zap.Error(cerr)) - } - if len(clusterStrs) == 0 { - return nil, "", cerr - } - for _, s := range clusterStrs { - lg.Info("got bootstrap from DNS for etcd-server", zap.String("node", s)) - } - clusterStr := strings.Join(clusterStrs, ",") - if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" { - cfg.PeerTLSInfo.ServerName = cfg.DNSCluster - } - urlsmap, err = types.NewURLsMap(clusterStr) - // only etcd member must belong to the discovered cluster. - // proxy does not need to belong to the discovered cluster. 
- if which == "etcd" { - if _, ok := urlsmap[cfg.Name]; !ok { - return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name) - } - } - - default: - // We're statically configured, and cluster has appropriately been set. - urlsmap, err = types.NewURLsMap(cfg.InitialCluster) - } - return urlsmap, token, err -} - -// GetDNSClusterNames uses DNS SRV records to get a list of initial nodes for cluster bootstrapping. -// This function will return a list of one or more nodes, as well as any errors encountered while -// performing service discovery. -// Note: Because this checks multiple sets of SRV records, discovery should only be considered to have -// failed if the returned node list is empty. -func (cfg *Config) GetDNSClusterNames() ([]string, error) { - var ( - clusterStrs []string - cerr error - serviceNameSuffix string - ) - if cfg.DNSClusterServiceName != "" { - serviceNameSuffix = "-" + cfg.DNSClusterServiceName - } - - lg := cfg.GetLogger() - - // Use both etcd-server-ssl and etcd-server for discovery. - // Combine the results if both are available. - clusterStrs, cerr = getCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) - if cerr != nil { - clusterStrs = make([]string, 0) - } - lg.Info( - "get cluster for etcd-server-ssl SRV", - zap.String("service-scheme", "https"), - zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix), - zap.String("server-name", cfg.Name), - zap.String("discovery-srv", cfg.DNSCluster), - zap.Strings("advertise-peer-urls", cfg.getAPURLs()), - zap.Strings("found-cluster", clusterStrs), - zap.Error(cerr), - ) - - defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) - if httpCerr == nil { - clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...) 
- } - lg.Info( - "get cluster for etcd-server SRV", - zap.String("service-scheme", "http"), - zap.String("service-name", "etcd-server"+serviceNameSuffix), - zap.String("server-name", cfg.Name), - zap.String("discovery-srv", cfg.DNSCluster), - zap.Strings("advertise-peer-urls", cfg.getAPURLs()), - zap.Strings("found-cluster", clusterStrs), - zap.Error(httpCerr), - ) - - return clusterStrs, multierr.Combine(cerr, httpCerr) -} - -func (cfg Config) InitialClusterFromName(name string) (ret string) { - if len(cfg.APUrls) == 0 { - return "" - } - n := name - if name == "" { - n = DefaultName - } - for i := range cfg.APUrls { - ret = ret + "," + n + "=" + cfg.APUrls[i].String() - } - return ret[1:] -} - -func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew } -func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) } - -func (cfg Config) V2DeprecationEffective() config.V2DeprecationEnum { - if cfg.V2Deprecation == "" { - return config.V2_DEPR_DEFAULT - } - return cfg.V2Deprecation -} - -func (cfg Config) defaultPeerHost() bool { - return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs -} - -func (cfg Config) defaultClientHost() bool { - return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs -} - -func (cfg *Config) ClientSelfCert() (err error) { - if !cfg.ClientAutoTLS { - return nil - } - if !cfg.ClientTLSInfo.Empty() { - cfg.logger.Warn("ignoring client auto TLS since certs given") - return nil - } - chosts := make([]string, len(cfg.LCUrls)) - for i, u := range cfg.LCUrls { - chosts[i] = u.Host - } - cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity) - if err != nil { - return err - } - return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites) -} - -func (cfg *Config) PeerSelfCert() (err error) { - if !cfg.PeerAutoTLS { - return nil - } - if 
!cfg.PeerTLSInfo.Empty() { - cfg.logger.Warn("ignoring peer auto TLS since certs given") - return nil - } - phosts := make([]string, len(cfg.LPUrls)) - for i, u := range cfg.LPUrls { - phosts[i] = u.Host - } - cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity) - if err != nil { - return err - } - return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites) -} - -// UpdateDefaultClusterFromName updates cluster advertise URLs with, if available, default host, -// if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0. -// e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380 -// then the advertise peer host would be updated with machine's default host, -// while keeping the listen URL's port. -// User can work around this by explicitly setting URL with 127.0.0.1. -// It returns the default hostname, if used, and the error, if any, from getting the machine's default host. -// TODO: check whether fields are set instead of whether fields have default value -func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) { - if defaultHostname == "" || defaultHostStatus != nil { - // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc') - if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { - cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) - } - return "", defaultHostStatus - } - - used := false - pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() - if cfg.defaultPeerHost() && pip == "0.0.0.0" { - cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} - used = true - } - // update 'initial-cluster' when only the name is specified (e.g. 
'etcd --name=abc') - if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster { - cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) - } - - cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() - if cfg.defaultClientHost() && cip == "0.0.0.0" { - cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} - used = true - } - dhost := defaultHostname - if !used { - dhost = "" - } - return dhost, defaultHostStatus -} - -// checkBindURLs returns an error if any URL uses a domain name. -func checkBindURLs(urls []url.URL) error { - for _, url := range urls { - if url.Scheme == "unix" || url.Scheme == "unixs" { - continue - } - host, _, err := net.SplitHostPort(url.Host) - if err != nil { - return err - } - if host == "localhost" { - // special case for local address - // TODO: support /etc/hosts ? - continue - } - if net.ParseIP(host) == nil { - return fmt.Errorf("expected IP in URL for binding (%s)", url.String()) - } - } - return nil -} - -func checkHostURLs(urls []url.URL) error { - for _, url := range urls { - host, _, err := net.SplitHostPort(url.Host) - if err != nil { - return err - } - if host == "" { - return fmt.Errorf("unexpected empty host (%s)", url.String()) - } - } - return nil -} - -func (cfg *Config) getAPURLs() (ss []string) { - ss = make([]string, len(cfg.APUrls)) - for i := range cfg.APUrls { - ss[i] = cfg.APUrls[i].String() - } - return ss -} - -func (cfg *Config) getLPURLs() (ss []string) { - ss = make([]string, len(cfg.LPUrls)) - for i := range cfg.LPUrls { - ss[i] = cfg.LPUrls[i].String() - } - return ss -} - -func (cfg *Config) getACURLs() (ss []string) { - ss = make([]string, len(cfg.ACUrls)) - for i := range cfg.ACUrls { - ss[i] = cfg.ACUrls[i].String() - } - return ss -} - -func (cfg *Config) getLCURLs() (ss []string) { - ss = make([]string, len(cfg.LCUrls)) - for i := range cfg.LCUrls { - ss[i] = cfg.LCUrls[i].String() - } - return ss -} - -func (cfg *Config) 
getMetricsURLs() (ss []string) { - ss = make([]string, len(cfg.ListenMetricsUrls)) - for i := range cfg.ListenMetricsUrls { - ss[i] = cfg.ListenMetricsUrls[i].String() - } - return ss -} - -func parseBackendFreelistType(freelistType string) bolt.FreelistType { - if freelistType == freelistArrayType { - return bolt.FreelistArrayType - } - - return bolt.FreelistMapType -} diff --git a/server/embed/config_logging.go b/server/embed/config_logging.go deleted file mode 100644 index 900519544bb..00000000000 --- a/server/embed/config_logging.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package embed - -import ( - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "net/url" - "os" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zapgrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" - "gopkg.in/natefinch/lumberjack.v2" - - "go.etcd.io/etcd/client/pkg/v3/logutil" -) - -// GetLogger returns the logger. -func (cfg Config) GetLogger() *zap.Logger { - cfg.loggerMu.RLock() - l := cfg.logger - cfg.loggerMu.RUnlock() - return l -} - -// setupLogging initializes etcd logging. -// Must be called after flag parsing or finishing configuring embed.Config. 
-func (cfg *Config) setupLogging() error { - switch cfg.Logger { - case "capnslog": // removed in v3.5 - return fmt.Errorf("--logger=capnslog is removed in v3.5") - - case "zap": - if len(cfg.LogOutputs) == 0 { - cfg.LogOutputs = []string{DefaultLogOutput} - } - if len(cfg.LogOutputs) > 1 { - for _, v := range cfg.LogOutputs { - if v == DefaultLogOutput { - return fmt.Errorf("multi logoutput for %q is not supported yet", DefaultLogOutput) - } - } - } - if cfg.EnableLogRotation { - if err := setupLogRotation(cfg.LogOutputs, cfg.LogRotationConfigJSON); err != nil { - return err - } - } - - outputPaths, errOutputPaths := make([]string, 0), make([]string, 0) - isJournal := false - for _, v := range cfg.LogOutputs { - switch v { - case DefaultLogOutput: - outputPaths = append(outputPaths, StdErrLogOutput) - errOutputPaths = append(errOutputPaths, StdErrLogOutput) - - case JournalLogOutput: - isJournal = true - - case StdErrLogOutput: - outputPaths = append(outputPaths, StdErrLogOutput) - errOutputPaths = append(errOutputPaths, StdErrLogOutput) - - case StdOutLogOutput: - outputPaths = append(outputPaths, StdOutLogOutput) - errOutputPaths = append(errOutputPaths, StdOutLogOutput) - - default: - var path string - if cfg.EnableLogRotation { - // append rotate scheme to logs managed by lumberjack log rotation - if v[0:1] == "/" { - path = fmt.Sprintf("rotate:/%%2F%s", v[1:]) - } else { - path = fmt.Sprintf("rotate:/%s", v) - } - } else { - path = v - } - outputPaths = append(outputPaths, path) - errOutputPaths = append(errOutputPaths, path) - } - } - - if !isJournal { - copied := logutil.DefaultZapLoggerConfig - copied.OutputPaths = outputPaths - copied.ErrorOutputPaths = errOutputPaths - copied = logutil.MergeOutputPaths(copied) - copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) - encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat) - if err != nil { - return err - } - copied.Encoding = encoding - if cfg.ZapLoggerBuilder == nil { - lg, err 
:= copied.Build() - if err != nil { - return err - } - cfg.ZapLoggerBuilder = NewZapLoggerBuilder(lg) - } - } else { - if len(cfg.LogOutputs) > 1 { - for _, v := range cfg.LogOutputs { - if v != DefaultLogOutput { - return fmt.Errorf("running with systemd/journal but other '--log-outputs' values (%q) are configured with 'default'; override 'default' value with something else", cfg.LogOutputs) - } - } - } - - // use stderr as fallback - syncer, lerr := getJournalWriteSyncer() - if lerr != nil { - return lerr - } - - lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) - - var encoder zapcore.Encoder - encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat) - if err != nil { - return err - } - - if encoding == logutil.ConsoleLogFormat { - encoder = zapcore.NewConsoleEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig) - } else { - encoder = zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig) - } - - // WARN: do not change field names in encoder config - // journald logging writer assumes field names of "level" and "caller" - cr := zapcore.NewCore( - encoder, - syncer, - lvl, - ) - if cfg.ZapLoggerBuilder == nil { - cfg.ZapLoggerBuilder = NewZapLoggerBuilder(zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer))) - } - } - - err := cfg.ZapLoggerBuilder(cfg) - if err != nil { - return err - } - - logTLSHandshakeFailure := func(conn *tls.Conn, err error) { - state := conn.ConnectionState() - remoteAddr := conn.RemoteAddr().String() - serverName := state.ServerName - if len(state.PeerCertificates) > 0 { - cert := state.PeerCertificates[0] - ips := make([]string, len(cert.IPAddresses)) - for i := range cert.IPAddresses { - ips[i] = cert.IPAddresses[i].String() - } - cfg.logger.Warn( - "rejected connection", - zap.String("remote-addr", remoteAddr), - zap.String("server-name", serverName), - zap.Strings("ip-addresses", ips), - zap.Strings("dns-names", cert.DNSNames), - zap.Error(err), - ) - } else { - cfg.logger.Warn( - "rejected 
connection", - zap.String("remote-addr", remoteAddr), - zap.String("server-name", serverName), - zap.Error(err), - ) - } - } - cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure - cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure - - default: - return fmt.Errorf("unknown logger option %q", cfg.Logger) - } - - return nil -} - -// NewZapLoggerBuilder generates a zap logger builder that sets given logger -// for embedded etcd. -func NewZapLoggerBuilder(lg *zap.Logger) func(*Config) error { - return func(cfg *Config) error { - cfg.loggerMu.Lock() - defer cfg.loggerMu.Unlock() - cfg.logger = lg - return nil - } -} - -// NewZapCoreLoggerBuilder - is a deprecated setter for the logger. -// Deprecated: Use simpler NewZapLoggerBuilder. To be removed in etcd-3.6. -func NewZapCoreLoggerBuilder(lg *zap.Logger, _ zapcore.Core, _ zapcore.WriteSyncer) func(*Config) error { - return NewZapLoggerBuilder(lg) -} - -// SetupGlobalLoggers configures 'global' loggers (grpc, zapGlobal) based on the cfg. -// -// The method is not executed by embed server by default (since 3.5) to -// enable setups where grpc/zap.Global logging is configured independently -// or spans separate lifecycle (like in tests). -func (cfg *Config) SetupGlobalLoggers() { - lg := cfg.GetLogger() - if lg != nil { - if cfg.LogLevel == "debug" { - grpc.EnableTracing = true - grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) - } else { - grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr)) - } - zap.ReplaceGlobals(lg) - } -} - -type logRotationConfig struct { - *lumberjack.Logger -} - -// Sync implements zap.Sink -func (logRotationConfig) Sync() error { return nil } - -// setupLogRotation initializes log rotation for a single file path target. 
-func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error { - var logRotationConfig logRotationConfig - outputFilePaths := 0 - for _, v := range logOutputs { - switch v { - case DefaultLogOutput, StdErrLogOutput, StdOutLogOutput: - continue - default: - outputFilePaths++ - } - } - // log rotation requires file target - if len(logOutputs) == 1 && outputFilePaths == 0 { - return ErrLogRotationInvalidLogOutput - } - // support max 1 file target for log rotation - if outputFilePaths > 1 { - return ErrLogRotationInvalidLogOutput - } - - if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationConfig); err != nil { - var unmarshalTypeError *json.UnmarshalTypeError - var syntaxError *json.SyntaxError - switch { - case errors.As(err, &syntaxError): - return fmt.Errorf("improperly formatted log rotation config: %v", err) - case errors.As(err, &unmarshalTypeError): - return fmt.Errorf("invalid log rotation config: %v", err) - default: - return fmt.Errorf("fail to unmarshal log rotation config: %v", err) - } - } - zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) { - logRotationConfig.Filename = u.Path[1:] - return &logRotationConfig, nil - }) - return nil -} diff --git a/server/embed/config_test.go b/server/embed/config_test.go deleted file mode 100644 index 726f3395e6d..00000000000 --- a/server/embed/config_test.go +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package embed - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/client/pkg/v3/srv" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - - "sigs.k8s.io/yaml" -) - -func notFoundErr(service, domain string) error { - name := fmt.Sprintf("_%s._tcp.%s", service, domain) - return &net.DNSError{Err: "no such host", Name: name, Server: "10.0.0.53:53", IsTimeout: false, IsTemporary: false, IsNotFound: true} -} - -func TestConfigFileOtherFields(t *testing.T) { - ctls := securityConfig{TrustedCAFile: "cca", CertFile: "ccert", KeyFile: "ckey"} - ptls := securityConfig{TrustedCAFile: "pca", CertFile: "pcert", KeyFile: "pkey"} - yc := struct { - ClientSecurityCfgFile securityConfig `json:"client-transport-security"` - PeerSecurityCfgFile securityConfig `json:"peer-transport-security"` - ForceNewCluster bool `json:"force-new-cluster"` - Logger string `json:"logger"` - LogOutputs []string `json:"log-outputs"` - Debug bool `json:"debug"` - SocketOpts transport.SocketOpts `json:"socket-options"` - }{ - ctls, - ptls, - true, - "zap", - []string{"/dev/null"}, - false, - transport.SocketOpts{ - ReusePort: true, - }, - } - - b, err := yaml.Marshal(&yc) - if err != nil { - t.Fatal(err) - } - - tmpfile := mustCreateCfgFile(t, b) - defer os.Remove(tmpfile.Name()) - - cfg, err := ConfigFromFile(tmpfile.Name()) - if err != nil { - t.Fatal(err) - } - - if !ctls.equals(&cfg.ClientTLSInfo) { - t.Errorf("ClientTLS = %v, want %v", cfg.ClientTLSInfo, ctls) - } - if !ptls.equals(&cfg.PeerTLSInfo) { - t.Errorf("PeerTLS = %v, want %v", cfg.PeerTLSInfo, ptls) - } - - assert.Equal(t, true, cfg.ForceNewCluster, "ForceNewCluster does not match") - - assert.Equal(t, true, cfg.SocketOpts.ReusePort, "ReusePort does not match") - - assert.Equal(t, false, 
cfg.SocketOpts.ReuseAddress, "ReuseAddress does not match") -} - -// TestUpdateDefaultClusterFromName ensures that etcd can start with 'etcd --name=abc'. -func TestUpdateDefaultClusterFromName(t *testing.T) { - cfg := NewConfig() - defaultInitialCluster := cfg.InitialCluster - oldscheme := cfg.APUrls[0].Scheme - origpeer := cfg.APUrls[0].String() - origadvc := cfg.ACUrls[0].String() - - cfg.Name = "abc" - lpport := cfg.LPUrls[0].Port() - - // in case of 'etcd --name=abc' - exp := fmt.Sprintf("%s=%s://localhost:%s", cfg.Name, oldscheme, lpport) - _, _ = cfg.UpdateDefaultClusterFromName(defaultInitialCluster) - if exp != cfg.InitialCluster { - t.Fatalf("initial-cluster expected %q, got %q", exp, cfg.InitialCluster) - } - // advertise peer URL should not be affected - if origpeer != cfg.APUrls[0].String() { - t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.APUrls[0].String()) - } - // advertise client URL should not be affected - if origadvc != cfg.ACUrls[0].String() { - t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.ACUrls[0].String()) - } -} - -// TestUpdateDefaultClusterFromNameOverwrite ensures that machine's default host is only used -// if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0. 
-func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) { - if defaultHostname == "" { - t.Skip("machine's default host not found") - } - - cfg := NewConfig() - defaultInitialCluster := cfg.InitialCluster - oldscheme := cfg.APUrls[0].Scheme - origadvc := cfg.ACUrls[0].String() - - cfg.Name = "abc" - lpport := cfg.LPUrls[0].Port() - cfg.LPUrls[0] = url.URL{Scheme: cfg.LPUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)} - dhost, _ := cfg.UpdateDefaultClusterFromName(defaultInitialCluster) - if dhost != defaultHostname { - t.Fatalf("expected default host %q, got %q", defaultHostname, dhost) - } - aphost, apport := cfg.APUrls[0].Hostname(), cfg.APUrls[0].Port() - if apport != lpport { - t.Fatalf("advertise peer url got different port %s, expected %s", apport, lpport) - } - if aphost != defaultHostname { - t.Fatalf("advertise peer url expected machine default host %q, got %q", defaultHostname, aphost) - } - expected := fmt.Sprintf("%s=%s://%s:%s", cfg.Name, oldscheme, defaultHostname, lpport) - if expected != cfg.InitialCluster { - t.Fatalf("initial-cluster expected %q, got %q", expected, cfg.InitialCluster) - } - - // advertise client URL should not be affected - if origadvc != cfg.ACUrls[0].String() { - t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.ACUrls[0].String()) - } -} - -func (s *securityConfig) equals(t *transport.TLSInfo) bool { - return s.CertFile == t.CertFile && - s.CertAuth == t.ClientCertAuth && - s.TrustedCAFile == t.TrustedCAFile -} - -func mustCreateCfgFile(t *testing.T, b []byte) *os.File { - tmpfile, err := os.CreateTemp("", "servercfg") - if err != nil { - t.Fatal(err) - } - if _, err = tmpfile.Write(b); err != nil { - t.Fatal(err) - } - if err = tmpfile.Close(); err != nil { - t.Fatal(err) - } - return tmpfile -} - -func TestAutoCompactionModeInvalid(t *testing.T) { - cfg := NewConfig() - cfg.Logger = "zap" - cfg.LogOutputs = []string{"/dev/null"} - cfg.AutoCompactionMode = "period" - err := cfg.Validate() - if 
err == nil { - t.Errorf("expected non-nil error, got %v", err) - } -} - -func TestAutoCompactionModeParse(t *testing.T) { - tests := []struct { - mode string - retention string - werr bool - wdur time.Duration - }{ - // revision - {"revision", "1", false, 1}, - {"revision", "1h", false, time.Hour}, - {"revision", "a", true, 0}, - {"revision", "-1", true, 0}, - // periodic - {"periodic", "1", false, time.Hour}, - {"periodic", "a", true, 0}, - {"revision", "-1", true, 0}, - // err mode - {"errmode", "1", false, 0}, - {"errmode", "1h", false, time.Hour}, - } - - hasErr := func(err error) bool { - return err != nil - } - - for i, tt := range tests { - dur, err := parseCompactionRetention(tt.mode, tt.retention) - if hasErr(err) != tt.werr { - t.Errorf("#%d: err = %v, want %v", i, err, tt.werr) - } - if dur != tt.wdur { - t.Errorf("#%d: duration = %s, want %s", i, dur, tt.wdur) - } - } -} - -func TestPeerURLsMapAndTokenFromSRV(t *testing.T) { - defer func() { getCluster = srv.GetCluster }() - - tests := []struct { - withSSL []string - withoutSSL []string - apurls []string - wurls string - werr bool - }{ - { - []string{}, - []string{}, - []string{"http://localhost:2380"}, - "", - true, - }, - { - []string{"1.example.com=https://1.example.com:2380", "0=https://2.example.com:2380", "1=https://3.example.com:2380"}, - []string{}, - []string{"https://1.example.com:2380"}, - "0=https://2.example.com:2380,1.example.com=https://1.example.com:2380,1=https://3.example.com:2380", - false, - }, - { - []string{"1.example.com=https://1.example.com:2380"}, - []string{"0=http://2.example.com:2380", "1=http://3.example.com:2380"}, - []string{"https://1.example.com:2380"}, - "0=http://2.example.com:2380,1.example.com=https://1.example.com:2380,1=http://3.example.com:2380", - false, - }, - { - []string{}, - []string{"1.example.com=http://1.example.com:2380", "0=http://2.example.com:2380", "1=http://3.example.com:2380"}, - []string{"http://1.example.com:2380"}, - 
"0=http://2.example.com:2380,1.example.com=http://1.example.com:2380,1=http://3.example.com:2380", - false, - }, - } - - hasErr := func(err error) bool { - return err != nil - } - - for i, tt := range tests { - getCluster = func(serviceScheme string, service string, name string, dns string, apurls types.URLs) ([]string, error) { - var urls []string - if serviceScheme == "https" && service == "etcd-server-ssl" { - urls = tt.withSSL - } else if serviceScheme == "http" && service == "etcd-server" { - urls = tt.withoutSSL - } - if len(urls) > 0 { - return urls, nil - } - return urls, notFoundErr(service, dns) - } - - cfg := NewConfig() - cfg.Name = "1.example.com" - cfg.InitialCluster = "" - cfg.InitialClusterToken = "" - cfg.DNSCluster = "example.com" - cfg.APUrls = types.MustNewURLs(tt.apurls) - - if err := cfg.Validate(); err != nil { - t.Errorf("#%d: failed to validate test Config: %v", i, err) - continue - } - - urlsmap, _, err := cfg.PeerURLsMapAndToken("etcd") - if urlsmap.String() != tt.wurls { - t.Errorf("#%d: urlsmap = %s, want = %s", i, urlsmap.String(), tt.wurls) - } - if hasErr(err) != tt.werr { - t.Errorf("#%d: err = %v, want = %v", i, err, tt.werr) - } - } -} - -func TestLeaseCheckpointValidate(t *testing.T) { - tcs := []struct { - name string - configFunc func() Config - expectError bool - }{ - { - name: "Default config should pass", - configFunc: func() Config { - return *NewConfig() - }, - }, - { - name: "Enabling checkpoint leases should pass", - configFunc: func() Config { - cfg := *NewConfig() - cfg.ExperimentalEnableLeaseCheckpoint = true - return cfg - }, - }, - { - name: "Enabling checkpoint leases and persist should pass", - configFunc: func() Config { - cfg := *NewConfig() - cfg.ExperimentalEnableLeaseCheckpoint = true - cfg.ExperimentalEnableLeaseCheckpointPersist = true - return cfg - }, - }, - { - name: "Enabling checkpoint leases persist without checkpointing itself should fail", - configFunc: func() Config { - cfg := *NewConfig() - 
cfg.ExperimentalEnableLeaseCheckpointPersist = true - return cfg - }, - expectError: true, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - cfg := tc.configFunc() - err := cfg.Validate() - if (err != nil) != tc.expectError { - t.Errorf("config.Validate() = %q, expected error: %v", err, tc.expectError) - } - }) - } -} - -func TestLogRotation(t *testing.T) { - tests := []struct { - name string - logOutputs []string - logRotationConfig string - wantErr bool - wantErrMsg error - }{ - { - name: "mixed log output targets", - logOutputs: []string{"stderr", "/tmp/path"}, - logRotationConfig: `{"maxsize": 1}`, - }, - { - name: "log output relative path", - logOutputs: []string{"stderr", "tmp/path"}, - logRotationConfig: `{"maxsize": 1}`, - }, - { - name: "no file targets", - logOutputs: []string{"stderr"}, - logRotationConfig: `{"maxsize": 1}`, - wantErr: true, - wantErrMsg: ErrLogRotationInvalidLogOutput, - }, - { - name: "multiple file targets", - logOutputs: []string{"/tmp/path1", "/tmp/path2"}, - logRotationConfig: DefaultLogRotationConfig, - wantErr: true, - wantErrMsg: ErrLogRotationInvalidLogOutput, - }, - { - name: "default output", - logRotationConfig: `{"maxsize": 1}`, - wantErr: true, - wantErrMsg: ErrLogRotationInvalidLogOutput, - }, - { - name: "default log rotation config", - logOutputs: []string{"/tmp/path"}, - logRotationConfig: DefaultLogRotationConfig, - }, - { - name: "invalid logger config", - logOutputs: []string{"/tmp/path"}, - logRotationConfig: `{"maxsize": true}`, - wantErr: true, - wantErrMsg: errors.New("invalid log rotation config: json: cannot unmarshal bool into Go struct field logRotationConfig.maxsize of type int"), - }, - { - name: "improperly formatted logger config", - logOutputs: []string{"/tmp/path"}, - logRotationConfig: `{"maxsize": true`, - wantErr: true, - wantErrMsg: errors.New("improperly formatted log rotation config: unexpected end of JSON input"), - }, - } - for _, tt := range tests { - t.Run(tt.name, 
func(t *testing.T) { - cfg := NewConfig() - cfg.Logger = "zap" - cfg.LogOutputs = tt.logOutputs - cfg.EnableLogRotation = true - cfg.LogRotationConfigJSON = tt.logRotationConfig - err := cfg.Validate() - if err != nil && !tt.wantErr { - t.Errorf("test %q, unexpected error %v", tt.name, err) - } - if err != nil && tt.wantErr && tt.wantErrMsg.Error() != err.Error() { - t.Errorf("test %q, expected error: %+v, got: %+v", tt.name, tt.wantErrMsg, err) - } - if err == nil && tt.wantErr { - t.Errorf("test %q, expected error, got nil", tt.name) - } - if err == nil { - cfg.GetLogger().Info("test log") - } - }) - } -} - -func TestTLSVersionMinMax(t *testing.T) { - tests := []struct { - name string - givenTLSMinVersion string - givenTLSMaxVersion string - givenCipherSuites []string - expectError bool - expectedMinTLSVersion uint16 - expectedMaxTLSVersion uint16 - }{ - { - name: "Minimum TLS version is set", - givenTLSMinVersion: "TLS1.3", - expectedMinTLSVersion: tls.VersionTLS13, - expectedMaxTLSVersion: 0, - }, - { - name: "Maximum TLS version is set", - givenTLSMaxVersion: "TLS1.2", - expectedMinTLSVersion: 0, - expectedMaxTLSVersion: tls.VersionTLS12, - }, - { - name: "Minimum and Maximum TLS versions are set", - givenTLSMinVersion: "TLS1.3", - givenTLSMaxVersion: "TLS1.3", - expectedMinTLSVersion: tls.VersionTLS13, - expectedMaxTLSVersion: tls.VersionTLS13, - }, - { - name: "Minimum and Maximum TLS versions are set in reverse order", - givenTLSMinVersion: "TLS1.3", - givenTLSMaxVersion: "TLS1.2", - expectError: true, - }, - { - name: "Invalid minimum TLS version", - givenTLSMinVersion: "invalid version", - expectError: true, - }, - { - name: "Invalid maximum TLS version", - givenTLSMaxVersion: "invalid version", - expectError: true, - }, - { - name: "Cipher suites configured for TLS 1.3", - givenTLSMinVersion: "TLS1.3", - givenCipherSuites: []string{"TLS_AES_128_GCM_SHA256"}, - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) 
{ - cfg := NewConfig() - cfg.TlsMinVersion = tt.givenTLSMinVersion - cfg.TlsMaxVersion = tt.givenTLSMaxVersion - cfg.CipherSuites = tt.givenCipherSuites - - err := cfg.Validate() - if err != nil { - assert.True(t, tt.expectError, "Validate() returned error while expecting success: %v", err) - return - } - - updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion) - updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion) - - assert.Equal(t, tt.expectedMinTLSVersion, cfg.PeerTLSInfo.MinVersion) - assert.Equal(t, tt.expectedMaxTLSVersion, cfg.PeerTLSInfo.MaxVersion) - assert.Equal(t, tt.expectedMinTLSVersion, cfg.ClientTLSInfo.MinVersion) - assert.Equal(t, tt.expectedMaxTLSVersion, cfg.ClientTLSInfo.MaxVersion) - }) - } -} diff --git a/server/embed/config_tracing.go b/server/embed/config_tracing.go deleted file mode 100644 index 9e03d03e320..00000000000 --- a/server/embed/config_tracing.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "context" - "fmt" - - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/sdk/resource" - tracesdk "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.4.0" - "go.uber.org/zap" -) - -const maxSamplingRatePerMillion = 1000000 - -func validateTracingConfig(samplingRate int) error { - if samplingRate < 0 { - return fmt.Errorf("tracing sampling rate must be positive") - } - if samplingRate > maxSamplingRatePerMillion { - return fmt.Errorf("tracing sampling rate must be less than %d", maxSamplingRatePerMillion) - } - - return nil -} - -type tracingExporter struct { - exporter tracesdk.SpanExporter - opts []otelgrpc.Option - provider *tracesdk.TracerProvider -} - -func newTracingExporter(ctx context.Context, cfg *Config) (*tracingExporter, error) { - exporter, err := otlptracegrpc.New(ctx, - otlptracegrpc.WithInsecure(), - otlptracegrpc.WithEndpoint(cfg.ExperimentalDistributedTracingAddress), - ) - if err != nil { - return nil, err - } - - res, err := resource.New(ctx, - resource.WithAttributes( - semconv.ServiceNameKey.String(cfg.ExperimentalDistributedTracingServiceName), - ), - ) - if err != nil { - return nil, err - } - - if resWithIDKey := determineResourceWithIDKey(cfg.ExperimentalDistributedTracingServiceInstanceID); resWithIDKey != nil { - // Merge resources into a new - // resource in case of duplicates. 
- res, err = resource.Merge(res, resWithIDKey) - if err != nil { - return nil, err - } - } - - traceProvider := tracesdk.NewTracerProvider( - tracesdk.WithBatcher(exporter), - tracesdk.WithResource(res), - tracesdk.WithSampler( - tracesdk.ParentBased(determineSampler(cfg.ExperimentalDistributedTracingSamplingRatePerMillion)), - ), - ) - - options := []otelgrpc.Option{ - otelgrpc.WithPropagators( - propagation.NewCompositeTextMapPropagator( - propagation.TraceContext{}, - propagation.Baggage{}, - ), - ), - otelgrpc.WithTracerProvider( - traceProvider, - ), - } - - cfg.logger.Debug( - "distributed tracing enabled", - zap.String("address", cfg.ExperimentalDistributedTracingAddress), - zap.String("service-name", cfg.ExperimentalDistributedTracingServiceName), - zap.String("service-instance-id", cfg.ExperimentalDistributedTracingServiceInstanceID), - zap.Int("sampling-rate", cfg.ExperimentalDistributedTracingSamplingRatePerMillion), - ) - - return &tracingExporter{ - exporter: exporter, - opts: options, - provider: traceProvider, - }, nil -} - -func (te *tracingExporter) Close(ctx context.Context) { - if te.provider != nil { - te.provider.Shutdown(ctx) - } - if te.exporter != nil { - te.exporter.Shutdown(ctx) - } -} - -func determineSampler(samplingRate int) tracesdk.Sampler { - sampler := tracesdk.NeverSample() - if samplingRate == 0 { - return sampler - } - return tracesdk.TraceIDRatioBased(float64(samplingRate) / float64(maxSamplingRatePerMillion)) -} - -// As Tracing service Instance ID must be unique, it should -// never use the empty default string value, it's set if -// if it's a non empty string. 
-func determineResourceWithIDKey(serviceInstanceID string) *resource.Resource { - if serviceInstanceID != "" { - return resource.NewSchemaless( - (semconv.ServiceInstanceIDKey.String(serviceInstanceID)), - ) - } - return nil -} diff --git a/server/embed/config_tracing_test.go b/server/embed/config_tracing_test.go deleted file mode 100644 index 0abbe4d1d42..00000000000 --- a/server/embed/config_tracing_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "testing" -) - -const neverSampleDescription = "AlwaysOffSampler" - -func TestDetermineSampler(t *testing.T) { - tests := []struct { - name string - sampleRate int - wantSamplerDescription string - }{ - { - name: "sample rate is disabled", - sampleRate: 0, - wantSamplerDescription: neverSampleDescription, - }, - { - name: "sample rate is 100", - sampleRate: 100, - wantSamplerDescription: "TraceIDRatioBased{0.0001}", - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - sampler := determineSampler(tc.sampleRate) - if tc.wantSamplerDescription != sampler.Description() { - t.Errorf("tracing sampler was not as expected; expected sampler: %#+v, got sampler: %#+v", tc.wantSamplerDescription, sampler.Description()) - } - }) - } -} - -func TestTracingConfig(t *testing.T) { - tests := []struct { - name string - sampleRate int - wantErr bool - }{ - { - name: "invalid - sample rate is less than 0", - sampleRate: -1, - wantErr: true, - }, - { - name: "invalid - sample rate is more than allowed value", - sampleRate: maxSamplingRatePerMillion + 1, - wantErr: true, - }, - { - name: "valid - sample rate is 100", - sampleRate: 100, - wantErr: false, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - err := validateTracingConfig(tc.sampleRate) - if err == nil && tc.wantErr { - t.Errorf("expected error got (%v) error", err) - } - if err != nil && !tc.wantErr { - t.Errorf("expected no errors, got error: (%v)", err) - } - }) - } -} diff --git a/server/embed/doc.go b/server/embed/doc.go deleted file mode 100644 index 3449855b5ce..00000000000 --- a/server/embed/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package embed provides bindings for embedding an etcd server in a program. - -Launch an embedded etcd server using the configuration defaults: - - import ( - "log" - "time" - - "go.etcd.io/etcd/server/v3/embed" - ) - - func main() { - cfg := embed.NewConfig() - cfg.Dir = "default.etcd" - e, err := embed.StartEtcd(cfg) - if err != nil { - log.Fatal(err) - } - defer e.Close() - select { - case <-e.Server.ReadyNotify(): - log.Printf("Server is ready!") - case <-time.After(60 * time.Second): - e.Server.Stop() // trigger a shutdown - log.Printf("Server took too long to start!") - } - log.Fatal(<-e.Err()) - } -*/ -package embed diff --git a/server/embed/etcd.go b/server/embed/etcd.go deleted file mode 100644 index a03b4f1c9fe..00000000000 --- a/server/embed/etcd.go +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "context" - "fmt" - "io" - defaultLog "log" - "net" - "net/http" - "net/url" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/debugutil" - runtimeutil "go.etcd.io/etcd/pkg/v3/runtime" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/etcd/server/v3/verify" - - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/soheilhy/cmux" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" -) - -const ( - // internal fd usage includes disk usage and transport usage. - // To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs - // at most 2 to read/lock/write WALs. One case that it needs to 2 is to - // read all logs after some snapshot index, which locates at the end of - // the second last and the head of the last. For purging, it needs to read - // directory, so it needs 1. For fd monitor, it needs 1. - // For transport, rafthttp builds two long-polling connections and at most - // four temporary connections with each member. There are at most 9 members - // in a cluster, so it should reserve 96. - // For the safety, we set the total reserved number to 150. - reservedInternalFDNum = 150 -) - -// Etcd contains a running etcd server and its listeners. -type Etcd struct { - Peers []*peerListener - Clients []net.Listener - // a map of contexts for the servers that serves client requests. 
- sctxs map[string]*serveCtx - metricsListeners []net.Listener - - tracingExporterShutdown func() - - Server *etcdserver.EtcdServer - - cfg Config - stopc chan struct{} - errc chan error - - closeOnce sync.Once -} - -type peerListener struct { - net.Listener - serve func() error - close func(context.Context) error -} - -// StartEtcd launches the etcd server and HTTP handlers for client/server communication. -// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait -// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use. -func StartEtcd(inCfg *Config) (e *Etcd, err error) { - if err = inCfg.Validate(); err != nil { - return nil, err - } - serving := false - e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})} - cfg := &e.cfg - defer func() { - if e == nil || err == nil { - return - } - if !serving { - // errored before starting gRPC server for serveCtx.serversC - for _, sctx := range e.sctxs { - close(sctx.serversC) - } - } - e.Close() - e = nil - }() - - if !cfg.SocketOpts.Empty() { - cfg.logger.Info( - "configuring socket options", - zap.Bool("reuse-address", cfg.SocketOpts.ReuseAddress), - zap.Bool("reuse-port", cfg.SocketOpts.ReusePort), - ) - } - e.cfg.logger.Info( - "configuring peer listeners", - zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), - ) - if e.Peers, err = configurePeerListeners(cfg); err != nil { - return e, err - } - - e.cfg.logger.Info( - "configuring client listeners", - zap.Strings("listen-client-urls", e.cfg.getLCURLs()), - ) - if e.sctxs, err = configureClientListeners(cfg); err != nil { - return e, err - } - - for _, sctx := range e.sctxs { - e.Clients = append(e.Clients, sctx.l) - } - - var ( - urlsmap types.URLsMap - token string - ) - memberInitialized := true - if !isMemberInitialized(cfg) { - memberInitialized = false - urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd") - if err != nil { - return e, fmt.Errorf("error setting up initial cluster: %v", err) - } - } - - // 
AutoCompactionRetention defaults to "0" if not set. - if len(cfg.AutoCompactionRetention) == 0 { - cfg.AutoCompactionRetention = "0" - } - autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention) - if err != nil { - return e, err - } - - backendFreelistType := parseBackendFreelistType(cfg.BackendFreelistType) - - srvcfg := config.ServerConfig{ - Name: cfg.Name, - ClientURLs: cfg.ACUrls, - PeerURLs: cfg.APUrls, - DataDir: cfg.Dir, - DedicatedWALDir: cfg.WalDir, - SnapshotCount: cfg.SnapshotCount, - SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries, - MaxSnapFiles: cfg.MaxSnapFiles, - MaxWALFiles: cfg.MaxWalFiles, - InitialPeerURLsMap: urlsmap, - InitialClusterToken: token, - DiscoveryURL: cfg.Durl, - DiscoveryProxy: cfg.Dproxy, - DiscoveryCfg: cfg.DiscoveryCfg, - NewCluster: cfg.IsNewCluster(), - PeerTLSInfo: cfg.PeerTLSInfo, - TickMs: cfg.TickMs, - ElectionTicks: cfg.ElectionTicks(), - WaitClusterReadyTimeout: cfg.ExperimentalWaitClusterReadyTimeout, - InitialElectionTickAdvance: cfg.InitialElectionTickAdvance, - AutoCompactionRetention: autoCompactionRetention, - AutoCompactionMode: cfg.AutoCompactionMode, - QuotaBackendBytes: cfg.QuotaBackendBytes, - BackendBatchLimit: cfg.BackendBatchLimit, - BackendFreelistType: backendFreelistType, - BackendBatchInterval: cfg.BackendBatchInterval, - MaxTxnOps: cfg.MaxTxnOps, - MaxRequestBytes: cfg.MaxRequestBytes, - MaxConcurrentStreams: cfg.MaxConcurrentStreams, - SocketOpts: cfg.SocketOpts, - StrictReconfigCheck: cfg.StrictReconfigCheck, - ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, - AuthToken: cfg.AuthToken, - BcryptCost: cfg.BcryptCost, - TokenTTL: cfg.AuthTokenTTL, - CORS: cfg.CORS, - HostWhitelist: cfg.HostWhitelist, - InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck, - CorruptCheckTime: cfg.ExperimentalCorruptCheckTime, - CompactHashCheckEnabled: cfg.ExperimentalCompactHashCheckEnabled, - CompactHashCheckTime: 
cfg.ExperimentalCompactHashCheckTime, - PreVote: cfg.PreVote, - Logger: cfg.logger, - ForceNewCluster: cfg.ForceNewCluster, - EnableGRPCGateway: cfg.EnableGRPCGateway, - ExperimentalEnableDistributedTracing: cfg.ExperimentalEnableDistributedTracing, - UnsafeNoFsync: cfg.UnsafeNoFsync, - EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint, - LeaseCheckpointPersist: cfg.ExperimentalEnableLeaseCheckpointPersist, - CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit, - CompactionSleepInterval: cfg.ExperimentalCompactionSleepInterval, - WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval, - DowngradeCheckTime: cfg.ExperimentalDowngradeCheckTime, - WarningApplyDuration: cfg.ExperimentalWarningApplyDuration, - WarningUnaryRequestDuration: cfg.WarningUnaryRequestDuration, - ExperimentalMemoryMlock: cfg.ExperimentalMemoryMlock, - ExperimentalTxnModeWriteWithSharedBuffer: cfg.ExperimentalTxnModeWriteWithSharedBuffer, - ExperimentalBootstrapDefragThresholdMegabytes: cfg.ExperimentalBootstrapDefragThresholdMegabytes, - ExperimentalMaxLearners: cfg.ExperimentalMaxLearners, - V2Deprecation: cfg.V2DeprecationEffective(), - } - - if srvcfg.ExperimentalEnableDistributedTracing { - tctx := context.Background() - tracingExporter, err := newTracingExporter(tctx, cfg) - if err != nil { - return e, err - } - e.tracingExporterShutdown = func() { - tracingExporter.Close(tctx) - } - srvcfg.ExperimentalTracerOptions = tracingExporter.opts - - e.cfg.logger.Info( - "distributed tracing setup enabled", - ) - } - - print(e.cfg.logger, *cfg, srvcfg, memberInitialized) - - if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { - return e, err - } - - // buffer channel so goroutines on closed connections won't wait forever - e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) - - // newly started member ("memberInitialized==false") - // does not need corruption check - if memberInitialized && srvcfg.InitialCorruptCheck { - if err = 
e.Server.CorruptionChecker().InitialCheck(); err != nil { - // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()" - // (nothing to close since rafthttp transports have not been started) - - e.cfg.logger.Error("checkInitialHashKV failed", zap.Error(err)) - e.Server.Cleanup() - e.Server = nil - return e, err - } - } - e.Server.Start() - - if err = e.servePeers(); err != nil { - return e, err - } - if err = e.serveClients(); err != nil { - return e, err - } - if err = e.serveMetrics(); err != nil { - return e, err - } - - e.cfg.logger.Info( - "now serving peer/client/metrics", - zap.String("local-member-id", e.Server.MemberId().String()), - zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()), - zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), - zap.Strings("advertise-client-urls", e.cfg.getACURLs()), - zap.Strings("listen-client-urls", e.cfg.getLCURLs()), - zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()), - ) - serving = true - return e, nil -} - -func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized bool) { - cors := make([]string, 0, len(ec.CORS)) - for v := range ec.CORS { - cors = append(cors, v) - } - sort.Strings(cors) - - hss := make([]string, 0, len(ec.HostWhitelist)) - for v := range ec.HostWhitelist { - hss = append(hss, v) - } - sort.Strings(hss) - - quota := ec.QuotaBackendBytes - if quota == 0 { - quota = storage.DefaultQuotaBytes - } - - lg.Info( - "starting an etcd server", - zap.String("etcd-version", version.Version), - zap.String("git-sha", version.GitSHA), - zap.String("go-version", runtime.Version()), - zap.String("go-os", runtime.GOOS), - zap.String("go-arch", runtime.GOARCH), - zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)), - zap.Int("max-cpu-available", runtime.NumCPU()), - zap.Bool("member-initialized", memberInitialized), - zap.String("name", sc.Name), - zap.String("data-dir", sc.DataDir), - zap.String("wal-dir", ec.WalDir), - zap.String("wal-dir-dedicated", 
sc.DedicatedWALDir), - zap.String("member-dir", sc.MemberDir()), - zap.Bool("force-new-cluster", sc.ForceNewCluster), - zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)), - zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)), - zap.String("wait-cluster-ready-timeout", sc.WaitClusterReadyTimeout.String()), - zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance), - zap.Uint64("snapshot-count", sc.SnapshotCount), - zap.Uint("max-wals", sc.MaxWALFiles), - zap.Uint("max-snapshots", sc.MaxSnapFiles), - zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries), - zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()), - zap.Strings("listen-peer-urls", ec.getLPURLs()), - zap.Strings("advertise-client-urls", ec.getACURLs()), - zap.Strings("listen-client-urls", ec.getLCURLs()), - zap.Strings("listen-metrics-urls", ec.getMetricsURLs()), - zap.Strings("cors", cors), - zap.Strings("host-whitelist", hss), - zap.String("initial-cluster", sc.InitialPeerURLsMap.String()), - zap.String("initial-cluster-state", ec.ClusterState), - zap.String("initial-cluster-token", sc.InitialClusterToken), - zap.Int64("quota-backend-bytes", quota), - zap.Uint("max-request-bytes", sc.MaxRequestBytes), - zap.Uint32("max-concurrent-streams", sc.MaxConcurrentStreams), - - zap.Bool("pre-vote", sc.PreVote), - zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck), - zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()), - zap.Bool("compact-check-time-enabled", sc.CompactHashCheckEnabled), - zap.Duration("compact-check-time-interval", sc.CompactHashCheckTime), - zap.String("auto-compaction-mode", sc.AutoCompactionMode), - zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention), - zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()), - zap.String("discovery-url", sc.DiscoveryURL), - zap.String("discovery-proxy", 
sc.DiscoveryProxy), - - zap.String("discovery-token", sc.DiscoveryCfg.Token), - zap.String("discovery-endpoints", strings.Join(sc.DiscoveryCfg.Endpoints, ",")), - zap.String("discovery-dial-timeout", sc.DiscoveryCfg.DialTimeout.String()), - zap.String("discovery-request-timeout", sc.DiscoveryCfg.RequestTimeout.String()), - zap.String("discovery-keepalive-time", sc.DiscoveryCfg.KeepAliveTime.String()), - zap.String("discovery-keepalive-timeout", sc.DiscoveryCfg.KeepAliveTimeout.String()), - zap.Bool("discovery-insecure-transport", sc.DiscoveryCfg.Secure.InsecureTransport), - zap.Bool("discovery-insecure-skip-tls-verify", sc.DiscoveryCfg.Secure.InsecureSkipVerify), - zap.String("discovery-cert", sc.DiscoveryCfg.Secure.Cert), - zap.String("discovery-key", sc.DiscoveryCfg.Secure.Key), - zap.String("discovery-cacert", sc.DiscoveryCfg.Secure.Cacert), - zap.String("discovery-user", sc.DiscoveryCfg.Auth.Username), - - zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()), - zap.Int("max-learners", sc.ExperimentalMaxLearners), - ) -} - -// Config returns the current configuration. -func (e *Etcd) Config() Config { - return e.cfg -} - -// Close gracefully shuts down all servers/listeners. -// Client requests will be terminated with request timeout. -// After timeout, enforce remaning requests be closed immediately. -func (e *Etcd) Close() { - fields := []zap.Field{ - zap.String("name", e.cfg.Name), - zap.String("data-dir", e.cfg.Dir), - zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()), - zap.Strings("advertise-client-urls", e.cfg.getACURLs()), - } - lg := e.GetLogger() - lg.Info("closing etcd server", fields...) - defer func() { - lg.Info("closed etcd server", fields...) 
- verify.MustVerifyIfEnabled(verify.Config{ - Logger: lg, - DataDir: e.cfg.Dir, - ExactIndex: false, - }) - lg.Sync() - }() - - e.closeOnce.Do(func() { - close(e.stopc) - }) - - // close client requests with request timeout - timeout := 2 * time.Second - if e.Server != nil { - timeout = e.Server.Cfg.ReqTimeout() - } - for _, sctx := range e.sctxs { - for ss := range sctx.serversC { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - stopServers(ctx, ss) - cancel() - } - } - - for _, sctx := range e.sctxs { - sctx.cancel() - } - - for i := range e.Clients { - if e.Clients[i] != nil { - e.Clients[i].Close() - } - } - - for i := range e.metricsListeners { - e.metricsListeners[i].Close() - } - - // shutdown tracing exporter - if e.tracingExporterShutdown != nil { - e.tracingExporterShutdown() - } - - // close rafthttp transports - if e.Server != nil { - e.Server.Stop() - } - - // close all idle connections in peer handler (wait up to 1-second) - for i := range e.Peers { - if e.Peers[i] != nil && e.Peers[i].close != nil { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - e.Peers[i].close(ctx) - cancel() - } - } - if e.errc != nil { - close(e.errc) - } -} - -func stopServers(ctx context.Context, ss *servers) { - // first, close the http.Server - ss.http.Shutdown(ctx) - // do not grpc.Server.GracefulStop with TLS enabled etcd server - // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 - // and https://github.com/etcd-io/etcd/issues/8916 - if ss.secure { - ss.grpc.Stop() - return - } - - ch := make(chan struct{}) - go func() { - defer close(ch) - // close listeners to stop accepting new connections, - // will block on any existing transports - ss.grpc.GracefulStop() - }() - - // wait until all pending RPCs are finished - select { - case <-ch: - case <-ctx.Done(): - // took too long, manually close open transports - // e.g. 
watch streams - ss.grpc.Stop() - - // concurrent GracefulStop should be interrupted - <-ch - } -} - -// Err - return channel used to report errors during etcd run/shutdown. -// Since etcd 3.5 the channel is being closed when the etcd is over. -func (e *Etcd) Err() <-chan error { - return e.errc -} - -func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) { - if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil { - return nil, err - } - if err = cfg.PeerSelfCert(); err != nil { - cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err)) - } - updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion) - if !cfg.PeerTLSInfo.Empty() { - cfg.logger.Info( - "starting with peer TLS", - zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)), - zap.Strings("cipher-suites", cfg.CipherSuites), - ) - } - - peers = make([]*peerListener, len(cfg.LPUrls)) - defer func() { - if err == nil { - return - } - for i := range peers { - if peers[i] != nil && peers[i].close != nil { - cfg.logger.Warn( - "closing peer listener", - zap.String("address", cfg.LPUrls[i].String()), - zap.Error(err), - ) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - peers[i].close(ctx) - cancel() - } - } - }() - - for i, u := range cfg.LPUrls { - if u.Scheme == "http" { - if !cfg.PeerTLSInfo.Empty() { - cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String())) - } - if cfg.PeerTLSInfo.ClientCertAuth { - cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String())) - } - } - peers[i] = &peerListener{close: func(context.Context) error { return nil }} - peers[i].Listener, err = transport.NewListenerWithOpts(u.Host, u.Scheme, - transport.WithTLSInfo(&cfg.PeerTLSInfo), - transport.WithSocketOpts(&cfg.SocketOpts), - 
transport.WithTimeout(rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout), - ) - if err != nil { - return nil, err - } - // once serve, overwrite with 'http.Server.Shutdown' - peers[i].close = func(context.Context) error { - return peers[i].Listener.Close() - } - } - return peers, nil -} - -// configure peer handlers after rafthttp.Transport started -func (e *Etcd) servePeers() (err error) { - ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server) - - for _, p := range e.Peers { - u := p.Listener.Addr().String() - m := cmux.New(p.Listener) - srv := &http.Server{ - Handler: ph, - ReadTimeout: 5 * time.Minute, - ErrorLog: defaultLog.New(io.Discard, "", 0), // do not log user error - } - go srv.Serve(m.Match(cmux.Any())) - p.serve = func() error { - e.cfg.logger.Info( - "cmux::serve", - zap.String("address", u), - ) - return m.Serve() - } - p.close = func(ctx context.Context) error { - // gracefully shutdown http.Server - // close open listeners, idle connections - // until context cancel or time-out - e.cfg.logger.Info( - "stopping serving peer traffic", - zap.String("address", u), - ) - srv.Shutdown(ctx) - e.cfg.logger.Info( - "stopped serving peer traffic", - zap.String("address", u), - ) - m.Close() - return nil - } - } - - // start peer servers in a goroutine - for _, pl := range e.Peers { - go func(l *peerListener) { - u := l.Addr().String() - e.cfg.logger.Info( - "serving peer traffic", - zap.String("address", u), - ) - e.errHandler(l.serve()) - }(pl) - } - return nil -} - -func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { - if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil { - return nil, err - } - if err = cfg.ClientSelfCert(); err != nil { - cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err)) - } - updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion) - if cfg.EnablePprof { - cfg.logger.Info("pprof is enabled", zap.String("path", 
debugutil.HTTPPrefixPProf)) - } - - sctxs = make(map[string]*serveCtx) - for _, u := range cfg.LCUrls { - sctx := newServeCtx(cfg.logger) - if u.Scheme == "http" || u.Scheme == "unix" { - if !cfg.ClientTLSInfo.Empty() { - cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String())) - } - if cfg.ClientTLSInfo.ClientCertAuth { - cfg.logger.Warn("scheme is HTTP while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String())) - } - } - if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { - return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String()) - } - - network := "tcp" - addr := u.Host - if u.Scheme == "unix" || u.Scheme == "unixs" { - network = "unix" - addr = u.Host + u.Path - } - sctx.network = network - - sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" - sctx.insecure = !sctx.secure - if oldctx := sctxs[addr]; oldctx != nil { - oldctx.secure = oldctx.secure || sctx.secure - oldctx.insecure = oldctx.insecure || sctx.insecure - continue - } - - if sctx.l, err = transport.NewListenerWithOpts(addr, u.Scheme, - transport.WithSocketOpts(&cfg.SocketOpts), - transport.WithSkipTLSInfoCheck(true), - ); err != nil { - return nil, err - } - // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking - // hosts that disable ipv6. So, use the address given by the user. 
- sctx.addr = addr - - if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { - if fdLimit <= reservedInternalFDNum { - cfg.logger.Fatal( - "file descriptor limit of etcd process is too low; please set higher", - zap.Uint64("limit", fdLimit), - zap.Int("recommended-limit", reservedInternalFDNum), - ) - } - sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) - } - - defer func(u url.URL) { - if err == nil { - return - } - sctx.l.Close() - cfg.logger.Warn( - "closing peer listener", - zap.String("address", u.Host), - zap.Error(err), - ) - }(u) - for k := range cfg.UserHandlers { - sctx.userHandlers[k] = cfg.UserHandlers[k] - } - sctx.serviceRegister = cfg.ServiceRegister - if cfg.EnablePprof || cfg.LogLevel == "debug" { - sctx.registerPprof() - } - if cfg.LogLevel == "debug" { - sctx.registerTrace() - } - sctxs[addr] = sctx - } - return sctxs, nil -} - -func (e *Etcd) serveClients() (err error) { - if !e.cfg.ClientTLSInfo.Empty() { - e.cfg.logger.Info( - "starting with client TLS", - zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)), - zap.Strings("cipher-suites", e.cfg.CipherSuites), - ) - } - - // Start a client server goroutine for each listen address - mux := http.NewServeMux() - etcdhttp.HandleDebug(mux) - etcdhttp.HandleVersion(mux, e.Server) - etcdhttp.HandleMetrics(mux) - etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server) - - var gopts []grpc.ServerOption - if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { - gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: e.cfg.GRPCKeepAliveMinTime, - PermitWithoutStream: false, - })) - } - if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && - e.cfg.GRPCKeepAliveTimeout > time.Duration(0) { - gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: e.cfg.GRPCKeepAliveInterval, - Timeout: e.cfg.GRPCKeepAliveTimeout, - })) - } - - // start client servers in each goroutine - for _, sctx := range e.sctxs { - go func(s 
*serveCtx) { - e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, gopts...)) - }(sctx) - } - return nil -} - -func (e *Etcd) serveMetrics() (err error) { - if e.cfg.Metrics == "extensive" { - grpc_prometheus.EnableHandlingTimeHistogram() - } - - if len(e.cfg.ListenMetricsUrls) > 0 { - metricsMux := http.NewServeMux() - etcdhttp.HandleMetrics(metricsMux) - etcdhttp.HandleHealth(e.cfg.logger, metricsMux, e.Server) - - for _, murl := range e.cfg.ListenMetricsUrls { - tlsInfo := &e.cfg.ClientTLSInfo - if murl.Scheme == "http" { - tlsInfo = nil - } - ml, err := transport.NewListenerWithOpts(murl.Host, murl.Scheme, - transport.WithTLSInfo(tlsInfo), - transport.WithSocketOpts(&e.cfg.SocketOpts), - ) - if err != nil { - return err - } - e.metricsListeners = append(e.metricsListeners, ml) - go func(u url.URL, ln net.Listener) { - e.cfg.logger.Info( - "serving metrics", - zap.String("address", u.String()), - ) - e.errHandler(http.Serve(ln, metricsMux)) - }(murl, ml) - } - } - return nil -} - -func (e *Etcd) errHandler(err error) { - if err != nil { - e.GetLogger().Error("setting up serving from embedded etcd failed.", zap.Error(err)) - } - select { - case <-e.stopc: - return - default: - } - select { - case <-e.stopc: - case e.errc <- err: - } -} - -// GetLogger returns the logger. 
-func (e *Etcd) GetLogger() *zap.Logger { - e.cfg.loggerMu.RLock() - l := e.cfg.logger - e.cfg.loggerMu.RUnlock() - return l -} - -func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) { - h, err := strconv.Atoi(retention) - if err == nil && h >= 0 { - switch mode { - case CompactorModeRevision: - ret = time.Duration(int64(h)) - case CompactorModePeriodic: - ret = time.Duration(int64(h)) * time.Hour - } - } else { - // periodic compaction - ret, err = time.ParseDuration(retention) - if err != nil { - return 0, fmt.Errorf("error parsing CompactionRetention: %v", err) - } - } - return ret, nil -} diff --git a/server/embed/serve.go b/server/embed/serve.go deleted file mode 100644 index 7fff618a687..00000000000 --- a/server/embed/serve.go +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "context" - "fmt" - "io" - defaultLog "log" - "math" - "net" - "net/http" - "strings" - "time" - - etcdservergw "go.etcd.io/etcd/api/v3/etcdserverpb/gw" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/v3/credentials" - "go.etcd.io/etcd/pkg/v3/debugutil" - "go.etcd.io/etcd/pkg/v3/httputil" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" - v3electiongw "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" - v3lockgw "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - - gw "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/soheilhy/cmux" - "github.com/tmc/grpc-websocket-proxy/wsproxy" - "go.uber.org/zap" - "golang.org/x/net/http2" - "golang.org/x/net/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -type serveCtx struct { - lg *zap.Logger - l net.Listener - addr string - network string - secure bool - insecure bool - - ctx context.Context - cancel context.CancelFunc - - userHandlers map[string]http.Handler - serviceRegister func(*grpc.Server) - serversC chan *servers -} - -type servers struct { - secure bool - grpc *grpc.Server - http *http.Server -} - -func newServeCtx(lg *zap.Logger) *serveCtx { - ctx, cancel := context.WithCancel(context.Background()) - if lg == nil { - lg = zap.NewNop() - } - return &serveCtx{ - lg: lg, - ctx: ctx, - cancel: cancel, - userHandlers: make(map[string]http.Handler), - serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true - } -} - -// serve accepts incoming connections on the listener l, -// creating a new service 
goroutine for each. The service goroutines -// read requests and then call handler to reply to them. -func (sctx *serveCtx) serve( - s *etcdserver.EtcdServer, - tlsinfo *transport.TLSInfo, - handler http.Handler, - errHandler func(error), - gopts ...grpc.ServerOption) (err error) { - logger := defaultLog.New(io.Discard, "etcdhttp", 0) - - // When the quorum isn't satisfied, then etcd server will be blocked - // on <-s.ReadyNotify(). Set a timeout here so that the etcd server - // can continue to serve serializable read request. - select { - case <-time.After(s.Cfg.WaitClusterReadyTimeout): - sctx.lg.Warn("timed out waiting for the ready notification") - case <-s.ReadyNotify(): - } - - sctx.lg.Info("ready to serve client requests") - - m := cmux.New(sctx.l) - v3c := v3client.New(s) - servElection := v3election.NewElectionServer(v3c) - servLock := v3lock.NewLockServer(v3c) - - var gs *grpc.Server - defer func() { - if err != nil && gs != nil { - sctx.lg.Warn("stopping grpc server due to error", zap.Error(err)) - gs.Stop() - sctx.lg.Warn("stopped grpc server due to error", zap.Error(err)) - } - }() - - // Make sure serversC is closed even if we prematurely exit the function. - defer close(sctx.serversC) - - if sctx.insecure { - gs = v3rpc.Server(s, nil, nil, gopts...) 
- v3electionpb.RegisterElectionServer(gs, servElection) - v3lockpb.RegisterLockServer(gs, servLock) - if sctx.serviceRegister != nil { - sctx.serviceRegister(gs) - } - grpcl := m.Match(cmux.HTTP2()) - go func() { errHandler(gs.Serve(grpcl)) }() - - var gwmux *gw.ServeMux - if s.Cfg.EnableGRPCGateway { - gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}) - if err != nil { - sctx.lg.Error("registerGateway failed", zap.Error(err)) - return err - } - } - - httpmux := sctx.createMux(gwmux, handler) - - srvhttp := &http.Server{ - Handler: createAccessController(sctx.lg, s, httpmux), - ErrorLog: logger, // do not log user error - } - if err := configureHttpServer(srvhttp, s.Cfg); err != nil { - sctx.lg.Error("Configure http server failed", zap.Error(err)) - return err - } - httpl := m.Match(cmux.HTTP1()) - go func() { errHandler(srvhttp.Serve(httpl)) }() - - sctx.serversC <- &servers{grpc: gs, http: srvhttp} - sctx.lg.Info( - "serving client traffic insecurely; this is strongly discouraged!", - zap.String("address", sctx.l.Addr().String()), - ) - } - - if sctx.secure { - tlscfg, tlsErr := tlsinfo.ServerConfig() - if tlsErr != nil { - return tlsErr - } - gs = v3rpc.Server(s, tlscfg, nil, gopts...) 
- v3electionpb.RegisterElectionServer(gs, servElection) - v3lockpb.RegisterLockServer(gs, servLock) - if sctx.serviceRegister != nil { - sctx.serviceRegister(gs) - } - handler = grpcHandlerFunc(gs, handler) - - var gwmux *gw.ServeMux - if s.Cfg.EnableGRPCGateway { - dtls := tlscfg.Clone() - // trust local server - dtls.InsecureSkipVerify = true - bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls}) - opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())} - gwmux, err = sctx.registerGateway(opts) - if err != nil { - return err - } - } - - var tlsl net.Listener - tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo) - if err != nil { - return err - } - // TODO: add debug flag; enable logging when debug flag is set - httpmux := sctx.createMux(gwmux, handler) - - srv := &http.Server{ - Handler: createAccessController(sctx.lg, s, httpmux), - TLSConfig: tlscfg, - ErrorLog: logger, // do not log user error - } - if err := configureHttpServer(srv, s.Cfg); err != nil { - sctx.lg.Error("Configure https server failed", zap.Error(err)) - return err - } - go func() { errHandler(srv.Serve(tlsl)) }() - - sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} - sctx.lg.Info( - "serving client traffic securely", - zap.String("address", sctx.l.Addr().String()), - ) - } - - return m.Serve() -} - -func configureHttpServer(srv *http.Server, cfg config.ServerConfig) error { - // todo (ahrtr): should we support configuring other parameters in the future as well? - return http2.ConfigureServer(srv, &http2.Server{ - MaxConcurrentStreams: cfg.MaxConcurrentStreams, - }) -} - -// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC -// connections or otherHandler otherwise. Given in gRPC docs. 
-func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { - if otherHandler == nil { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - grpcServer.ServeHTTP(w, r) - }) - } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - grpcServer.ServeHTTP(w, r) - } else { - otherHandler.ServeHTTP(w, r) - } - }) -} - -type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error - -func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { - ctx := sctx.ctx - - addr := sctx.addr - if network := sctx.network; network == "unix" { - // explicitly define unix network for gRPC socket support - addr = fmt.Sprintf("%s:%s", network, addr) - } - - opts = append(opts, grpc.WithDefaultCallOptions([]grpc.CallOption{ - grpc.MaxCallRecvMsgSize(math.MaxInt32), - }...)) - - conn, err := grpc.DialContext(ctx, addr, opts...) 
- if err != nil { - sctx.lg.Error("registerGateway failed to dial", zap.String("addr", addr), zap.Error(err)) - return nil, err - } - gwmux := gw.NewServeMux() - - handlers := []registerHandlerFunc{ - etcdservergw.RegisterKVHandler, - etcdservergw.RegisterWatchHandler, - etcdservergw.RegisterLeaseHandler, - etcdservergw.RegisterClusterHandler, - etcdservergw.RegisterMaintenanceHandler, - etcdservergw.RegisterAuthHandler, - v3lockgw.RegisterLockHandler, - v3electiongw.RegisterElectionHandler, - } - for _, h := range handlers { - if err := h(ctx, gwmux, conn); err != nil { - return nil, err - } - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - sctx.lg.Warn( - "failed to close connection", - zap.String("address", sctx.l.Addr().String()), - zap.Error(cerr), - ) - } - }() - - return gwmux, nil -} - -func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { - httpmux := http.NewServeMux() - for path, h := range sctx.userHandlers { - httpmux.Handle(path, h) - } - - if gwmux != nil { - httpmux.Handle( - "/v3/", - wsproxy.WebsocketProxy( - gwmux, - wsproxy.WithRequestMutator( - // Default to the POST method for streams - func(_ *http.Request, outgoing *http.Request) *http.Request { - outgoing.Method = "POST" - return outgoing - }, - ), - wsproxy.WithMaxRespBodyBufferSize(0x7fffffff), - ), - ) - } - if handler != nil { - httpmux.Handle("/", handler) - } - return httpmux -} - -// createAccessController wraps HTTP multiplexer: -// - mutate gRPC gateway request paths -// - check hostname whitelist -// client HTTP requests goes here first -func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler { - if lg == nil { - lg = zap.NewNop() - } - return &accessController{lg: lg, s: s, mux: mux} -} - -type accessController struct { - lg *zap.Logger - s *etcdserver.EtcdServer - mux *http.ServeMux -} - -func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) { 
- if req == nil { - http.Error(rw, "Request is nil", http.StatusBadRequest) - return - } - // redirect for backward compatibilities - if req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") { - req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1) - } - - if req.TLS == nil { // check origin if client connection is not secure - host := httputil.GetHostname(req) - if !ac.s.AccessController.IsHostWhitelisted(host) { - ac.lg.Warn( - "rejecting HTTP request to prevent DNS rebinding attacks", - zap.String("host", host), - ) - http.Error(rw, errCVE20185702(host), http.StatusMisdirectedRequest) - return - } - } else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway && - ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") { - for _, chains := range req.TLS.VerifiedChains { - if len(chains) < 1 { - continue - } - if len(chains[0].Subject.CommonName) != 0 { - http.Error(rw, "CommonName of client sending a request against gateway will be ignored and not used as expected", http.StatusBadRequest) - return - } - } - } - - // Write CORS header. - if ac.s.AccessController.OriginAllowed("*") { - addCORSHeader(rw, "*") - } else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) { - addCORSHeader(rw, origin) - } - - if req.Method == "OPTIONS" { - rw.WriteHeader(http.StatusOK) - return - } - - ac.mux.ServeHTTP(rw, req) -} - -// addCORSHeader adds the correct cors headers given an origin -func addCORSHeader(w http.ResponseWriter, origin string) { - w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") - w.Header().Add("Access-Control-Allow-Origin", origin) - w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization") -} - -// https://github.com/transmission/transmission/pull/468 -func errCVE20185702(host string) string { - return fmt.Sprintf(` -etcd received your request, but the Host header was unrecognized. 
- -To fix this, choose one of the following options: -- Enable TLS, then any HTTPS request will be allowed. -- Add the hostname you want to use to the whitelist in settings. - - e.g. etcd --host-whitelist %q - -This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702). -`, host) -} - -// WrapCORS wraps existing handler with CORS. -// TODO: deprecate this after v2 proxy deprecate -func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler { - return &corsHandler{ - ac: &etcdserver.AccessController{CORS: cors}, - h: h, - } -} - -type corsHandler struct { - ac *etcdserver.AccessController - h http.Handler -} - -func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - if ch.ac.OriginAllowed("*") { - addCORSHeader(rw, "*") - } else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) { - addCORSHeader(rw, origin) - } - - if req.Method == "OPTIONS" { - rw.WriteHeader(http.StatusOK) - return - } - - ch.h.ServeHTTP(rw, req) -} - -func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { - if sctx.userHandlers[s] != nil { - sctx.lg.Warn("path is already registered by user handler", zap.String("path", s)) - return - } - sctx.userHandlers[s] = h -} - -func (sctx *serveCtx) registerPprof() { - for p, h := range debugutil.PProfHandlers() { - sctx.registerUserHandler(p, h) - } -} - -func (sctx *serveCtx) registerTrace() { - reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } - sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) - evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } - sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) -} diff --git a/server/embed/serve_test.go b/server/embed/serve_test.go deleted file mode 100644 index 1d3cdec9362..00000000000 --- a/server/embed/serve_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package embed - -import ( - "fmt" - "net/url" - "os" - "testing" - - "go.etcd.io/etcd/server/v3/auth" -) - -// TestStartEtcdWrongToken ensures that StartEtcd with wrong configs returns with error. -func TestStartEtcdWrongToken(t *testing.T) { - tdir := t.TempDir() - - cfg := NewConfig() - - // Similar to function in integration/embed/embed_test.go for setting up Config. - urls := newEmbedURLs(2) - curls := []url.URL{urls[0]} - purls := []url.URL{urls[1]} - cfg.LCUrls, cfg.ACUrls = curls, curls - cfg.LPUrls, cfg.APUrls = purls, purls - cfg.InitialCluster = "" - for i := range purls { - cfg.InitialCluster += ",default=" + purls[i].String() - } - cfg.InitialCluster = cfg.InitialCluster[1:] - cfg.Dir = tdir - cfg.AuthToken = "wrong-token" - - if _, err := StartEtcd(cfg); err != auth.ErrInvalidAuthOpts { - t.Fatalf("expected %v, got %v", auth.ErrInvalidAuthOpts, err) - } -} - -func newEmbedURLs(n int) (urls []url.URL) { - scheme := "unix" - for i := 0; i < n; i++ { - u, _ := url.Parse(fmt.Sprintf("%s://localhost:%d%06d", scheme, os.Getpid(), i)) - urls = append(urls, *u) - } - return urls -} diff --git a/server/embed/util.go b/server/embed/util.go deleted file mode 100644 index 269fbc80b20..00000000000 --- a/server/embed/util.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package embed - -import ( - "path/filepath" - - "go.etcd.io/etcd/server/v3/storage/wal" -) - -func isMemberInitialized(cfg *Config) bool { - waldir := cfg.WalDir - if waldir == "" { - waldir = filepath.Join(cfg.Dir, "member", "wal") - } - return wal.Exist(waldir) -} diff --git a/server/etcdmain/config.go b/server/etcdmain/config.go deleted file mode 100644 index 84763dd9a6d..00000000000 --- a/server/etcdmain/config.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Every change should be reflected on help.go as well. 
- -package etcdmain - -import ( - "errors" - "flag" - "fmt" - "os" - "runtime" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/tlsutil" - "go.etcd.io/etcd/pkg/v3/flags" - cconfig "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - - "go.uber.org/zap" -) - -var ( - fallbackFlagExit = "exit" - fallbackFlagProxy = "proxy" - - ignored = []string{ - "cluster-active-size", - "cluster-remove-delay", - "cluster-sync-interval", - "config", - "force", - "max-result-buffer", - "max-retry-attempts", - "peer-heartbeat-interval", - "peer-election-timeout", - "retry-interval", - "snapshot", - "v", - "vv", - // for coverage testing - "test.coverprofile", - "test.outputdir", - } -) - -// config holds the config for a command line invocation of etcd -type config struct { - ec embed.Config - cf configFlags - configFile string - printVersion bool - ignored []string -} - -// configFlags has the set of flags used for command line parsing a Config -type configFlags struct { - flagSet *flag.FlagSet - clusterState *flags.SelectiveStringValue - fallback *flags.SelectiveStringValue - v2deprecation *flags.SelectiveStringsValue -} - -func newConfig() *config { - cfg := &config{ - ec: *embed.NewConfig(), - ignored: ignored, - } - cfg.cf = configFlags{ - flagSet: flag.NewFlagSet("etcd", flag.ContinueOnError), - clusterState: flags.NewSelectiveStringValue( - embed.ClusterStateFlagNew, - embed.ClusterStateFlagExisting, - ), - fallback: flags.NewSelectiveStringValue( - fallbackFlagExit, - fallbackFlagProxy, - ), - v2deprecation: flags.NewSelectiveStringsValue( - string(cconfig.V2_DEPR_1_WRITE_ONLY), - string(cconfig.V2_DEPR_1_WRITE_ONLY_DROP), - string(cconfig.V2_DEPR_2_GONE)), - } - - fs := cfg.cf.flagSet - fs.Usage = func() { - fmt.Fprintln(os.Stderr, usageline) - } - - 
fs.StringVar(&cfg.configFile, "config-file", "", "Path to the server configuration file. Note that if a configuration file is provided, other command line flags and environment variables will be ignored.") - - // member - fs.StringVar(&cfg.ec.Dir, "data-dir", cfg.ec.Dir, "Path to the data directory.") - fs.StringVar(&cfg.ec.WalDir, "wal-dir", cfg.ec.WalDir, "Path to the dedicated wal directory.") - fs.Var( - flags.NewUniqueURLsWithExceptions(embed.DefaultListenPeerURLs, ""), - "listen-peer-urls", - "List of URLs to listen on for peer traffic.", - ) - fs.Var( - flags.NewUniqueURLsWithExceptions(embed.DefaultListenClientURLs, ""), "listen-client-urls", - "List of URLs to listen on for client traffic.", - ) - fs.Var( - flags.NewUniqueURLsWithExceptions("", ""), - "listen-metrics-urls", - "List of URLs to listen on for the metrics and health endpoints.", - ) - fs.UintVar(&cfg.ec.MaxSnapFiles, "max-snapshots", cfg.ec.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited).") - fs.UintVar(&cfg.ec.MaxWalFiles, "max-wals", cfg.ec.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).") - fs.StringVar(&cfg.ec.Name, "name", cfg.ec.Name, "Human-readable name for this member.") - fs.Uint64Var(&cfg.ec.SnapshotCount, "snapshot-count", cfg.ec.SnapshotCount, "Number of committed transactions to trigger a snapshot to disk.") - fs.UintVar(&cfg.ec.TickMs, "heartbeat-interval", cfg.ec.TickMs, "Time (in milliseconds) of a heartbeat interval.") - fs.UintVar(&cfg.ec.ElectionMs, "election-timeout", cfg.ec.ElectionMs, "Time (in milliseconds) for an election to timeout.") - fs.BoolVar(&cfg.ec.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.ec.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.") - fs.Int64Var(&cfg.ec.QuotaBackendBytes, "quota-backend-bytes", cfg.ec.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 
0 means use the default quota.") - fs.StringVar(&cfg.ec.BackendFreelistType, "backend-bbolt-freelist-type", cfg.ec.BackendFreelistType, "BackendFreelistType specifies the type of freelist that boltdb backend uses(array and map are supported types)") - fs.DurationVar(&cfg.ec.BackendBatchInterval, "backend-batch-interval", cfg.ec.BackendBatchInterval, "BackendBatchInterval is the maximum time before commit the backend transaction.") - fs.IntVar(&cfg.ec.BackendBatchLimit, "backend-batch-limit", cfg.ec.BackendBatchLimit, "BackendBatchLimit is the maximum operations before commit the backend transaction.") - fs.UintVar(&cfg.ec.MaxTxnOps, "max-txn-ops", cfg.ec.MaxTxnOps, "Maximum number of operations permitted in a transaction.") - fs.UintVar(&cfg.ec.MaxRequestBytes, "max-request-bytes", cfg.ec.MaxRequestBytes, "Maximum client request size in bytes the server will accept.") - fs.DurationVar(&cfg.ec.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.ec.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.") - fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).") - fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).") - fs.BoolVar(&cfg.ec.SocketOpts.ReusePort, "socket-reuse-port", cfg.ec.SocketOpts.ReusePort, "Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.") - fs.BoolVar(&cfg.ec.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.ec.SocketOpts.ReuseAddress, "Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in `TIME_WAIT` state.") - - fs.Var(flags.NewUint32Value(cfg.ec.MaxConcurrentStreams), "max-concurrent-streams", "Maximum concurrent streams that each client can 
open at a time.") - - // raft connection timeouts - fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "Read timeout set on each rafthttp connection") - fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "Write timeout set on each rafthttp connection") - - // clustering - fs.Var( - flags.NewUniqueURLsWithExceptions(embed.DefaultInitialAdvertisePeerURLs, ""), - "initial-advertise-peer-urls", - "List of this member's peer URLs to advertise to the rest of the cluster.", - ) - fs.Var( - flags.NewUniqueURLsWithExceptions(embed.DefaultAdvertiseClientURLs, ""), - "advertise-client-urls", - "List of this member's client URLs to advertise to the public.", - ) - - fs.StringVar(&cfg.ec.Durl, "discovery", cfg.ec.Durl, "Discovery URL used to bootstrap the cluster for v2 discovery. Will be deprecated in v3.7, and be decommissioned in v3.8.") - fs.Var(cfg.cf.fallback, "discovery-fallback", fmt.Sprintf("Valid values include %q", cfg.cf.fallback.Valids())) - - fs.Var( - flags.NewUniqueStringsValue(""), - "discovery-endpoints", - "V3 discovery: List of gRPC endpoints of the discovery service.", - ) - fs.StringVar(&cfg.ec.DiscoveryCfg.Token, "discovery-token", "", "V3 discovery: discovery token for the etcd cluster to be bootstrapped.") - fs.DurationVar(&cfg.ec.DiscoveryCfg.DialTimeout, "discovery-dial-timeout", cfg.ec.DiscoveryCfg.DialTimeout, "V3 discovery: dial timeout for client connections.") - fs.DurationVar(&cfg.ec.DiscoveryCfg.RequestTimeout, "discovery-request-timeout", cfg.ec.DiscoveryCfg.RequestTimeout, "V3 discovery: timeout for discovery requests (excluding dial timeout).") - fs.DurationVar(&cfg.ec.DiscoveryCfg.KeepAliveTime, "discovery-keepalive-time", cfg.ec.DiscoveryCfg.KeepAliveTime, "V3 discovery: keepalive time for client connections.") - fs.DurationVar(&cfg.ec.DiscoveryCfg.KeepAliveTimeout, "discovery-keepalive-timeout", cfg.ec.DiscoveryCfg.KeepAliveTimeout, "V3 discovery: 
keepalive timeout for client connections.") - fs.BoolVar(&cfg.ec.DiscoveryCfg.Secure.InsecureTransport, "discovery-insecure-transport", true, "V3 discovery: disable transport security for client connections.") - fs.BoolVar(&cfg.ec.DiscoveryCfg.Secure.InsecureSkipVerify, "discovery-insecure-skip-tls-verify", false, "V3 discovery: skip server certificate verification (CAUTION: this option should be enabled only for testing purposes).") - fs.StringVar(&cfg.ec.DiscoveryCfg.Secure.Cert, "discovery-cert", "", "V3 discovery: identify secure client using this TLS certificate file.") - fs.StringVar(&cfg.ec.DiscoveryCfg.Secure.Key, "discovery-key", "", "V3 discovery: identify secure client using this TLS key file.") - fs.StringVar(&cfg.ec.DiscoveryCfg.Secure.Cacert, "discovery-cacert", "", "V3 discovery: verify certificates of TLS-enabled secure servers using this CA bundle.") - fs.StringVar(&cfg.ec.DiscoveryCfg.Auth.Username, "discovery-user", "", "V3 discovery: username[:password] for authentication (prompt if password is not supplied).") - fs.StringVar(&cfg.ec.DiscoveryCfg.Auth.Password, "discovery-password", "", "V3 discovery: password for authentication (if this option is used, --user option shouldn't include password).") - - fs.StringVar(&cfg.ec.Dproxy, "discovery-proxy", cfg.ec.Dproxy, "HTTP proxy to use for traffic to discovery service. 
Will be deprecated in v3.7, and be decommissioned in v3.8.") - fs.StringVar(&cfg.ec.DNSCluster, "discovery-srv", cfg.ec.DNSCluster, "DNS domain used to bootstrap initial cluster.") - fs.StringVar(&cfg.ec.DNSClusterServiceName, "discovery-srv-name", cfg.ec.DNSClusterServiceName, "Service name to query when using DNS discovery.") - fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "Initial cluster configuration for bootstrapping.") - fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.") - fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' or 'existing').") - - fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.") - - fs.BoolVar(&cfg.ec.PreVote, "pre-vote", cfg.ec.PreVote, "Enable to run an additional Raft election phase.") - - fs.Var(cfg.cf.v2deprecation, "v2-deprecation", fmt.Sprintf("v2store deprecation stage: %q. 
", cfg.cf.v2deprecation.Valids())) - - // security - fs.StringVar(&cfg.ec.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.") - fs.StringVar(&cfg.ec.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.") - fs.StringVar(&cfg.ec.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise cert file will be used when client auth is required.") - fs.StringVar(&cfg.ec.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "Path to an explicit peer client TLS key file otherwise key file will be used when client auth is required.") - fs.BoolVar(&cfg.ec.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.") - fs.StringVar(&cfg.ec.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.") - fs.StringVar(&cfg.ec.ClientTLSInfo.AllowedHostname, "client-cert-allowed-hostname", "", "Allowed TLS hostname for client cert authentication.") - fs.StringVar(&cfg.ec.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.") - fs.BoolVar(&cfg.ec.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates") - fs.StringVar(&cfg.ec.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.") - fs.StringVar(&cfg.ec.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.") - fs.StringVar(&cfg.ec.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "Path to an explicit peer client TLS cert file otherwise peer cert file will be used when client auth is required.") - fs.StringVar(&cfg.ec.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "Path to an explicit peer client TLS key file otherwise peer key file will be used when client auth is required.") - fs.BoolVar(&cfg.ec.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.") - 
fs.StringVar(&cfg.ec.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.") - fs.BoolVar(&cfg.ec.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates") - fs.UintVar(&cfg.ec.SelfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the client and peer certificates, unit is year") - fs.StringVar(&cfg.ec.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.") - fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedCN, "peer-cert-allowed-cn", "", "Allowed CN for inter peer authentication.") - fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedHostname, "peer-cert-allowed-hostname", "", "Allowed TLS hostname for inter peer authentication.") - fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).") - fs.BoolVar(&cfg.ec.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.") - fs.StringVar(&cfg.ec.TlsMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.") - fs.StringVar(&cfg.ec.TlsMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by etcd. 
Possible values: TLS1.2, TLS1.3 (empty defers to Go).") - - fs.Var( - flags.NewUniqueURLsWithExceptions("*", "*"), - "cors", - "Comma-separated white list of origins for CORS, or cross-origin resource sharing, (empty or * means allow all)", - ) - fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "Comma-separated acceptable hostnames from HTTP client requests, if server is not secure (empty means allow all).") - - // logging - fs.StringVar(&cfg.ec.Logger, "logger", "zap", "Currently only supports 'zap' for structured logging.") - fs.Var(flags.NewUniqueStringsValue(embed.DefaultLogOutput), "log-outputs", "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or list of comma separated output targets.") - fs.StringVar(&cfg.ec.LogLevel, "log-level", logutil.DefaultLogLevel, "Configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.") - fs.StringVar(&cfg.ec.LogFormat, "log-format", logutil.DefaultLogFormat, "Configures log format. Only supports json, console. Default is 'json'.") - fs.BoolVar(&cfg.ec.EnableLogRotation, "enable-log-rotation", false, "Enable log rotation of a single log-outputs file target.") - fs.StringVar(&cfg.ec.LogRotationConfigJSON, "log-rotation-config-json", embed.DefaultLogRotationConfig, "Configures log rotation if enabled with a JSON logger config. Default: MaxSize=100(MB), MaxAge=0(days,no limit), MaxBackups=0(no limit), LocalTime=false(UTC), Compress=false(gzip)") - - // version - fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.") - - fs.StringVar(&cfg.ec.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.") - fs.StringVar(&cfg.ec.AutoCompactionMode, "auto-compaction-mode", "periodic", "interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. 
'5m'). 'revision' for revision number based retention.") - - // pprof profiler via HTTP - fs.BoolVar(&cfg.ec.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"") - - // additional metrics - fs.StringVar(&cfg.ec.Metrics, "metrics", cfg.ec.Metrics, "Set level of detail for exported metrics, specify 'extensive' to include server side grpc histogram metrics") - - // experimental distributed tracing - fs.BoolVar(&cfg.ec.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing.") - fs.StringVar(&cfg.ec.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", embed.ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).") - fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", embed.ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd.") - fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. 
This ID must be unique per etcd instance.") - fs.IntVar(&cfg.ec.ExperimentalDistributedTracingSamplingRatePerMillion, "experimental-distributed-tracing-sampling-rate", 0, "Number of samples to collect per million spans for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).") - - // auth - fs.StringVar(&cfg.ec.AuthToken, "auth-token", cfg.ec.AuthToken, "Specify auth token specific options.") - fs.UintVar(&cfg.ec.BcryptCost, "bcrypt-cost", cfg.ec.BcryptCost, "Specify bcrypt algorithm cost factor for auth password hashing.") - fs.UintVar(&cfg.ec.AuthTokenTTL, "auth-token-ttl", cfg.ec.AuthTokenTTL, "The lifetime in seconds of the auth token.") - - // gateway - fs.BoolVar(&cfg.ec.EnableGRPCGateway, "enable-grpc-gateway", cfg.ec.EnableGRPCGateway, "Enable GRPC gateway.") - - // experimental - fs.BoolVar(&cfg.ec.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ec.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.") - fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.") - fs.BoolVar(&cfg.ec.ExperimentalCompactHashCheckEnabled, "experimental-compact-hash-check-enabled", cfg.ec.ExperimentalCompactHashCheckEnabled, "Enable leader to periodically check followers compaction hashes.") - fs.DurationVar(&cfg.ec.ExperimentalCompactHashCheckTime, "experimental-compact-hash-check-time", cfg.ec.ExperimentalCompactHashCheckTime, "Duration of time between leader checks followers compaction hashes.") - - fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.") - // TODO: delete in v3.7 - fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpointPersist, 
"experimental-enable-lease-checkpoint-persist", false, "Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.") - fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.") - fs.DurationVar(&cfg.ec.ExperimentalCompactionSleepInterval, "experimental-compaction-sleep-interval", cfg.ec.ExperimentalCompactionSleepInterval, "Sets the sleep interval between each compaction batch.") - fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.") - fs.DurationVar(&cfg.ec.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ec.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status check.") - fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.") - fs.DurationVar(&cfg.ec.WarningUnaryRequestDuration, "warning-unary-request-duration", cfg.ec.WarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time.") - fs.DurationVar(&cfg.ec.ExperimentalWarningUnaryRequestDuration, "experimental-warning-unary-request-duration", cfg.ec.ExperimentalWarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time. It's deprecated, and will be decommissioned in v3.7. 
Use --warning-unary-request-duration instead.") - fs.BoolVar(&cfg.ec.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ec.ExperimentalMemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.") - fs.BoolVar(&cfg.ec.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "Enable the write transaction to use a shared buffer in its readonly check operations.") - fs.UintVar(&cfg.ec.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect.") - fs.IntVar(&cfg.ec.ExperimentalMaxLearners, "experimental-max-learners", membership.DefaultMaxLearners, "Sets the maximum number of learners that can be available in the cluster membership.") - fs.DurationVar(&cfg.ec.ExperimentalWaitClusterReadyTimeout, "experimental-wait-cluster-ready-timeout", cfg.ec.ExperimentalWaitClusterReadyTimeout, "Maximum duration to wait for the cluster to be ready.") - fs.Uint64Var(&cfg.ec.SnapshotCatchUpEntries, "experimental-snapshot-catchup-entries", cfg.ec.SnapshotCatchUpEntries, "Number of entries for a slow follower to catch up after compacting the the raft storage entries.") - - // unsafe - fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.") - fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.") - - // ignored - for _, f := range cfg.ignored { - fs.Var(&flags.IgnoredFlag{Name: f}, f, "") - } - return cfg -} - -func (cfg *config) parse(arguments []string) error { - perr := cfg.cf.flagSet.Parse(arguments) - switch perr { - case nil: - case flag.ErrHelp: - fmt.Println(flagsline) - os.Exit(0) - default: - os.Exit(2) - } - if len(cfg.cf.flagSet.Args()) != 0 { - return fmt.Errorf("%q is not a valid 
flag", cfg.cf.flagSet.Arg(0)) - } - - if cfg.printVersion { - fmt.Printf("etcd Version: %s\n", version.Version) - fmt.Printf("Git SHA: %s\n", version.GitSHA) - fmt.Printf("Go Version: %s\n", runtime.Version()) - fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) - os.Exit(0) - } - - var err error - - // This env variable must be parsed separately - // because we need to determine whether to use or - // ignore the env variables based on if the config file is set. - if cfg.configFile == "" { - cfg.configFile = os.Getenv(flags.FlagToEnv("ETCD", "config-file")) - } - - if cfg.configFile != "" { - err = cfg.configFromFile(cfg.configFile) - if lg := cfg.ec.GetLogger(); lg != nil { - lg.Info( - "loaded server configuration, other configuration command line flags and environment variables will be ignored if provided", - zap.String("path", cfg.configFile), - ) - } - } else { - err = cfg.configFromCmdLine() - } - - if cfg.ec.V2Deprecation == "" { - cfg.ec.V2Deprecation = cconfig.V2_DEPR_DEFAULT - } - - cfg.ec.WarningUnaryRequestDuration, perr = cfg.parseWarningUnaryRequestDuration() - if perr != nil { - return perr - } - - // now logger is set up - return err -} - -func (cfg *config) configFromCmdLine() error { - // user-specified logger is not setup yet, use this logger during flag parsing - lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel) - if err != nil { - return err - } - verKey := "ETCD_VERSION" - if verVal := os.Getenv(verKey); verVal != "" { - // unset to avoid any possible side-effect. 
- os.Unsetenv(verKey) - - lg.Warn( - "cannot set special environment variable", - zap.String("key", verKey), - zap.String("value", verVal), - ) - } - - err = flags.SetFlagsFromEnv(lg, "ETCD", cfg.cf.flagSet) - if err != nil { - return err - } - - if rafthttp.ConnReadTimeout < rafthttp.DefaultConnReadTimeout { - rafthttp.ConnReadTimeout = rafthttp.DefaultConnReadTimeout - lg.Info(fmt.Sprintf("raft-read-timeout increased to minimum value: %v", rafthttp.DefaultConnReadTimeout)) - } - if rafthttp.ConnWriteTimeout < rafthttp.DefaultConnWriteTimeout { - rafthttp.ConnWriteTimeout = rafthttp.DefaultConnWriteTimeout - lg.Info(fmt.Sprintf("raft-write-timeout increased to minimum value: %v", rafthttp.DefaultConnWriteTimeout)) - } - - cfg.ec.LPUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls") - cfg.ec.APUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls") - cfg.ec.LCUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls") - cfg.ec.ACUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls") - cfg.ec.ListenMetricsUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-metrics-urls") - - cfg.ec.DiscoveryCfg.Endpoints = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "discovery-endpoints") - - cfg.ec.CORS = flags.UniqueURLsMapFromFlag(cfg.cf.flagSet, "cors") - cfg.ec.HostWhitelist = flags.UniqueStringsMapFromFlag(cfg.cf.flagSet, "host-whitelist") - - cfg.ec.CipherSuites = flags.StringsFromFlag(cfg.cf.flagSet, "cipher-suites") - - cfg.ec.MaxConcurrentStreams = flags.Uint32FromFlag(cfg.cf.flagSet, "max-concurrent-streams") - - cfg.ec.LogOutputs = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-outputs") - - cfg.ec.ClusterState = cfg.cf.clusterState.String() - - cfg.ec.V2Deprecation = cconfig.V2DeprecationEnum(cfg.cf.v2deprecation.String()) - - // disable default advertise-client-urls if lcurls is set - missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, 
"advertise-client-urls") - if missingAC { - cfg.ec.ACUrls = nil - } - - // disable default initial-cluster if discovery is set - if (cfg.ec.Durl != "" || cfg.ec.DNSCluster != "" || cfg.ec.DNSClusterServiceName != "" || len(cfg.ec.DiscoveryCfg.Endpoints) > 0) && !flags.IsSet(cfg.cf.flagSet, "initial-cluster") { - cfg.ec.InitialCluster = "" - } - - return cfg.validate() -} - -func (cfg *config) configFromFile(path string) error { - eCfg, err := embed.ConfigFromFile(path) - if err != nil { - return err - } - cfg.ec = *eCfg - - return nil -} - -func (cfg *config) validate() error { - if cfg.cf.fallback.String() == fallbackFlagProxy { - return fmt.Errorf("v2 proxy is deprecated, and --discovery-fallback can't be configured as %q", fallbackFlagProxy) - } - return cfg.ec.Validate() -} - -func (cfg *config) parseWarningUnaryRequestDuration() (time.Duration, error) { - if cfg.ec.ExperimentalWarningUnaryRequestDuration != 0 && cfg.ec.WarningUnaryRequestDuration != 0 { - return 0, errors.New( - "both --experimental-warning-unary-request-duration and --warning-unary-request-duration flags are set. " + - "Use only --warning-unary-request-duration") - } - - if cfg.ec.WarningUnaryRequestDuration != 0 { - return cfg.ec.WarningUnaryRequestDuration, nil - } - - if cfg.ec.ExperimentalWarningUnaryRequestDuration != 0 { - cfg.ec.GetLogger().Warn( - "--experimental-warning-unary-request-duration is deprecated, and will be decommissioned in v3.7. 
" + - "Use --warning-unary-request-duration instead.") - return cfg.ec.ExperimentalWarningUnaryRequestDuration, nil - } - - return embed.DefaultWarningUnaryRequestDuration, nil -} diff --git a/server/etcdmain/config_test.go b/server/etcdmain/config_test.go deleted file mode 100644 index 1352a9ea5d5..00000000000 --- a/server/etcdmain/config_test.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdmain - -import ( - "fmt" - "net/url" - "os" - "reflect" - "strings" - "testing" - - "sigs.k8s.io/yaml" - - "go.etcd.io/etcd/server/v3/embed" -) - -func TestConfigParsingMemberFlags(t *testing.T) { - args := []string{ - "-data-dir=testdir", - "-name=testname", - "-max-wals=10", - "-max-snapshots=10", - "-snapshot-count=10", - "-experimental-snapshot-catchup-entries=1000", - "-listen-peer-urls=http://localhost:8000,https://localhost:8001", - "-listen-client-urls=http://localhost:7000,https://localhost:7001", - // it should be set if -listen-client-urls is set - "-advertise-client-urls=http://localhost:7000,https://localhost:7001", - } - - cfg := newConfig() - err := cfg.parse(args) - if err != nil { - t.Fatal(err) - } - - validateMemberFlags(t, cfg) -} - -func TestConfigFileMemberFields(t *testing.T) { - yc := struct { - Dir string `json:"data-dir"` - MaxSnapFiles uint `json:"max-snapshots"` - MaxWalFiles uint `json:"max-wals"` - Name string `json:"name"` - SnapshotCount uint64 
`json:"snapshot-count"` - SnapshotCatchUpEntries uint64 `json:"experimental-snapshot-catch-up-entries"` - LPUrls string `json:"listen-peer-urls"` - LCUrls string `json:"listen-client-urls"` - AcurlsCfgFile string `json:"advertise-client-urls"` - }{ - "testdir", - 10, - 10, - "testname", - 10, - 1000, - "http://localhost:8000,https://localhost:8001", - "http://localhost:7000,https://localhost:7001", - "http://localhost:7000,https://localhost:7001", - } - - b, err := yaml.Marshal(&yc) - if err != nil { - t.Fatal(err) - } - - tmpfile := mustCreateCfgFile(t, b) - defer os.Remove(tmpfile.Name()) - - args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())} - - cfg := newConfig() - if err = cfg.parse(args); err != nil { - t.Fatal(err) - } - - validateMemberFlags(t, cfg) -} - -func TestConfigParsingClusteringFlags(t *testing.T) { - args := []string{ - "-initial-cluster=0=http://localhost:8000", - "-initial-cluster-state=existing", - "-initial-cluster-token=etcdtest", - "-initial-advertise-peer-urls=http://localhost:8000,https://localhost:8001", - "-advertise-client-urls=http://localhost:7000,https://localhost:7001", - } - - cfg := newConfig() - if err := cfg.parse(args); err != nil { - t.Fatal(err) - } - - validateClusteringFlags(t, cfg) -} - -func TestConfigFileClusteringFields(t *testing.T) { - yc := struct { - InitialCluster string `json:"initial-cluster"` - ClusterState string `json:"initial-cluster-state"` - InitialClusterToken string `json:"initial-cluster-token"` - Apurls string `json:"initial-advertise-peer-urls"` - Acurls string `json:"advertise-client-urls"` - }{ - "0=http://localhost:8000", - "existing", - "etcdtest", - "http://localhost:8000,https://localhost:8001", - "http://localhost:7000,https://localhost:7001", - } - - b, err := yaml.Marshal(&yc) - if err != nil { - t.Fatal(err) - } - - tmpfile := mustCreateCfgFile(t, b) - defer os.Remove(tmpfile.Name()) - - args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())} - cfg := newConfig() - 
err = cfg.parse(args) - if err != nil { - t.Fatal(err) - } - - validateClusteringFlags(t, cfg) -} - -func TestConfigFileClusteringFlags(t *testing.T) { - tests := []struct { - Name string `json:"name"` - InitialCluster string `json:"initial-cluster"` - DNSCluster string `json:"discovery-srv"` - Durl string `json:"discovery"` - }{ - // Use default name and generate a default initial-cluster - {}, - { - Name: "non-default", - }, - { - InitialCluster: "0=localhost:8000", - }, - { - Name: "non-default", - InitialCluster: "0=localhost:8000", - }, - { - DNSCluster: "example.com", - }, - { - Name: "non-default", - DNSCluster: "example.com", - }, - { - Durl: "http://example.com/abc", - }, - { - Name: "non-default", - Durl: "http://example.com/abc", - }, - } - - for i, tt := range tests { - b, err := yaml.Marshal(&tt) - if err != nil { - t.Fatal(err) - } - - tmpfile := mustCreateCfgFile(t, b) - defer os.Remove(tmpfile.Name()) - - args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())} - - cfg := newConfig() - if err := cfg.parse(args); err != nil { - t.Errorf("%d: err = %v", i, err) - } - } -} - -func TestConfigParsingConflictClusteringFlags(t *testing.T) { - conflictArgs := [][]string{ - { - "-initial-cluster=0=localhost:8000", - "-discovery=http://example.com/abc", - }, - { - "-discovery-srv=example.com", - "-discovery=http://example.com/abc", - }, - { - "-initial-cluster=0=localhost:8000", - "-discovery-srv=example.com", - }, - { - "-initial-cluster=0=localhost:8000", - "-discovery=http://example.com/abc", - "-discovery-srv=example.com", - }, - } - - for i, tt := range conflictArgs { - cfg := newConfig() - if err := cfg.parse(tt); err != embed.ErrConflictBootstrapFlags { - t.Errorf("%d: err = %v, want %v", i, err, embed.ErrConflictBootstrapFlags) - } - } -} - -func TestConfigFileConflictClusteringFlags(t *testing.T) { - tests := []struct { - InitialCluster string `json:"initial-cluster"` - DNSCluster string `json:"discovery-srv"` - Durl string 
`json:"discovery"` - }{ - { - InitialCluster: "0=localhost:8000", - Durl: "http://example.com/abc", - }, - { - DNSCluster: "example.com", - Durl: "http://example.com/abc", - }, - { - InitialCluster: "0=localhost:8000", - DNSCluster: "example.com", - }, - { - InitialCluster: "0=localhost:8000", - Durl: "http://example.com/abc", - DNSCluster: "example.com", - }, - } - - for i, tt := range tests { - b, err := yaml.Marshal(&tt) - if err != nil { - t.Fatal(err) - } - - tmpfile := mustCreateCfgFile(t, b) - defer os.Remove(tmpfile.Name()) - - args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())} - - cfg := newConfig() - if err := cfg.parse(args); err != embed.ErrConflictBootstrapFlags { - t.Errorf("%d: err = %v, want %v", i, err, embed.ErrConflictBootstrapFlags) - } - } -} - -func TestConfigParsingMissedAdvertiseClientURLsFlag(t *testing.T) { - tests := []struct { - args []string - werr error - }{ - { - []string{ - "-initial-cluster=infra1=http://127.0.0.1:2380", - "-listen-client-urls=http://127.0.0.1:2379", - }, - embed.ErrUnsetAdvertiseClientURLsFlag, - }, - { - []string{ - "-discovery-srv=example.com", - "-listen-client-urls=http://127.0.0.1:2379", - }, - embed.ErrUnsetAdvertiseClientURLsFlag, - }, - { - []string{ - "-discovery=http://example.com/abc", - "-discovery-fallback=exit", - "-listen-client-urls=http://127.0.0.1:2379", - }, - embed.ErrUnsetAdvertiseClientURLsFlag, - }, - { - []string{ - "-listen-client-urls=http://127.0.0.1:2379", - }, - embed.ErrUnsetAdvertiseClientURLsFlag, - }, - } - - for i, tt := range tests { - cfg := newConfig() - if err := cfg.parse(tt.args); err != tt.werr { - t.Errorf("%d: err = %v, want %v", i, err, tt.werr) - } - } -} - -func TestConfigIsNewCluster(t *testing.T) { - tests := []struct { - state string - wIsNew bool - }{ - {embed.ClusterStateFlagExisting, false}, - {embed.ClusterStateFlagNew, true}, - } - for i, tt := range tests { - cfg := newConfig() - args := []string{"--initial-cluster-state", tests[i].state} - if 
err := cfg.parse(args); err != nil { - t.Fatalf("#%d: unexpected clusterState.Set error: %v", i, err) - } - if g := cfg.ec.IsNewCluster(); g != tt.wIsNew { - t.Errorf("#%d: isNewCluster = %v, want %v", i, g, tt.wIsNew) - } - } -} - -func TestConfigFileElectionTimeout(t *testing.T) { - tests := []struct { - TickMs uint `json:"heartbeat-interval"` - ElectionMs uint `json:"election-timeout"` - errStr string - }{ - { - ElectionMs: 1000, - TickMs: 800, - errStr: "should be at least as 5 times as", - }, - { - ElectionMs: 60000, - TickMs: 10000, - errStr: "is too long, and should be set less than", - }, - { - ElectionMs: 100, - TickMs: 0, - errStr: "--heartbeat-interval must be >0 (set to 0ms)", - }, - { - ElectionMs: 0, - TickMs: 100, - errStr: "--election-timeout must be >0 (set to 0ms)", - }, - } - - for i, tt := range tests { - b, err := yaml.Marshal(&tt) - if err != nil { - t.Fatal(err) - } - - tmpfile := mustCreateCfgFile(t, b) - defer os.Remove(tmpfile.Name()) - - args := []string{fmt.Sprintf("--config-file=%s", tmpfile.Name())} - - cfg := newConfig() - if err := cfg.parse(args); err == nil || !strings.Contains(err.Error(), tt.errStr) { - t.Errorf("%d: Wrong err = %v", i, err) - } - } -} - -func mustCreateCfgFile(t *testing.T, b []byte) *os.File { - tmpfile, err := os.CreateTemp("", "servercfg") - if err != nil { - t.Fatal(err) - } - - _, err = tmpfile.Write(b) - if err != nil { - t.Fatal(err) - } - err = tmpfile.Close() - if err != nil { - t.Fatal(err) - } - - return tmpfile -} - -func validateMemberFlags(t *testing.T, cfg *config) { - wcfg := &embed.Config{ - Dir: "testdir", - LPUrls: []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}}, - LCUrls: []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}}, - MaxSnapFiles: 10, - MaxWalFiles: 10, - Name: "testname", - SnapshotCount: 10, - SnapshotCatchUpEntries: 1000, - } - - if cfg.ec.Dir != wcfg.Dir { - t.Errorf("dir = %v, want %v", 
cfg.ec.Dir, wcfg.Dir) - } - if cfg.ec.MaxSnapFiles != wcfg.MaxSnapFiles { - t.Errorf("maxsnap = %v, want %v", cfg.ec.MaxSnapFiles, wcfg.MaxSnapFiles) - } - if cfg.ec.MaxWalFiles != wcfg.MaxWalFiles { - t.Errorf("maxwal = %v, want %v", cfg.ec.MaxWalFiles, wcfg.MaxWalFiles) - } - if cfg.ec.Name != wcfg.Name { - t.Errorf("name = %v, want %v", cfg.ec.Name, wcfg.Name) - } - if cfg.ec.SnapshotCount != wcfg.SnapshotCount { - t.Errorf("snapcount = %v, want %v", cfg.ec.SnapshotCount, wcfg.SnapshotCount) - } - if cfg.ec.SnapshotCatchUpEntries != wcfg.SnapshotCatchUpEntries { - t.Errorf("snapshot catch up entries = %v, want %v", cfg.ec.SnapshotCatchUpEntries, wcfg.SnapshotCatchUpEntries) - } - if !reflect.DeepEqual(cfg.ec.LPUrls, wcfg.LPUrls) { - t.Errorf("listen-peer-urls = %v, want %v", cfg.ec.LPUrls, wcfg.LPUrls) - } - if !reflect.DeepEqual(cfg.ec.LCUrls, wcfg.LCUrls) { - t.Errorf("listen-client-urls = %v, want %v", cfg.ec.LCUrls, wcfg.LCUrls) - } -} - -func validateClusteringFlags(t *testing.T, cfg *config) { - wcfg := newConfig() - wcfg.ec.APUrls = []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}} - wcfg.ec.ACUrls = []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}} - wcfg.ec.ClusterState = embed.ClusterStateFlagExisting - wcfg.ec.InitialCluster = "0=http://localhost:8000" - wcfg.ec.InitialClusterToken = "etcdtest" - - if cfg.ec.ClusterState != wcfg.ec.ClusterState { - t.Errorf("clusterState = %v, want %v", cfg.ec.ClusterState, wcfg.ec.ClusterState) - } - if cfg.ec.InitialCluster != wcfg.ec.InitialCluster { - t.Errorf("initialCluster = %v, want %v", cfg.ec.InitialCluster, wcfg.ec.InitialCluster) - } - if cfg.ec.InitialClusterToken != wcfg.ec.InitialClusterToken { - t.Errorf("initialClusterToken = %v, want %v", cfg.ec.InitialClusterToken, wcfg.ec.InitialClusterToken) - } - if !reflect.DeepEqual(cfg.ec.APUrls, wcfg.ec.APUrls) { - t.Errorf("initial-advertise-peer-urls = %v, want %v", 
cfg.ec.APUrls, wcfg.ec.APUrls) - } - if !reflect.DeepEqual(cfg.ec.ACUrls, wcfg.ec.ACUrls) { - t.Errorf("advertise-client-urls = %v, want %v", cfg.ec.ACUrls, wcfg.ec.ACUrls) - } -} diff --git a/server/etcdmain/etcd.go b/server/etcdmain/etcd.go deleted file mode 100644 index dd9958ef6e5..00000000000 --- a/server/etcdmain/etcd.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdmain - -import ( - "fmt" - "os" - "runtime" - "strings" - "time" - - "go.uber.org/zap" - "google.golang.org/grpc" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/osutil" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" - "go.etcd.io/etcd/server/v3/etcdserver/errors" -) - -type dirType string - -var ( - dirMember = dirType("member") - dirProxy = dirType("proxy") - dirEmpty = dirType("empty") -) - -func startEtcdOrProxyV2(args []string) { - grpc.EnableTracing = false - - cfg := newConfig() - defaultInitialCluster := cfg.ec.InitialCluster - - err := cfg.parse(args[1:]) - lg := cfg.ec.GetLogger() - // If we failed to parse the whole configuration, print the error using - // preferably the resolved logger from the config, - // but if does not exists, create a new temporary logger. 
- if lg == nil { - var zapError error - // use this logger - lg, zapError = logutil.CreateDefaultZapLogger(zap.InfoLevel) - if zapError != nil { - fmt.Printf("error creating zap logger %v", zapError) - os.Exit(1) - } - } - lg.Info("Running: ", zap.Strings("args", args)) - if err != nil { - lg.Warn("failed to verify flags", zap.Error(err)) - switch err { - case embed.ErrUnsetAdvertiseClientURLsFlag: - lg.Warn("advertise client URLs are not set", zap.Error(err)) - } - os.Exit(1) - } - - cfg.ec.SetupGlobalLoggers() - - defer func() { - logger := cfg.ec.GetLogger() - if logger != nil { - logger.Sync() - } - }() - - defaultHost, dhErr := (&cfg.ec).UpdateDefaultClusterFromName(defaultInitialCluster) - if defaultHost != "" { - lg.Info( - "detected default host for advertise", - zap.String("host", defaultHost), - ) - } - if dhErr != nil { - lg.Info("failed to detect default host", zap.Error(dhErr)) - } - - if cfg.ec.Dir == "" { - cfg.ec.Dir = fmt.Sprintf("%v.etcd", cfg.ec.Name) - lg.Warn( - "'data-dir' was empty; using default", - zap.String("data-dir", cfg.ec.Dir), - ) - } - - var stopped <-chan struct{} - var errc <-chan error - - which := identifyDataDirOrDie(cfg.ec.GetLogger(), cfg.ec.Dir) - if which != dirEmpty { - lg.Info( - "server has already been initialized", - zap.String("data-dir", cfg.ec.Dir), - zap.String("dir-type", string(which)), - ) - switch which { - case dirMember: - stopped, errc, err = startEtcd(&cfg.ec) - case dirProxy: - lg.Panic("v2 http proxy has already been deprecated in 3.6", zap.String("dir-type", string(which))) - default: - lg.Panic( - "unknown directory type", - zap.String("dir-type", string(which)), - ) - } - } else { - lg.Info( - "Initialize and start etcd server", - zap.String("data-dir", cfg.ec.Dir), - zap.String("dir-type", string(which)), - ) - stopped, errc, err = startEtcd(&cfg.ec) - } - - if err != nil { - if derr, ok := err.(*errors.DiscoveryError); ok { - switch derr.Err { - case v2discovery.ErrDuplicateID: - lg.Warn( - "member 
has been registered with discovery service", - zap.String("name", cfg.ec.Name), - zap.String("discovery-token", cfg.ec.Durl), - zap.Error(derr.Err), - ) - lg.Warn( - "but could not find valid cluster configuration", - zap.String("data-dir", cfg.ec.Dir), - ) - lg.Warn("check data dir if previous bootstrap succeeded") - lg.Warn("or use a new discovery token if previous bootstrap failed") - - case v2discovery.ErrDuplicateName: - lg.Warn( - "member with duplicated name has already been registered", - zap.String("discovery-token", cfg.ec.Durl), - zap.Error(derr.Err), - ) - lg.Warn("cURL the discovery token URL for details") - lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster") - - default: - lg.Warn( - "failed to bootstrap; discovery token was already used", - zap.String("discovery-token", cfg.ec.Durl), - zap.Error(err), - ) - lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster") - } - os.Exit(1) - } - - if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") { - lg.Warn("failed to start", zap.Error(err)) - if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) { - lg.Warn("forgot to set --initial-cluster?") - } - if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs { - lg.Warn("forgot to set --initial-advertise-peer-urls?") - } - if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 && len(cfg.ec.DiscoveryCfg.Endpoints) == 0 { - lg.Warn("V2 discovery settings (i.e., --discovery) or v3 discovery settings (i.e., --discovery-token, --discovery-endpoints) are not set") - } - os.Exit(1) - } - lg.Fatal("discovery failed", zap.Error(err)) - } - - osutil.HandleInterrupts(lg) - - // At this point, the initialization of etcd is done. - // The listeners are listening on the TCP ports and ready - // for accepting connections. 
The etcd instance should be - // joined with the cluster and ready to serve incoming - // connections. - notifySystemd(lg) - - select { - case lerr := <-errc: - // fatal out on listener errors - lg.Fatal("listener failed", zap.Error(lerr)) - case <-stopped: - } - - osutil.Exit(0) -} - -// startEtcd runs StartEtcd in addition to hooks needed for standalone etcd. -func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) { - e, err := embed.StartEtcd(cfg) - if err != nil { - return nil, nil, err - } - osutil.RegisterInterruptHandler(e.Close) - select { - case <-e.Server.ReadyNotify(): // wait for e.Server to join the cluster - case <-e.Server.StopNotify(): // publish aborted from 'ErrStopped' - case <-time.After(cfg.ExperimentalWaitClusterReadyTimeout): - e.GetLogger().Warn("startEtcd: timed out waiting for the ready notification") - } - return e.Server.StopNotify(), e.Err(), nil -} - -// identifyDataDirOrDie returns the type of the data dir. -// Dies if the datadir is invalid. -func identifyDataDirOrDie(lg *zap.Logger, dir string) dirType { - names, err := fileutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return dirEmpty - } - lg.Fatal("failed to list data directory", zap.String("dir", dir), zap.Error(err)) - } - - var m, p bool - for _, name := range names { - switch dirType(name) { - case dirMember: - m = true - case dirProxy: - p = true - default: - lg.Warn( - "found invalid file under data directory", - zap.String("filename", name), - zap.String("data-dir", dir), - ) - } - } - - if m && p { - lg.Fatal("invalid datadir; both member and proxy directories exist") - } - if m { - return dirMember - } - if p { - return dirProxy - } - return dirEmpty -} - -func checkSupportArch() { - lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel) - if err != nil { - panic(err) - } - // To add a new platform, check https://github.com/etcd-io/website/blob/main/content/en/docs/${VERSION}/op-guide/supported-platform.md. 
- // The ${VERSION} is the etcd version, e.g. v3.5, v3.6 etc. - switch runtime.GOARCH { - case "amd64", "arm64", "ppc64le", "s390x": - return - } - // unsupported arch only configured via environment variable - // so unset here to not parse through flag - defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH") - if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH { - lg.Info("running etcd on unsupported architecture since ETCD_UNSUPPORTED_ARCH is set", zap.String("arch", env)) - return - } - - lg.Error("running etcd on unsupported architecture since ETCD_UNSUPPORTED_ARCH is set", zap.String("arch", runtime.GOARCH)) - os.Exit(1) -} diff --git a/server/etcdmain/grpc_proxy.go b/server/etcdmain/grpc_proxy.go deleted file mode 100644 index 62aea935903..00000000000 --- a/server/etcdmain/grpc_proxy.go +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdmain - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "log" - "math" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/tlsutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/leasing" - "go.etcd.io/etcd/client/v3/namespace" - "go.etcd.io/etcd/client/v3/ordering" - "go.etcd.io/etcd/pkg/v3/debugutil" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/soheilhy/cmux" - "github.com/spf13/cobra" - "go.uber.org/zap" - "go.uber.org/zap/zapgrpc" - "golang.org/x/net/http2" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/keepalive" -) - -var ( - grpcProxyListenAddr string - grpcProxyMetricsListenAddr string - grpcProxyEndpoints []string - grpcProxyEndpointsAutoSyncInterval time.Duration - grpcProxyDNSCluster string - grpcProxyDNSClusterServiceName string - grpcProxyInsecureDiscovery bool - grpcProxyDataDir string - grpcMaxCallSendMsgSize int - grpcMaxCallRecvMsgSize int - - // tls for connecting to etcd - - grpcProxyCA string - grpcProxyCert string - grpcProxyKey string - grpcProxyInsecureSkipTLSVerify bool - - // tls for clients connecting to proxy - - grpcProxyListenCA string - grpcProxyListenCert string - grpcProxyListenKey string - grpcProxyListenCipherSuites []string - grpcProxyListenAutoTLS bool - grpcProxyListenCRL string - selfSignedCertValidity uint - - 
grpcProxyAdvertiseClientURL string - grpcProxyResolverPrefix string - grpcProxyResolverTTL int - - grpcProxyNamespace string - grpcProxyLeasing string - - grpcProxyEnablePprof bool - grpcProxyEnableOrdering bool - grpcProxyEnableLogging bool - - grpcProxyDebug bool - - // GRPC keep alive related options. - grpcKeepAliveMinTime time.Duration - grpcKeepAliveTimeout time.Duration - grpcKeepAliveInterval time.Duration - - maxConcurrentStreams uint32 -) - -const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024 - -func init() { - rootCmd.AddCommand(newGRPCProxyCommand()) -} - -// newGRPCProxyCommand returns the cobra command for "grpc-proxy". -func newGRPCProxyCommand() *cobra.Command { - lpc := &cobra.Command{ - Use: "grpc-proxy ", - Short: "grpc-proxy related command", - } - lpc.AddCommand(newGRPCProxyStartCommand()) - - return lpc -} - -func newGRPCProxyStartCommand() *cobra.Command { - cmd := cobra.Command{ - Use: "start", - Short: "start the grpc proxy", - Run: startGRPCProxy, - } - - cmd.Flags().StringVar(&grpcProxyListenAddr, "listen-addr", "127.0.0.1:23790", "listen address") - cmd.Flags().StringVar(&grpcProxyDNSCluster, "discovery-srv", "", "domain name to query for SRV records describing cluster endpoints") - cmd.Flags().StringVar(&grpcProxyDNSClusterServiceName, "discovery-srv-name", "", "service name to query when using DNS discovery") - cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for endpoint /metrics requests on an additional interface") - cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records") - cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints") - cmd.Flags().DurationVar(&grpcProxyEndpointsAutoSyncInterval, "endpoints-auto-sync-interval", 0, "etcd endpoints auto sync interval (disabled by default)") - cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", 
"127.0.0.1:23790", "advertise address to register (must be reachable by client)") - cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)") - cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints") - cmd.Flags().StringVar(&grpcProxyNamespace, "namespace", "", "string to prefix to all keys for namespacing requests") - cmd.Flags().BoolVar(&grpcProxyEnablePprof, "enable-pprof", false, `Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"`) - cmd.Flags().StringVar(&grpcProxyDataDir, "data-dir", "default.proxy", "Data directory for persistent data") - cmd.Flags().IntVar(&grpcMaxCallSendMsgSize, "max-send-bytes", defaultGRPCMaxCallSendMsgSize, "message send limits in bytes (default value is 1.5 MiB)") - cmd.Flags().IntVar(&grpcMaxCallRecvMsgSize, "max-recv-bytes", math.MaxInt32, "message receive limits in bytes (default value is math.MaxInt32)") - cmd.Flags().DurationVar(&grpcKeepAliveMinTime, "grpc-keepalive-min-time", embed.DefaultGRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging proxy.") - cmd.Flags().DurationVar(&grpcKeepAliveInterval, "grpc-keepalive-interval", embed.DefaultGRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).") - cmd.Flags().DurationVar(&grpcKeepAliveTimeout, "grpc-keepalive-timeout", embed.DefaultGRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).") - - // client TLS for connecting to server - cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file") - cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file") - cmd.Flags().StringVar(&grpcProxyCA, 
"cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle") - cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates (CAUTION: this option should be enabled only for testing purposes)") - - // client TLS for connecting to proxy - cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file") - cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file") - cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle") - cmd.Flags().StringSliceVar(&grpcProxyListenCipherSuites, "listen-cipher-suites", grpcProxyListenCipherSuites, "Comma-separated list of supported TLS cipher suites between client/proxy (empty will be auto-populated by Go).") - cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates") - cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.") - cmd.Flags().UintVar(&selfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the proxy certificates, unit is year") - - // experimental flags - cmd.Flags().BoolVar(&grpcProxyEnableOrdering, "experimental-serializable-ordering", false, "Ensure serializable reads have monotonically increasing store revisions across endpoints.") - cmd.Flags().StringVar(&grpcProxyLeasing, "experimental-leasing-prefix", "", "leasing metadata prefix for disconnected linearized reads.") - cmd.Flags().BoolVar(&grpcProxyEnableLogging, "experimental-enable-grpc-logging", false, "logging all grpc requests and responses") - - cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.") - - cmd.Flags().Uint32Var(&maxConcurrentStreams, 
"max-concurrent-streams", math.MaxUint32, "Maximum concurrent streams that each client can open at a time.") - - return &cmd -} - -func startGRPCProxy(cmd *cobra.Command, args []string) { - checkArgs() - lvl := zap.InfoLevel - if grpcProxyDebug { - lvl = zap.DebugLevel - grpc.EnableTracing = true - } - lg, err := logutil.CreateDefaultZapLogger(lvl) - if err != nil { - panic(err) - } - defer lg.Sync() - - grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) - - // The proxy itself (ListenCert) can have not-empty CN. - // The empty CN is required for grpcProxyCert. - // Please see https://github.com/etcd-io/etcd/issues/11970#issuecomment-687875315 for more context. - tlsInfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey, false) - if len(grpcProxyListenCipherSuites) > 0 { - cs, err := tlsutil.GetCipherSuites(grpcProxyListenCipherSuites) - if err != nil { - log.Fatal(err) - } - tlsInfo.CipherSuites = cs - } - if tlsInfo == nil && grpcProxyListenAutoTLS { - host := []string{"https://" + grpcProxyListenAddr} - dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy") - autoTLS, err := transport.SelfCert(lg, dir, host, selfSignedCertValidity) - if err != nil { - log.Fatal(err) - } - tlsInfo = &autoTLS - } - - if tlsInfo != nil { - lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsInfo))) - } - m := mustListenCMux(lg, tlsInfo) - grpcl := m.Match(cmux.HTTP2()) - defer func() { - grpcl.Close() - lg.Info("stop listening gRPC proxy client requests", zap.String("address", grpcProxyListenAddr)) - }() - - client := mustNewClient(lg) - - // The proxy client is used for self-healthchecking. - // TODO: The mechanism should be refactored to use internal connection. 
- var proxyClient *clientv3.Client - if grpcProxyAdvertiseClientURL != "" { - proxyClient = mustNewProxyClient(lg, tlsInfo) - } - httpClient := mustNewHTTPClient(lg) - - srvhttp, httpl := mustHTTPListener(lg, m, tlsInfo, client, proxyClient) - - if err := http2.ConfigureServer(srvhttp, &http2.Server{ - MaxConcurrentStreams: maxConcurrentStreams, - }); err != nil { - lg.Fatal("Failed to configure the http server", zap.Error(err)) - } - - errc := make(chan error, 3) - go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }() - go func() { errc <- srvhttp.Serve(httpl) }() - go func() { errc <- m.Serve() }() - if len(grpcProxyMetricsListenAddr) > 0 { - mhttpl := mustMetricsListener(lg, tlsInfo) - go func() { - mux := http.NewServeMux() - grpcproxy.HandleMetrics(mux, httpClient, client.Endpoints()) - grpcproxy.HandleHealth(lg, mux, client) - grpcproxy.HandleProxyMetrics(mux) - grpcproxy.HandleProxyHealth(lg, mux, proxyClient) - lg.Info("gRPC proxy server metrics URL serving") - herr := http.Serve(mhttpl, mux) - if herr != nil { - lg.Fatal("gRPC proxy server metrics URL returned", zap.Error(herr)) - } else { - lg.Info("gRPC proxy server metrics URL returned") - } - }() - } - - lg.Info("started gRPC proxy", zap.String("address", grpcProxyListenAddr)) - - // grpc-proxy is initialized, ready to serve - notifySystemd(lg) - - fmt.Fprintln(os.Stderr, <-errc) - os.Exit(1) -} - -func checkArgs() { - if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL < 1 { - fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-ttl %d", grpcProxyResolverTTL)) - os.Exit(1) - } - if grpcProxyResolverPrefix == "" && grpcProxyResolverTTL > 0 { - fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-prefix %q", grpcProxyResolverPrefix)) - os.Exit(1) - } - if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL > 0 && grpcProxyAdvertiseClientURL == "" { - fmt.Fprintln(os.Stderr, fmt.Errorf("invalid advertise-client-url %q", grpcProxyAdvertiseClientURL)) - os.Exit(1) - } - if 
grpcProxyListenAutoTLS && selfSignedCertValidity == 0 { - fmt.Fprintln(os.Stderr, fmt.Errorf("selfSignedCertValidity is invalid,it should be greater than 0")) - os.Exit(1) - } -} - -func mustNewClient(lg *zap.Logger) *clientv3.Client { - srvs := discoverEndpoints(lg, grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery, grpcProxyDNSClusterServiceName) - eps := srvs.Endpoints - if len(eps) == 0 { - eps = grpcProxyEndpoints - } - cfg, err := newClientCfg(lg, eps) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - cfg.DialOptions = append(cfg.DialOptions, - grpc.WithUnaryInterceptor(grpcproxy.AuthUnaryClientInterceptor)) - cfg.DialOptions = append(cfg.DialOptions, - grpc.WithStreamInterceptor(grpcproxy.AuthStreamClientInterceptor)) - cfg.Logger = lg.Named("client") - client, err := clientv3.New(*cfg) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - return client -} - -func mustNewProxyClient(lg *zap.Logger, tls *transport.TLSInfo) *clientv3.Client { - eps := []string{grpcProxyAdvertiseClientURL} - cfg, err := newProxyClientCfg(lg.Named("client"), eps, tls) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - client, err := clientv3.New(*cfg) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - lg.Info("create proxy client", zap.String("grpcProxyAdvertiseClientURL", grpcProxyAdvertiseClientURL)) - return client -} - -func newProxyClientCfg(lg *zap.Logger, eps []string, tls *transport.TLSInfo) (*clientv3.Config, error) { - cfg := clientv3.Config{ - Endpoints: eps, - DialTimeout: 5 * time.Second, - Logger: lg, - } - if tls != nil { - clientTLS, err := tls.ClientConfig() - if err != nil { - return nil, err - } - cfg.TLS = clientTLS - } - return &cfg, nil -} - -func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) { - // set tls if any one tls option set - cfg := clientv3.Config{ - Endpoints: eps, - AutoSyncInterval: grpcProxyEndpointsAutoSyncInterval, - DialTimeout: 5 * 
time.Second, - } - - if grpcMaxCallSendMsgSize > 0 { - cfg.MaxCallSendMsgSize = grpcMaxCallSendMsgSize - } - if grpcMaxCallRecvMsgSize > 0 { - cfg.MaxCallRecvMsgSize = grpcMaxCallRecvMsgSize - } - - tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey, true) - if tls == nil && grpcProxyInsecureSkipTLSVerify { - tls = &transport.TLSInfo{} - } - if tls != nil { - clientTLS, err := tls.ClientConfig() - if err != nil { - return nil, err - } - clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify - if clientTLS.InsecureSkipVerify { - lg.Warn("--insecure-skip-tls-verify was given, this grpc proxy process skips authentication of etcd server TLS certificates. This option should be enabled only for testing purposes.") - } - cfg.TLS = clientTLS - lg.Info("gRPC proxy client TLS", zap.String("tls-info", fmt.Sprintf("%+v", tls))) - } - return &cfg, nil -} - -func newTLS(ca, cert, key string, requireEmptyCN bool) *transport.TLSInfo { - if ca == "" && cert == "" && key == "" { - return nil - } - return &transport.TLSInfo{TrustedCAFile: ca, CertFile: cert, KeyFile: key, EmptyCN: requireEmptyCN} -} - -func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux { - l, err := net.Listen("tcp", grpcProxyListenAddr) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - - if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if tlsinfo != nil { - tlsinfo.CRLFile = grpcProxyListenCRL - if l, err = transport.NewTLSListener(l, tlsinfo); err != nil { - lg.Fatal("failed to create TLS listener", zap.Error(err)) - } - } - - lg.Info("listening for gRPC proxy client requests", zap.String("address", grpcProxyListenAddr)) - return cmux.New(l) -} - -func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server { - if grpcProxyEnableOrdering { - vf := ordering.NewOrderViolationSwitchEndpointClosure(client) - client.KV = ordering.NewKV(client.KV, vf) - lg.Info("waiting for 
linearized read from cluster to recover ordering") - for { - _, err := client.KV.Get(context.TODO(), "_", clientv3.WithKeysOnly()) - if err == nil { - break - } - lg.Warn("ordering recovery failed, retrying in 1s", zap.Error(err)) - time.Sleep(time.Second) - } - } - - if len(grpcProxyNamespace) > 0 { - client.KV = namespace.NewKV(client.KV, grpcProxyNamespace) - client.Watcher = namespace.NewWatcher(client.Watcher, grpcProxyNamespace) - client.Lease = namespace.NewLease(client.Lease, grpcProxyNamespace) - } - - if len(grpcProxyLeasing) > 0 { - client.KV, _, _ = leasing.NewKV(client, grpcProxyLeasing) - } - - kvp, _ := grpcproxy.NewKvProxy(client) - watchp, _ := grpcproxy.NewWatchProxy(client.Ctx(), lg, client) - if grpcProxyResolverPrefix != "" { - grpcproxy.Register(lg, client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL) - } - clusterp, _ := grpcproxy.NewClusterProxy(lg, client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix) - leasep, _ := grpcproxy.NewLeaseProxy(client.Ctx(), client) - - mainp := grpcproxy.NewMaintenanceProxy(client) - authp := grpcproxy.NewAuthProxy(client) - electionp := grpcproxy.NewElectionProxy(client) - lockp := grpcproxy.NewLockProxy(client) - - alwaysLoggingDeciderServer := func(ctx context.Context, fullMethodName string, servingObject interface{}) bool { return true } - - grpcChainStreamList := []grpc.StreamServerInterceptor{ - grpc_prometheus.StreamServerInterceptor, - } - grpcChainUnaryList := []grpc.UnaryServerInterceptor{ - grpc_prometheus.UnaryServerInterceptor, - } - if grpcProxyEnableLogging { - grpcChainStreamList = append(grpcChainStreamList, - grpc_ctxtags.StreamServerInterceptor(), - grpc_zap.PayloadStreamServerInterceptor(lg, alwaysLoggingDeciderServer), - ) - grpcChainUnaryList = append(grpcChainUnaryList, - grpc_ctxtags.UnaryServerInterceptor(), - grpc_zap.PayloadUnaryServerInterceptor(lg, alwaysLoggingDeciderServer), - ) - } - - gopts := []grpc.ServerOption{ - 
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - grpcChainStreamList..., - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - grpcChainUnaryList..., - )), - grpc.MaxConcurrentStreams(math.MaxUint32), - } - if grpcKeepAliveMinTime > time.Duration(0) { - gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: grpcKeepAliveMinTime, - PermitWithoutStream: false, - })) - } - if grpcKeepAliveInterval > time.Duration(0) || - grpcKeepAliveTimeout > time.Duration(0) { - gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: grpcKeepAliveInterval, - Timeout: grpcKeepAliveTimeout, - })) - } - - server := grpc.NewServer(gopts...) - - pb.RegisterKVServer(server, kvp) - pb.RegisterWatchServer(server, watchp) - pb.RegisterClusterServer(server, clusterp) - pb.RegisterLeaseServer(server, leasep) - pb.RegisterMaintenanceServer(server, mainp) - pb.RegisterAuthServer(server, authp) - v3electionpb.RegisterElectionServer(server, electionp) - v3lockpb.RegisterLockServer(server, lockp) - - return server -} - -func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client, proxy *clientv3.Client) (*http.Server, net.Listener) { - httpClient := mustNewHTTPClient(lg) - httpmux := http.NewServeMux() - httpmux.HandleFunc("/", http.NotFound) - grpcproxy.HandleMetrics(httpmux, httpClient, c.Endpoints()) - grpcproxy.HandleHealth(lg, httpmux, c) - grpcproxy.HandleProxyMetrics(httpmux) - grpcproxy.HandleProxyHealth(lg, httpmux, proxy) - if grpcProxyEnablePprof { - for p, h := range debugutil.PProfHandlers() { - httpmux.Handle(p, h) - } - lg.Info("gRPC proxy enabled pprof", zap.String("path", debugutil.HTTPPrefixPProf)) - } - srvhttp := &http.Server{ - Handler: httpmux, - ErrorLog: log.New(io.Discard, "net/http", 0), - } - - if tlsinfo == nil { - return srvhttp, m.Match(cmux.HTTP1()) - } - - srvTLS, err := tlsinfo.ServerConfig() - if err != nil { - lg.Fatal("failed to set up TLS", 
zap.Error(err)) - } - srvhttp.TLSConfig = srvTLS - return srvhttp, m.Match(cmux.Any()) -} - -func mustNewHTTPClient(lg *zap.Logger) *http.Client { - transport, err := newHTTPTransport(grpcProxyCA, grpcProxyCert, grpcProxyKey) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - return &http.Client{Transport: transport} -} - -func newHTTPTransport(ca, cert, key string) (*http.Transport, error) { - tr := &http.Transport{} - - if ca != "" && cert != "" && key != "" { - caCert, err := os.ReadFile(ca) - if err != nil { - return nil, err - } - keyPair, err := tls.LoadX509KeyPair(cert, key) - if err != nil { - return nil, err - } - caPool := x509.NewCertPool() - caPool.AppendCertsFromPEM(caCert) - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{keyPair}, - RootCAs: caPool, - } - tlsConfig.BuildNameToCertificate() - tr.TLSClientConfig = tlsConfig - } else if grpcProxyInsecureSkipTLSVerify { - tlsConfig := &tls.Config{InsecureSkipVerify: grpcProxyInsecureSkipTLSVerify} - tr.TLSClientConfig = tlsConfig - } - return tr, nil -} - -func mustMetricsListener(lg *zap.Logger, tlsinfo *transport.TLSInfo) net.Listener { - murl, err := url.Parse(grpcProxyMetricsListenAddr) - if err != nil { - fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr) - os.Exit(1) - } - ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsinfo) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - lg.Info("gRPC proxy listening for metrics", zap.String("address", murl.String())) - return ml -} diff --git a/server/etcdmain/help.go b/server/etcdmain/help.go deleted file mode 100644 index bc444208d48..00000000000 --- a/server/etcdmain/help.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2015 The etcd Authors -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdmain - -import ( - "fmt" - "strconv" - - "golang.org/x/crypto/bcrypt" - - cconfig "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/embed" -) - -var ( - usageline = `Usage: - - etcd [flags] - Start an etcd server. - - etcd --version - Show the version of etcd. - - etcd -h | --help - Show the help information about etcd. - - etcd --config-file - Path to the server configuration file. Note that if a configuration file is provided, other command line flags and environment variables will be ignored. - - etcd gateway - Run the stateless pass-through etcd TCP connection forwarding proxy. - - etcd grpc-proxy - Run the stateless etcd v3 gRPC L7 reverse proxy. -` - flagsline = ` -Member: - --name 'default' - Human-readable name for this member. - --data-dir '${name}.etcd' - Path to the data directory. - --wal-dir '' - Path to the dedicated wal directory. - --snapshot-count '100000' - Number of committed transactions to trigger a snapshot to disk. - --heartbeat-interval '100' - Time (in milliseconds) of a heartbeat interval. - --election-timeout '1000' - Time (in milliseconds) for an election to timeout. See tuning documentation for details. - --initial-election-tick-advance 'true' - Whether to fast-forward initial election ticks on boot for faster election. - --listen-peer-urls 'http://localhost:2380' - List of URLs to listen on for peer traffic. - --listen-client-urls 'http://localhost:2379' - List of URLs to listen on for client traffic. 
- --max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `' - Maximum number of snapshot files to retain (0 is unlimited). - --max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `' - Maximum number of wal files to retain (0 is unlimited). - --quota-backend-bytes '0' - Raise alarms when backend size exceeds the given quota (0 defaults to low space quota). - --backend-bbolt-freelist-type 'map' - BackendFreelistType specifies the type of freelist that boltdb backend uses(array and map are supported types). - --backend-batch-interval '' - BackendBatchInterval is the maximum time before commit the backend transaction. - --backend-batch-limit '0' - BackendBatchLimit is the maximum operations before commit the backend transaction. - --max-txn-ops '128' - Maximum number of operations permitted in a transaction. - --max-request-bytes '1572864' - Maximum client request size in bytes the server will accept. - --max-concurrent-streams 'math.MaxUint32' - Maximum concurrent streams that each client can open at a time. - --grpc-keepalive-min-time '5s' - Minimum duration interval that a client should wait before pinging server. - --grpc-keepalive-interval '2h' - Frequency duration of server-to-client ping to check if a connection is alive (0 to disable). - --grpc-keepalive-timeout '20s' - Additional duration of wait before closing a non-responsive connection (0 to disable). - --socket-reuse-port 'false' - Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use. - --socket-reuse-address 'false' - Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in TIME_WAIT state. - -Clustering: - --initial-advertise-peer-urls 'http://localhost:2380' - List of this member's peer URLs to advertise to the rest of the cluster. - --initial-cluster 'default=http://localhost:2380' - Initial cluster configuration for bootstrapping. - --initial-cluster-state 'new' - Initial cluster state ('new' or 'existing'). 
- --initial-cluster-token 'etcd-cluster' - Initial cluster token for the etcd cluster during bootstrap. - Specifying this can protect you from unintended cross-cluster interaction when running multiple clusters. - --advertise-client-urls 'http://localhost:2379' - List of this member's client URLs to advertise to the public. - The client URLs advertised should be accessible to machines that talk to etcd cluster. etcd client libraries parse these URLs to connect to the cluster. - --discovery '' - Discovery URL used to bootstrap the cluster for v2 discovery. Will be deprecated in v3.7, and be decommissioned in v3.8. - --discovery-token '' - V3 discovery: discovery token for the etcd cluster to be bootstrapped. - --discovery-endpoints '' - V3 discovery: List of gRPC endpoints of the discovery service. - --discovery-dial-timeout '2s' - V3 discovery: dial timeout for client connections. - --discovery-request-timeout '5s' - V3 discovery: timeout for discovery requests (excluding dial timeout). - --discovery-keepalive-time '2s' - V3 discovery: keepalive time for client connections. - --discovery-keepalive-timeout '6s' - V3 discovery: keepalive timeout for client connections. - --discovery-insecure-transport 'true' - V3 discovery: disable transport security for client connections. - --discovery-insecure-skip-tls-verify 'false' - V3 discovery: skip server certificate verification (CAUTION: this option should be enabled only for testing purposes). - --discovery-cert '' - V3 discovery: identify secure client using this TLS certificate file. - --discovery-key '' - V3 discovery: identify secure client using this TLS key file. - --discovery-cacert '' - V3 discovery: verify certificates of TLS-enabled secure servers using this CA bundle. - --discovery-user '' - V3 discovery: username[:password] for authentication (prompt if password is not supplied). 
- --discovery-password '' - V3 discovery: password for authentication (if this option is used, --user option shouldn't include password). - --discovery-fallback 'exit' - Expected behavior ('exit') when discovery services fails. Note that v2 proxy is removed. - --discovery-proxy '' - HTTP proxy to use for traffic to discovery service. Will be deprecated in v3.7, and be decommissioned in v3.8. - --discovery-srv '' - DNS srv domain used to bootstrap the cluster. - --discovery-srv-name '' - Suffix to the dns srv name queried when bootstrapping. - --strict-reconfig-check '` + strconv.FormatBool(embed.DefaultStrictReconfigCheck) + `' - Reject reconfiguration requests that would cause quorum loss. - --pre-vote 'true' - Enable to run an additional Raft election phase. - --auto-compaction-retention '0' - Auto compaction retention length. 0 means disable auto compaction. - --auto-compaction-mode 'periodic' - Interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention. - --v2-deprecation '` + string(cconfig.V2_DEPR_DEFAULT) + `' - Phase of v2store deprecation. Allows to opt-in for higher compatibility mode. - Supported values: - 'not-yet' // Issues a warning if v2store have meaningful content (default in v3.5) - 'write-only' // Custom v2 state is not allowed (planned default in v3.6) - 'write-only-drop-data' // Custom v2 state will get DELETED ! - 'gone' // v2store is not maintained any longer. (planned default in v3.7) - -Security: - --cert-file '' - Path to the client server TLS cert file. - --key-file '' - Path to the client server TLS key file. - --client-cert-auth 'false' - Enable client cert authentication. - --client-crl-file '' - Path to the client certificate revocation list file. - --client-cert-allowed-hostname '' - Allowed TLS hostname for client cert authentication. 
- --trusted-ca-file '' - Path to the client server TLS trusted CA cert file. - --auto-tls 'false' - Client TLS using generated certificates. - --peer-cert-file '' - Path to the peer server TLS cert file. - --peer-key-file '' - Path to the peer server TLS key file. - --peer-client-cert-auth 'false' - Enable peer client cert authentication. - --peer-trusted-ca-file '' - Path to the peer server TLS trusted CA file. - --peer-cert-allowed-cn '' - Required CN for client certs connecting to the peer endpoint. - --peer-cert-allowed-hostname '' - Allowed TLS hostname for inter peer authentication. - --peer-auto-tls 'false' - Peer TLS using self-generated certificates if --peer-key-file and --peer-cert-file are not provided. - --self-signed-cert-validity '1' - The validity period of the client and peer certificates that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS, the unit is year, and the default is 1. - --peer-crl-file '' - Path to the peer certificate revocation list file. - --cipher-suites '' - Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go). - --cors '*' - Comma-separated whitelist of origins for CORS, or cross-origin resource sharing, (empty or * means allow all). - --host-whitelist '*' - Acceptable hostnames from HTTP client requests, if server is not secure (empty or * means allow all). - --tls-min-version 'TLS1.2' - Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3. - --tls-max-version '' - Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty will be auto-populated by Go). - -Auth: - --auth-token 'simple' - Specify a v3 authentication token type and its options ('simple' or 'jwt'). - --bcrypt-cost ` + fmt.Sprintf("%d", bcrypt.DefaultCost) + ` - Specify the cost / strength of the bcrypt algorithm for hashing auth passwords. 
Valid values are between ` + fmt.Sprintf("%d", bcrypt.MinCost) + ` and ` + fmt.Sprintf("%d", bcrypt.MaxCost) + `. - --auth-token-ttl 300 - Time (in seconds) of the auth-token-ttl. - -Profiling and Monitoring: - --enable-pprof 'false' - Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/" - --metrics 'basic' - Set level of detail for exported metrics, specify 'extensive' to include server side grpc histogram metrics. - --listen-metrics-urls '' - List of URLs to listen on for the metrics and health endpoints. - -Logging: - --logger 'zap' - Currently only supports 'zap' for structured logging. - --log-outputs 'default' - Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or list of comma separated output targets. - --log-level 'info' - Configures log level. Only supports debug, info, warn, error, panic, or fatal. - --log-format 'json' - Configures log format. Only supports json, console. - --enable-log-rotation 'false' - Enable log rotation of a single log-outputs file target. - --log-rotation-config-json '{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}' - Configures log rotation if enabled with a JSON logger config. MaxSize(MB), MaxAge(days,0=no limit), MaxBackups(0=no limit), LocalTime(use computers local time), Compress(gzip)". - --warning-unary-request-duration '300ms' - Set time duration after which a warning is logged if a unary request takes more than this duration. - -Experimental distributed tracing: - --experimental-enable-distributed-tracing 'false' - Enable experimental distributed tracing. - --experimental-distributed-tracing-address 'localhost:4317' - Distributed tracing collector address. - --experimental-distributed-tracing-service-name 'etcd' - Distributed tracing service name, must be same across all etcd instances. - --experimental-distributed-tracing-instance-id '' - Distributed tracing instance ID, must be unique per each etcd instance. 
- --experimental-distributed-tracing-sampling-rate '0' - Number of samples to collect per million spans for distributed tracing. Disabled by default. - -Experimental feature: - --experimental-initial-corrupt-check 'false' - Enable to check data corruption before serving any client/peer traffic. - --experimental-corrupt-check-time '0s' - Duration of time between cluster corruption check passes. - --experimental-enable-lease-checkpoint 'false' - ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases. - --experimental-compaction-batch-limit 1000 - ExperimentalCompactionBatchLimit sets the maximum revisions deleted in each compaction batch. - --experimental-peer-skip-client-san-verification 'false' - Skip verification of SAN field in client certificate for peer connections. - --experimental-watch-progress-notify-interval '10m' - Duration of periodical watch progress notification. - --experimental-warning-apply-duration '100ms' - Warning is generated if requests take more than this duration. - --experimental-txn-mode-write-with-shared-buffer 'true' - Enable the write transaction to use a shared buffer in its readonly check operations. - --experimental-bootstrap-defrag-threshold-megabytes - Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect. - --experimental-warning-unary-request-duration '300ms' - Set time duration after which a warning is generated if a unary request takes more than this duration. It's deprecated, and will be decommissioned in v3.7. Use --warning-unary-request-duration instead. - --experimental-max-learners '1' - Set the max number of learner members allowed in the cluster membership. - --experimental-wait-cluster-ready-timeout '5s' - Set the maximum time duration to wait for the cluster to be ready. 
- --experimental-snapshot-catch-up-entries '5000' - Number of entries for a slow follower to catch up after compacting the the raft storage entries. - -Unsafe feature: - --force-new-cluster 'false' - Force to create a new one-member cluster. - --unsafe-no-fsync 'false' - Disables fsync, unsafe, will cause data loss. - -CAUTIOUS with unsafe flag! It may break the guarantees given by the consensus protocol! -` -) - -// Add back "TO BE DEPRECATED" section if needed diff --git a/server/etcdmain/main.go b/server/etcdmain/main.go deleted file mode 100644 index e28e7da928d..00000000000 --- a/server/etcdmain/main.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdmain - -import ( - "fmt" - "os" - - "github.com/coreos/go-systemd/v22/daemon" - "go.uber.org/zap" -) - -func Main(args []string) { - checkSupportArch() - - if len(args) > 1 { - cmd := args[1] - switch cmd { - case "gateway", "grpc-proxy": - if err := rootCmd.Execute(); err != nil { - fmt.Fprint(os.Stderr, err) - os.Exit(1) - } - return - } - } - - startEtcdOrProxyV2(args) -} - -func notifySystemd(lg *zap.Logger) { - lg.Info("notifying init daemon") - _, err := daemon.SdNotify(false, daemon.SdNotifyReady) - if err != nil { - lg.Error("failed to notify systemd for readiness", zap.Error(err)) - return - } - lg.Info("successfully notified init daemon") -} diff --git a/server/etcdmain/util.go b/server/etcdmain/util.go deleted file mode 100644 index 0bd23e9e591..00000000000 --- a/server/etcdmain/util.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdmain - -import ( - "fmt" - "os" - - "go.etcd.io/etcd/client/pkg/v3/srv" - "go.etcd.io/etcd/client/pkg/v3/transport" - - "go.uber.org/zap" -) - -func discoverEndpoints(lg *zap.Logger, dns string, ca string, insecure bool, serviceName string) (s srv.SRVClients) { - if dns == "" { - return s - } - srvs, err := srv.GetClient("etcd-client", dns, serviceName) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - endpoints := srvs.Endpoints - - if lg != nil { - lg.Info( - "discovered cluster from SRV", - zap.String("srv-server", dns), - zap.Strings("endpoints", endpoints), - ) - } - - if insecure { - return *srvs - } - // confirm TLS connections are good - tlsInfo := transport.TLSInfo{ - TrustedCAFile: ca, - ServerName: dns, - } - - if lg != nil { - lg.Info( - "validating discovered SRV endpoints", - zap.String("srv-server", dns), - zap.Strings("endpoints", endpoints), - ) - } - - endpoints, err = transport.ValidateSecureEndpoints(tlsInfo, endpoints) - if err != nil { - if lg != nil { - lg.Warn( - "failed to validate discovered endpoints", - zap.String("srv-server", dns), - zap.Strings("endpoints", endpoints), - zap.Error(err), - ) - } - } else { - if lg != nil { - lg.Info( - "using validated discovered SRV endpoints", - zap.String("srv-server", dns), - zap.Strings("endpoints", endpoints), - ) - } - } - - // map endpoints back to SRVClients struct with SRV data - eps := make(map[string]struct{}) - for _, ep := range endpoints { - eps[ep] = struct{}{} - } - for i := range srvs.Endpoints { - if _, ok := eps[srvs.Endpoints[i]]; !ok { - continue - } - s.Endpoints = append(s.Endpoints, srvs.Endpoints[i]) - s.SRVs = append(s.SRVs, srvs.SRVs[i]) - } - - return s -} diff --git a/server/etcdserver/adapters.go b/server/etcdserver/adapters.go deleted file mode 100644 index 8a95b9488fd..00000000000 --- a/server/etcdserver/adapters.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 
(the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "context" - - "github.com/coreos/go-semver/semver" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" - "go.etcd.io/etcd/api/v3/version" - serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -// serverVersionAdapter implements the interface Server defined in package -// go.etcd.io/etcd/server/v3/etcdserver/version, and it's needed by Monitor -// in the same package. 
-type serverVersionAdapter struct { - *EtcdServer -} - -func NewServerVersionAdapter(s *EtcdServer) *serverVersionAdapter { - return &serverVersionAdapter{ - EtcdServer: s, - } -} - -var _ serverversion.Server = (*serverVersionAdapter)(nil) - -func (s *serverVersionAdapter) UpdateClusterVersion(version string) { - s.GoAttach(func() { s.updateClusterVersionV3(version) }) -} - -func (s *serverVersionAdapter) LinearizableReadNotify(ctx context.Context) error { - return s.linearizableReadNotify(ctx) -} - -func (s *serverVersionAdapter) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { - raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()} - _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) - return err -} - -func (s *serverVersionAdapter) DowngradeCancel(ctx context.Context) error { - raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false} - _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) - return err -} - -func (s *serverVersionAdapter) GetClusterVersion() *semver.Version { - return s.cluster.Version() -} - -func (s *serverVersionAdapter) GetDowngradeInfo() *serverversion.DowngradeInfo { - return s.cluster.DowngradeInfo() -} - -func (s *serverVersionAdapter) GetMembersVersions() map[string]*version.Versions { - return getMembersVersions(s.lg, s.cluster, s.MemberId(), s.peerRt, s.Cfg.ReqTimeout()) -} - -func (s *serverVersionAdapter) GetStorageVersion() *semver.Version { - return s.StorageVersion() -} - -func (s *serverVersionAdapter) UpdateStorageVersion(target semver.Version) error { - // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock. 
- s.bemu.RLock() - defer s.bemu.RUnlock() - - tx := s.be.BatchTx() - tx.LockOutsideApply() - defer tx.Unlock() - return schema.UnsafeMigrate(s.lg, tx, s.r.storage, target) -} diff --git a/server/etcdserver/api/capability.go b/server/etcdserver/api/capability.go deleted file mode 100644 index 8546eeb543a..00000000000 --- a/server/etcdserver/api/capability.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "sync" - - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/version" - serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" - - "github.com/coreos/go-semver/semver" -) - -type Capability string - -const ( - AuthCapability Capability = "auth" - V3rpcCapability Capability = "v3rpc" -) - -var ( - // capabilityMaps is a static map of version to capability map. 
- capabilityMaps = map[string]map[Capability]bool{ - "3.0.0": {AuthCapability: true, V3rpcCapability: true}, - "3.1.0": {AuthCapability: true, V3rpcCapability: true}, - "3.2.0": {AuthCapability: true, V3rpcCapability: true}, - "3.3.0": {AuthCapability: true, V3rpcCapability: true}, - "3.4.0": {AuthCapability: true, V3rpcCapability: true}, - "3.5.0": {AuthCapability: true, V3rpcCapability: true}, - "3.6.0": {AuthCapability: true, V3rpcCapability: true}, - } - - enableMapMu sync.RWMutex - // enabledMap points to a map in capabilityMaps - enabledMap map[Capability]bool - - curVersion *semver.Version -) - -func init() { - enabledMap = map[Capability]bool{ - AuthCapability: true, - V3rpcCapability: true, - } -} - -// UpdateCapability updates the enabledMap when the cluster version increases. -func UpdateCapability(lg *zap.Logger, v *semver.Version) { - if v == nil { - // if recovered but version was never set by cluster - return - } - enableMapMu.Lock() - if curVersion != nil && !serverversion.IsValidVersionChange(v, curVersion) { - enableMapMu.Unlock() - return - } - curVersion = v - enabledMap = capabilityMaps[curVersion.String()] - enableMapMu.Unlock() - - if lg != nil { - lg.Info( - "enabled capabilities for version", - zap.String("cluster-version", version.Cluster(v.String())), - ) - } -} - -func IsCapabilityEnabled(c Capability) bool { - enableMapMu.RLock() - defer enableMapMu.RUnlock() - if enabledMap == nil { - return false - } - return enabledMap[c] -} - -func EnableCapability(c Capability) { - enableMapMu.Lock() - defer enableMapMu.Unlock() - enabledMap[c] = true -} diff --git a/server/etcdserver/api/cluster.go b/server/etcdserver/api/cluster.go deleted file mode 100644 index f05997da52a..00000000000 --- a/server/etcdserver/api/cluster.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - - "github.com/coreos/go-semver/semver" -) - -// Cluster is an interface representing a collection of members in one etcd cluster. -type Cluster interface { - // ID returns the cluster ID - ID() types.ID - // ClientURLs returns an aggregate set of all URLs on which this - // cluster is listening for client requests - ClientURLs() []string - // Members returns a slice of members sorted by their ID - Members() []*membership.Member - // Member retrieves a particular member based on ID, or nil if the - // member does not exist in the cluster - Member(id types.ID) *membership.Member - // Version is the cluster-wide minimum major.minor version. - Version() *semver.Version -} diff --git a/server/etcdserver/api/doc.go b/server/etcdserver/api/doc.go deleted file mode 100644 index f44881be663..00000000000 --- a/server/etcdserver/api/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package api manages the capabilities and features that are exposed to clients by the etcd cluster. -package api diff --git a/server/etcdserver/api/etcdhttp/debug.go b/server/etcdserver/api/etcdhttp/debug.go deleted file mode 100644 index ab7feee97f6..00000000000 --- a/server/etcdserver/api/etcdhttp/debug.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdhttp - -import ( - "expvar" - "fmt" - "net/http" -) - -const ( - varsPath = "/debug/vars" -) - -func HandleDebug(mux *http.ServeMux) { - mux.HandleFunc(varsPath, serveVars) -} - -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprint(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprint(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprint(w, "\n}\n") -} diff --git a/server/etcdserver/api/etcdhttp/doc.go b/server/etcdserver/api/etcdhttp/doc.go deleted file mode 100644 index a03b626204f..00000000000 --- a/server/etcdserver/api/etcdhttp/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package etcdhttp implements HTTP transportation layer for etcdserver. -package etcdhttp diff --git a/server/etcdserver/api/etcdhttp/health.go b/server/etcdserver/api/etcdhttp/health.go deleted file mode 100644 index 95950de8961..00000000000 --- a/server/etcdserver/api/etcdhttp/health.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdhttp - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/raft/v3" -) - -const ( - PathHealth = "/health" - PathProxyHealth = "/proxy/health" -) - -type ServerHealth interface { - Alarms() []*pb.AlarmMember - Leader() types.ID - Range(context.Context, *pb.RangeRequest) (*pb.RangeResponse, error) - Config() config.ServerConfig -} - -// HandleHealth registers metrics and health handlers. it checks health by using v3 range request -// and its corresponding timeout. -func HandleHealth(lg *zap.Logger, mux *http.ServeMux, srv ServerHealth) { - mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet, serializable bool) Health { - if h := checkAlarms(lg, srv, excludedAlarms); h.Health != "true" { - return h - } - if h := checkLeader(lg, srv, serializable); h.Health != "true" { - return h - } - return checkAPI(lg, srv, serializable) - })) -} - -// NewHealthHandler handles '/health' requests. 
-func NewHealthHandler(lg *zap.Logger, hfunc func(excludedAlarms AlarmSet, Serializable bool) Health) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - w.Header().Set("Allow", http.MethodGet) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - lg.Warn("/health error", zap.Int("status-code", http.StatusMethodNotAllowed)) - return - } - excludedAlarms := getExcludedAlarms(r) - // Passing the query parameter "serializable=true" ensures that the - // health of the local etcd is checked vs the health of the cluster. - // This is useful for probes attempting to validate the liveness of - // the etcd process vs readiness of the cluster to serve requests. - serializableFlag := getSerializableFlag(r) - h := hfunc(excludedAlarms, serializableFlag) - defer func() { - if h.Health == "true" { - healthSuccess.Inc() - } else { - healthFailed.Inc() - } - }() - d, _ := json.Marshal(h) - if h.Health != "true" { - http.Error(w, string(d), http.StatusServiceUnavailable) - lg.Warn("/health error", zap.String("output", string(d)), zap.Int("status-code", http.StatusServiceUnavailable)) - return - } - w.WriteHeader(http.StatusOK) - w.Write(d) - lg.Debug("/health OK", zap.Int("status-code", http.StatusOK)) - } -} - -var ( - healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "health_success", - Help: "The total number of successful health checks", - }) - healthFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "health_failures", - Help: "The total number of failed health checks", - }) -) - -func init() { - prometheus.MustRegister(healthSuccess) - prometheus.MustRegister(healthFailed) -} - -// Health defines etcd server health status. 
-// TODO: remove manual parsing in etcdctl cluster-health -type Health struct { - Health string `json:"health"` - Reason string `json:"reason"` -} - -type AlarmSet map[string]struct{} - -func getExcludedAlarms(r *http.Request) (alarms AlarmSet) { - alarms = make(map[string]struct{}, 2) - alms, found := r.URL.Query()["exclude"] - if found { - for _, alm := range alms { - if len(alm) == 0 { - continue - } - alarms[alm] = struct{}{} - } - } - return alarms -} - -func getSerializableFlag(r *http.Request) bool { - return r.URL.Query().Get("serializable") == "true" -} - -// TODO: etcdserver.ErrNoLeader in health API - -func checkAlarms(lg *zap.Logger, srv ServerHealth, excludedAlarms AlarmSet) Health { - h := Health{Health: "true"} - as := srv.Alarms() - if len(as) > 0 { - for _, v := range as { - alarmName := v.Alarm.String() - if _, found := excludedAlarms[alarmName]; found { - lg.Debug("/health excluded alarm", zap.String("alarm", v.String())) - continue - } - - h.Health = "false" - switch v.Alarm { - case etcdserverpb.AlarmType_NOSPACE: - h.Reason = "ALARM NOSPACE" - case etcdserverpb.AlarmType_CORRUPT: - h.Reason = "ALARM CORRUPT" - default: - h.Reason = "ALARM UNKNOWN" - } - lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String())) - return h - } - } - - return h -} - -func checkLeader(lg *zap.Logger, srv ServerHealth, serializable bool) Health { - h := Health{Health: "true"} - if !serializable && (uint64(srv.Leader()) == raft.None) { - h.Health = "false" - h.Reason = "RAFT NO LEADER" - lg.Warn("serving /health false; no leader") - } - return h -} - -func checkAPI(lg *zap.Logger, srv ServerHealth, serializable bool) Health { - h := Health{Health: "true"} - cfg := srv.Config() - ctx, cancel := context.WithTimeout(context.Background(), cfg.ReqTimeout()) - _, err := srv.Range(ctx, &etcdserverpb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable}) - cancel() - if err != nil && err != auth.ErrUserEmpty && err != 
auth.ErrPermissionDenied { - h.Health = "false" - h.Reason = fmt.Sprintf("RANGE ERROR:%s", err) - lg.Warn("serving /health false; Range fails", zap.Error(err)) - return h - } - lg.Debug("serving /health true") - return h -} diff --git a/server/etcdserver/api/etcdhttp/health_test.go b/server/etcdserver/api/etcdhttp/health_test.go deleted file mode 100644 index 29172b5ee5c..00000000000 --- a/server/etcdserver/api/etcdhttp/health_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdhttp - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/raft/v3" -) - -type fakeStats struct{} - -func (s *fakeStats) SelfStats() []byte { return nil } -func (s *fakeStats) LeaderStats() []byte { return nil } -func (s *fakeStats) StoreStats() []byte { return nil } - -type fakeHealthServer struct { - fakeServer - health string - apiError error -} - -func (s *fakeHealthServer) Range(ctx context.Context, request *pb.RangeRequest) (*pb.RangeResponse, error) { - return nil, s.apiError -} - -func (s *fakeHealthServer) Config() config.ServerConfig { - return config.ServerConfig{} -} - -func (s *fakeHealthServer) Leader() types.ID { - if s.health == "true" { - return 1 - } - return types.ID(raft.None) -} -func (s *fakeHealthServer) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) { - if s.health == "true" { - return etcdserver.Response{}, nil - } - return etcdserver.Response{}, fmt.Errorf("fail health check") -} -func (s *fakeHealthServer) ClientCertAuthEnabled() bool { return false } - -func TestHealthHandler(t *testing.T) { - // define the input and expected output - // input: alarms, and healthCheckURL - tests := []struct { - name string - alarms []*pb.AlarmMember - healthCheckURL string - apiError error - - expectStatusCode int - expectHealth string - }{ - { - name: "Healthy if no alarm", - alarms: []*pb.AlarmMember{}, - healthCheckURL: "/health", - expectStatusCode: http.StatusOK, - expectHealth: "true", - }, - { - name: "Unhealthy if NOSPACE alarm is on", - alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}}, - healthCheckURL: "/health", - expectStatusCode: 
http.StatusServiceUnavailable, - expectHealth: "false", - }, - { - name: "Healthy if NOSPACE alarm is on and excluded", - alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}}, - healthCheckURL: "/health?exclude=NOSPACE", - expectStatusCode: http.StatusOK, - expectHealth: "true", - }, - { - name: "Healthy if NOSPACE alarm is excluded", - alarms: []*pb.AlarmMember{}, - healthCheckURL: "/health?exclude=NOSPACE", - expectStatusCode: http.StatusOK, - expectHealth: "true", - }, - { - name: "Healthy if multiple NOSPACE alarms are on and excluded", - alarms: []*pb.AlarmMember{{MemberID: uint64(1), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(2), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(3), Alarm: pb.AlarmType_NOSPACE}}, - healthCheckURL: "/health?exclude=NOSPACE", - expectStatusCode: http.StatusOK, - expectHealth: "true", - }, - { - name: "Unhealthy if NOSPACE alarms is excluded and CORRUPT is on", - alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}}, - healthCheckURL: "/health?exclude=NOSPACE", - expectStatusCode: http.StatusServiceUnavailable, - expectHealth: "false", - }, - { - name: "Unhealthy if both NOSPACE and CORRUPT are on and excluded", - alarms: []*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}}, - healthCheckURL: "/health?exclude=NOSPACE&exclude=CORRUPT", - expectStatusCode: http.StatusOK, - expectHealth: "true", - }, - { - name: "Healthy even if authentication failed", - healthCheckURL: "/health", - apiError: auth.ErrUserEmpty, - expectStatusCode: http.StatusOK, - expectHealth: "true", - }, - { - name: "Healthy even if authorization failed", - healthCheckURL: "/health", - apiError: auth.ErrPermissionDenied, - expectStatusCode: http.StatusOK, - expectHealth: "true", - }, - { - name: "Unhealthy if api is not available", - healthCheckURL: "/health", - apiError: 
fmt.Errorf("Unexpected error"), - expectStatusCode: http.StatusServiceUnavailable, - expectHealth: "false", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mux := http.NewServeMux() - HandleHealth(zaptest.NewLogger(t), mux, &fakeHealthServer{ - fakeServer: fakeServer{alarms: tt.alarms}, - health: tt.expectHealth, - apiError: tt.apiError, - }) - ts := httptest.NewServer(mux) - defer ts.Close() - - res, err := ts.Client().Do(&http.Request{Method: http.MethodGet, URL: testutil.MustNewURL(t, ts.URL+tt.healthCheckURL)}) - if err != nil { - t.Errorf("fail serve http request %s %v", tt.healthCheckURL, err) - } - if res == nil { - t.Errorf("got nil http response with http request %s", tt.healthCheckURL) - return - } - if res.StatusCode != tt.expectStatusCode { - t.Errorf("want statusCode %d but got %d", tt.expectStatusCode, res.StatusCode) - } - health, err := parseHealthOutput(res.Body) - if err != nil { - t.Errorf("fail parse health check output %v", err) - } - if health.Health != tt.expectHealth { - t.Errorf("want health %s but got %s", tt.expectHealth, health.Health) - } - }) - } -} - -func parseHealthOutput(body io.Reader) (Health, error) { - obj := Health{} - d, derr := io.ReadAll(body) - if derr != nil { - return obj, derr - } - if err := json.Unmarshal(d, &obj); err != nil { - return obj, err - } - return obj, nil -} diff --git a/server/etcdserver/api/etcdhttp/metrics.go b/server/etcdserver/api/etcdhttp/metrics.go deleted file mode 100644 index bf7d4a4a445..00000000000 --- a/server/etcdserver/api/etcdhttp/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdhttp - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -const ( - PathMetrics = "/metrics" - PathProxyMetrics = "/proxy/metrics" -) - -// HandleMetrics registers prometheus handler on '/metrics'. -func HandleMetrics(mux *http.ServeMux) { - mux.Handle(PathMetrics, promhttp.Handler()) -} diff --git a/server/etcdserver/api/etcdhttp/peer.go b/server/etcdserver/api/etcdhttp/peer.go deleted file mode 100644 index a205eca65c4..00000000000 --- a/server/etcdserver/api/etcdhttp/peer.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdhttp - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/server/v3/lease/leasehttp" - - "go.uber.org/zap" -) - -const ( - peerMembersPath = "/members" - peerMemberPromotePrefix = "/members/promote/" -) - -// NewPeerHandler generates an http.Handler to handle etcd peer requests. -func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeerV2) http.Handler { - return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler(), s.HashKVHandler(), s.DowngradeEnabledHandler()) -} - -func newPeerHandler( - lg *zap.Logger, - s etcdserver.Server, - raftHandler http.Handler, - leaseHandler http.Handler, - hashKVHandler http.Handler, - downgradeEnabledHandler http.Handler, -) http.Handler { - if lg == nil { - lg = zap.NewNop() - } - peerMembersHandler := newPeerMembersHandler(lg, s.Cluster()) - peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s) - - mux := http.NewServeMux() - mux.HandleFunc("/", http.NotFound) - mux.Handle(rafthttp.RaftPrefix, raftHandler) - mux.Handle(rafthttp.RaftPrefix+"/", raftHandler) - mux.Handle(peerMembersPath, peerMembersHandler) - mux.Handle(peerMemberPromotePrefix, peerMemberPromoteHandler) - if leaseHandler != nil { - mux.Handle(leasehttp.LeasePrefix, leaseHandler) - mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler) - } - if downgradeEnabledHandler != nil { - mux.Handle(etcdserver.DowngradeEnabledPath, downgradeEnabledHandler) - } - if hashKVHandler != nil { - mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler) - } - mux.HandleFunc(versionPath, versionHandler(s, serveVersion)) - return mux -} - -func newPeerMembersHandler(lg *zap.Logger, cluster api.Cluster) http.Handler { - return 
&peerMembersHandler{ - lg: lg, - cluster: cluster, - } -} - -type peerMembersHandler struct { - lg *zap.Logger - cluster api.Cluster -} - -func newPeerMemberPromoteHandler(lg *zap.Logger, s etcdserver.Server) http.Handler { - return &peerMemberPromoteHandler{ - lg: lg, - cluster: s.Cluster(), - server: s, - } -} - -type peerMemberPromoteHandler struct { - lg *zap.Logger - cluster api.Cluster - server etcdserver.Server -} - -func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "GET") { - return - } - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - if r.URL.Path != peerMembersPath { - http.Error(w, "bad path", http.StatusBadRequest) - return - } - ms := h.cluster.Members() - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(ms); err != nil { - h.lg.Warn("failed to encode membership members", zap.Error(err)) - } -} - -func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "POST") { - return - } - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - if !strings.HasPrefix(r.URL.Path, peerMemberPromotePrefix) { - http.Error(w, "bad path", http.StatusBadRequest) - return - } - idStr := strings.TrimPrefix(r.URL.Path, peerMemberPromotePrefix) - id, err := strconv.ParseUint(idStr, 10, 64) - if err != nil { - http.Error(w, fmt.Sprintf("member %s not found in cluster", idStr), http.StatusNotFound) - return - } - - resp, err := h.server.PromoteMember(r.Context(), id) - if err != nil { - switch err { - case membership.ErrIDNotFound: - http.Error(w, err.Error(), http.StatusNotFound) - case membership.ErrMemberNotLearner: - http.Error(w, err.Error(), http.StatusPreconditionFailed) - case errors.ErrLearnerNotReady: - http.Error(w, err.Error(), http.StatusPreconditionFailed) - default: - writeError(h.lg, w, r, err) - } - h.lg.Warn( - "failed to promote a member", - zap.String("member-id", 
types.ID(id).String()), - zap.Error(err), - ) - return - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - if err := json.NewEncoder(w).Encode(resp); err != nil { - h.lg.Warn("failed to encode members response", zap.Error(err)) - } -} diff --git a/server/etcdserver/api/etcdhttp/peer_test.go b/server/etcdserver/api/etcdhttp/peer_test.go deleted file mode 100644 index 1a2380a003d..00000000000 --- a/server/etcdserver/api/etcdhttp/peer_test.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdhttp - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "path" - "sort" - "strings" - "testing" - - "go.uber.org/zap/zaptest" - - "github.com/coreos/go-semver/semver" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" -) - -type fakeCluster struct { - id uint64 - clientURLs []string - members map[uint64]*membership.Member -} - -func (c *fakeCluster) ID() types.ID { return types.ID(c.id) } -func (c *fakeCluster) ClientURLs() []string { return c.clientURLs } -func (c *fakeCluster) Members() []*membership.Member { - ms := make(membership.MembersByID, 0, len(c.members)) - for _, m := range c.members { - ms = append(ms, m) - } - sort.Sort(ms) - return ms -} -func (c *fakeCluster) Member(id types.ID) *membership.Member { return c.members[uint64(id)] } -func (c *fakeCluster) Version() *semver.Version { return nil } - -type fakeServer struct { - cluster api.Cluster - alarms []*pb.AlarmMember -} - -func (s *fakeServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - return nil, fmt.Errorf("AddMember not implemented in fakeServer") -} -func (s *fakeServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - return nil, fmt.Errorf("RemoveMember not implemented in fakeServer") -} -func (s *fakeServer) UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) { - return nil, fmt.Errorf("UpdateMember not implemented in fakeServer") -} -func (s *fakeServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - return nil, fmt.Errorf("PromoteMember not implemented in fakeServer") -} -func (s *fakeServer) ClusterVersion() *semver.Version { return nil } -func (s 
*fakeServer) StorageVersion() *semver.Version { return nil } -func (s *fakeServer) Cluster() api.Cluster { return s.cluster } -func (s *fakeServer) Alarms() []*pb.AlarmMember { return s.alarms } -func (s *fakeServer) LeaderChangedNotify() <-chan struct{} { return nil } - -var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("test data")) -}) - -// TestNewPeerHandlerOnRaftPrefix tests that NewPeerHandler returns a handler that -// handles raft-prefix requests well. -func TestNewPeerHandlerOnRaftPrefix(t *testing.T) { - ph := newPeerHandler(zaptest.NewLogger(t), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil) - srv := httptest.NewServer(ph) - defer srv.Close() - - tests := []string{ - rafthttp.RaftPrefix, - rafthttp.RaftPrefix + "/hello", - } - for i, tt := range tests { - resp, err := http.Get(srv.URL + tt) - if err != nil { - t.Fatalf("unexpected http.Get error: %v", err) - } - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unexpected io.ReadAll error: %v", err) - } - if w := "test data"; string(body) != w { - t.Errorf("#%d: body = %s, want %s", i, body, w) - } - } -} - -// TestServeMembersFails ensures peerMembersHandler only accepts GET request -func TestServeMembersFails(t *testing.T) { - tests := []struct { - method string - wcode int - }{ - { - "POST", - http.StatusMethodNotAllowed, - }, - { - "PUT", - http.StatusMethodNotAllowed, - }, - { - "DELETE", - http.StatusMethodNotAllowed, - }, - { - "BAD", - http.StatusMethodNotAllowed, - }, - } - for i, tt := range tests { - rw := httptest.NewRecorder() - h := newPeerMembersHandler(nil, &fakeCluster{}) - req, err := http.NewRequest(tt.method, "", nil) - if err != nil { - t.Fatalf("#%d: failed to create http request: %v", i, err) - } - h.ServeHTTP(rw, req) - if rw.Code != tt.wcode { - t.Errorf("#%d: code=%d, want %d", i, rw.Code, tt.wcode) - } - } -} - -func TestServeMembersGet(t *testing.T) { - memb1 := membership.Member{ID: 1, 
Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8080"}}} - memb2 := membership.Member{ID: 2, Attributes: membership.Attributes{ClientURLs: []string{"http://localhost:8081"}}} - cluster := &fakeCluster{ - id: 1, - members: map[uint64]*membership.Member{1: &memb1, 2: &memb2}, - } - h := newPeerMembersHandler(nil, cluster) - msb, err := json.Marshal([]membership.Member{memb1, memb2}) - if err != nil { - t.Fatal(err) - } - wms := string(msb) + "\n" - - tests := []struct { - path string - wcode int - wct string - wbody string - }{ - {peerMembersPath, http.StatusOK, "application/json", wms}, - {path.Join(peerMembersPath, "bad"), http.StatusBadRequest, "text/plain; charset=utf-8", "bad path\n"}, - } - - for i, tt := range tests { - req, err := http.NewRequest("GET", testutil.MustNewURL(t, tt.path).String(), nil) - if err != nil { - t.Fatal(err) - } - rw := httptest.NewRecorder() - h.ServeHTTP(rw, req) - - if rw.Code != tt.wcode { - t.Errorf("#%d: code=%d, want %d", i, rw.Code, tt.wcode) - } - if gct := rw.Header().Get("Content-Type"); gct != tt.wct { - t.Errorf("#%d: content-type = %s, want %s", i, gct, tt.wct) - } - if rw.Body.String() != tt.wbody { - t.Errorf("#%d: body = %s, want %s", i, rw.Body.String(), tt.wbody) - } - gcid := rw.Header().Get("X-Etcd-Cluster-ID") - wcid := cluster.ID().String() - if gcid != wcid { - t.Errorf("#%d: cid = %s, want %s", i, gcid, wcid) - } - } -} - -// TestServeMemberPromoteFails ensures peerMemberPromoteHandler only accepts POST request -func TestServeMemberPromoteFails(t *testing.T) { - tests := []struct { - method string - wcode int - }{ - { - "GET", - http.StatusMethodNotAllowed, - }, - { - "PUT", - http.StatusMethodNotAllowed, - }, - { - "DELETE", - http.StatusMethodNotAllowed, - }, - { - "BAD", - http.StatusMethodNotAllowed, - }, - } - for i, tt := range tests { - rw := httptest.NewRecorder() - h := newPeerMemberPromoteHandler(nil, &fakeServer{cluster: &fakeCluster{}}) - req, err := 
http.NewRequest(tt.method, "", nil) - if err != nil { - t.Fatalf("#%d: failed to create http request: %v", i, err) - } - h.ServeHTTP(rw, req) - if rw.Code != tt.wcode { - t.Errorf("#%d: code=%d, want %d", i, rw.Code, tt.wcode) - } - } -} - -// TestNewPeerHandlerOnMembersPromotePrefix verifies the request with members promote prefix is routed correctly -func TestNewPeerHandlerOnMembersPromotePrefix(t *testing.T) { - ph := newPeerHandler(zaptest.NewLogger(t), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil) - srv := httptest.NewServer(ph) - defer srv.Close() - - tests := []struct { - path string - wcode int - checkBody bool - wKeyWords string - }{ - { - // does not contain member id in path - peerMemberPromotePrefix, - http.StatusNotFound, - false, - "", - }, - { - // try to promote member id = 1 - peerMemberPromotePrefix + "1", - http.StatusInternalServerError, - true, - "PromoteMember not implemented in fakeServer", - }, - } - for i, tt := range tests { - req, err := http.NewRequest("POST", srv.URL+tt.path, nil) - if err != nil { - t.Fatalf("failed to create request: %v", err) - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("failed to get http response: %v", err) - } - body, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - t.Fatalf("unexpected io.ReadAll error: %v", err) - } - if resp.StatusCode != tt.wcode { - t.Fatalf("#%d: code = %d, want %d", i, resp.StatusCode, tt.wcode) - } - if tt.checkBody && strings.Contains(string(body), tt.wKeyWords) { - t.Errorf("#%d: body: %s, want body to contain keywords: %s", i, string(body), tt.wKeyWords) - } - } -} diff --git a/server/etcdserver/api/etcdhttp/utils.go b/server/etcdserver/api/etcdhttp/utils.go deleted file mode 100644 index 268400d7640..00000000000 --- a/server/etcdserver/api/etcdhttp/utils.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use 
this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdhttp - -import ( - "net/http" - - "go.uber.org/zap" - - httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/errors" -) - -func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { - if m == r.Method { - return true - } - w.Header().Set("Allow", m) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return false -} - -// writeError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError -func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - switch e := err.(type) { - case *v2error.Error: - e.WriteTo(w) - - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - if lg != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", e.Error()), - zap.Error(et), - ) - } - } - - default: - switch err { - case errors.ErrTimeoutDueToLeaderFail, errors.ErrTimeoutDueToConnectionLost, errors.ErrNotEnoughStartedMembers, - errors.ErrUnhealthy: - if lg != nil { - lg.Warn( - "v2 response error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", err.Error()), - ) - } - - default: - if lg != nil { - lg.Warn( - "unexpected v2 response error", - 
zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", err.Error()), - ) - } - } - - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") - if et := herr.WriteTo(w); et != nil { - if lg != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("internal-server-error", err.Error()), - zap.Error(et), - ) - } - } - } -} diff --git a/server/etcdserver/api/etcdhttp/version.go b/server/etcdserver/api/etcdhttp/version.go deleted file mode 100644 index 8090703a0ed..00000000000 --- a/server/etcdserver/api/etcdhttp/version.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdhttp - -import ( - "encoding/json" - "fmt" - "net/http" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/server/v3/etcdserver" -) - -const ( - versionPath = "/version" -) - -func HandleVersion(mux *http.ServeMux, server etcdserver.Server) { - mux.HandleFunc(versionPath, versionHandler(server, serveVersion)) -} - -func versionHandler(server etcdserver.Server, fn func(http.ResponseWriter, *http.Request, string, string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - clusterVersion := server.ClusterVersion() - storageVersion := server.StorageVersion() - clusterVersionStr, storageVersionStr := "not_decided", "unknown" - if clusterVersion != nil { - clusterVersionStr = clusterVersion.String() - } - if storageVersion != nil { - storageVersionStr = storageVersion.String() - } - fn(w, r, clusterVersionStr, storageVersionStr) - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV, storageV string) { - if !allowMethod(w, r, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - Storage: storageV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := json.Marshal(&vs) - if err != nil { - panic(fmt.Sprintf("cannot marshal versions to json (%v)", err)) - } - w.Write(b) -} diff --git a/server/etcdserver/api/etcdhttp/version_test.go b/server/etcdserver/api/etcdhttp/version_test.go deleted file mode 100644 index 25e0c4f3c26..00000000000 --- a/server/etcdserver/api/etcdhttp/version_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdhttp - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "go.etcd.io/etcd/api/v3/version" -) - -func TestServeVersion(t *testing.T) { - req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatalf("error creating request: %v", err) - } - rw := httptest.NewRecorder() - serveVersion(rw, req, "3.6.0", "3.5.2") - if rw.Code != http.StatusOK { - t.Errorf("code=%d, want %d", rw.Code, http.StatusOK) - } - vs := version.Versions{ - Server: version.Version, - Cluster: "3.6.0", - Storage: "3.5.2", - } - w, err := json.Marshal(&vs) - if err != nil { - t.Fatal(err) - } - if g := rw.Body.String(); g != string(w) { - t.Fatalf("body = %q, want %q", g, string(w)) - } - if ct := rw.HeaderMap.Get("Content-Type"); ct != "application/json" { - t.Errorf("contet-type header = %s, want %s", ct, "application/json") - } -} - -func TestServeVersionFails(t *testing.T) { - for _, m := range []string{ - "CONNECT", "TRACE", "PUT", "POST", "HEAD", - } { - t.Run(m, func(t *testing.T) { - req, err := http.NewRequest(m, "", nil) - if err != nil { - t.Fatalf("error creating request: %v", err) - } - rw := httptest.NewRecorder() - serveVersion(rw, req, "3.6.0", "3.5.2") - if rw.Code != http.StatusMethodNotAllowed { - t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed) - } - }) - } -} diff --git a/server/etcdserver/api/membership/cluster.go b/server/etcdserver/api/membership/cluster.go deleted file mode 100644 index 31fb088f8db..00000000000 --- a/server/etcdserver/api/membership/cluster.go +++ 
/dev/null @@ -1,857 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "bytes" - "context" - "crypto/sha1" - "encoding/binary" - "encoding/json" - "fmt" - "sort" - "strings" - "sync" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/netutil" - "go.etcd.io/etcd/pkg/v3/notify" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" - - "github.com/coreos/go-semver/semver" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// RaftCluster is a list of Members that belong to the same raft cluster -type RaftCluster struct { - lg *zap.Logger - - localID types.ID - cid types.ID - - v2store v2store.Store - be MembershipBackend - - sync.Mutex // guards the fields below - version *semver.Version - members map[types.ID]*Member - // removed contains the ids of removed members in the cluster. - // removed id cannot be reused. - removed map[types.ID]bool - - downgradeInfo *serverversion.DowngradeInfo - maxLearners int - versionChanged *notify.Notifier -} - -// ConfigChangeContext represents a context for confChange. -type ConfigChangeContext struct { - Member - // IsPromote indicates if the config change is for promoting a learner member. 
- // This flag is needed because both adding a new member and promoting a learner member - // uses the same config change type 'ConfChangeAddNode'. - IsPromote bool `json:"isPromote"` -} - -type ShouldApplyV3 bool - -const ( - ApplyBoth = ShouldApplyV3(true) - ApplyV2storeOnly = ShouldApplyV3(false) -) - -// NewClusterFromURLsMap creates a new raft cluster using provided urls map. Currently, it does not support creating -// cluster with raft learner member. -func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap, opts ...ClusterOption) (*RaftCluster, error) { - c := NewCluster(lg, opts...) - for name, urls := range urlsmap { - m := NewMember(name, urls, token, nil) - if _, ok := c.members[m.ID]; ok { - return nil, fmt.Errorf("member exists with identical ID %v", m) - } - if uint64(m.ID) == raft.None { - return nil, fmt.Errorf("cannot use %x as member id", raft.None) - } - c.members[m.ID] = m - } - c.genID() - return c, nil -} - -func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member, opts ...ClusterOption) *RaftCluster { - c := NewCluster(lg, opts...) - c.cid = id - for _, m := range membs { - c.members[m.ID] = m - } - return c -} - -func NewCluster(lg *zap.Logger, opts ...ClusterOption) *RaftCluster { - if lg == nil { - lg = zap.NewNop() - } - clOpts := newClusterOpts(opts...) 
- - return &RaftCluster{ - lg: lg, - members: make(map[types.ID]*Member), - removed: make(map[types.ID]bool), - downgradeInfo: &serverversion.DowngradeInfo{Enabled: false}, - maxLearners: clOpts.maxLearners, - } -} - -func (c *RaftCluster) ID() types.ID { return c.cid } - -func (c *RaftCluster) Members() []*Member { - c.Lock() - defer c.Unlock() - var ms MembersByID - for _, m := range c.members { - ms = append(ms, m.Clone()) - } - sort.Sort(ms) - return ms -} - -func (c *RaftCluster) Member(id types.ID) *Member { - c.Lock() - defer c.Unlock() - return c.members[id].Clone() -} - -func (c *RaftCluster) VotingMembers() []*Member { - c.Lock() - defer c.Unlock() - var ms MembersByID - for _, m := range c.members { - if !m.IsLearner { - ms = append(ms, m.Clone()) - } - } - sort.Sort(ms) - return ms -} - -// MemberByName returns a Member with the given name if exists. -// If more than one member has the given name, it will panic. -func (c *RaftCluster) MemberByName(name string) *Member { - c.Lock() - defer c.Unlock() - var memb *Member - for _, m := range c.members { - if m.Name == name { - if memb != nil { - c.lg.Panic("two member with same name found", zap.String("name", name)) - } - memb = m - } - } - return memb.Clone() -} - -func (c *RaftCluster) MemberIDs() []types.ID { - c.Lock() - defer c.Unlock() - var ids []types.ID - for _, m := range c.members { - ids = append(ids, m.ID) - } - sort.Sort(types.IDSlice(ids)) - return ids -} - -func (c *RaftCluster) IsIDRemoved(id types.ID) bool { - c.Lock() - defer c.Unlock() - return c.removed[id] -} - -// PeerURLs returns a list of all peer addresses. -// The returned list is sorted in ascending lexicographical order. -func (c *RaftCluster) PeerURLs() []string { - c.Lock() - defer c.Unlock() - urls := make([]string, 0) - for _, p := range c.members { - urls = append(urls, p.PeerURLs...) - } - sort.Strings(urls) - return urls -} - -// ClientURLs returns a list of all client addresses. 
-// The returned list is sorted in ascending lexicographical order. -func (c *RaftCluster) ClientURLs() []string { - c.Lock() - defer c.Unlock() - urls := make([]string, 0) - for _, p := range c.members { - urls = append(urls, p.ClientURLs...) - } - sort.Strings(urls) - return urls -} - -func (c *RaftCluster) String() string { - c.Lock() - defer c.Unlock() - b := &bytes.Buffer{} - fmt.Fprintf(b, "{ClusterID:%s ", c.cid) - var ms []string - for _, m := range c.members { - ms = append(ms, fmt.Sprintf("%+v", m)) - } - fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) - var ids []string - for id := range c.removed { - ids = append(ids, id.String()) - } - fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) - return b.String() -} - -func (c *RaftCluster) genID() { - mIDs := c.MemberIDs() - b := make([]byte, 8*len(mIDs)) - for i, id := range mIDs { - binary.BigEndian.PutUint64(b[8*i:], uint64(id)) - } - hash := sha1.Sum(b) - c.cid = types.ID(binary.BigEndian.Uint64(hash[:8])) -} - -func (c *RaftCluster) SetID(localID, cid types.ID) { - c.localID = localID - c.cid = cid - c.buildMembershipMetric() -} - -func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st } - -func (c *RaftCluster) SetBackend(be MembershipBackend) { - c.be = be - c.be.MustCreateBackendBuckets() -} - -func (c *RaftCluster) SetVersionChangedNotifier(n *notify.Notifier) { - c.versionChanged = n -} - -func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { - c.Lock() - defer c.Unlock() - - if c.be != nil { - c.version = c.be.ClusterVersionFromBackend() - c.members, c.removed = c.be.MustReadMembersFromBackend() - } else { - c.version = clusterVersionFromStore(c.lg, c.v2store) - c.members, c.removed = membersFromStore(c.lg, c.v2store) - } - c.buildMembershipMetric() - - if c.be != nil { - c.downgradeInfo = c.be.DowngradeInfoFromBackend() - } - sv := semver.Must(semver.NewVersion(version.Version)) - if c.downgradeInfo != nil && c.downgradeInfo.Enabled { - 
c.lg.Info( - "cluster is downgrading to target version", - zap.String("target-cluster-version", c.downgradeInfo.TargetVersion), - zap.String("current-server-version", sv.String()), - ) - } - serverversion.MustDetectDowngrade(c.lg, sv, c.version) - onSet(c.lg, c.version) - - for _, m := range c.members { - c.lg.Info( - "recovered/added member from store", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("recovered-remote-peer-id", m.ID.String()), - zap.Strings("recovered-remote-peer-urls", m.PeerURLs), - zap.Bool("recovered-remote-peer-is-learner", m.IsLearner), - ) - } - if c.version != nil { - c.lg.Info( - "set cluster version from store", - zap.String("cluster-version", version.Cluster(c.version.String())), - ) - } -} - -// ValidateConfigurationChange takes a proposed ConfChange and -// ensures that it is still valid. -func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { - // TODO: this must be switched to backend as well. 
- membersMap, removedMap := membersFromStore(c.lg, c.v2store) - id := types.ID(cc.NodeID) - if removedMap[id] { - return ErrIDRemoved - } - switch cc.Type { - case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode: - confChangeContext := new(ConfigChangeContext) - if err := json.Unmarshal(cc.Context, confChangeContext); err != nil { - c.lg.Panic("failed to unmarshal confChangeContext", zap.Error(err)) - } - - if confChangeContext.IsPromote { // promoting a learner member to voting member - if membersMap[id] == nil { - return ErrIDNotFound - } - if !membersMap[id].IsLearner { - return ErrMemberNotLearner - } - } else { // adding a new member - if membersMap[id] != nil { - return ErrIDExists - } - - var members []*Member - urls := make(map[string]bool) - for _, m := range membersMap { - members = append(members, m) - for _, u := range m.PeerURLs { - urls[u] = true - } - } - for _, u := range confChangeContext.Member.PeerURLs { - if urls[u] { - return ErrPeerURLexists - } - } - - if confChangeContext.Member.RaftAttributes.IsLearner && cc.Type == raftpb.ConfChangeAddLearnerNode { // the new member is a learner - scaleUpLearners := true - if err := ValidateMaxLearnerConfig(c.maxLearners, members, scaleUpLearners); err != nil { - return err - } - } - } - case raftpb.ConfChangeRemoveNode: - if membersMap[id] == nil { - return ErrIDNotFound - } - - case raftpb.ConfChangeUpdateNode: - if membersMap[id] == nil { - return ErrIDNotFound - } - urls := make(map[string]bool) - for _, m := range membersMap { - if m.ID == id { - continue - } - for _, u := range m.PeerURLs { - urls[u] = true - } - } - m := new(Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - c.lg.Panic("failed to unmarshal member", zap.Error(err)) - } - for _, u := range m.PeerURLs { - if urls[u] { - return ErrPeerURLexists - } - } - - default: - c.lg.Panic("unknown ConfChange type", zap.String("type", cc.Type.String())) - } - return nil -} - -// AddMember adds a new Member into the cluster, 
and saves the given member's -// raftAttributes into the store. The given member should have empty attributes. -// A Member with a matching id must not exist. -func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) { - c.Lock() - defer c.Unlock() - if c.v2store != nil { - mustSaveMemberToStore(c.lg, c.v2store, m) - } - if c.be != nil && shouldApplyV3 { - c.be.MustSaveMemberToBackend(m) - } - - c.members[m.ID] = m - c.updateMembershipMetric(m.ID, true) - - c.lg.Info( - "added member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("added-peer-id", m.ID.String()), - zap.Strings("added-peer-peer-urls", m.PeerURLs), - zap.Bool("added-peer-is-learner", m.IsLearner), - ) -} - -// RemoveMember removes a member from the store. -// The given id MUST exist, or the function panics. -func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) { - c.Lock() - defer c.Unlock() - if c.v2store != nil { - mustDeleteMemberFromStore(c.lg, c.v2store, id) - } - if c.be != nil && shouldApplyV3 { - c.be.MustDeleteMemberFromBackend(id) - } - - m, ok := c.members[id] - delete(c.members, id) - c.removed[id] = true - c.updateMembershipMetric(id, false) - - if ok { - c.lg.Info( - "removed member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("removed-remote-peer-id", id.String()), - zap.Strings("removed-remote-peer-urls", m.PeerURLs), - zap.Bool("removed-remote-peer-is-learner", m.IsLearner), - ) - } else { - c.lg.Warn( - "skipped removing already removed member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("removed-remote-peer-id", id.String()), - ) - } -} - -func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApplyV3 ShouldApplyV3) { - c.Lock() - defer c.Unlock() - - if m, ok := c.members[id]; ok { - m.Attributes = attr - if c.v2store != nil { - 
mustUpdateMemberAttrInStore(c.lg, c.v2store, m) - } - if c.be != nil && shouldApplyV3 { - c.be.MustSaveMemberToBackend(m) - } - return - } - - _, ok := c.removed[id] - if !ok { - c.lg.Panic( - "failed to update; member unknown", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("unknown-remote-peer-id", id.String()), - ) - } - - c.lg.Warn( - "skipped attributes update of removed member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("updated-peer-id", id.String()), - ) -} - -// PromoteMember marks the member's IsLearner RaftAttributes to false. -func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) { - c.Lock() - defer c.Unlock() - - c.members[id].RaftAttributes.IsLearner = false - c.updateMembershipMetric(id, true) - if c.v2store != nil { - mustUpdateMemberInStore(c.lg, c.v2store, c.members[id]) - } - if c.be != nil && shouldApplyV3 { - c.be.MustSaveMemberToBackend(c.members[id]) - } - - c.lg.Info( - "promote member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - ) -} - -func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, shouldApplyV3 ShouldApplyV3) { - c.Lock() - defer c.Unlock() - - c.members[id].RaftAttributes = raftAttr - if c.v2store != nil { - mustUpdateMemberInStore(c.lg, c.v2store, c.members[id]) - } - if c.be != nil && shouldApplyV3 { - c.be.MustSaveMemberToBackend(c.members[id]) - } - - c.lg.Info( - "updated member", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("updated-remote-peer-id", id.String()), - zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs), - zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner), - ) -} - -func (c *RaftCluster) Version() *semver.Version { - c.Lock() - defer c.Unlock() - if c.version == nil { - return nil - } - return 
semver.Must(semver.NewVersion(c.version.String())) -} - -func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version), shouldApplyV3 ShouldApplyV3) { - c.Lock() - defer c.Unlock() - if c.version != nil { - c.lg.Info( - "updated cluster version", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("from", version.Cluster(c.version.String())), - zap.String("to", version.Cluster(ver.String())), - ) - } else { - c.lg.Info( - "set initial cluster version", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - zap.String("cluster-version", version.Cluster(ver.String())), - ) - } - oldVer := c.version - c.version = ver - sv := semver.Must(semver.NewVersion(version.Version)) - serverversion.MustDetectDowngrade(c.lg, sv, c.version) - if c.v2store != nil { - mustSaveClusterVersionToStore(c.lg, c.v2store, ver) - } - if c.be != nil && shouldApplyV3 { - c.be.MustSaveClusterVersionToBackend(ver) - } - if oldVer != nil { - ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0) - } - ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1) - if c.versionChanged != nil { - c.versionChanged.Notify() - } - onSet(c.lg, ver) -} - -func (c *RaftCluster) IsReadyToAddVotingMember() bool { - nmembers := 1 - nstarted := 0 - - for _, member := range c.VotingMembers() { - if member.IsStarted() { - nstarted++ - } - nmembers++ - } - - if nstarted == 1 && nmembers == 2 { - // a case of adding a new node to 1-member cluster for restoring cluster data - // https://github.com/etcd-io/website/blob/main/content/docs/v2/admin_guide.md#restoring-the-cluster - c.lg.Debug("number of started member is 1; can accept add member request") - return true - } - - nquorum := nmembers/2 + 1 - if nstarted < nquorum { - c.lg.Warn( - "rejecting member add; started member will be less 
than quorum", - zap.Int("number-of-started-member", nstarted), - zap.Int("quorum", nquorum), - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - ) - return false - } - - return true -} - -func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool { - nmembers := 0 - nstarted := 0 - - for _, member := range c.VotingMembers() { - if uint64(member.ID) == id { - continue - } - - if member.IsStarted() { - nstarted++ - } - nmembers++ - } - - nquorum := nmembers/2 + 1 - if nstarted < nquorum { - c.lg.Warn( - "rejecting member remove; started member will be less than quorum", - zap.Int("number-of-started-member", nstarted), - zap.Int("quorum", nquorum), - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - ) - return false - } - - return true -} - -func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool { - nmembers := 1 // We count the learner to be promoted for the future quorum - nstarted := 1 // and we also count it as started. 
- - for _, member := range c.VotingMembers() { - if member.IsStarted() { - nstarted++ - } - nmembers++ - } - - nquorum := nmembers/2 + 1 - if nstarted < nquorum { - c.lg.Warn( - "rejecting member promote; started member will be less than quorum", - zap.Int("number-of-started-member", nstarted), - zap.Int("quorum", nquorum), - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - ) - return false - } - - return true -} - -func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) { - members := make(map[types.ID]*Member) - removed := make(map[types.ID]bool) - e, err := st.Get(StoreMembersPrefix, true, true) - if err != nil { - if isKeyNotFound(err) { - return members, removed - } - lg.Panic("failed to get members from store", zap.String("path", StoreMembersPrefix), zap.Error(err)) - } - for _, n := range e.Node.Nodes { - var m *Member - m, err = nodeToMember(lg, n) - if err != nil { - lg.Panic("failed to nodeToMember", zap.Error(err)) - } - members[m.ID] = m - } - - e, err = st.Get(storeRemovedMembersPrefix, true, true) - if err != nil { - if isKeyNotFound(err) { - return members, removed - } - lg.Panic( - "failed to get removed members from store", - zap.String("path", storeRemovedMembersPrefix), - zap.Error(err), - ) - } - for _, n := range e.Node.Nodes { - removed[MustParseMemberIDFromKey(lg, n.Key)] = true - } - return members, removed -} - -// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs -// with the existing cluster. If the validation succeeds, it assigns the IDs -// from the existing cluster to the local cluster. -// If the validation fails, an error will be returned. 
-func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *RaftCluster) error { - ems := existing.Members() - lms := local.Members() - if len(ems) != len(lms) { - return fmt.Errorf("member count is unequal") - } - - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - for i := range ems { - var err error - ok := false - for j := range lms { - if ok, err = netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[j].PeerURLs); ok { - lms[j].ID = ems[i].ID - break - } - } - if !ok { - return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%v)", ems[i].ID, ems[i].PeerURLs, err) - } - } - local.members = make(map[types.ID]*Member) - for _, m := range lms { - local.members[m.ID] = m - } - local.buildMembershipMetric() - return nil -} - -// IsLocalMemberLearner returns if the local member is raft learner -func (c *RaftCluster) IsLocalMemberLearner() bool { - c.Lock() - defer c.Unlock() - localMember, ok := c.members[c.localID] - if !ok { - c.lg.Panic( - "failed to find local ID in cluster members", - zap.String("cluster-id", c.cid.String()), - zap.String("local-member-id", c.localID.String()), - ) - } - return localMember.IsLearner -} - -// DowngradeInfo returns the downgrade status of the cluster -func (c *RaftCluster) DowngradeInfo() *serverversion.DowngradeInfo { - c.Lock() - defer c.Unlock() - if c.downgradeInfo == nil { - return &serverversion.DowngradeInfo{Enabled: false} - } - d := &serverversion.DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} - return d -} - -func (c *RaftCluster) SetDowngradeInfo(d *serverversion.DowngradeInfo, shouldApplyV3 ShouldApplyV3) { - c.Lock() - defer c.Unlock() - - if c.be != nil && shouldApplyV3 { - c.be.MustSaveDowngradeToBackend(d) - } - - c.downgradeInfo = d -} - -// IsMemberExist returns if the member with the given id exists in cluster. 
-func (c *RaftCluster) IsMemberExist(id types.ID) bool { - c.Lock() - defer c.Unlock() - _, ok := c.members[id] - return ok -} - -// VotingMemberIDs returns the ID of voting members in cluster. -func (c *RaftCluster) VotingMemberIDs() []types.ID { - c.Lock() - defer c.Unlock() - var ids []types.ID - for _, m := range c.members { - if !m.IsLearner { - ids = append(ids, m.ID) - } - } - sort.Sort(types.IDSlice(ids)) - return ids -} - -// PushMembershipToStorage is overriding storage information about cluster's -// members, such that they fully reflect internal RaftCluster's storage. -func (c *RaftCluster) PushMembershipToStorage() { - if c.be != nil { - c.be.TrimMembershipFromBackend() - for _, m := range c.members { - c.be.MustSaveMemberToBackend(m) - } - } - if c.v2store != nil { - TrimMembershipFromV2Store(c.lg, c.v2store) - for _, m := range c.members { - mustSaveMemberToStore(c.lg, c.v2store, m) - } - } -} - -// buildMembershipMetric sets the knownPeers metric based on the current -// members of the cluster. -func (c *RaftCluster) buildMembershipMetric() { - if c.localID == 0 { - // We don't know our own id yet. - return - } - for p := range c.members { - knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(1) - } - for p := range c.removed { - knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(0) - } -} - -// updateMembershipMetric updates the knownPeers metric to indicate that -// the given peer is now (un)known. -func (c *RaftCluster) updateMembershipMetric(peer types.ID, known bool) { - if c.localID == 0 { - // We don't know our own id yet. - return - } - v := float64(0) - if known { - v = 1 - } - knownPeers.WithLabelValues(c.localID.String(), peer.String()).Set(v) -} - -// ValidateMaxLearnerConfig verifies the existing learner members in the cluster membership and an optional N+1 learner -// scale up are not more than maxLearners. 
-func ValidateMaxLearnerConfig(maxLearners int, members []*Member, scaleUpLearners bool) error { - numLearners := 0 - for _, m := range members { - if m.IsLearner { - numLearners++ - } - } - // Validate config can accommodate scale up. - if scaleUpLearners { - numLearners++ - } - - if numLearners > maxLearners { - return ErrTooManyLearners - } - - return nil -} diff --git a/server/etcdserver/api/membership/cluster_opts.go b/server/etcdserver/api/membership/cluster_opts.go deleted file mode 100644 index 204fbf04d2c..00000000000 --- a/server/etcdserver/api/membership/cluster_opts.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -const DefaultMaxLearners = 1 - -type ClusterOptions struct { - maxLearners int -} - -// ClusterOption are options which can be applied to the raft cluster. -type ClusterOption func(*ClusterOptions) - -func newClusterOpts(opts ...ClusterOption) *ClusterOptions { - clOpts := &ClusterOptions{} - clOpts.applyOpts(opts) - return clOpts -} - -func (co *ClusterOptions) applyOpts(opts []ClusterOption) { - for _, opt := range opts { - opt(co) - } -} - -// WithMaxLearners sets the maximum number of learners that can exist in the cluster membership. 
-func WithMaxLearners(max int) ClusterOption { - return func(co *ClusterOptions) { - co.maxLearners = max - } -} diff --git a/server/etcdserver/api/membership/cluster_test.go b/server/etcdserver/api/membership/cluster_test.go deleted file mode 100644 index ce98472df7b..00000000000 --- a/server/etcdserver/api/membership/cluster_test.go +++ /dev/null @@ -1,977 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "encoding/json" - "fmt" - "path" - "reflect" - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/mock/mockstore" - "go.etcd.io/raft/v3/raftpb" -) - -func TestClusterMember(t *testing.T) { - membs := []*Member{ - newTestMember(1, nil, "node1", nil), - newTestMember(2, nil, "node2", nil), - } - tests := []struct { - id types.ID - match bool - }{ - {1, true}, - {2, true}, - {3, false}, - } - for i, tt := range tests { - c := newTestCluster(t, membs) - m := c.Member(tt.id) - if g := m != nil; g != tt.match { - t.Errorf("#%d: find member = %v, want %v", i, g, tt.match) - } - if m != nil && m.ID != tt.id { - t.Errorf("#%d: id = %x, want %x", i, m.ID, tt.id) - } - } -} - -func TestClusterMemberByName(t *testing.T) { - membs := []*Member{ - newTestMember(1, nil, "node1", nil), - newTestMember(2, nil, "node2", nil), - } - tests := 
[]struct { - name string - match bool - }{ - {"node1", true}, - {"node2", true}, - {"node3", false}, - } - for i, tt := range tests { - c := newTestCluster(t, membs) - m := c.MemberByName(tt.name) - if g := m != nil; g != tt.match { - t.Errorf("#%d: find member = %v, want %v", i, g, tt.match) - } - if m != nil && m.Name != tt.name { - t.Errorf("#%d: name = %v, want %v", i, m.Name, tt.name) - } - } -} - -func TestClusterMemberIDs(t *testing.T) { - c := newTestCluster(t, []*Member{ - newTestMember(1, nil, "", nil), - newTestMember(4, nil, "", nil), - newTestMember(100, nil, "", nil), - }) - w := []types.ID{1, 4, 100} - g := c.MemberIDs() - if !reflect.DeepEqual(w, g) { - t.Errorf("IDs = %+v, want %+v", g, w) - } -} - -func TestClusterPeerURLs(t *testing.T) { - tests := []struct { - mems []*Member - wurls []string - }{ - // single peer with a single address - { - mems: []*Member{ - newTestMember(1, []string{"http://192.0.2.1"}, "", nil), - }, - wurls: []string{"http://192.0.2.1"}, - }, - - // single peer with a single address with a port - { - mems: []*Member{ - newTestMember(1, []string{"http://192.0.2.1:8001"}, "", nil), - }, - wurls: []string{"http://192.0.2.1:8001"}, - }, - - // several members explicitly unsorted - { - mems: []*Member{ - newTestMember(2, []string{"http://192.0.2.3", "http://192.0.2.4"}, "", nil), - newTestMember(3, []string{"http://192.0.2.5", "http://192.0.2.6"}, "", nil), - newTestMember(1, []string{"http://192.0.2.1", "http://192.0.2.2"}, "", nil), - }, - wurls: []string{"http://192.0.2.1", "http://192.0.2.2", "http://192.0.2.3", "http://192.0.2.4", "http://192.0.2.5", "http://192.0.2.6"}, - }, - - // no members - { - mems: []*Member{}, - wurls: []string{}, - }, - - // peer with no peer urls - { - mems: []*Member{ - newTestMember(3, []string{}, "", nil), - }, - wurls: []string{}, - }, - } - - for i, tt := range tests { - c := newTestCluster(t, tt.mems) - urls := c.PeerURLs() - if !reflect.DeepEqual(urls, tt.wurls) { - t.Errorf("#%d: PeerURLs = 
%v, want %v", i, urls, tt.wurls) - } - } -} - -func TestClusterClientURLs(t *testing.T) { - tests := []struct { - mems []*Member - wurls []string - }{ - // single peer with a single address - { - mems: []*Member{ - newTestMember(1, nil, "", []string{"http://192.0.2.1"}), - }, - wurls: []string{"http://192.0.2.1"}, - }, - - // single peer with a single address with a port - { - mems: []*Member{ - newTestMember(1, nil, "", []string{"http://192.0.2.1:8001"}), - }, - wurls: []string{"http://192.0.2.1:8001"}, - }, - - // several members explicitly unsorted - { - mems: []*Member{ - newTestMember(2, nil, "", []string{"http://192.0.2.3", "http://192.0.2.4"}), - newTestMember(3, nil, "", []string{"http://192.0.2.5", "http://192.0.2.6"}), - newTestMember(1, nil, "", []string{"http://192.0.2.1", "http://192.0.2.2"}), - }, - wurls: []string{"http://192.0.2.1", "http://192.0.2.2", "http://192.0.2.3", "http://192.0.2.4", "http://192.0.2.5", "http://192.0.2.6"}, - }, - - // no members - { - mems: []*Member{}, - wurls: []string{}, - }, - - // peer with no client urls - { - mems: []*Member{ - newTestMember(3, nil, "", []string{}), - }, - wurls: []string{}, - }, - } - - for i, tt := range tests { - c := newTestCluster(t, tt.mems) - urls := c.ClientURLs() - if !reflect.DeepEqual(urls, tt.wurls) { - t.Errorf("#%d: ClientURLs = %v, want %v", i, urls, tt.wurls) - } - } -} - -func TestClusterValidateAndAssignIDsBad(t *testing.T) { - tests := []struct { - clmembs []*Member - membs []*Member - }{ - { - // unmatched length - []*Member{ - newTestMember(1, []string{"http://127.0.0.1:2379"}, "", nil), - }, - []*Member{}, - }, - { - // unmatched peer urls - []*Member{ - newTestMember(1, []string{"http://127.0.0.1:2379"}, "", nil), - }, - []*Member{ - newTestMember(1, []string{"http://127.0.0.1:4001"}, "", nil), - }, - }, - { - // unmatched peer urls - []*Member{ - newTestMember(1, []string{"http://127.0.0.1:2379"}, "", nil), - newTestMember(2, []string{"http://127.0.0.2:2379"}, "", nil), - }, - 
[]*Member{ - newTestMember(1, []string{"http://127.0.0.1:2379"}, "", nil), - newTestMember(2, []string{"http://127.0.0.2:4001"}, "", nil), - }, - }, - } - for i, tt := range tests { - ecl := newTestCluster(t, tt.clmembs) - lcl := newTestCluster(t, tt.membs) - if err := ValidateClusterAndAssignIDs(zaptest.NewLogger(t), lcl, ecl); err == nil { - t.Errorf("#%d: unexpected update success", i) - } - } -} - -func TestClusterValidateAndAssignIDs(t *testing.T) { - tests := []struct { - clmembs []*Member - membs []*Member - wids []types.ID - }{ - { - []*Member{ - newTestMember(1, []string{"http://127.0.0.1:2379"}, "", nil), - newTestMember(2, []string{"http://127.0.0.2:2379"}, "", nil), - }, - []*Member{ - newTestMember(3, []string{"http://127.0.0.1:2379"}, "", nil), - newTestMember(4, []string{"http://127.0.0.2:2379"}, "", nil), - }, - []types.ID{3, 4}, - }, - } - for i, tt := range tests { - lcl := newTestCluster(t, tt.clmembs) - ecl := newTestCluster(t, tt.membs) - if err := ValidateClusterAndAssignIDs(zaptest.NewLogger(t), lcl, ecl); err != nil { - t.Errorf("#%d: unexpect update error: %v", i, err) - } - if !reflect.DeepEqual(lcl.MemberIDs(), tt.wids) { - t.Errorf("#%d: ids = %v, want %v", i, lcl.MemberIDs(), tt.wids) - } - } -} - -func TestClusterValidateConfigurationChange(t *testing.T) { - cl := NewCluster(zaptest.NewLogger(t), WithMaxLearners(1)) - cl.SetStore(v2store.New()) - for i := 1; i <= 4; i++ { - var isLearner bool - if i == 1 { - isLearner = true - } - attr := RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", i)}, IsLearner: isLearner} - cl.AddMember(&Member{ID: types.ID(i), RaftAttributes: attr}, true) - } - cl.RemoveMember(4, true) - - attr := RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 1)}} - ctx, err := json.Marshal(&Member{ID: types.ID(5), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 1)}} - ctx1, err := 
json.Marshal(&Member{ID: types.ID(1), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 5)}} - ctx5, err := json.Marshal(&Member{ID: types.ID(5), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 3)}} - ctx2to3, err := json.Marshal(&Member{ID: types.ID(2), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 5)}} - ctx2to5, err := json.Marshal(&Member{ID: types.ID(2), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - ctx3, err := json.Marshal(&ConfigChangeContext{Member: Member{ID: types.ID(3), RaftAttributes: attr}, IsPromote: true}) - if err != nil { - t.Fatal(err) - } - - ctx6, err := json.Marshal(&ConfigChangeContext{Member: Member{ID: types.ID(6), RaftAttributes: attr}, IsPromote: true}) - if err != nil { - t.Fatal(err) - } - - attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 7)}, IsLearner: true} - ctx7, err := json.Marshal(&ConfigChangeContext{Member: Member{ID: types.ID(7), RaftAttributes: attr}}) - if err != nil { - t.Fatal(err) - } - - attr = RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 1)}, IsLearner: true} - ctx8, err := json.Marshal(&ConfigChangeContext{Member: Member{ID: types.ID(1), RaftAttributes: attr}, IsPromote: true}) - if err != nil { - t.Fatal(err) - } - tests := []struct { - cc raftpb.ConfChange - werr error - }{ - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: 3, - }, - nil, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 4, - }, - ErrIDRemoved, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: 4, - }, - ErrIDRemoved, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 1, - Context: ctx1, - }, - ErrIDExists, - }, - { - 
raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 5, - Context: ctx, - }, - ErrPeerURLexists, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: 5, - }, - ErrIDNotFound, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 5, - Context: ctx5, - }, - nil, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: 5, - Context: ctx, - }, - ErrIDNotFound, - }, - // try to change the peer url of 2 to the peer url of 3 - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: 2, - Context: ctx2to3, - }, - ErrPeerURLexists, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: 2, - Context: ctx2to5, - }, - nil, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 3, - Context: ctx3, - }, - ErrMemberNotLearner, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 6, - Context: ctx6, - }, - ErrIDNotFound, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddLearnerNode, - NodeID: 7, - Context: ctx7, - }, - ErrTooManyLearners, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 1, - Context: ctx8, - }, - nil, - }, - } - for i, tt := range tests { - err := cl.ValidateConfigurationChange(tt.cc) - if err != tt.werr { - t.Errorf("#%d: validateConfigurationChange error = %v, want %v", i, err, tt.werr) - } - } -} - -func TestClusterGenID(t *testing.T) { - cs := newTestCluster(t, []*Member{ - newTestMember(1, nil, "", nil), - newTestMember(2, nil, "", nil), - }) - - cs.genID() - if cs.ID() == 0 { - t.Fatalf("cluster.ID = %v, want not 0", cs.ID()) - } - previd := cs.ID() - - cs.SetStore(mockstore.NewNop()) - cs.AddMember(newTestMember(3, nil, "", nil), true) - cs.genID() - if cs.ID() == previd { - t.Fatalf("cluster.ID = %v, want not %v", cs.ID(), previd) - } -} - -func TestNodeToMemberBad(t *testing.T) { - tests := []*v2store.NodeExtern{ - {Key: "/1234", Nodes: []*v2store.NodeExtern{ - {Key: 
"/1234/strange"}, - }}, - {Key: "/1234", Nodes: []*v2store.NodeExtern{ - {Key: "/1234/raftAttributes", Value: stringp("garbage")}, - }}, - {Key: "/1234", Nodes: []*v2store.NodeExtern{ - {Key: "/1234/attributes", Value: stringp(`{"name":"node1","clientURLs":null}`)}, - }}, - {Key: "/1234", Nodes: []*v2store.NodeExtern{ - {Key: "/1234/raftAttributes", Value: stringp(`{"peerURLs":null}`)}, - {Key: "/1234/strange"}, - }}, - {Key: "/1234", Nodes: []*v2store.NodeExtern{ - {Key: "/1234/raftAttributes", Value: stringp(`{"peerURLs":null}`)}, - {Key: "/1234/attributes", Value: stringp("garbage")}, - }}, - {Key: "/1234", Nodes: []*v2store.NodeExtern{ - {Key: "/1234/raftAttributes", Value: stringp(`{"peerURLs":null}`)}, - {Key: "/1234/attributes", Value: stringp(`{"name":"node1","clientURLs":null}`)}, - {Key: "/1234/strange"}, - }}, - } - for i, tt := range tests { - if _, err := nodeToMember(zaptest.NewLogger(t), tt); err == nil { - t.Errorf("#%d: unexpected nil error", i) - } - } -} - -func TestClusterAddMember(t *testing.T) { - st := mockstore.NewRecorder() - c := newTestCluster(t, nil) - c.SetStore(st) - c.AddMember(newTestMember(1, nil, "node1", nil), true) - - wactions := []testutil.Action{ - { - Name: "Create", - Params: []interface{}{ - path.Join(StoreMembersPrefix, "1", "raftAttributes"), - false, - `{"peerURLs":null}`, - false, - v2store.TTLOptionSet{ExpireTime: v2store.Permanent}, - }, - }, - } - if g := st.Action(); !reflect.DeepEqual(g, wactions) { - t.Errorf("actions = %v, want %v", g, wactions) - } -} - -func TestClusterAddMemberAsLearner(t *testing.T) { - st := mockstore.NewRecorder() - c := newTestCluster(t, nil) - c.SetStore(st) - c.AddMember(newTestMemberAsLearner(1, nil, "node1", nil), true) - - wactions := []testutil.Action{ - { - Name: "Create", - Params: []interface{}{ - path.Join(StoreMembersPrefix, "1", "raftAttributes"), - false, - `{"peerURLs":null,"isLearner":true}`, - false, - v2store.TTLOptionSet{ExpireTime: v2store.Permanent}, - }, - }, - } - if 
g := st.Action(); !reflect.DeepEqual(g, wactions) { - t.Errorf("actions = %v, want %v", g, wactions) - } -} - -func TestClusterMembers(t *testing.T) { - cls := newTestCluster(t, []*Member{ - {ID: 1}, - {ID: 20}, - {ID: 100}, - {ID: 5}, - {ID: 50}, - }) - w := []*Member{ - {ID: 1}, - {ID: 5}, - {ID: 20}, - {ID: 50}, - {ID: 100}, - } - if g := cls.Members(); !reflect.DeepEqual(g, w) { - t.Fatalf("Members()=%#v, want %#v", g, w) - } -} - -func TestClusterRemoveMember(t *testing.T) { - st := mockstore.NewRecorder() - c := newTestCluster(t, nil) - c.SetStore(st) - c.RemoveMember(1, true) - - wactions := []testutil.Action{ - {Name: "Delete", Params: []interface{}{MemberStoreKey(1), true, true}}, - {Name: "Create", Params: []interface{}{RemovedMemberStoreKey(1), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}}}, - } - if !reflect.DeepEqual(st.Action(), wactions) { - t.Errorf("actions = %v, want %v", st.Action(), wactions) - } -} - -func TestClusterUpdateAttributes(t *testing.T) { - name := "etcd" - clientURLs := []string{"http://127.0.0.1:4001"} - tests := []struct { - mems []*Member - removed map[types.ID]bool - wmems []*Member - }{ - // update attributes of existing member - { - []*Member{ - newTestMember(1, nil, "", nil), - }, - nil, - []*Member{ - newTestMember(1, nil, name, clientURLs), - }, - }, - // update attributes of removed member - { - nil, - map[types.ID]bool{types.ID(1): true}, - nil, - }, - } - for i, tt := range tests { - c := newTestCluster(t, tt.mems) - c.removed = tt.removed - - c.UpdateAttributes(types.ID(1), Attributes{Name: name, ClientURLs: clientURLs}, true) - if g := c.Members(); !reflect.DeepEqual(g, tt.wmems) { - t.Errorf("#%d: members = %+v, want %+v", i, g, tt.wmems) - } - } -} - -func TestNodeToMember(t *testing.T) { - n := &v2store.NodeExtern{Key: "/1234", Nodes: []*v2store.NodeExtern{ - {Key: "/1234/attributes", Value: stringp(`{"name":"node1","clientURLs":null}`)}, - {Key: "/1234/raftAttributes", Value: 
stringp(`{"peerURLs":null}`)}, - }} - wm := &Member{ID: 0x1234, RaftAttributes: RaftAttributes{}, Attributes: Attributes{Name: "node1"}} - m, err := nodeToMember(zaptest.NewLogger(t), n) - if err != nil { - t.Fatalf("unexpected nodeToMember error: %v", err) - } - if !reflect.DeepEqual(m, wm) { - t.Errorf("member = %+v, want %+v", m, wm) - } -} - -func newTestCluster(t testing.TB, membs []*Member) *RaftCluster { - c := &RaftCluster{lg: zaptest.NewLogger(t), members: make(map[types.ID]*Member), removed: make(map[types.ID]bool)} - for _, m := range membs { - c.members[m.ID] = m - } - return c -} - -func stringp(s string) *string { return &s } - -func TestIsReadyToAddVotingMember(t *testing.T) { - tests := []struct { - members []*Member - want bool - }{ - { - // 0/3 members ready, should fail - []*Member{ - newTestMember(1, nil, "", nil), - newTestMember(2, nil, "", nil), - newTestMember(3, nil, "", nil), - }, - false, - }, - { - // 1/2 members ready, should fail - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - }, - false, - }, - { - // 1/3 members ready, should fail - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - newTestMember(3, nil, "", nil), - }, - false, - }, - { - // 1/1 members ready, should succeed (special case of 1-member cluster for recovery) - []*Member{ - newTestMember(1, nil, "1", nil), - }, - true, - }, - { - // 2/3 members ready, should fail - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "", nil), - }, - false, - }, - { - // 3/3 members ready, should be fine to add one member and retain quorum - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "3", nil), - }, - true, - }, - { - // 3/4 members ready, should be fine to add one member and retain quorum - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "3", nil), - 
newTestMember(4, nil, "", nil), - }, - true, - }, - { - // empty cluster, it is impossible but should fail - []*Member{}, - false, - }, - { - // 2 voting members ready in cluster with 2 voting members and 2 unstarted learner member, should succeed - // (the status of learner members does not affect the readiness of adding voting member) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMemberAsLearner(3, nil, "", nil), - newTestMemberAsLearner(4, nil, "", nil), - }, - true, - }, - { - // 1 voting member ready in cluster with 2 voting members and 2 ready learner member, should fail - // (the status of learner members does not affect the readiness of adding voting member) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - newTestMemberAsLearner(3, nil, "3", nil), - newTestMemberAsLearner(4, nil, "4", nil), - }, - false, - }, - } - for i, tt := range tests { - c := newTestCluster(t, tt.members) - if got := c.IsReadyToAddVotingMember(); got != tt.want { - t.Errorf("%d: isReadyToAddNewMember returned %t, want %t", i, got, tt.want) - } - } -} - -func TestIsReadyToRemoveVotingMember(t *testing.T) { - tests := []struct { - members []*Member - removeID uint64 - want bool - }{ - { - // 1/1 members ready, should fail - []*Member{ - newTestMember(1, nil, "1", nil), - }, - 1, - false, - }, - { - // 0/3 members ready, should fail - []*Member{ - newTestMember(1, nil, "", nil), - newTestMember(2, nil, "", nil), - newTestMember(3, nil, "", nil), - }, - 1, - false, - }, - { - // 1/2 members ready, should be fine to remove unstarted member - // (isReadyToRemoveMember() logic should return success, but operation itself would fail) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - }, - 2, - true, - }, - { - // 2/3 members ready, should fail - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "", nil), - }, - 2, - false, - }, 
- { - // 3/3 members ready, should be fine to remove one member and retain quorum - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "3", nil), - }, - 3, - true, - }, - { - // 3/4 members ready, should be fine to remove one member - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "3", nil), - newTestMember(4, nil, "", nil), - }, - 3, - true, - }, - { - // 3/4 members ready, should be fine to remove unstarted member - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "3", nil), - newTestMember(4, nil, "", nil), - }, - 4, - true, - }, - { - // 1 voting members ready in cluster with 1 voting member and 1 ready learner, - // removing voting member should fail - // (the status of learner members does not affect the readiness of removing voting member) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMemberAsLearner(2, nil, "2", nil), - }, - 1, - false, - }, - { - // 1 voting members ready in cluster with 2 voting member and 1 ready learner, - // removing ready voting member should fail - // (the status of learner members does not affect the readiness of removing voting member) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - newTestMemberAsLearner(3, nil, "3", nil), - }, - 1, - false, - }, - { - // 1 voting members ready in cluster with 2 voting member and 1 ready learner, - // removing unstarted voting member should be fine. (Actual operation will fail) - // (the status of learner members does not affect the readiness of removing voting member) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - newTestMemberAsLearner(3, nil, "3", nil), - }, - 2, - true, - }, - { - // 1 voting members ready in cluster with 2 voting member and 1 unstarted learner, - // removing not-ready voting member should be fine. 
(Actual operation will fail) - // (the status of learner members does not affect the readiness of removing voting member) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - newTestMemberAsLearner(3, nil, "", nil), - }, - 2, - true, - }, - } - for i, tt := range tests { - c := newTestCluster(t, tt.members) - if got := c.IsReadyToRemoveVotingMember(tt.removeID); got != tt.want { - t.Errorf("%d: isReadyToAddNewMember returned %t, want %t", i, got, tt.want) - } - } -} - -func TestIsReadyToPromoteMember(t *testing.T) { - tests := []struct { - members []*Member - promoteID uint64 - want bool - }{ - { - // 1/1 members ready, should succeed (quorum = 1, new quorum = 2) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMemberAsLearner(2, nil, "2", nil), - }, - 2, - true, - }, - { - // 0/1 members ready, should fail (quorum = 1) - []*Member{ - newTestMember(1, nil, "", nil), - newTestMemberAsLearner(2, nil, "2", nil), - }, - 2, - false, - }, - { - // 2/2 members ready, should succeed (quorum = 2) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMemberAsLearner(3, nil, "3", nil), - }, - 3, - true, - }, - { - // 1/2 members ready, should succeed (quorum = 2) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - newTestMemberAsLearner(3, nil, "3", nil), - }, - 3, - true, - }, - { - // 1/3 members ready, should fail (quorum = 2) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "", nil), - newTestMember(3, nil, "", nil), - newTestMemberAsLearner(4, nil, "4", nil), - }, - 4, - false, - }, - { - // 2/3 members ready, should succeed (quorum = 2, new quorum = 3) - []*Member{ - newTestMember(1, nil, "1", nil), - newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "", nil), - newTestMemberAsLearner(4, nil, "4", nil), - }, - 4, - true, - }, - { - // 2/4 members ready, should succeed (quorum = 3) - []*Member{ - newTestMember(1, nil, "1", nil), - 
newTestMember(2, nil, "2", nil), - newTestMember(3, nil, "", nil), - newTestMember(4, nil, "", nil), - newTestMemberAsLearner(5, nil, "5", nil), - }, - 5, - true, - }, - } - for i, tt := range tests { - c := newTestCluster(t, tt.members) - if got := c.IsReadyToPromoteMember(tt.promoteID); got != tt.want { - t.Errorf("%d: isReadyToPromoteMember returned %t, want %t", i, got, tt.want) - } - } -} diff --git a/server/etcdserver/api/membership/doc.go b/server/etcdserver/api/membership/doc.go deleted file mode 100644 index b07fb2d9285..00000000000 --- a/server/etcdserver/api/membership/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package membership describes individual etcd members and clusters of members. -package membership diff --git a/server/etcdserver/api/membership/errors.go b/server/etcdserver/api/membership/errors.go deleted file mode 100644 index e944d48c693..00000000000 --- a/server/etcdserver/api/membership/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "errors" - - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" -) - -var ( - ErrIDRemoved = errors.New("membership: ID removed") - ErrIDExists = errors.New("membership: ID exists") - ErrIDNotFound = errors.New("membership: ID not found") - ErrPeerURLexists = errors.New("membership: peerURL exists") - ErrMemberNotLearner = errors.New("membership: can only promote a learner member") - ErrTooManyLearners = errors.New("membership: too many learner members in cluster") -) - -func isKeyNotFound(err error) bool { - e, ok := err.(*v2error.Error) - return ok && e.ErrorCode == v2error.EcodeKeyNotFound -} diff --git a/server/etcdserver/api/membership/member.go b/server/etcdserver/api/membership/member.go deleted file mode 100644 index 97cc26c589d..00000000000 --- a/server/etcdserver/api/membership/member.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package membership - -import ( - "crypto/sha1" - "encoding/binary" - "fmt" - "sort" - "strings" - "time" - - "go.etcd.io/etcd/client/pkg/v3/types" -) - -// RaftAttributes represents the raft related attributes of an etcd member. -type RaftAttributes struct { - // PeerURLs is the list of peers in the raft cluster. - // TODO(philips): ensure these are URLs - PeerURLs []string `json:"peerURLs"` - // IsLearner indicates if the member is raft learner. - IsLearner bool `json:"isLearner,omitempty"` -} - -// Attributes represents all the non-raft related attributes of an etcd member. -type Attributes struct { - Name string `json:"name,omitempty"` - ClientURLs []string `json:"clientURLs,omitempty"` -} - -type Member struct { - ID types.ID `json:"id"` - RaftAttributes - Attributes -} - -// NewMember creates a Member without an ID and generates one based on the -// cluster name, peer URLs, and time. This is used for bootstrapping/adding new member. -func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { - memberId := computeMemberId(peerURLs, clusterName, now) - return newMember(name, peerURLs, memberId, false) -} - -// NewMemberAsLearner creates a learner Member without an ID and generates one based on the -// cluster name, peer URLs, and time. This is used for adding new learner member. -func NewMemberAsLearner(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { - memberId := computeMemberId(peerURLs, clusterName, now) - return newMember(name, peerURLs, memberId, true) -} - -func computeMemberId(peerURLs types.URLs, clusterName string, now *time.Time) types.ID { - peerURLstrs := peerURLs.StringSlice() - sort.Strings(peerURLstrs) - joinedPeerUrls := strings.Join(peerURLstrs, "") - b := []byte(joinedPeerUrls) - - b = append(b, []byte(clusterName)...) - if now != nil { - b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...) 
- } - - hash := sha1.Sum(b) - return types.ID(binary.BigEndian.Uint64(hash[:8])) -} - -func newMember(name string, peerURLs types.URLs, memberId types.ID, isLearner bool) *Member { - m := &Member{ - RaftAttributes: RaftAttributes{ - PeerURLs: peerURLs.StringSlice(), - IsLearner: isLearner, - }, - Attributes: Attributes{Name: name}, - ID: memberId, - } - return m -} - -func (m *Member) Clone() *Member { - if m == nil { - return nil - } - mm := &Member{ - ID: m.ID, - RaftAttributes: RaftAttributes{ - IsLearner: m.IsLearner, - }, - Attributes: Attributes{ - Name: m.Name, - }, - } - if m.PeerURLs != nil { - mm.PeerURLs = make([]string, len(m.PeerURLs)) - copy(mm.PeerURLs, m.PeerURLs) - } - if m.ClientURLs != nil { - mm.ClientURLs = make([]string, len(m.ClientURLs)) - copy(mm.ClientURLs, m.ClientURLs) - } - return mm -} - -func (m *Member) IsStarted() bool { - return len(m.Name) != 0 -} - -// MembersByID implements sort by ID interface -type MembersByID []*Member - -func (ms MembersByID) Len() int { return len(ms) } -func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID } -func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } - -// MembersByPeerURLs implements sort by peer urls interface -type MembersByPeerURLs []*Member - -func (ms MembersByPeerURLs) Len() int { return len(ms) } -func (ms MembersByPeerURLs) Less(i, j int) bool { - return ms[i].PeerURLs[0] < ms[j].PeerURLs[0] -} -func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } diff --git a/server/etcdserver/api/membership/member_test.go b/server/etcdserver/api/membership/member_test.go deleted file mode 100644 index 6541ec0f33b..00000000000 --- a/server/etcdserver/api/membership/member_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "net/url" - "reflect" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/types" -) - -func timeParse(value string) *time.Time { - t, err := time.Parse(time.RFC3339, value) - if err != nil { - panic(err) - } - return &t -} - -func TestMemberTime(t *testing.T) { - tests := []struct { - mem *Member - id types.ID - }{ - {NewMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, "", nil), 14544069596553697298}, - // Same ID, different name (names shouldn't matter) - {NewMember("memfoo", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, "", nil), 14544069596553697298}, - // Same ID, different Time - {NewMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, "", timeParse("1984-12-23T15:04:05Z")), 2448790162483548276}, - // Different cluster name - {NewMember("mcm1", []url.URL{{Scheme: "http", Host: "10.0.0.8:2379"}}, "etcd", timeParse("1984-12-23T15:04:05Z")), 6973882743191604649}, - {NewMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}}, "", timeParse("1984-12-23T15:04:05Z")), 1466075294948436910}, - // Order shouldn't matter - {NewMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "10.0.0.2:2379"}}, "", nil), 16552244735972308939}, - {NewMember("mem1", []url.URL{{Scheme: "http", Host: "10.0.0.2:2379"}, {Scheme: "http", Host: "10.0.0.1:2379"}}, "", nil), 16552244735972308939}, - } - for i, tt := range tests { - if tt.mem.ID != tt.id { - t.Errorf("#%d: mem.ID = %v, want %v", i, tt.mem.ID, tt.id) - } - } -} - -func TestMemberClone(t 
*testing.T) { - tests := []*Member{ - newTestMember(1, nil, "abc", nil), - newTestMember(1, []string{"http://a"}, "abc", nil), - newTestMember(1, nil, "abc", []string{"http://b"}), - newTestMember(1, []string{"http://a"}, "abc", []string{"http://b"}), - } - for i, tt := range tests { - nm := tt.Clone() - if nm == tt { - t.Errorf("#%d: the pointers are the same, and clone doesn't happen", i) - } - if !reflect.DeepEqual(nm, tt) { - t.Errorf("#%d: member = %+v, want %+v", i, nm, tt) - } - } -} - -func newTestMember(id uint64, peerURLs []string, name string, clientURLs []string) *Member { - return &Member{ - ID: types.ID(id), - RaftAttributes: RaftAttributes{PeerURLs: peerURLs}, - Attributes: Attributes{Name: name, ClientURLs: clientURLs}, - } -} - -func newTestMemberAsLearner(id uint64, peerURLs []string, name string, clientURLs []string) *Member { - return &Member{ - ID: types.ID(id), - RaftAttributes: RaftAttributes{PeerURLs: peerURLs, IsLearner: true}, - Attributes: Attributes{Name: name, ClientURLs: clientURLs}, - } -} diff --git a/server/etcdserver/api/membership/membership_test.go b/server/etcdserver/api/membership/membership_test.go deleted file mode 100644 index 728121e1c69..00000000000 --- a/server/etcdserver/api/membership/membership_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package membership - -import ( - "testing" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/version" - - "go.uber.org/zap" -) - -func TestAddRemoveMember(t *testing.T) { - c := newTestCluster(t, nil) - be := &backendMock{} - c.SetBackend(be) - c.AddMember(newTestMemberAsLearner(17, nil, "node17", nil), true) - c.RemoveMember(17, true) - c.AddMember(newTestMember(18, nil, "node18", nil), true) - c.RemoveMember(18, true) - - // Skipping removal of already removed member - c.RemoveMember(17, true) - c.RemoveMember(18, true) - - if false { - // TODO: Enable this code when Recover is reading membership from the backend. - c2 := newTestCluster(t, nil) - c2.SetBackend(be) - c2.Recover(func(*zap.Logger, *semver.Version) {}) - assert.Equal(t, []*Member{{ID: types.ID(18), - Attributes: Attributes{Name: "node18"}}}, c2.Members()) - assert.Equal(t, true, c2.IsIDRemoved(17)) - assert.Equal(t, false, c2.IsIDRemoved(18)) - } -} - -type backendMock struct { -} - -var _ MembershipBackend = (*backendMock)(nil) - -func (b *backendMock) MustCreateBackendBuckets() {} - -func (b *backendMock) ClusterVersionFromBackend() *semver.Version { return nil } -func (b *backendMock) MustSaveClusterVersionToBackend(version *semver.Version) {} - -func (b *backendMock) MustReadMembersFromBackend() (x map[types.ID]*Member, y map[types.ID]bool) { - return -} -func (b *backendMock) MustSaveMemberToBackend(*Member) {} -func (b *backendMock) TrimMembershipFromBackend() error { return nil } -func (b *backendMock) MustDeleteMemberFromBackend(types.ID) {} - -func (b *backendMock) MustSaveDowngradeToBackend(*version.DowngradeInfo) {} -func (b *backendMock) DowngradeInfoFromBackend() *version.DowngradeInfo { return nil } diff --git a/server/etcdserver/api/membership/metrics.go b/server/etcdserver/api/membership/metrics.go deleted file mode 100644 index f08763779f0..00000000000 --- 
a/server/etcdserver/api/membership/metrics.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import "github.com/prometheus/client_golang/prometheus" - -var ( - ClusterVersionMetrics = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "cluster", - Name: "version", - Help: "Which version is running. 1 for 'cluster_version' label with current cluster version", - }, - []string{"cluster_version"}) - knownPeers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "known_peers", - Help: "The current number of known peers.", - }, - []string{"Local", "Remote"}, - ) -) - -func init() { - prometheus.MustRegister(ClusterVersionMetrics) - prometheus.MustRegister(knownPeers) -} diff --git a/server/etcdserver/api/membership/store.go b/server/etcdserver/api/membership/store.go deleted file mode 100644 index bee385b0603..00000000000 --- a/server/etcdserver/api/membership/store.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "path" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/version" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" -) - -type MembershipBackend interface { - ClusterVersionBackend - MemberBackend - DowngradeInfoBackend - MustCreateBackendBuckets() -} - -type ClusterVersionBackend interface { - ClusterVersionFromBackend() *semver.Version - MustSaveClusterVersionToBackend(version *semver.Version) -} - -type MemberBackend interface { - MustReadMembersFromBackend() (map[types.ID]*Member, map[types.ID]bool) - MustSaveMemberToBackend(*Member) - TrimMembershipFromBackend() error - MustDeleteMemberFromBackend(types.ID) -} - -type DowngradeInfoBackend interface { - MustSaveDowngradeToBackend(*version.DowngradeInfo) - DowngradeInfoFromBackend() *version.DowngradeInfo -} - -func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID { - id, err := types.IDFromString(path.Base(key)) - if err != nil { - lg.Panic("failed to parse member id from key", zap.Error(err)) - } - return id -} diff --git a/server/etcdserver/api/membership/storev2.go b/server/etcdserver/api/membership/storev2.go deleted file mode 100644 index d428cb66e22..00000000000 --- a/server/etcdserver/api/membership/storev2.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "encoding/json" - "fmt" - "path" - - "go.etcd.io/etcd/client/pkg/v3/types" - - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" -) - -const ( - // the prefix for storing membership related information in store provided by store pkg. - storePrefix = "/0" - - attributesSuffix = "attributes" - raftAttributesSuffix = "raftAttributes" -) - -var ( - StoreMembersPrefix = path.Join(storePrefix, "members") - storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members") -) - -// IsMetaStoreOnly verifies if the given `store` contains only -// a meta-information (members, version) that can be recovered from the -// backend (storev3) as well as opposed to user-data. -func IsMetaStoreOnly(store v2store.Store) (bool, error) { - event, err := store.Get("/", true, false) - if err != nil { - return false, err - } - for _, n := range event.Node.Nodes { - if n.Key != storePrefix && n.Nodes.Len() > 0 { - return false, nil - } - } - - return true, nil -} - -// TrimMembershipFromV2Store removes all information about members & -// removed_members from the v2 store. 
-func TrimMembershipFromV2Store(lg *zap.Logger, s v2store.Store) error { - members, removed := membersFromStore(lg, s) - - for mID := range members { - _, err := s.Delete(MemberStoreKey(mID), true, true) - if err != nil { - return err - } - } - for mID := range removed { - _, err := s.Delete(RemovedMemberStoreKey(mID), true, true) - if err != nil { - return err - } - } - - return nil -} - -func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) { - b, err := json.Marshal(m.RaftAttributes) - if err != nil { - lg.Panic("failed to marshal raftAttributes", zap.Error(err)) - } - p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to save member to store", - zap.String("path", p), - zap.Error(err), - ) - } -} - -func mustDeleteMemberFromStore(lg *zap.Logger, s v2store.Store, id types.ID) { - if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil { - lg.Panic( - "failed to delete member from store", - zap.String("path", MemberStoreKey(id)), - zap.Error(err), - ) - } - if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to create removedMember", - zap.String("path", RemovedMemberStoreKey(id)), - zap.Error(err), - ) - } -} - -func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) { - b, err := json.Marshal(m.RaftAttributes) - if err != nil { - lg.Panic("failed to marshal raftAttributes", zap.Error(err)) - } - p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to update raftAttributes", - zap.String("path", p), - zap.Error(err), - ) - } -} - -func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) { - b, err := 
json.Marshal(m.Attributes) - if err != nil { - lg.Panic("failed to marshal attributes", zap.Error(err)) - } - p := path.Join(MemberStoreKey(m.ID), attributesSuffix) - if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to update attributes", - zap.String("path", p), - zap.Error(err), - ) - } -} - -func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) { - if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { - lg.Panic( - "failed to save cluster version to store", - zap.String("path", StoreClusterVersionKey()), - zap.Error(err), - ) - } -} - -// nodeToMember builds member from a key value node. -// the child nodes of the given node MUST be sorted by key. -func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) { - m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)} - attrs := make(map[string][]byte) - raftAttrKey := path.Join(n.Key, raftAttributesSuffix) - attrKey := path.Join(n.Key, attributesSuffix) - for _, nn := range n.Nodes { - if nn.Key != raftAttrKey && nn.Key != attrKey { - return nil, fmt.Errorf("unknown key %q", nn.Key) - } - attrs[nn.Key] = []byte(*nn.Value) - } - if data := attrs[raftAttrKey]; data != nil { - if err := json.Unmarshal(data, &m.RaftAttributes); err != nil { - return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err) - } - } else { - return nil, fmt.Errorf("raftAttributes key doesn't exist") - } - if data := attrs[attrKey]; data != nil { - if err := json.Unmarshal(data, &m.Attributes); err != nil { - return m, fmt.Errorf("unmarshal attributes error: %v", err) - } - } - return m, nil -} - -func StoreClusterVersionKey() string { - return path.Join(storePrefix, "version") -} - -func RemovedMemberStoreKey(id types.ID) string { - return path.Join(storeRemovedMembersPrefix, id.String()) -} - -func MemberStoreKey(id types.ID) 
string { - return path.Join(StoreMembersPrefix, id.String()) -} - -func MemberAttributesStorePath(id types.ID) string { - return path.Join(MemberStoreKey(id), attributesSuffix) -} - -func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version { - e, err := st.Get(path.Join(storePrefix, "version"), false, false) - if err != nil { - if isKeyNotFound(err) { - return nil - } - lg.Panic( - "failed to get cluster version from store", - zap.String("path", path.Join(storePrefix, "version")), - zap.Error(err), - ) - } - return semver.Must(semver.NewVersion(*e.Node.Value)) -} diff --git a/server/etcdserver/api/membership/storev2_test.go b/server/etcdserver/api/membership/storev2_test.go deleted file mode 100644 index 4f849f4162d..00000000000 --- a/server/etcdserver/api/membership/storev2_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package membership - -import ( - "testing" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" -) - -func TestIsMetaStoreOnly(t *testing.T) { - lg := zaptest.NewLogger(t) - s := v2store.New("/0", "/1") - - metaOnly, err := IsMetaStoreOnly(s) - assert.NoError(t, err) - assert.True(t, metaOnly, "Just created v2store should be meta-only") - - mustSaveClusterVersionToStore(lg, s, semver.New("3.5.17")) - metaOnly, err = IsMetaStoreOnly(s) - assert.NoError(t, err) - assert.True(t, metaOnly, "Just created v2store should be meta-only") - - mustSaveMemberToStore(lg, s, &Member{ID: 0x00abcd}) - metaOnly, err = IsMetaStoreOnly(s) - assert.NoError(t, err) - assert.True(t, metaOnly, "Just created v2store should be meta-only") - - _, err = s.Create("/1/foo", false, "v1", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - assert.NoError(t, err) - metaOnly, err = IsMetaStoreOnly(s) - assert.NoError(t, err) - assert.False(t, metaOnly, "Just created v2store should be meta-only") - - _, err = s.Delete("/1/foo", false, false) - assert.NoError(t, err) - assert.NoError(t, err) - assert.False(t, metaOnly, "Just created v2store should be meta-only") -} diff --git a/server/etcdserver/api/rafthttp/coder.go b/server/etcdserver/api/rafthttp/coder.go deleted file mode 100644 index 97744299896..00000000000 --- a/server/etcdserver/api/rafthttp/coder.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import "go.etcd.io/raft/v3/raftpb" - -type encoder interface { - // encode encodes the given message to an output stream. - encode(m *raftpb.Message) error -} - -type decoder interface { - // decode decodes the message from an input stream. - decode() (raftpb.Message, error) -} diff --git a/server/etcdserver/api/rafthttp/doc.go b/server/etcdserver/api/rafthttp/doc.go deleted file mode 100644 index c45dc817825..00000000000 --- a/server/etcdserver/api/rafthttp/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rafthttp implements HTTP transportation layer for raft pkg. -package rafthttp diff --git a/server/etcdserver/api/rafthttp/fake_roundtripper_test.go b/server/etcdserver/api/rafthttp/fake_roundtripper_test.go deleted file mode 100644 index c29173d3efc..00000000000 --- a/server/etcdserver/api/rafthttp/fake_roundtripper_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "errors" - "net/http" -) - -func (t *roundTripperBlocker) RoundTrip(req *http.Request) (*http.Response, error) { - c := make(chan struct{}, 1) - t.mu.Lock() - t.cancel[req] = c - t.mu.Unlock() - ctx := req.Context() - select { - case <-t.unblockc: - return &http.Response{StatusCode: http.StatusNoContent, Body: &nopReadCloser{}}, nil - case <-ctx.Done(): - return nil, errors.New("request canceled") - case <-c: - return nil, errors.New("request canceled") - } -} diff --git a/server/etcdserver/api/rafthttp/functional_test.go b/server/etcdserver/api/rafthttp/functional_test.go deleted file mode 100644 index cf507d2773e..00000000000 --- a/server/etcdserver/api/rafthttp/functional_test.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "context" - "net/http/httptest" - "reflect" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/types" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -func TestSendMessage(t *testing.T) { - // member 1 - tr := &Transport{ - ID: types.ID(1), - ClusterID: types.ID(1), - Raft: &fakeRaft{}, - ServerStats: newServerStats(), - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "1"), - } - tr.Start() - srv := httptest.NewServer(tr.Handler()) - defer srv.Close() - - // member 2 - recvc := make(chan raftpb.Message, 1) - p := &fakeRaft{recvc: recvc} - tr2 := &Transport{ - ID: types.ID(2), - ClusterID: types.ID(1), - Raft: p, - ServerStats: newServerStats(), - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "2"), - } - tr2.Start() - srv2 := httptest.NewServer(tr2.Handler()) - defer srv2.Close() - - tr.AddPeer(types.ID(2), []string{srv2.URL}) - defer tr.Stop() - tr2.AddPeer(types.ID(1), []string{srv.URL}) - defer tr2.Stop() - if !waitStreamWorking(tr.Get(types.ID(2)).(*peer)) { - t.Fatalf("stream from 1 to 2 is not in work as expected") - } - - data := []byte("some data") - tests := []raftpb.Message{ - // these messages are set to send to itself, which facilitates testing. 
- {Type: raftpb.MsgProp, From: 1, To: 2, Entries: []raftpb.Entry{{Data: data}}}, - {Type: raftpb.MsgApp, From: 1, To: 2, Term: 1, Index: 3, LogTerm: 0, Entries: []raftpb.Entry{{Index: 4, Term: 1, Data: data}}, Commit: 3}, - {Type: raftpb.MsgAppResp, From: 1, To: 2, Term: 1, Index: 3}, - {Type: raftpb.MsgVote, From: 1, To: 2, Term: 1, Index: 3, LogTerm: 0}, - {Type: raftpb.MsgVoteResp, From: 1, To: 2, Term: 1}, - {Type: raftpb.MsgSnap, From: 1, To: 2, Term: 1, Snapshot: &raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1000, Term: 1}, Data: data}}, - {Type: raftpb.MsgHeartbeat, From: 1, To: 2, Term: 1, Commit: 3}, - {Type: raftpb.MsgHeartbeatResp, From: 1, To: 2, Term: 1}, - } - for i, tt := range tests { - tr.Send([]raftpb.Message{tt}) - msg := <-recvc - if !reflect.DeepEqual(msg, tt) { - t.Errorf("#%d: msg = %+v, want %+v", i, msg, tt) - } - } -} - -// TestSendMessageWhenStreamIsBroken tests that message can be sent to the -// remote in a limited time when all underlying connections are broken. 
-func TestSendMessageWhenStreamIsBroken(t *testing.T) { - // member 1 - tr := &Transport{ - ID: types.ID(1), - ClusterID: types.ID(1), - Raft: &fakeRaft{}, - ServerStats: newServerStats(), - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "1"), - } - tr.Start() - srv := httptest.NewServer(tr.Handler()) - defer srv.Close() - - // member 2 - recvc := make(chan raftpb.Message, 1) - p := &fakeRaft{recvc: recvc} - tr2 := &Transport{ - ID: types.ID(2), - ClusterID: types.ID(1), - Raft: p, - ServerStats: newServerStats(), - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "2"), - } - tr2.Start() - srv2 := httptest.NewServer(tr2.Handler()) - defer srv2.Close() - - tr.AddPeer(types.ID(2), []string{srv2.URL}) - defer tr.Stop() - tr2.AddPeer(types.ID(1), []string{srv.URL}) - defer tr2.Stop() - if !waitStreamWorking(tr.Get(types.ID(2)).(*peer)) { - t.Fatalf("stream from 1 to 2 is not in work as expected") - } - - // break the stream - srv.CloseClientConnections() - srv2.CloseClientConnections() - var n int - for { - select { - // TODO: remove this resend logic when we add retry logic into the code - case <-time.After(time.Millisecond): - n++ - tr.Send([]raftpb.Message{{Type: raftpb.MsgHeartbeat, From: 1, To: 2, Term: 1, Commit: 3}}) - case <-recvc: - if n > 50 { - t.Errorf("disconnection time = %dms, want < 50ms", n) - } - return - } - } -} - -func newServerStats() *stats.ServerStats { - return stats.NewServerStats("", "") -} - -func waitStreamWorking(p *peer) bool { - for i := 0; i < 1000; i++ { - time.Sleep(time.Millisecond) - if _, ok := p.msgAppV2Writer.writec(); !ok { - continue - } - if _, ok := p.writer.writec(); !ok { - continue - } - return true - } - return false -} - -type fakeRaft struct { - recvc chan<- raftpb.Message - err error - removedID uint64 -} - -func (p *fakeRaft) Process(ctx context.Context, m raftpb.Message) error { - select { - case p.recvc <- m: - default: - } - return p.err -} - -func (p *fakeRaft) IsIDRemoved(id uint64) bool { return 
id == p.removedID } - -func (p *fakeRaft) ReportUnreachable(id uint64) {} - -func (p *fakeRaft) ReportSnapshot(id uint64, status raft.SnapshotStatus) {} diff --git a/server/etcdserver/api/rafthttp/http.go b/server/etcdserver/api/rafthttp/http.go deleted file mode 100644 index 6e6686b4c87..00000000000 --- a/server/etcdserver/api/rafthttp/http.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "path" - "strings" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/types" - pioutil "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/raft/v3/raftpb" - - humanize "github.com/dustin/go-humanize" - "go.uber.org/zap" -) - -const ( - // connReadLimitByte limits the number of bytes - // a single read can read out. - // - // 64KB should be large enough for not causing - // throughput bottleneck as well as small enough - // for not causing a read timeout. 
- connReadLimitByte = 64 * 1024 - - // snapshotLimitByte limits the snapshot size to 1TB - snapshotLimitByte = 1 * 1024 * 1024 * 1024 * 1024 -) - -var ( - RaftPrefix = "/raft" - ProbingPrefix = path.Join(RaftPrefix, "probing") - RaftStreamPrefix = path.Join(RaftPrefix, "stream") - RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot") - - errIncompatibleVersion = errors.New("incompatible version") - errClusterIDMismatch = errors.New("cluster ID mismatch") -) - -type peerGetter interface { - Get(id types.ID) Peer -} - -type writerToResponse interface { - WriteTo(w http.ResponseWriter) -} - -type pipelineHandler struct { - lg *zap.Logger - localID types.ID - tr Transporter - r Raft - cid types.ID -} - -// newPipelineHandler returns a handler for handling raft messages -// from pipeline for RaftPrefix. -// -// The handler reads out the raft message from request body, -// and forwards it to the given raft state machine for processing. -func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler { - h := &pipelineHandler{ - lg: t.Logger, - localID: t.ID, - tr: t, - r: r, - cid: cid, - } - if h.lg == nil { - h.lg = zap.NewNop() - } - return h -} - -func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - w.Header().Set("Allow", "POST") - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - - if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - - addRemoteFromRequest(h.tr, r) - - // Limit the data size that could be read from the request body, which ensures that read from - // connection will not time out accidentally due to possible blocking in underlying implementation. 
- limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte) - b, err := io.ReadAll(limitedr) - if err != nil { - h.lg.Warn( - "failed to read Raft message", - zap.String("local-member-id", h.localID.String()), - zap.Error(err), - ) - http.Error(w, "error reading raft message", http.StatusBadRequest) - recvFailures.WithLabelValues(r.RemoteAddr).Inc() - return - } - - var m raftpb.Message - if err := m.Unmarshal(b); err != nil { - h.lg.Warn( - "failed to unmarshal Raft message", - zap.String("local-member-id", h.localID.String()), - zap.Error(err), - ) - http.Error(w, "error unmarshalling raft message", http.StatusBadRequest) - recvFailures.WithLabelValues(r.RemoteAddr).Inc() - return - } - - receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b))) - - if err := h.r.Process(context.TODO(), m); err != nil { - switch v := err.(type) { - case writerToResponse: - v.WriteTo(w) - default: - h.lg.Warn( - "failed to process Raft message", - zap.String("local-member-id", h.localID.String()), - zap.Error(err), - ) - http.Error(w, "error processing raft message", http.StatusInternalServerError) - w.(http.Flusher).Flush() - // disconnect the http stream - panic(err) - } - return - } - - // Write StatusNoContent header after the message has been processed by - // raft, which facilitates the client to report MsgSnap status. - w.WriteHeader(http.StatusNoContent) -} - -type snapshotHandler struct { - lg *zap.Logger - tr Transporter - r Raft - snapshotter *snap.Snapshotter - - localID types.ID - cid types.ID -} - -func newSnapshotHandler(t *Transport, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler { - h := &snapshotHandler{ - lg: t.Logger, - tr: t, - r: r, - snapshotter: snapshotter, - localID: t.ID, - cid: cid, - } - if h.lg == nil { - h.lg = zap.NewNop() - } - return h -} - -const unknownSnapshotSender = "UNKNOWN_SNAPSHOT_SENDER" - -// ServeHTTP serves HTTP request to receive and process snapshot message. 
-// -// If request sender dies without closing underlying TCP connection, -// the handler will keep waiting for the request body until TCP keepalive -// finds out that the connection is broken after several minutes. -// This is acceptable because -// 1. snapshot messages sent through other TCP connections could still be -// received and processed. -// 2. this case should happen rarely, so no further optimization is done. -func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - start := time.Now() - - if r.Method != "POST" { - w.Header().Set("Allow", "POST") - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc() - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - - if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc() - return - } - - addRemoteFromRequest(h.tr, r) - - dec := &messageDecoder{r: r.Body} - // let snapshots be very large since they can exceed 512MB for large installations - m, err := dec.decodeLimit(snapshotLimitByte) - from := types.ID(m.From).String() - if err != nil { - msg := fmt.Sprintf("failed to decode raft message (%v)", err) - h.lg.Warn( - "failed to decode Raft message", - zap.String("local-member-id", h.localID.String()), - zap.String("remote-snapshot-sender-id", from), - zap.Error(err), - ) - http.Error(w, msg, http.StatusBadRequest) - recvFailures.WithLabelValues(r.RemoteAddr).Inc() - snapshotReceiveFailures.WithLabelValues(from).Inc() - return - } - - msgSize := m.Size() - receivedBytes.WithLabelValues(from).Add(float64(msgSize)) - - if m.Type != raftpb.MsgSnap { - h.lg.Warn( - "unexpected Raft message type", - zap.String("local-member-id", h.localID.String()), - zap.String("remote-snapshot-sender-id", from), - zap.String("message-type", 
m.Type.String()), - ) - http.Error(w, "wrong raft message type", http.StatusBadRequest) - snapshotReceiveFailures.WithLabelValues(from).Inc() - return - } - - snapshotReceiveInflights.WithLabelValues(from).Inc() - defer func() { - snapshotReceiveInflights.WithLabelValues(from).Dec() - }() - - h.lg.Info( - "receiving database snapshot", - zap.String("local-member-id", h.localID.String()), - zap.String("remote-snapshot-sender-id", from), - zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), - zap.Int("incoming-snapshot-message-size-bytes", msgSize), - zap.String("incoming-snapshot-message-size", humanize.Bytes(uint64(msgSize))), - ) - - // save incoming database snapshot. - - n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index) - if err != nil { - msg := fmt.Sprintf("failed to save KV snapshot (%v)", err) - h.lg.Warn( - "failed to save incoming database snapshot", - zap.String("local-member-id", h.localID.String()), - zap.String("remote-snapshot-sender-id", from), - zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), - zap.Error(err), - ) - http.Error(w, msg, http.StatusInternalServerError) - snapshotReceiveFailures.WithLabelValues(from).Inc() - return - } - - receivedBytes.WithLabelValues(from).Add(float64(n)) - - downloadTook := time.Since(start) - h.lg.Info( - "received and saved database snapshot", - zap.String("local-member-id", h.localID.String()), - zap.String("remote-snapshot-sender-id", from), - zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), - zap.Int64("incoming-snapshot-size-bytes", n), - zap.String("incoming-snapshot-size", humanize.Bytes(uint64(n))), - zap.String("download-took", downloadTook.String()), - ) - - if err := h.r.Process(context.TODO(), m); err != nil { - switch v := err.(type) { - // Process may return writerToResponse error when doing some - // additional checks before calling raft.Node.Step. 
- case writerToResponse: - v.WriteTo(w) - default: - msg := fmt.Sprintf("failed to process raft message (%v)", err) - h.lg.Warn( - "failed to process Raft message", - zap.String("local-member-id", h.localID.String()), - zap.String("remote-snapshot-sender-id", from), - zap.Error(err), - ) - http.Error(w, msg, http.StatusInternalServerError) - snapshotReceiveFailures.WithLabelValues(from).Inc() - } - return - } - - // Write StatusNoContent header after the message has been processed by - // raft, which facilitates the client to report MsgSnap status. - w.WriteHeader(http.StatusNoContent) - - snapshotReceive.WithLabelValues(from).Inc() - snapshotReceiveSeconds.WithLabelValues(from).Observe(time.Since(start).Seconds()) -} - -type streamHandler struct { - lg *zap.Logger - tr *Transport - peerGetter peerGetter - r Raft - id types.ID - cid types.ID -} - -func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler { - h := &streamHandler{ - lg: t.Logger, - tr: t, - peerGetter: pg, - r: r, - id: id, - cid: cid, - } - if h.lg == nil { - h.lg = zap.NewNop() - } - return h -} - -func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { - w.Header().Set("Allow", "GET") - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("X-Server-Version", version.Version) - w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - - if err := checkClusterCompatibilityFromHeader(h.lg, h.tr.ID, r.Header, h.cid); err != nil { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - - var t streamType - switch path.Dir(r.URL.Path) { - case streamTypeMsgAppV2.endpoint(h.lg): - t = streamTypeMsgAppV2 - case streamTypeMessage.endpoint(h.lg): - t = streamTypeMessage - default: - h.lg.Debug( - "ignored unexpected streaming request path", - zap.String("local-member-id", h.tr.ID.String()), - zap.String("remote-peer-id-stream-handler", h.id.String()), - zap.String("path", 
r.URL.Path), - ) - http.Error(w, "invalid path", http.StatusNotFound) - return - } - - fromStr := path.Base(r.URL.Path) - from, err := types.IDFromString(fromStr) - if err != nil { - h.lg.Warn( - "failed to parse path into ID", - zap.String("local-member-id", h.tr.ID.String()), - zap.String("remote-peer-id-stream-handler", h.id.String()), - zap.String("path", fromStr), - zap.Error(err), - ) - http.Error(w, "invalid from", http.StatusNotFound) - return - } - if h.r.IsIDRemoved(uint64(from)) { - h.lg.Warn( - "rejected stream from remote peer because it was removed", - zap.String("local-member-id", h.tr.ID.String()), - zap.String("remote-peer-id-stream-handler", h.id.String()), - zap.String("remote-peer-id-from", from.String()), - ) - http.Error(w, "removed member", http.StatusGone) - return - } - p := h.peerGetter.Get(from) - if p == nil { - // This may happen in following cases: - // 1. user starts a remote peer that belongs to a different cluster - // with the same cluster ID. - // 2. local etcd falls behind of the cluster, and cannot recognize - // the members that joined after its current progress. 
- if urls := r.Header.Get("X-PeerURLs"); urls != "" { - h.tr.AddRemote(from, strings.Split(urls, ",")) - } - h.lg.Warn( - "failed to find remote peer in cluster", - zap.String("local-member-id", h.tr.ID.String()), - zap.String("remote-peer-id-stream-handler", h.id.String()), - zap.String("remote-peer-id-from", from.String()), - zap.String("cluster-id", h.cid.String()), - ) - http.Error(w, "error sender not found", http.StatusNotFound) - return - } - - wto := h.id.String() - if gto := r.Header.Get("X-Raft-To"); gto != wto { - h.lg.Warn( - "ignored streaming request; ID mismatch", - zap.String("local-member-id", h.tr.ID.String()), - zap.String("remote-peer-id-stream-handler", h.id.String()), - zap.String("remote-peer-id-header", gto), - zap.String("remote-peer-id-from", from.String()), - zap.String("cluster-id", h.cid.String()), - ) - http.Error(w, "to field mismatch", http.StatusPreconditionFailed) - return - } - - w.WriteHeader(http.StatusOK) - w.(http.Flusher).Flush() - - c := newCloseNotifier() - conn := &outgoingConn{ - t: t, - Writer: w, - Flusher: w.(http.Flusher), - Closer: c, - localID: h.tr.ID, - peerID: from, - } - p.attachOutgoingConn(conn) - <-c.closeNotify() -} - -// checkClusterCompatibilityFromHeader checks the cluster compatibility of -// the local member from the given header. -// It checks whether the version of local member is compatible with -// the versions in the header, and whether the cluster ID of local member -// matches the one in the header. 
-func checkClusterCompatibilityFromHeader(lg *zap.Logger, localID types.ID, header http.Header, cid types.ID) error { - remoteName := header.Get("X-Server-From") - - remoteServer := serverVersion(header) - remoteVs := "" - if remoteServer != nil { - remoteVs = remoteServer.String() - } - - remoteMinClusterVer := minClusterVersion(header) - remoteMinClusterVs := "" - if remoteMinClusterVer != nil { - remoteMinClusterVs = remoteMinClusterVer.String() - } - - localServer, localMinCluster, err := checkVersionCompatibility(remoteName, remoteServer, remoteMinClusterVer) - - localVs := "" - if localServer != nil { - localVs = localServer.String() - } - localMinClusterVs := "" - if localMinCluster != nil { - localMinClusterVs = localMinCluster.String() - } - - if err != nil { - lg.Warn( - "failed to check version compatibility", - zap.String("local-member-id", localID.String()), - zap.String("local-member-cluster-id", cid.String()), - zap.String("local-member-server-version", localVs), - zap.String("local-member-server-minimum-cluster-version", localMinClusterVs), - zap.String("remote-peer-server-name", remoteName), - zap.String("remote-peer-server-version", remoteVs), - zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs), - zap.Error(err), - ) - return errIncompatibleVersion - } - if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() { - lg.Warn( - "request cluster ID mismatch", - zap.String("local-member-id", localID.String()), - zap.String("local-member-cluster-id", cid.String()), - zap.String("local-member-server-version", localVs), - zap.String("local-member-server-minimum-cluster-version", localMinClusterVs), - zap.String("remote-peer-server-name", remoteName), - zap.String("remote-peer-server-version", remoteVs), - zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs), - zap.String("remote-peer-cluster-id", gcid), - ) - return errClusterIDMismatch - } - return nil -} - -type closeNotifier struct { - done 
chan struct{} -} - -func newCloseNotifier() *closeNotifier { - return &closeNotifier{ - done: make(chan struct{}), - } -} - -func (n *closeNotifier) Close() error { - close(n.done) - return nil -} - -func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done } diff --git a/server/etcdserver/api/rafthttp/http_test.go b/server/etcdserver/api/rafthttp/http_test.go deleted file mode 100644 index 8728fce1315..00000000000 --- a/server/etcdserver/api/rafthttp/http_test.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/raft/v3/raftpb" -) - -func TestServeRaftPrefix(t *testing.T) { - testCases := []struct { - method string - body io.Reader - p Raft - clusterID string - - wcode int - }{ - { - // bad method - "GET", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{}, - "0", - http.StatusMethodNotAllowed, - }, - { - // bad method - "PUT", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{}, - "0", - http.StatusMethodNotAllowed, - }, - { - // bad method - "DELETE", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{}, - "0", - http.StatusMethodNotAllowed, - }, - { - // bad request body - "POST", - &errReader{}, - &fakeRaft{}, - "0", - http.StatusBadRequest, - }, - { - // bad request protobuf - "POST", - strings.NewReader("malformed garbage"), - &fakeRaft{}, - "0", - http.StatusBadRequest, - }, - { - // good request, wrong cluster ID - "POST", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{}, - "1", - http.StatusPreconditionFailed, - }, - { - // good request, Processor failure - "POST", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{ - err: &resWriterToError{code: http.StatusForbidden}, - }, - "0", - http.StatusForbidden, - }, - { - // good request, Processor failure - "POST", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{ - err: &resWriterToError{code: http.StatusInternalServerError}, - }, - "0", - http.StatusInternalServerError, - }, - { - // good request, Processor failure - "POST", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{err: 
errors.New("blah")}, - "0", - http.StatusInternalServerError, - }, - { - // good request - "POST", - bytes.NewReader( - pbutil.MustMarshal(&raftpb.Message{}), - ), - &fakeRaft{}, - "0", - http.StatusNoContent, - }, - } - for i, tt := range testCases { - req, err := http.NewRequest(tt.method, "foo", tt.body) - if err != nil { - t.Fatalf("#%d: could not create request: %#v", i, err) - } - req.Header.Set("X-Etcd-Cluster-ID", tt.clusterID) - req.Header.Set("X-Server-Version", version.Version) - rw := httptest.NewRecorder() - h := newPipelineHandler(&Transport{Logger: zaptest.NewLogger(t)}, tt.p, types.ID(0)) - - // goroutine because the handler panics to disconnect on raft error - donec := make(chan struct{}) - go func() { - defer func() { - recover() - close(donec) - }() - h.ServeHTTP(rw, req) - }() - <-donec - - if rw.Code != tt.wcode { - t.Errorf("#%d: got code=%d, want %d", i, rw.Code, tt.wcode) - } - } -} - -func TestServeRaftStreamPrefix(t *testing.T) { - tests := []struct { - path string - wtype streamType - }{ - { - RaftStreamPrefix + "/message/1", - streamTypeMessage, - }, - { - RaftStreamPrefix + "/msgapp/1", - streamTypeMsgAppV2, - }, - } - for i, tt := range tests { - req, err := http.NewRequest("GET", "http://localhost:2380"+tt.path, nil) - if err != nil { - t.Fatalf("#%d: could not create request: %#v", i, err) - } - req.Header.Set("X-Etcd-Cluster-ID", "1") - req.Header.Set("X-Server-Version", version.Version) - req.Header.Set("X-Raft-To", "2") - - peer := newFakePeer() - peerGetter := &fakePeerGetter{peers: map[types.ID]Peer{types.ID(1): peer}} - tr := &Transport{} - h := newStreamHandler(tr, peerGetter, &fakeRaft{}, types.ID(2), types.ID(1)) - - rw := httptest.NewRecorder() - go h.ServeHTTP(rw, req) - - var conn *outgoingConn - select { - case conn = <-peer.connc: - case <-time.After(time.Second): - t.Fatalf("#%d: failed to attach outgoingConn", i) - } - if g := rw.Header().Get("X-Server-Version"); g != version.Version { - t.Errorf("#%d: 
X-Server-Version = %s, want %s", i, g, version.Version) - } - if conn.t != tt.wtype { - t.Errorf("#%d: type = %s, want %s", i, conn.t, tt.wtype) - } - conn.Close() - } -} - -func TestServeRaftStreamPrefixBad(t *testing.T) { - removedID := uint64(5) - tests := []struct { - method string - path string - clusterID string - remote string - - wcode int - }{ - // bad method - { - "PUT", - RaftStreamPrefix + "/message/1", - "1", - "1", - http.StatusMethodNotAllowed, - }, - // bad method - { - "POST", - RaftStreamPrefix + "/message/1", - "1", - "1", - http.StatusMethodNotAllowed, - }, - // bad method - { - "DELETE", - RaftStreamPrefix + "/message/1", - "1", - "1", - http.StatusMethodNotAllowed, - }, - // bad path - { - "GET", - RaftStreamPrefix + "/strange/1", - "1", - "1", - http.StatusNotFound, - }, - // bad path - { - "GET", - RaftStreamPrefix + "/strange", - "1", - "1", - http.StatusNotFound, - }, - // non-existent peer - { - "GET", - RaftStreamPrefix + "/message/2", - "1", - "1", - http.StatusNotFound, - }, - // removed peer - { - "GET", - RaftStreamPrefix + "/message/" + fmt.Sprint(removedID), - "1", - "1", - http.StatusGone, - }, - // wrong cluster ID - { - "GET", - RaftStreamPrefix + "/message/1", - "2", - "1", - http.StatusPreconditionFailed, - }, - // wrong remote id - { - "GET", - RaftStreamPrefix + "/message/1", - "1", - "2", - http.StatusPreconditionFailed, - }, - } - for i, tt := range tests { - req, err := http.NewRequest(tt.method, "http://localhost:2380"+tt.path, nil) - if err != nil { - t.Fatalf("#%d: could not create request: %#v", i, err) - } - req.Header.Set("X-Etcd-Cluster-ID", tt.clusterID) - req.Header.Set("X-Server-Version", version.Version) - req.Header.Set("X-Raft-To", tt.remote) - rw := httptest.NewRecorder() - tr := &Transport{} - peerGetter := &fakePeerGetter{peers: map[types.ID]Peer{types.ID(1): newFakePeer()}} - r := &fakeRaft{removedID: removedID} - h := newStreamHandler(tr, peerGetter, r, types.ID(1), types.ID(1)) - h.ServeHTTP(rw, req) - 
- if rw.Code != tt.wcode { - t.Errorf("#%d: code = %d, want %d", i, rw.Code, tt.wcode) - } - } -} - -func TestCloseNotifier(t *testing.T) { - c := newCloseNotifier() - select { - case <-c.closeNotify(): - t.Fatalf("received unexpected close notification") - default: - } - c.Close() - select { - case <-c.closeNotify(): - default: - t.Fatalf("failed to get close notification") - } -} - -// errReader implements io.Reader to facilitate a broken request. -type errReader struct{} - -func (er *errReader) Read(_ []byte) (int, error) { return 0, errors.New("some error") } - -type resWriterToError struct { - code int -} - -func (e *resWriterToError) Error() string { return "" } -func (e *resWriterToError) WriteTo(w http.ResponseWriter) { w.WriteHeader(e.code) } - -type fakePeerGetter struct { - peers map[types.ID]Peer -} - -func (pg *fakePeerGetter) Get(id types.ID) Peer { return pg.peers[id] } - -type fakePeer struct { - msgs []raftpb.Message - snapMsgs []snap.Message - peerURLs types.URLs - connc chan *outgoingConn - paused bool -} - -func newFakePeer() *fakePeer { - fakeURL, _ := url.Parse("http://localhost") - return &fakePeer{ - connc: make(chan *outgoingConn, 1), - peerURLs: types.URLs{*fakeURL}, - } -} - -func (pr *fakePeer) send(m raftpb.Message) { - if pr.paused { - return - } - pr.msgs = append(pr.msgs, m) -} - -func (pr *fakePeer) sendSnap(m snap.Message) { - if pr.paused { - return - } - pr.snapMsgs = append(pr.snapMsgs, m) -} - -func (pr *fakePeer) update(urls types.URLs) { pr.peerURLs = urls } -func (pr *fakePeer) attachOutgoingConn(conn *outgoingConn) { pr.connc <- conn } -func (pr *fakePeer) activeSince() time.Time { return time.Time{} } -func (pr *fakePeer) stop() {} -func (pr *fakePeer) Pause() { pr.paused = true } -func (pr *fakePeer) Resume() { pr.paused = false } diff --git a/server/etcdserver/api/rafthttp/metrics.go b/server/etcdserver/api/rafthttp/metrics.go deleted file mode 100644 index 02fff84be7c..00000000000 --- 
a/server/etcdserver/api/rafthttp/metrics.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import "github.com/prometheus/client_golang/prometheus" - -var ( - activePeers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "active_peers", - Help: "The current number of active peer connections.", - }, - []string{"Local", "Remote"}, - ) - - disconnectedPeers = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "disconnected_peers_total", - Help: "The total number of disconnected peers.", - }, - []string{"Local", "Remote"}, - ) - - sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_sent_bytes_total", - Help: "The total number of bytes sent to peers.", - }, - []string{"To"}, - ) - - receivedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_received_bytes_total", - Help: "The total number of bytes received from peers.", - }, - []string{"From"}, - ) - - sentFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_sent_failures_total", - Help: "The total number of send failures from peers.", - }, - []string{"To"}, - ) - - recvFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ 
- Namespace: "etcd", - Subsystem: "network", - Name: "peer_received_failures_total", - Help: "The total number of receive failures from peers.", - }, - []string{"From"}, - ) - - snapshotSend = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_send_success", - Help: "Total number of successful snapshot sends", - }, - []string{"To"}, - ) - - snapshotSendInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_send_inflights_total", - Help: "Total number of inflight snapshot sends", - }, - []string{"To"}, - ) - - snapshotSendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_send_failures", - Help: "Total number of snapshot send failures", - }, - []string{"To"}, - ) - - snapshotSendSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_send_total_duration_seconds", - Help: "Total latency distributions of v3 snapshot sends", - - // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2 - // highest bucket start of 0.1 sec * 2^9 == 51.2 sec - Buckets: prometheus.ExponentialBuckets(0.1, 2, 10), - }, - []string{"To"}, - ) - - snapshotReceive = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_receive_success", - Help: "Total number of successful snapshot receives", - }, - []string{"From"}, - ) - - snapshotReceiveInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_receive_inflights_total", - Help: "Total number of inflight snapshot receives", - }, - []string{"From"}, - ) - - snapshotReceiveFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_receive_failures", - Help: "Total number of snapshot receive 
failures", - }, - []string{"From"}, - ) - - snapshotReceiveSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "snapshot_receive_total_duration_seconds", - Help: "Total latency distributions of v3 snapshot receives", - - // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2 - // highest bucket start of 0.1 sec * 2^9 == 51.2 sec - Buckets: prometheus.ExponentialBuckets(0.1, 2, 10), - }, - []string{"From"}, - ) - - rttSec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_round_trip_time_seconds", - Help: "Round-Trip-Time histogram between peers", - - // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 - // highest bucket start of 0.0001 sec * 2^15 == 3.2768 sec - Buckets: prometheus.ExponentialBuckets(0.0001, 2, 16), - }, - []string{"To"}, - ) -) - -func init() { - prometheus.MustRegister(activePeers) - prometheus.MustRegister(disconnectedPeers) - prometheus.MustRegister(sentBytes) - prometheus.MustRegister(receivedBytes) - prometheus.MustRegister(sentFailures) - prometheus.MustRegister(recvFailures) - - prometheus.MustRegister(snapshotSend) - prometheus.MustRegister(snapshotSendInflights) - prometheus.MustRegister(snapshotSendFailures) - prometheus.MustRegister(snapshotSendSeconds) - prometheus.MustRegister(snapshotReceive) - prometheus.MustRegister(snapshotReceiveInflights) - prometheus.MustRegister(snapshotReceiveFailures) - prometheus.MustRegister(snapshotReceiveSeconds) - - prometheus.MustRegister(rttSec) -} diff --git a/server/etcdserver/api/rafthttp/msg_codec_test.go b/server/etcdserver/api/rafthttp/msg_codec_test.go deleted file mode 100644 index 9b14b45095e..00000000000 --- a/server/etcdserver/api/rafthttp/msg_codec_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except 
in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "bytes" - "reflect" - "testing" - - "go.etcd.io/raft/v3/raftpb" -) - -func TestMessage(t *testing.T) { - // Lower readBytesLimit to make test pass in restricted resources environment - originalLimit := readBytesLimit - readBytesLimit = 1000 - defer func() { - readBytesLimit = originalLimit - }() - tests := []struct { - msg raftpb.Message - encodeErr error - decodeErr error - }{ - { - raftpb.Message{ - Type: raftpb.MsgApp, - From: 1, - To: 2, - Term: 1, - LogTerm: 1, - Index: 3, - Entries: []raftpb.Entry{{Term: 1, Index: 4}}, - }, - nil, - nil, - }, - { - raftpb.Message{ - Type: raftpb.MsgProp, - From: 1, - To: 2, - Entries: []raftpb.Entry{ - {Data: []byte("some data")}, - {Data: []byte("some data")}, - {Data: []byte("some data")}, - }, - }, - nil, - nil, - }, - { - raftpb.Message{ - Type: raftpb.MsgProp, - From: 1, - To: 2, - Entries: []raftpb.Entry{ - {Data: bytes.Repeat([]byte("a"), int(readBytesLimit+10))}, - }, - }, - nil, - ErrExceedSizeLimit, - }, - } - for i, tt := range tests { - b := &bytes.Buffer{} - enc := &messageEncoder{w: b} - if err := enc.encode(&tt.msg); err != tt.encodeErr { - t.Errorf("#%d: encode message error expected %v, got %v", i, tt.encodeErr, err) - continue - } - dec := &messageDecoder{r: b} - m, err := dec.decode() - if err != tt.decodeErr { - t.Errorf("#%d: decode message error expected %v, got %v", i, tt.decodeErr, err) - continue - } - if err == nil { - if !reflect.DeepEqual(m, tt.msg) { - t.Errorf("#%d: message = %+v, want %+v", i, m, tt.msg) - } - } - 
} -} diff --git a/server/etcdserver/api/rafthttp/msgappv2_codec_test.go b/server/etcdserver/api/rafthttp/msgappv2_codec_test.go deleted file mode 100644 index 50c7bd25c6d..00000000000 --- a/server/etcdserver/api/rafthttp/msgappv2_codec_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "bytes" - "reflect" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/types" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3/raftpb" -) - -func TestMsgAppV2(t *testing.T) { - tests := []raftpb.Message{ - linkHeartbeatMessage, - { - Type: raftpb.MsgApp, - From: 1, - To: 2, - Term: 1, - LogTerm: 1, - Index: 0, - Entries: []raftpb.Entry{ - {Term: 1, Index: 1, Data: []byte("some data")}, - {Term: 1, Index: 2, Data: []byte("some data")}, - {Term: 1, Index: 3, Data: []byte("some data")}, - }, - }, - // consecutive MsgApp - { - Type: raftpb.MsgApp, - From: 1, - To: 2, - Term: 1, - LogTerm: 1, - Index: 3, - Entries: []raftpb.Entry{ - {Term: 1, Index: 4, Data: []byte("some data")}, - }, - }, - linkHeartbeatMessage, - // consecutive MsgApp after linkHeartbeatMessage - { - Type: raftpb.MsgApp, - From: 1, - To: 2, - Term: 1, - LogTerm: 1, - Index: 4, - Entries: []raftpb.Entry{ - {Term: 1, Index: 5, Data: []byte("some data")}, - }, - }, - // MsgApp with higher term - { - Type: raftpb.MsgApp, - From: 1, - To: 2, - Term: 3, - LogTerm: 1, - Index: 5, - 
Entries: []raftpb.Entry{ - {Term: 3, Index: 6, Data: []byte("some data")}, - }, - }, - linkHeartbeatMessage, - // consecutive MsgApp - { - Type: raftpb.MsgApp, - From: 1, - To: 2, - Term: 3, - LogTerm: 2, - Index: 6, - Entries: []raftpb.Entry{ - {Term: 3, Index: 7, Data: []byte("some data")}, - }, - }, - // consecutive empty MsgApp - { - Type: raftpb.MsgApp, - From: 1, - To: 2, - Term: 3, - LogTerm: 2, - Index: 7, - Entries: nil, - }, - linkHeartbeatMessage, - } - b := &bytes.Buffer{} - enc := newMsgAppV2Encoder(b, &stats.FollowerStats{}) - dec := newMsgAppV2Decoder(b, types.ID(2), types.ID(1)) - - for i, tt := range tests { - if err := enc.encode(&tt); err != nil { - t.Errorf("#%d: unexpected encode message error: %v", i, err) - continue - } - m, err := dec.decode() - if err != nil { - t.Errorf("#%d: unexpected decode message error: %v", i, err) - continue - } - if !reflect.DeepEqual(m, tt) { - t.Errorf("#%d: message = %+v, want %+v", i, m, tt) - } - } -} diff --git a/server/etcdserver/api/rafthttp/peer.go b/server/etcdserver/api/rafthttp/peer.go deleted file mode 100644 index 11d17cacee8..00000000000 --- a/server/etcdserver/api/rafthttp/peer.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "context" - "sync" - "time" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" - - "go.uber.org/zap" - "golang.org/x/time/rate" -) - -const ( - // ConnReadTimeout and ConnWriteTimeout are the i/o timeout set on each connection rafthttp pkg creates. - // A 5 seconds timeout is good enough for recycling bad connections. Or we have to wait for - // tcp keepalive failing to detect a bad connection, which is at minutes level. - // For long term streaming connections, rafthttp pkg sends application level linkHeartbeatMessage - // to keep the connection alive. - // For short term pipeline connections, the connection MUST be killed to avoid it being - // put back to http pkg connection pool. - DefaultConnReadTimeout = 5 * time.Second - DefaultConnWriteTimeout = 5 * time.Second - - recvBufSize = 4096 - // maxPendingProposals holds the proposals during one leader election process. - // Generally one leader election takes at most 1 sec. It should have - // 0-2 election conflicts, and each one takes 0.5 sec. - // We assume the number of concurrent proposers is smaller than 4096. - // One client blocks on its proposal for at least 1 sec, so 4096 is enough - // to hold all proposals. - maxPendingProposals = 4096 - - streamAppV2 = "streamMsgAppV2" - streamMsg = "streamMsg" - pipelineMsg = "pipeline" - sendSnap = "sendMsgSnap" -) - -var ( - ConnReadTimeout = DefaultConnReadTimeout - ConnWriteTimeout = DefaultConnWriteTimeout -) - -type Peer interface { - // send sends the message to the remote peer. The function is non-blocking - // and has no promise that the message will be received by the remote. - // When it fails to send message out, it will report the status to underlying - // raft. - send(m raftpb.Message) - - // sendSnap sends the merged snapshot message to the remote peer. 
Its behavior - // is similar to send. - sendSnap(m snap.Message) - - // update updates the urls of remote peer. - update(urls types.URLs) - - // attachOutgoingConn attaches the outgoing connection to the peer for - // stream usage. After the call, the ownership of the outgoing - // connection hands over to the peer. The peer will close the connection - // when it is no longer used. - attachOutgoingConn(conn *outgoingConn) - // activeSince returns the time that the connection with the - // peer becomes active. - activeSince() time.Time - // stop performs any necessary finalization and terminates the peer - // elegantly. - stop() -} - -// peer is the representative of a remote raft node. Local raft node sends -// messages to the remote through peer. -// Each peer has two underlying mechanisms to send out a message: stream and -// pipeline. -// A stream is a receiver initialized long-polling connection, which -// is always open to transfer messages. Besides general stream, peer also has -// a optimized stream for sending msgApp since msgApp accounts for large part -// of all messages. Only raft leader uses the optimized stream to send msgApp -// to the remote follower node. -// A pipeline is a series of http clients that send http requests to the remote. -// It is only used when the stream has not been established. -type peer struct { - lg *zap.Logger - - localID types.ID - // id of the remote raft peer node - id types.ID - - r Raft - - status *peerStatus - - picker *urlPicker - - msgAppV2Writer *streamWriter - writer *streamWriter - pipeline *pipeline - snapSender *snapshotSender // snapshot sender to send v3 snapshot messages - msgAppV2Reader *streamReader - msgAppReader *streamReader - - recvc chan raftpb.Message - propc chan raftpb.Message - - mu sync.Mutex - paused bool - - cancel context.CancelFunc // cancel pending works in go routine created by peer. 
- stopc chan struct{} -} - -func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer { - if t.Logger != nil { - t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String())) - } - defer func() { - if t.Logger != nil { - t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String())) - } - }() - - status := newPeerStatus(t.Logger, t.ID, peerID) - picker := newURLPicker(urls) - errorc := t.ErrorC - r := t.Raft - pipeline := &pipeline{ - peerID: peerID, - tr: t, - picker: picker, - status: status, - followerStats: fs, - raft: r, - errorc: errorc, - } - pipeline.start() - - p := &peer{ - lg: t.Logger, - localID: t.ID, - id: peerID, - r: r, - status: status, - picker: picker, - msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r), - writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r), - pipeline: pipeline, - snapSender: newSnapshotSender(t, picker, peerID, status), - recvc: make(chan raftpb.Message, recvBufSize), - propc: make(chan raftpb.Message, maxPendingProposals), - stopc: make(chan struct{}), - } - - ctx, cancel := context.WithCancel(context.Background()) - p.cancel = cancel - go func() { - for { - select { - case mm := <-p.recvc: - if err := r.Process(ctx, mm); err != nil { - if t.Logger != nil { - t.Logger.Warn("failed to process Raft message", zap.Error(err)) - } - } - case <-p.stopc: - return - } - } - }() - - // r.Process might block for processing proposal when there is no leader. - // Thus propc must be put into a separate routine with recvc to avoid blocking - // processing other raft messages. 
- go func() { - for { - select { - case mm := <-p.propc: - if err := r.Process(ctx, mm); err != nil { - if t.Logger != nil { - t.Logger.Warn("failed to process Raft message", zap.Error(err)) - } - } - case <-p.stopc: - return - } - } - }() - - p.msgAppV2Reader = &streamReader{ - lg: t.Logger, - peerID: peerID, - typ: streamTypeMsgAppV2, - tr: t, - picker: picker, - status: status, - recvc: p.recvc, - propc: p.propc, - rl: rate.NewLimiter(t.DialRetryFrequency, 1), - } - p.msgAppReader = &streamReader{ - lg: t.Logger, - peerID: peerID, - typ: streamTypeMessage, - tr: t, - picker: picker, - status: status, - recvc: p.recvc, - propc: p.propc, - rl: rate.NewLimiter(t.DialRetryFrequency, 1), - } - - p.msgAppV2Reader.start() - p.msgAppReader.start() - - return p -} - -func (p *peer) send(m raftpb.Message) { - p.mu.Lock() - paused := p.paused - p.mu.Unlock() - - if paused { - return - } - - writec, name := p.pick(m) - select { - case writec <- m: - default: - p.r.ReportUnreachable(m.To) - if isMsgSnap(m) { - p.r.ReportSnapshot(m.To, raft.SnapshotFailure) - } - if p.lg != nil { - p.lg.Warn( - "dropped internal Raft message since sending buffer is full", - zap.String("message-type", m.Type.String()), - zap.String("local-member-id", p.localID.String()), - zap.String("from", types.ID(m.From).String()), - zap.String("remote-peer-id", p.id.String()), - zap.String("remote-peer-name", name), - zap.Bool("remote-peer-active", p.status.isActive()), - ) - } - sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() - } -} - -func (p *peer) sendSnap(m snap.Message) { - go p.snapSender.send(m) -} - -func (p *peer) update(urls types.URLs) { - p.picker.update(urls) -} - -func (p *peer) attachOutgoingConn(conn *outgoingConn) { - var ok bool - switch conn.t { - case streamTypeMsgAppV2: - ok = p.msgAppV2Writer.attach(conn) - case streamTypeMessage: - ok = p.writer.attach(conn) - default: - if p.lg != nil { - p.lg.Panic("unknown stream type", zap.String("type", conn.t.String())) - } - } - 
if !ok { - conn.Close() - } -} - -func (p *peer) activeSince() time.Time { return p.status.activeSince() } - -// Pause pauses the peer. The peer will simply drops all incoming -// messages without returning an error. -func (p *peer) Pause() { - p.mu.Lock() - defer p.mu.Unlock() - p.paused = true - p.msgAppReader.pause() - p.msgAppV2Reader.pause() -} - -// Resume resumes a paused peer. -func (p *peer) Resume() { - p.mu.Lock() - defer p.mu.Unlock() - p.paused = false - p.msgAppReader.resume() - p.msgAppV2Reader.resume() -} - -func (p *peer) stop() { - if p.lg != nil { - p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String())) - } - - defer func() { - if p.lg != nil { - p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String())) - } - }() - - close(p.stopc) - p.cancel() - p.msgAppV2Writer.stop() - p.writer.stop() - p.pipeline.stop() - p.snapSender.stop() - p.msgAppV2Reader.stop() - p.msgAppReader.stop() -} - -// pick picks a chan for sending the given message. The picked chan and the picked chan -// string name are returned. -func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) { - var ok bool - // Considering MsgSnap may have a big size, e.g., 1G, and will block - // stream for a long time, only use one of the N pipelines to send MsgSnap. 
- if isMsgSnap(m) { - return p.pipeline.msgc, pipelineMsg - } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) { - return writec, streamAppV2 - } else if writec, ok = p.writer.writec(); ok { - return writec, streamMsg - } - return p.pipeline.msgc, pipelineMsg -} - -func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp } - -func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap } diff --git a/server/etcdserver/api/rafthttp/peer_test.go b/server/etcdserver/api/rafthttp/peer_test.go deleted file mode 100644 index d1a4f679367..00000000000 --- a/server/etcdserver/api/rafthttp/peer_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "testing" - - "go.etcd.io/raft/v3/raftpb" -) - -func TestPeerPick(t *testing.T) { - tests := []struct { - msgappWorking bool - messageWorking bool - m raftpb.Message - wpicked string - }{ - { - true, true, - raftpb.Message{Type: raftpb.MsgSnap}, - pipelineMsg, - }, - { - true, true, - raftpb.Message{Type: raftpb.MsgApp, Term: 1, LogTerm: 1}, - streamAppV2, - }, - { - true, true, - raftpb.Message{Type: raftpb.MsgProp}, - streamMsg, - }, - { - true, true, - raftpb.Message{Type: raftpb.MsgHeartbeat}, - streamMsg, - }, - { - false, true, - raftpb.Message{Type: raftpb.MsgApp, Term: 1, LogTerm: 1}, - streamMsg, - }, - { - false, false, - raftpb.Message{Type: raftpb.MsgApp, Term: 1, LogTerm: 1}, - pipelineMsg, - }, - { - false, false, - raftpb.Message{Type: raftpb.MsgProp}, - pipelineMsg, - }, - { - false, false, - raftpb.Message{Type: raftpb.MsgSnap}, - pipelineMsg, - }, - { - false, false, - raftpb.Message{Type: raftpb.MsgHeartbeat}, - pipelineMsg, - }, - } - for i, tt := range tests { - peer := &peer{ - msgAppV2Writer: &streamWriter{working: tt.msgappWorking}, - writer: &streamWriter{working: tt.messageWorking}, - pipeline: &pipeline{}, - } - _, picked := peer.pick(tt.m) - if picked != tt.wpicked { - t.Errorf("#%d: picked = %v, want %v", i, picked, tt.wpicked) - } - } -} diff --git a/server/etcdserver/api/rafthttp/pipeline_test.go b/server/etcdserver/api/rafthttp/pipeline_test.go deleted file mode 100644 index 325abb97b69..00000000000 --- a/server/etcdserver/api/rafthttp/pipeline_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "errors" - "fmt" - "io" - "net/http" - "sync" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/types" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3/raftpb" -) - -// TestPipelineSend tests that pipeline could send data using roundtripper -// and increase success count in stats. -func TestPipelineSend(t *testing.T) { - tr := &roundTripperRecorder{rec: testutil.NewRecorderStream()} - picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) - tp := &Transport{pipelineRt: tr} - p := startTestPipeline(t, tp, picker) - - p.msgc <- raftpb.Message{Type: raftpb.MsgApp} - tr.rec.Wait(1) - p.stop() - if p.followerStats.Counts.Success != 1 { - t.Errorf("success = %d, want 1", p.followerStats.Counts.Success) - } -} - -// TestPipelineKeepSendingWhenPostError tests that pipeline can keep -// sending messages if previous messages meet post error. 
-func TestPipelineKeepSendingWhenPostError(t *testing.T) { - tr := &respRoundTripper{rec: testutil.NewRecorderStream(), err: fmt.Errorf("roundtrip error")} - picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) - tp := &Transport{pipelineRt: tr} - p := startTestPipeline(t, tp, picker) - defer p.stop() - - for i := 0; i < 50; i++ { - p.msgc <- raftpb.Message{Type: raftpb.MsgApp} - } - - _, err := tr.rec.Wait(50) - if err != nil { - t.Errorf("unexpected wait error %v", err) - } -} - -func TestPipelineExceedMaximumServing(t *testing.T) { - rt := newRoundTripperBlocker() - picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) - tp := &Transport{pipelineRt: rt} - p := startTestPipeline(t, tp, picker) - defer p.stop() - - // keep the sender busy and make the buffer full - // nothing can go out as we block the sender - for i := 0; i < connPerPipeline+pipelineBufSize; i++ { - select { - case p.msgc <- raftpb.Message{}: - case <-time.After(time.Second): - t.Errorf("failed to send out message") - } - } - - // try to send a data when we are sure the buffer is full - select { - case p.msgc <- raftpb.Message{}: - t.Errorf("unexpected message sendout") - default: - } - - // unblock the senders and force them to send out the data - rt.unblock() - - // It could send new data after previous ones succeed - select { - case p.msgc <- raftpb.Message{}: - case <-time.After(time.Second): - t.Errorf("failed to send out message") - } -} - -// TestPipelineSendFailed tests that when send func meets the post error, -// it increases fail count in stats. 
-func TestPipelineSendFailed(t *testing.T) { - picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) - rt := newRespRoundTripper(0, errors.New("blah")) - rt.rec = testutil.NewRecorderStream() - tp := &Transport{pipelineRt: rt} - p := startTestPipeline(t, tp, picker) - - p.msgc <- raftpb.Message{Type: raftpb.MsgApp} - if _, err := rt.rec.Wait(1); err != nil { - t.Fatal(err) - } - - p.stop() - - if p.followerStats.Counts.Fail != 1 { - t.Errorf("fail = %d, want 1", p.followerStats.Counts.Fail) - } -} - -func TestPipelinePost(t *testing.T) { - tr := &roundTripperRecorder{rec: &testutil.RecorderBuffered{}} - picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) - tp := &Transport{ClusterID: types.ID(1), pipelineRt: tr} - p := startTestPipeline(t, tp, picker) - if err := p.post([]byte("some data")); err != nil { - t.Fatalf("unexpected post error: %v", err) - } - act, err := tr.rec.Wait(1) - if err != nil { - t.Fatal(err) - } - p.stop() - - req := act[0].Params[0].(*http.Request) - - if g := req.Method; g != "POST" { - t.Errorf("method = %s, want %s", g, "POST") - } - if g := req.URL.String(); g != "http://localhost:2380/raft" { - t.Errorf("url = %s, want %s", g, "http://localhost:2380/raft") - } - if g := req.Header.Get("Content-Type"); g != "application/protobuf" { - t.Errorf("content type = %s, want %s", g, "application/protobuf") - } - if g := req.Header.Get("X-Server-Version"); g != version.Version { - t.Errorf("version = %s, want %s", g, version.Version) - } - if g := req.Header.Get("X-Min-Cluster-Version"); g != version.MinClusterVersion { - t.Errorf("min version = %s, want %s", g, version.MinClusterVersion) - } - if g := req.Header.Get("X-Etcd-Cluster-ID"); g != "1" { - t.Errorf("cluster id = %s, want %s", g, "1") - } - b, err := io.ReadAll(req.Body) - if err != nil { - t.Fatalf("unexpected ReadAll error: %v", err) - } - if string(b) != "some data" { - t.Errorf("body = %s, want %s", b, "some data") - } -} - -func TestPipelinePostBad(t 
*testing.T) { - tests := []struct { - u string - code int - err error - }{ - // RoundTrip returns error - {"http://localhost:2380", 0, errors.New("blah")}, - // unexpected response status code - {"http://localhost:2380", http.StatusOK, nil}, - {"http://localhost:2380", http.StatusCreated, nil}, - } - for i, tt := range tests { - picker := mustNewURLPicker(t, []string{tt.u}) - tp := &Transport{pipelineRt: newRespRoundTripper(tt.code, tt.err)} - p := startTestPipeline(t, tp, picker) - err := p.post([]byte("some data")) - p.stop() - - if err == nil { - t.Errorf("#%d: err = nil, want not nil", i) - } - } -} - -func TestPipelinePostErrorc(t *testing.T) { - tests := []struct { - u string - code int - err error - }{ - {"http://localhost:2380", http.StatusForbidden, nil}, - } - for i, tt := range tests { - picker := mustNewURLPicker(t, []string{tt.u}) - tp := &Transport{pipelineRt: newRespRoundTripper(tt.code, tt.err)} - p := startTestPipeline(t, tp, picker) - p.post([]byte("some data")) - p.stop() - select { - case <-p.errorc: - default: - t.Fatalf("#%d: cannot receive from errorc", i) - } - } -} - -func TestStopBlockedPipeline(t *testing.T) { - picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) - tp := &Transport{pipelineRt: newRoundTripperBlocker()} - p := startTestPipeline(t, tp, picker) - // send many messages that most of them will be blocked in buffer - for i := 0; i < connPerPipeline*10; i++ { - p.msgc <- raftpb.Message{} - } - - done := make(chan struct{}) - go func() { - p.stop() - done <- struct{}{} - }() - select { - case <-done: - case <-time.After(time.Second): - t.Fatalf("failed to stop pipeline in 1s") - } -} - -type roundTripperBlocker struct { - unblockc chan struct{} - mu sync.Mutex - cancel map[*http.Request]chan struct{} -} - -func newRoundTripperBlocker() *roundTripperBlocker { - return &roundTripperBlocker{ - unblockc: make(chan struct{}), - cancel: make(map[*http.Request]chan struct{}), - } -} - -func (t *roundTripperBlocker) unblock() 
{ - close(t.unblockc) -} - -func (t *roundTripperBlocker) CancelRequest(req *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if c, ok := t.cancel[req]; ok { - c <- struct{}{} - delete(t.cancel, req) - } -} - -type respRoundTripper struct { - mu sync.Mutex - rec testutil.Recorder - - code int - header http.Header - err error -} - -func newRespRoundTripper(code int, err error) *respRoundTripper { - return &respRoundTripper{code: code, err: err} -} -func (t *respRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.rec != nil { - t.rec.Record(testutil.Action{Name: "req", Params: []interface{}{req}}) - } - return &http.Response{StatusCode: t.code, Header: t.header, Body: &nopReadCloser{}}, t.err -} - -type roundTripperRecorder struct { - rec testutil.Recorder -} - -func (t *roundTripperRecorder) RoundTrip(req *http.Request) (*http.Response, error) { - if t.rec != nil { - t.rec.Record(testutil.Action{Name: "req", Params: []interface{}{req}}) - } - return &http.Response{StatusCode: http.StatusNoContent, Body: &nopReadCloser{}}, nil -} - -type nopReadCloser struct{} - -func (n *nopReadCloser) Read(p []byte) (int, error) { return 0, io.EOF } -func (n *nopReadCloser) Close() error { return nil } - -func startTestPipeline(t *testing.T, tr *Transport, picker *urlPicker) *pipeline { - p := &pipeline{ - peerID: types.ID(1), - tr: tr, - picker: picker, - status: newPeerStatus(zaptest.NewLogger(t), tr.ID, types.ID(1)), - raft: &fakeRaft{}, - followerStats: &stats.FollowerStats{}, - errorc: make(chan error, 1), - } - p.start() - return p -} diff --git a/server/etcdserver/api/rafthttp/snapshot_test.go b/server/etcdserver/api/rafthttp/snapshot_test.go deleted file mode 100644 index 8f319fe52aa..00000000000 --- a/server/etcdserver/api/rafthttp/snapshot_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use 
this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "fmt" - "io" - "net/http" - "net/http/httptest" - "os" - "strings" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/raft/v3/raftpb" -) - -type strReaderCloser struct{ *strings.Reader } - -func (s strReaderCloser) Close() error { return nil } - -func TestSnapshotSend(t *testing.T) { - tests := []struct { - m raftpb.Message - rc io.ReadCloser - size int64 - - wsent bool - wfiles int - }{ - // sent and receive with no errors - { - m: raftpb.Message{Type: raftpb.MsgSnap, To: 1, Snapshot: &raftpb.Snapshot{}}, - rc: strReaderCloser{strings.NewReader("hello")}, - size: 5, - - wsent: true, - wfiles: 1, - }, - // error when reading snapshot for send - { - m: raftpb.Message{Type: raftpb.MsgSnap, To: 1, Snapshot: &raftpb.Snapshot{}}, - rc: &errReadCloser{fmt.Errorf("snapshot error")}, - size: 1, - - wsent: false, - wfiles: 0, - }, - // sends less than the given snapshot length - { - m: raftpb.Message{Type: raftpb.MsgSnap, To: 1, Snapshot: &raftpb.Snapshot{}}, - rc: strReaderCloser{strings.NewReader("hello")}, - size: 10000, - - wsent: false, - wfiles: 0, - }, - // sends less than actual snapshot length - { - m: raftpb.Message{Type: raftpb.MsgSnap, To: 1, Snapshot: &raftpb.Snapshot{}}, - rc: strReaderCloser{strings.NewReader("hello")}, - size: 1, - - wsent: false, - wfiles: 0, - }, - } - - for i, tt := range tests { - sent, files := testSnapshotSend(t, 
snap.NewMessage(tt.m, tt.rc, tt.size)) - if tt.wsent != sent { - t.Errorf("#%d: snapshot expected %v, got %v", i, tt.wsent, sent) - } - if tt.wfiles != len(files) { - t.Fatalf("#%d: expected %d files, got %d files", i, tt.wfiles, len(files)) - } - } -} - -func testSnapshotSend(t *testing.T, sm *snap.Message) (bool, []os.DirEntry) { - d := t.TempDir() - - r := &fakeRaft{} - tr := &Transport{pipelineRt: &http.Transport{}, ClusterID: types.ID(1), Raft: r} - ch := make(chan struct{}, 1) - h := &syncHandler{newSnapshotHandler(tr, r, snap.New(zaptest.NewLogger(t), d), types.ID(1)), ch} - srv := httptest.NewServer(h) - defer srv.Close() - - picker := mustNewURLPicker(t, []string{srv.URL}) - snapsend := newSnapshotSender(tr, picker, types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1))) - defer snapsend.stop() - - snapsend.send(*sm) - - sent := false - select { - case <-time.After(time.Second): - t.Fatalf("timed out sending snapshot") - case sent = <-sm.CloseNotify(): - } - - // wait for handler to finish accepting snapshot - <-ch - - files, rerr := os.ReadDir(d) - if rerr != nil { - t.Fatal(rerr) - } - return sent, files -} - -type errReadCloser struct{ err error } - -func (s *errReadCloser) Read(p []byte) (int, error) { return 0, s.err } -func (s *errReadCloser) Close() error { return s.err } - -type syncHandler struct { - h http.Handler - ch chan<- struct{} -} - -func (sh *syncHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - sh.h.ServeHTTP(w, r) - sh.ch <- struct{}{} -} diff --git a/server/etcdserver/api/rafthttp/stream_test.go b/server/etcdserver/api/rafthttp/stream_test.go deleted file mode 100644 index 0b0c6f2fd53..00000000000 --- a/server/etcdserver/api/rafthttp/stream_test.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "context" - "errors" - "io" - "net/http" - "net/http/httptest" - "reflect" - "sync" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/types" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3/raftpb" - - "github.com/coreos/go-semver/semver" - "golang.org/x/time/rate" -) - -// TestStreamWriterAttachOutgoingConn tests that outgoingConn can be attached -// to streamWriter. After that, streamWriter can use it to send messages -// continuously, and closes it when stopped. 
-func TestStreamWriterAttachOutgoingConn(t *testing.T) { - sw := startStreamWriter(zaptest.NewLogger(t), types.ID(0), types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{}) - // the expected initial state of streamWriter is not working - if _, ok := sw.writec(); ok { - t.Errorf("initial working status = %v, want false", ok) - } - - // repeat tests to ensure streamWriter can use last attached connection - var wfc *fakeWriteFlushCloser - for i := 0; i < 3; i++ { - prevwfc := wfc - wfc = newFakeWriteFlushCloser(nil) - sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc}) - - // previous attached connection should be closed - if prevwfc != nil { - select { - case <-prevwfc.closed: - case <-time.After(time.Second): - t.Errorf("#%d: close of previous connection timed out", i) - } - } - - // if prevwfc != nil, the new msgc is ready since prevwfc has closed - // if prevwfc == nil, the first connection may be pending, but the first - // msgc is already available since it's set on calling startStreamwriter - msgc, _ := sw.writec() - msgc <- raftpb.Message{} - - select { - case <-wfc.writec: - case <-time.After(time.Second): - t.Errorf("#%d: failed to write to the underlying connection", i) - } - // write chan is still available - if _, ok := sw.writec(); !ok { - t.Errorf("#%d: working status = %v, want true", i, ok) - } - } - - sw.stop() - // write chan is unavailable since the writer is stopped. - if _, ok := sw.writec(); ok { - t.Errorf("working status after stop = %v, want false", ok) - } - if !wfc.Closed() { - t.Errorf("failed to close the underlying connection") - } -} - -// TestStreamWriterAttachBadOutgoingConn tests that streamWriter with bad -// outgoingConn will close the outgoingConn and fall back to non-working status. 
-func TestStreamWriterAttachBadOutgoingConn(t *testing.T) { - sw := startStreamWriter(zaptest.NewLogger(t), types.ID(0), types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{}) - defer sw.stop() - wfc := newFakeWriteFlushCloser(errors.New("blah")) - sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc}) - - sw.msgc <- raftpb.Message{} - select { - case <-wfc.closed: - case <-time.After(time.Second): - t.Errorf("failed to close the underlying connection in time") - } - // no longer working - if _, ok := sw.writec(); ok { - t.Errorf("working = %v, want false", ok) - } -} - -func TestStreamReaderDialRequest(t *testing.T) { - for i, tt := range []streamType{streamTypeMessage, streamTypeMsgAppV2} { - tr := &roundTripperRecorder{rec: &testutil.RecorderBuffered{}} - sr := &streamReader{ - peerID: types.ID(2), - tr: &Transport{streamRt: tr, ClusterID: types.ID(1), ID: types.ID(1)}, - picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), - ctx: context.Background(), - } - sr.dial(tt) - - act, err := tr.rec.Wait(1) - if err != nil { - t.Fatal(err) - } - req := act[0].Params[0].(*http.Request) - - wurl := "http://localhost:2380" + tt.endpoint(zaptest.NewLogger(t)) + "/1" - if req.URL.String() != wurl { - t.Errorf("#%d: url = %s, want %s", i, req.URL.String(), wurl) - } - if w := "GET"; req.Method != w { - t.Errorf("#%d: method = %s, want %s", i, req.Method, w) - } - if g := req.Header.Get("X-Etcd-Cluster-ID"); g != "1" { - t.Errorf("#%d: header X-Etcd-Cluster-ID = %s, want 1", i, g) - } - if g := req.Header.Get("X-Raft-To"); g != "2" { - t.Errorf("#%d: header X-Raft-To = %s, want 2", i, g) - } - } -} - -// TestStreamReaderDialResult tests the result of the dial func call meets the -// HTTP response received. 
-func TestStreamReaderDialResult(t *testing.T) { - tests := []struct { - code int - err error - wok bool - whalt bool - }{ - {0, errors.New("blah"), false, false}, - {http.StatusOK, nil, true, false}, - {http.StatusMethodNotAllowed, nil, false, false}, - {http.StatusNotFound, nil, false, false}, - {http.StatusPreconditionFailed, nil, false, false}, - {http.StatusGone, nil, false, true}, - } - for i, tt := range tests { - h := http.Header{} - h.Add("X-Server-Version", version.Version) - tr := &respRoundTripper{ - code: tt.code, - header: h, - err: tt.err, - } - sr := &streamReader{ - peerID: types.ID(2), - tr: &Transport{streamRt: tr, ClusterID: types.ID(1)}, - picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), - errorc: make(chan error, 1), - ctx: context.Background(), - } - - _, err := sr.dial(streamTypeMessage) - if ok := err == nil; ok != tt.wok { - t.Errorf("#%d: ok = %v, want %v", i, ok, tt.wok) - } - if halt := len(sr.errorc) > 0; halt != tt.whalt { - t.Errorf("#%d: halt = %v, want %v", i, halt, tt.whalt) - } - } -} - -// TestStreamReaderStopOnDial tests a stream reader closes the connection on stop. 
-func TestStreamReaderStopOnDial(t *testing.T) { - testutil.RegisterLeakDetection(t) - h := http.Header{} - h.Add("X-Server-Version", version.Version) - tr := &respWaitRoundTripper{rrt: &respRoundTripper{code: http.StatusOK, header: h}} - sr := &streamReader{ - peerID: types.ID(2), - tr: &Transport{streamRt: tr, ClusterID: types.ID(1)}, - picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), - errorc: make(chan error, 1), - typ: streamTypeMessage, - status: newPeerStatus(zaptest.NewLogger(t), types.ID(1), types.ID(2)), - rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), - } - tr.onResp = func() { - // stop() waits for the run() goroutine to exit, but that exit - // needs a response from RoundTrip() first; use goroutine - go sr.stop() - // wait so that stop() is blocked on run() exiting - time.Sleep(10 * time.Millisecond) - // sr.run() completes dialing then begins decoding while stopped - } - sr.start() - select { - case <-sr.done: - case <-time.After(time.Second): - t.Fatal("streamReader did not stop in time") - } -} - -type respWaitRoundTripper struct { - rrt *respRoundTripper - onResp func() -} - -func (t *respWaitRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - resp, err := t.rrt.RoundTrip(req) - resp.Body = newWaitReadCloser() - t.onResp() - return resp, err -} - -type waitReadCloser struct{ closec chan struct{} } - -func newWaitReadCloser() *waitReadCloser { return &waitReadCloser{make(chan struct{})} } -func (wrc *waitReadCloser) Read(p []byte) (int, error) { - <-wrc.closec - return 0, io.EOF -} -func (wrc *waitReadCloser) Close() error { - close(wrc.closec) - return nil -} - -// TestStreamReaderDialDetectUnsupport tests that dial func could find -// out that the stream type is not supported by the remote. 
-func TestStreamReaderDialDetectUnsupport(t *testing.T) { - for i, typ := range []streamType{streamTypeMsgAppV2, streamTypeMessage} { - // the response from etcd 2.0 - tr := &respRoundTripper{ - code: http.StatusNotFound, - header: http.Header{}, - } - sr := &streamReader{ - peerID: types.ID(2), - tr: &Transport{streamRt: tr, ClusterID: types.ID(1)}, - picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), - ctx: context.Background(), - } - - _, err := sr.dial(typ) - if err != errUnsupportedStreamType { - t.Errorf("#%d: error = %v, want %v", i, err, errUnsupportedStreamType) - } - } -} - -// TestStream tests that streamReader and streamWriter can build stream to -// send messages between each other. -func TestStream(t *testing.T) { - recvc := make(chan raftpb.Message, streamBufSize) - propc := make(chan raftpb.Message, streamBufSize) - msgapp := raftpb.Message{ - Type: raftpb.MsgApp, - From: 2, - To: 1, - Term: 1, - LogTerm: 1, - Index: 3, - Entries: []raftpb.Entry{{Term: 1, Index: 4}}, - } - - tests := []struct { - t streamType - m raftpb.Message - wc chan raftpb.Message - }{ - { - streamTypeMessage, - raftpb.Message{Type: raftpb.MsgProp, To: 2}, - propc, - }, - { - streamTypeMessage, - msgapp, - recvc, - }, - { - streamTypeMsgAppV2, - msgapp, - recvc, - }, - } - for i, tt := range tests { - h := &fakeStreamHandler{t: tt.t} - srv := httptest.NewServer(h) - defer srv.Close() - - sw := startStreamWriter(zaptest.NewLogger(t), types.ID(0), types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{}) - defer sw.stop() - h.sw = sw - - picker := mustNewURLPicker(t, []string{srv.URL}) - tr := &Transport{streamRt: &http.Transport{}, ClusterID: types.ID(1)} - - sr := &streamReader{ - peerID: types.ID(2), - typ: tt.t, - tr: tr, - picker: picker, - status: newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(2)), - recvc: recvc, - propc: propc, - rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), - } - 
sr.start() - - // wait for stream to work - var writec chan<- raftpb.Message - for { - var ok bool - if writec, ok = sw.writec(); ok { - break - } - time.Sleep(time.Millisecond) - } - - writec <- tt.m - var m raftpb.Message - select { - case m = <-tt.wc: - case <-time.After(time.Second): - t.Fatalf("#%d: failed to receive message from the channel", i) - } - if !reflect.DeepEqual(m, tt.m) { - t.Fatalf("#%d: message = %+v, want %+v", i, m, tt.m) - } - - sr.stop() - } -} - -func TestCheckStreamSupport(t *testing.T) { - tests := []struct { - v *semver.Version - t streamType - w bool - }{ - // support - { - semver.Must(semver.NewVersion("2.1.0")), - streamTypeMsgAppV2, - true, - }, - // ignore patch - { - semver.Must(semver.NewVersion("2.1.9")), - streamTypeMsgAppV2, - true, - }, - // ignore prerelease - { - semver.Must(semver.NewVersion("2.1.0-alpha")), - streamTypeMsgAppV2, - true, - }, - } - for i, tt := range tests { - if g := checkStreamSupport(tt.v, tt.t); g != tt.w { - t.Errorf("#%d: check = %v, want %v", i, g, tt.w) - } - } -} - -func TestStreamSupportCurrentVersion(t *testing.T) { - cv := version.Cluster(version.Version) - cv = cv + ".0" - if _, ok := supportedStream[cv]; !ok { - t.Errorf("Current version does not have stream support.") - } -} - -type fakeWriteFlushCloser struct { - mu sync.Mutex - err error - written int - closed chan struct{} - writec chan struct{} -} - -func newFakeWriteFlushCloser(err error) *fakeWriteFlushCloser { - return &fakeWriteFlushCloser{ - err: err, - closed: make(chan struct{}), - writec: make(chan struct{}, 1), - } -} - -func (wfc *fakeWriteFlushCloser) Write(p []byte) (n int, err error) { - wfc.mu.Lock() - defer wfc.mu.Unlock() - select { - case wfc.writec <- struct{}{}: - default: - } - wfc.written += len(p) - return len(p), wfc.err -} - -func (wfc *fakeWriteFlushCloser) Flush() {} - -func (wfc *fakeWriteFlushCloser) Close() error { - close(wfc.closed) - return wfc.err -} - -func (wfc *fakeWriteFlushCloser) Written() int { - 
wfc.mu.Lock() - defer wfc.mu.Unlock() - return wfc.written -} - -func (wfc *fakeWriteFlushCloser) Closed() bool { - select { - case <-wfc.closed: - return true - default: - return false - } -} - -type fakeStreamHandler struct { - t streamType - sw *streamWriter -} - -func (h *fakeStreamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Add("X-Server-Version", version.Version) - w.(http.Flusher).Flush() - c := newCloseNotifier() - h.sw.attach(&outgoingConn{ - t: h.t, - Writer: w, - Flusher: w.(http.Flusher), - Closer: c, - }) - <-c.closeNotify() -} diff --git a/server/etcdserver/api/rafthttp/transport.go b/server/etcdserver/api/rafthttp/transport.go deleted file mode 100644 index f4af5f3c2a8..00000000000 --- a/server/etcdserver/api/rafthttp/transport.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "context" - "net/http" - "sync" - "time" - - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" - - "github.com/xiang90/probing" - "go.uber.org/zap" - "golang.org/x/time/rate" -) - -type Raft interface { - Process(ctx context.Context, m raftpb.Message) error - IsIDRemoved(id uint64) bool - ReportUnreachable(id uint64) - ReportSnapshot(id uint64, status raft.SnapshotStatus) -} - -type Transporter interface { - // Start starts the given Transporter. - // Start MUST be called before calling other functions in the interface. - Start() error - // Handler returns the HTTP handler of the transporter. - // A transporter HTTP handler handles the HTTP requests - // from remote peers. - // The handler MUST be used to handle RaftPrefix(/raft) - // endpoint. - Handler() http.Handler - // Send sends out the given messages to the remote peers. - // Each message has a To field, which is an id that maps - // to an existing peer in the transport. - // If the id cannot be found in the transport, the message - // will be ignored. - Send(m []raftpb.Message) - // SendSnapshot sends out the given snapshot message to a remote peer. - // The behavior of SendSnapshot is similar to Send. - SendSnapshot(m snap.Message) - // AddRemote adds a remote with given peer urls into the transport. - // A remote helps newly joined member to catch up the progress of cluster, - // and will not be used after that. - // It is the caller's responsibility to ensure the urls are all valid, - // or it panics. - AddRemote(id types.ID, urls []string) - // AddPeer adds a peer with given peer urls into the transport. - // It is the caller's responsibility to ensure the urls are all valid, - // or it panics. - // Peer urls are used to connect to the remote peer. 
- AddPeer(id types.ID, urls []string) - // RemovePeer removes the peer with given id. - RemovePeer(id types.ID) - // RemoveAllPeers removes all the existing peers in the transport. - RemoveAllPeers() - // UpdatePeer updates the peer urls of the peer with the given id. - // It is the caller's responsibility to ensure the urls are all valid, - // or it panics. - UpdatePeer(id types.ID, urls []string) - // ActiveSince returns the time that the connection with the peer - // of the given id becomes active. - // If the connection is active since peer was added, it returns the adding time. - // If the connection is currently inactive, it returns zero time. - ActiveSince(id types.ID) time.Time - // ActivePeers returns the number of active peers. - ActivePeers() int - // Stop closes the connections and stops the transporter. - Stop() -} - -// Transport implements Transporter interface. It provides the functionality -// to send raft messages to peers, and receive raft messages from peers. -// User should call Handler method to get a handler to serve requests -// received from peerURLs. -// User needs to call Start before calling other functions, and call -// Stop when the Transport is no longer used. 
-type Transport struct { - Logger *zap.Logger - - DialTimeout time.Duration // maximum duration before timing out dial of the request - // DialRetryFrequency defines the frequency of streamReader dial retrial attempts; - // a distinct rate limiter is created per every peer (default value: 10 events/sec) - DialRetryFrequency rate.Limit - - TLSInfo transport.TLSInfo // TLS information used when creating connection - - ID types.ID // local member ID - URLs types.URLs // local peer URLs - ClusterID types.ID // raft cluster ID for request validation - Raft Raft // raft state machine, to which the Transport forwards received messages and reports status - Snapshotter *snap.Snapshotter - ServerStats *stats.ServerStats // used to record general transportation statistics - // used to record transportation statistics with followers when - // performing as leader in raft protocol - LeaderStats *stats.LeaderStats - // ErrorC is used to report detected critical errors, e.g., - // the member has been permanently removed from the cluster - // When an error is received from ErrorC, user should stop raft state - // machine and thus stop the Transport. 
- ErrorC chan error - - streamRt http.RoundTripper // roundTripper used by streams - pipelineRt http.RoundTripper // roundTripper used by pipelines - - mu sync.RWMutex // protect the remote and peer map - remotes map[types.ID]*remote // remotes map that helps newly joined member to catch up - peers map[types.ID]Peer // peers map - - pipelineProber probing.Prober - streamProber probing.Prober -} - -func (t *Transport) Start() error { - var err error - t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout) - if err != nil { - return err - } - t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout) - if err != nil { - return err - } - t.remotes = make(map[types.ID]*remote) - t.peers = make(map[types.ID]Peer) - t.pipelineProber = probing.NewProber(t.pipelineRt) - t.streamProber = probing.NewProber(t.streamRt) - - // If client didn't provide dial retry frequency, use the default - // (100ms backoff between attempts to create a new stream), - // so it doesn't bring too much overhead when retry. 
- if t.DialRetryFrequency == 0 { - t.DialRetryFrequency = rate.Every(100 * time.Millisecond) - } - return nil -} - -func (t *Transport) Handler() http.Handler { - pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID) - streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID) - snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID) - mux := http.NewServeMux() - mux.Handle(RaftPrefix, pipelineHandler) - mux.Handle(RaftStreamPrefix+"/", streamHandler) - mux.Handle(RaftSnapshotPrefix, snapHandler) - mux.Handle(ProbingPrefix, probing.NewHandler()) - return mux -} - -func (t *Transport) Get(id types.ID) Peer { - t.mu.RLock() - defer t.mu.RUnlock() - return t.peers[id] -} - -func (t *Transport) Send(msgs []raftpb.Message) { - for _, m := range msgs { - if m.To == 0 { - // ignore intentionally dropped message - continue - } - to := types.ID(m.To) - - t.mu.RLock() - p, pok := t.peers[to] - g, rok := t.remotes[to] - t.mu.RUnlock() - - if pok { - if m.Type == raftpb.MsgApp { - t.ServerStats.SendAppendReq(m.Size()) - } - p.send(m) - continue - } - - if rok { - g.send(m) - continue - } - - if t.Logger != nil { - t.Logger.Debug( - "ignored message send request; unknown remote peer target", - zap.String("type", m.Type.String()), - zap.String("unknown-target-peer-id", to.String()), - ) - } - } -} - -func (t *Transport) Stop() { - t.mu.Lock() - defer t.mu.Unlock() - for _, r := range t.remotes { - r.stop() - } - for _, p := range t.peers { - p.stop() - } - t.pipelineProber.RemoveAll() - t.streamProber.RemoveAll() - if tr, ok := t.streamRt.(*http.Transport); ok { - tr.CloseIdleConnections() - } - if tr, ok := t.pipelineRt.(*http.Transport); ok { - tr.CloseIdleConnections() - } - t.peers = nil - t.remotes = nil -} - -// CutPeer drops messages to the specified peer. 
-func (t *Transport) CutPeer(id types.ID) { - t.mu.RLock() - p, pok := t.peers[id] - g, gok := t.remotes[id] - t.mu.RUnlock() - - if pok { - p.(Pausable).Pause() - } - if gok { - g.Pause() - } -} - -// MendPeer recovers the message dropping behavior of the given peer. -func (t *Transport) MendPeer(id types.ID) { - t.mu.RLock() - p, pok := t.peers[id] - g, gok := t.remotes[id] - t.mu.RUnlock() - - if pok { - p.(Pausable).Resume() - } - if gok { - g.Resume() - } -} - -func (t *Transport) AddRemote(id types.ID, us []string) { - t.mu.Lock() - defer t.mu.Unlock() - if t.remotes == nil { - // there's no clean way to shutdown the golang http server - // (see: https://github.com/golang/go/issues/4674) before - // stopping the transport; ignore any new connections. - return - } - if _, ok := t.peers[id]; ok { - return - } - if _, ok := t.remotes[id]; ok { - return - } - urls, err := types.NewURLs(us) - if err != nil { - if t.Logger != nil { - t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) - } - } - t.remotes[id] = startRemote(t, urls, id) - - if t.Logger != nil { - t.Logger.Info( - "added new remote peer", - zap.String("local-member-id", t.ID.String()), - zap.String("remote-peer-id", id.String()), - zap.Strings("remote-peer-urls", us), - ) - } -} - -func (t *Transport) AddPeer(id types.ID, us []string) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.peers == nil { - panic("transport stopped") - } - if _, ok := t.peers[id]; ok { - return - } - urls, err := types.NewURLs(us) - if err != nil { - if t.Logger != nil { - t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) - } - } - fs := t.LeaderStats.Follower(id.String()) - t.peers[id] = startPeer(t, urls, id, fs) - addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec) - addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec) - - if t.Logger != nil { - t.Logger.Info( - "added remote peer", - 
zap.String("local-member-id", t.ID.String()), - zap.String("remote-peer-id", id.String()), - zap.Strings("remote-peer-urls", us), - ) - } -} - -func (t *Transport) RemovePeer(id types.ID) { - t.mu.Lock() - defer t.mu.Unlock() - t.removePeer(id) -} - -func (t *Transport) RemoveAllPeers() { - t.mu.Lock() - defer t.mu.Unlock() - for id := range t.peers { - t.removePeer(id) - } -} - -// the caller of this function must have the peers mutex. -func (t *Transport) removePeer(id types.ID) { - // etcd may remove a member again on startup due to WAL files replaying. - peer, ok := t.peers[id] - if ok { - peer.stop() - delete(t.peers, id) - delete(t.LeaderStats.Followers, id.String()) - t.pipelineProber.Remove(id.String()) - t.streamProber.Remove(id.String()) - } - - if t.Logger != nil { - if ok { - t.Logger.Info( - "removed remote peer", - zap.String("local-member-id", t.ID.String()), - zap.String("removed-remote-peer-id", id.String()), - ) - } else { - t.Logger.Warn( - "skipped removing already removed peer", - zap.String("local-member-id", t.ID.String()), - zap.String("removed-remote-peer-id", id.String()), - ) - } - } -} - -func (t *Transport) UpdatePeer(id types.ID, us []string) { - t.mu.Lock() - defer t.mu.Unlock() - // TODO: return error or just panic? 
- if _, ok := t.peers[id]; !ok { - return - } - urls, err := types.NewURLs(us) - if err != nil { - if t.Logger != nil { - t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) - } - } - t.peers[id].update(urls) - - t.pipelineProber.Remove(id.String()) - addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec) - t.streamProber.Remove(id.String()) - addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec) - - if t.Logger != nil { - t.Logger.Info( - "updated remote peer", - zap.String("local-member-id", t.ID.String()), - zap.String("updated-remote-peer-id", id.String()), - zap.Strings("updated-remote-peer-urls", us), - ) - } -} - -func (t *Transport) ActiveSince(id types.ID) time.Time { - t.mu.RLock() - defer t.mu.RUnlock() - if p, ok := t.peers[id]; ok { - return p.activeSince() - } - return time.Time{} -} - -func (t *Transport) SendSnapshot(m snap.Message) { - t.mu.Lock() - defer t.mu.Unlock() - p := t.peers[types.ID(m.To)] - if p == nil { - m.CloseWithError(errMemberNotFound) - return - } - p.sendSnap(m) -} - -// Pausable is a testing interface for pausing transport traffic. -type Pausable interface { - Pause() - Resume() -} - -func (t *Transport) Pause() { - t.mu.RLock() - defer t.mu.RUnlock() - for _, p := range t.peers { - p.(Pausable).Pause() - } -} - -func (t *Transport) Resume() { - t.mu.RLock() - defer t.mu.RUnlock() - for _, p := range t.peers { - p.(Pausable).Resume() - } -} - -// ActivePeers returns a channel that closes when an initial -// peer connection has been established. Use this to wait until the -// first peer connection becomes active. 
-func (t *Transport) ActivePeers() (cnt int) { - t.mu.RLock() - defer t.mu.RUnlock() - for _, p := range t.peers { - if !p.activeSince().IsZero() { - cnt++ - } - } - return cnt -} diff --git a/server/etcdserver/api/rafthttp/transport_bench_test.go b/server/etcdserver/api/rafthttp/transport_bench_test.go deleted file mode 100644 index 646fa8ee220..00000000000 --- a/server/etcdserver/api/rafthttp/transport_bench_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "context" - "net/http/httptest" - "sync" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/types" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -func BenchmarkSendingMsgApp(b *testing.B) { - // member 1 - tr := &Transport{ - ID: types.ID(1), - ClusterID: types.ID(1), - Raft: &fakeRaft{}, - ServerStats: newServerStats(), - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(b), "1"), - } - tr.Start() - srv := httptest.NewServer(tr.Handler()) - defer srv.Close() - - // member 2 - r := &countRaft{} - tr2 := &Transport{ - ID: types.ID(2), - ClusterID: types.ID(1), - Raft: r, - ServerStats: newServerStats(), - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(b), "2"), - } - tr2.Start() - srv2 := httptest.NewServer(tr2.Handler()) - defer srv2.Close() - - tr.AddPeer(types.ID(2), []string{srv2.URL}) - defer tr.Stop() - tr2.AddPeer(types.ID(1), []string{srv.URL}) - defer tr2.Stop() - if !waitStreamWorking(tr.Get(types.ID(2)).(*peer)) { - b.Fatalf("stream from 1 to 2 is not in work as expected") - } - - b.ReportAllocs() - b.SetBytes(64) - - b.ResetTimer() - data := make([]byte, 64) - for i := 0; i < b.N; i++ { - tr.Send([]raftpb.Message{ - { - Type: raftpb.MsgApp, - From: 1, - To: 2, - Index: uint64(i), - Entries: []raftpb.Entry{ - { - Index: uint64(i + 1), - Data: data, - }, - }, - }, - }) - } - // wait until all messages are received by the target raft - for r.count() != b.N { - time.Sleep(time.Millisecond) - } - b.StopTimer() -} - -type countRaft struct { - mu sync.Mutex - cnt int -} - -func (r *countRaft) Process(ctx context.Context, m raftpb.Message) error { - r.mu.Lock() - defer r.mu.Unlock() - r.cnt++ - return nil -} - -func (r *countRaft) IsIDRemoved(id uint64) bool { return false } - -func (r *countRaft) ReportUnreachable(id uint64) {} - -func (r *countRaft) ReportSnapshot(id uint64, status raft.SnapshotStatus) {} - -func (r 
*countRaft) count() int { - r.mu.Lock() - defer r.mu.Unlock() - return r.cnt -} diff --git a/server/etcdserver/api/rafthttp/transport_test.go b/server/etcdserver/api/rafthttp/transport_test.go deleted file mode 100644 index 4748279369c..00000000000 --- a/server/etcdserver/api/rafthttp/transport_test.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "net/http" - "reflect" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/types" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/raft/v3/raftpb" - - "github.com/xiang90/probing" -) - -// TestTransportSend tests that transport can send messages using correct -// underlying peer, and drop local or unknown-target messages. 
-func TestTransportSend(t *testing.T) { - peer1 := newFakePeer() - peer2 := newFakePeer() - tr := &Transport{ - ServerStats: stats.NewServerStats("", ""), - peers: map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2}, - } - wmsgsIgnored := []raftpb.Message{ - // bad local message - {Type: raftpb.MsgBeat}, - // bad remote message - {Type: raftpb.MsgProp, To: 3}, - } - wmsgsTo1 := []raftpb.Message{ - // good message - {Type: raftpb.MsgProp, To: 1}, - {Type: raftpb.MsgApp, To: 1}, - } - wmsgsTo2 := []raftpb.Message{ - // good message - {Type: raftpb.MsgProp, To: 2}, - {Type: raftpb.MsgApp, To: 2}, - } - tr.Send(wmsgsIgnored) - tr.Send(wmsgsTo1) - tr.Send(wmsgsTo2) - - if !reflect.DeepEqual(peer1.msgs, wmsgsTo1) { - t.Errorf("msgs to peer 1 = %+v, want %+v", peer1.msgs, wmsgsTo1) - } - if !reflect.DeepEqual(peer2.msgs, wmsgsTo2) { - t.Errorf("msgs to peer 2 = %+v, want %+v", peer2.msgs, wmsgsTo2) - } -} - -func TestTransportCutMend(t *testing.T) { - peer1 := newFakePeer() - peer2 := newFakePeer() - tr := &Transport{ - ServerStats: stats.NewServerStats("", ""), - peers: map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2}, - } - - tr.CutPeer(types.ID(1)) - - wmsgsTo := []raftpb.Message{ - // good message - {Type: raftpb.MsgProp, To: 1}, - {Type: raftpb.MsgApp, To: 1}, - } - - tr.Send(wmsgsTo) - if len(peer1.msgs) > 0 { - t.Fatalf("msgs expected to be ignored, got %+v", peer1.msgs) - } - - tr.MendPeer(types.ID(1)) - - tr.Send(wmsgsTo) - if !reflect.DeepEqual(peer1.msgs, wmsgsTo) { - t.Errorf("msgs to peer 1 = %+v, want %+v", peer1.msgs, wmsgsTo) - } -} - -func TestTransportAdd(t *testing.T) { - ls := stats.NewLeaderStats(zaptest.NewLogger(t), "") - tr := &Transport{ - LeaderStats: ls, - streamRt: &roundTripperRecorder{}, - peers: make(map[types.ID]Peer), - pipelineProber: probing.NewProber(nil), - streamProber: probing.NewProber(nil), - } - tr.AddPeer(1, []string{"http://localhost:2380"}) - - if _, ok := ls.Followers["1"]; !ok { - t.Errorf("FollowerStats[1] is 
nil, want exists") - } - s, ok := tr.peers[types.ID(1)] - if !ok { - tr.Stop() - t.Fatalf("senders[1] is nil, want exists") - } - - // duplicate AddPeer is ignored - tr.AddPeer(1, []string{"http://localhost:2380"}) - ns := tr.peers[types.ID(1)] - if s != ns { - t.Errorf("sender = %v, want %v", ns, s) - } - - tr.Stop() -} - -func TestTransportRemove(t *testing.T) { - tr := &Transport{ - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), ""), - streamRt: &roundTripperRecorder{}, - peers: make(map[types.ID]Peer), - pipelineProber: probing.NewProber(nil), - streamProber: probing.NewProber(nil), - } - tr.AddPeer(1, []string{"http://localhost:2380"}) - tr.RemovePeer(types.ID(1)) - defer tr.Stop() - - if _, ok := tr.peers[types.ID(1)]; ok { - t.Fatalf("senders[1] exists, want removed") - } -} - -func TestTransportRemoveIsIdempotent(t *testing.T) { - tr := &Transport{ - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), ""), - streamRt: &roundTripperRecorder{}, - peers: make(map[types.ID]Peer), - pipelineProber: probing.NewProber(nil), - streamProber: probing.NewProber(nil), - } - - tr.AddPeer(1, []string{"http://localhost:2380"}) - tr.RemovePeer(types.ID(1)) - tr.RemovePeer(types.ID(1)) - defer tr.Stop() - - if _, ok := tr.peers[types.ID(1)]; ok { - t.Fatalf("senders[1] exists, want removed") - } -} - -func TestTransportUpdate(t *testing.T) { - peer := newFakePeer() - tr := &Transport{ - peers: map[types.ID]Peer{types.ID(1): peer}, - pipelineProber: probing.NewProber(nil), - streamProber: probing.NewProber(nil), - } - u := "http://localhost:2380" - tr.UpdatePeer(types.ID(1), []string{u}) - wurls := types.URLs(testutil.MustNewURLs(t, []string{"http://localhost:2380"})) - if !reflect.DeepEqual(peer.peerURLs, wurls) { - t.Errorf("urls = %+v, want %+v", peer.peerURLs, wurls) - } -} - -func TestTransportErrorc(t *testing.T) { - errorc := make(chan error, 1) - tr := &Transport{ - Raft: &fakeRaft{}, - LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), ""), - 
ErrorC: errorc, - streamRt: newRespRoundTripper(http.StatusForbidden, nil), - pipelineRt: newRespRoundTripper(http.StatusForbidden, nil), - peers: make(map[types.ID]Peer), - pipelineProber: probing.NewProber(nil), - streamProber: probing.NewProber(nil), - } - tr.AddPeer(1, []string{"http://localhost:2380"}) - defer tr.Stop() - - select { - case <-errorc: - t.Fatalf("received unexpected from errorc") - case <-time.After(10 * time.Millisecond): - } - tr.peers[1].send(raftpb.Message{}) - - select { - case <-errorc: - case <-time.After(1 * time.Second): - t.Fatalf("cannot receive error from errorc") - } -} diff --git a/server/etcdserver/api/rafthttp/urlpick_test.go b/server/etcdserver/api/rafthttp/urlpick_test.go deleted file mode 100644 index 6817d0a1468..00000000000 --- a/server/etcdserver/api/rafthttp/urlpick_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "net/url" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -// TestURLPickerPickTwice tests that pick returns a possible url, -// and always returns the same one. 
-func TestURLPickerPickTwice(t *testing.T) { - picker := mustNewURLPicker(t, []string{"http://127.0.0.1:2380", "http://127.0.0.1:7001"}) - - u := picker.pick() - urlmap := map[url.URL]bool{ - {Scheme: "http", Host: "127.0.0.1:2380"}: true, - {Scheme: "http", Host: "127.0.0.1:7001"}: true, - } - if !urlmap[u] { - t.Errorf("url picked = %+v, want a possible url in %+v", u, urlmap) - } - - // pick out the same url when calling pick again - uu := picker.pick() - if u != uu { - t.Errorf("url picked = %+v, want %+v", uu, u) - } -} - -func TestURLPickerUpdate(t *testing.T) { - picker := mustNewURLPicker(t, []string{"http://127.0.0.1:2380", "http://127.0.0.1:7001"}) - picker.update(testutil.MustNewURLs(t, []string{"http://localhost:2380", "http://localhost:7001"})) - - u := picker.pick() - urlmap := map[url.URL]bool{ - {Scheme: "http", Host: "localhost:2380"}: true, - {Scheme: "http", Host: "localhost:7001"}: true, - } - if !urlmap[u] { - t.Errorf("url picked = %+v, want a possible url in %+v", u, urlmap) - } -} - -func TestURLPickerUnreachable(t *testing.T) { - picker := mustNewURLPicker(t, []string{"http://127.0.0.1:2380", "http://127.0.0.1:7001"}) - u := picker.pick() - picker.unreachable(u) - - uu := picker.pick() - if u == uu { - t.Errorf("url picked = %+v, want other possible urls", uu) - } -} - -func mustNewURLPicker(t *testing.T, us []string) *urlPicker { - urls := testutil.MustNewURLs(t, us) - return newURLPicker(urls) -} diff --git a/server/etcdserver/api/rafthttp/util.go b/server/etcdserver/api/rafthttp/util.go deleted file mode 100644 index 91bc6884e4b..00000000000 --- a/server/etcdserver/api/rafthttp/util.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "fmt" - "io" - "net" - "net/http" - "net/url" - "strings" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" -) - -var ( - errMemberRemoved = fmt.Errorf("the member has been permanently removed from the cluster") - errMemberNotFound = fmt.Errorf("member not found") -) - -// NewListener returns a listener for raft message transfer between peers. -// It uses timeout listener to identify broken streams promptly. -func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) { - return transport.NewListenerWithOpts(u.Host, u.Scheme, transport.WithTLSInfo(tlsinfo), transport.WithTimeout(ConnReadTimeout, ConnWriteTimeout)) -} - -// NewRoundTripper returns a roundTripper used to send requests -// to rafthttp listener of remote peers. -func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { - // It uses timeout transport to pair with remote timeout listeners. - // It sets no read/write timeout, because message in requests may - // take long time to write out before reading out the response. - return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0) -} - -// newStreamRoundTripper returns a roundTripper used to send stream requests -// to rafthttp listener of remote peers. 
-// Read/write timeout is set for stream roundTripper to promptly -// find out broken status, which minimizes the number of messages -// sent on broken connection. -func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { - return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) -} - -// createPostRequest creates a HTTP POST request that sends raft message. -func createPostRequest(lg *zap.Logger, u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { - uu := u - uu.Path = path - req, err := http.NewRequest("POST", uu.String(), body) - if err != nil { - if lg != nil { - lg.Panic("unexpected new request error", zap.Error(err)) - } - } - req.Header.Set("Content-Type", ct) - req.Header.Set("X-Server-From", from.String()) - req.Header.Set("X-Server-Version", version.Version) - req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion) - req.Header.Set("X-Etcd-Cluster-ID", cid.String()) - setPeerURLsHeader(req, urls) - - return req -} - -// checkPostResponse checks the response of the HTTP POST request that sends -// raft message. 
-func checkPostResponse(lg *zap.Logger, resp *http.Response, body []byte, req *http.Request, to types.ID) error { - switch resp.StatusCode { - case http.StatusPreconditionFailed: - switch strings.TrimSuffix(string(body), "\n") { - case errIncompatibleVersion.Error(): - if lg != nil { - lg.Error( - "request sent was ignored by peer", - zap.String("remote-peer-id", to.String()), - ) - } - return errIncompatibleVersion - case errClusterIDMismatch.Error(): - if lg != nil { - lg.Error( - "request sent was ignored due to cluster ID mismatch", - zap.String("remote-peer-id", to.String()), - zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")), - zap.String("local-member-cluster-id", req.Header.Get("X-Etcd-Cluster-ID")), - ) - } - return errClusterIDMismatch - default: - return fmt.Errorf("unhandled error %q when precondition failed", string(body)) - } - case http.StatusForbidden: - return errMemberRemoved - case http.StatusNoContent: - return nil - default: - return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String()) - } -} - -// reportCriticalError reports the given error through sending it into -// the given error channel. -// If the error channel is filled up when sending error, it drops the error -// because the fact that error has happened is reported, which is -// good enough. -func reportCriticalError(err error, errc chan<- error) { - select { - case errc <- err: - default: - } -} - -// compareMajorMinorVersion returns an integer comparing two versions based on -// their major and minor version. The result will be 0 if a==b, -1 if a < b, -// and 1 if a > b. 
-func compareMajorMinorVersion(a, b *semver.Version) int { - na := &semver.Version{Major: a.Major, Minor: a.Minor} - nb := &semver.Version{Major: b.Major, Minor: b.Minor} - switch { - case na.LessThan(*nb): - return -1 - case nb.LessThan(*na): - return 1 - default: - return 0 - } -} - -// serverVersion returns the server version from the given header. -func serverVersion(h http.Header) *semver.Version { - verStr := h.Get("X-Server-Version") - // backward compatibility with etcd 2.0 - if verStr == "" { - verStr = "2.0.0" - } - return semver.Must(semver.NewVersion(verStr)) -} - -// serverVersion returns the min cluster version from the given header. -func minClusterVersion(h http.Header) *semver.Version { - verStr := h.Get("X-Min-Cluster-Version") - // backward compatibility with etcd 2.0 - if verStr == "" { - verStr = "2.0.0" - } - return semver.Must(semver.NewVersion(verStr)) -} - -// checkVersionCompatibility checks whether the given version is compatible -// with the local version. -func checkVersionCompatibility(name string, server, minCluster *semver.Version) ( - localServer *semver.Version, - localMinCluster *semver.Version, - err error) { - localServer = semver.Must(semver.NewVersion(version.Version)) - localMinCluster = semver.Must(semver.NewVersion(version.MinClusterVersion)) - if compareMajorMinorVersion(server, localMinCluster) == -1 { - return localServer, localMinCluster, fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer) - } - if compareMajorMinorVersion(minCluster, localServer) == 1 { - return localServer, localMinCluster, fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer) - } - return localServer, localMinCluster, nil -} - -// setPeerURLsHeader reports local urls for peer discovery -func setPeerURLsHeader(req *http.Request, urls types.URLs) { - if urls == nil { - // often not set in unit tests - return - } - peerURLs := make([]string, urls.Len()) - for i := range urls { 
- peerURLs[i] = urls[i].String() - } - req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ",")) -} - -// addRemoteFromRequest adds a remote peer according to an http request header -func addRemoteFromRequest(tr Transporter, r *http.Request) { - if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil { - if urls := r.Header.Get("X-PeerURLs"); urls != "" { - tr.AddRemote(from, strings.Split(urls, ",")) - } - } -} diff --git a/server/etcdserver/api/rafthttp/util_test.go b/server/etcdserver/api/rafthttp/util_test.go deleted file mode 100644 index 743333fbbe3..00000000000 --- a/server/etcdserver/api/rafthttp/util_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "bytes" - "encoding/binary" - "io" - "net/http" - "reflect" - "testing" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/raft/v3/raftpb" - - "github.com/coreos/go-semver/semver" -) - -func TestEntry(t *testing.T) { - tests := []raftpb.Entry{ - {}, - {Term: 1, Index: 1}, - {Term: 1, Index: 1, Data: []byte("some data")}, - } - for i, tt := range tests { - b := &bytes.Buffer{} - if err := writeEntryTo(b, &tt); err != nil { - t.Errorf("#%d: unexpected write ents error: %v", i, err) - continue - } - var ent raftpb.Entry - if err := readEntryFrom(b, &ent); err != nil { - t.Errorf("#%d: unexpected read ents error: %v", i, err) - continue - } - if !reflect.DeepEqual(ent, tt) { - t.Errorf("#%d: ent = %+v, want %+v", i, ent, tt) - } - } -} - -func TestCompareMajorMinorVersion(t *testing.T) { - tests := []struct { - va, vb *semver.Version - w int - }{ - // equal to - { - semver.Must(semver.NewVersion("2.1.0")), - semver.Must(semver.NewVersion("2.1.0")), - 0, - }, - // smaller than - { - semver.Must(semver.NewVersion("2.0.0")), - semver.Must(semver.NewVersion("2.1.0")), - -1, - }, - // bigger than - { - semver.Must(semver.NewVersion("2.2.0")), - semver.Must(semver.NewVersion("2.1.0")), - 1, - }, - // ignore patch - { - semver.Must(semver.NewVersion("2.1.1")), - semver.Must(semver.NewVersion("2.1.0")), - 0, - }, - // ignore prerelease - { - semver.Must(semver.NewVersion("2.1.0-alpha.0")), - semver.Must(semver.NewVersion("2.1.0")), - 0, - }, - } - for i, tt := range tests { - if g := compareMajorMinorVersion(tt.va, tt.vb); g != tt.w { - t.Errorf("#%d: compare = %d, want %d", i, g, tt.w) - } - } -} - -func TestServerVersion(t *testing.T) { - tests := []struct { - h http.Header - wv *semver.Version - }{ - // backward compatibility with etcd 2.0 - { - http.Header{}, - semver.Must(semver.NewVersion("2.0.0")), - }, - { - http.Header{"X-Server-Version": []string{"2.1.0"}}, - semver.Must(semver.NewVersion("2.1.0")), - }, - { - 
http.Header{"X-Server-Version": []string{"2.1.0-alpha.0+git"}}, - semver.Must(semver.NewVersion("2.1.0-alpha.0+git")), - }, - } - for i, tt := range tests { - v := serverVersion(tt.h) - if v.String() != tt.wv.String() { - t.Errorf("#%d: version = %s, want %s", i, v, tt.wv) - } - } -} - -func TestMinClusterVersion(t *testing.T) { - tests := []struct { - h http.Header - wv *semver.Version - }{ - // backward compatibility with etcd 2.0 - { - http.Header{}, - semver.Must(semver.NewVersion("2.0.0")), - }, - { - http.Header{"X-Min-Cluster-Version": []string{"2.1.0"}}, - semver.Must(semver.NewVersion("2.1.0")), - }, - { - http.Header{"X-Min-Cluster-Version": []string{"2.1.0-alpha.0+git"}}, - semver.Must(semver.NewVersion("2.1.0-alpha.0+git")), - }, - } - for i, tt := range tests { - v := minClusterVersion(tt.h) - if v.String() != tt.wv.String() { - t.Errorf("#%d: version = %s, want %s", i, v, tt.wv) - } - } -} - -func TestCheckVersionCompatibility(t *testing.T) { - ls := semver.Must(semver.NewVersion(version.Version)) - lmc := semver.Must(semver.NewVersion(version.MinClusterVersion)) - tests := []struct { - server *semver.Version - minCluster *semver.Version - wok bool - }{ - // the same version as local - { - ls, - lmc, - true, - }, - // one version lower - { - lmc, - &semver.Version{}, - true, - }, - // one version higher - { - &semver.Version{Major: ls.Major + 1}, - ls, - true, - }, - // too low version - { - &semver.Version{Major: lmc.Major - 1}, - &semver.Version{}, - false, - }, - // too high version - { - &semver.Version{Major: ls.Major + 1, Minor: 1}, - &semver.Version{Major: ls.Major + 1}, - false, - }, - } - for i, tt := range tests { - _, _, err := checkVersionCompatibility("", tt.server, tt.minCluster) - if ok := err == nil; ok != tt.wok { - t.Errorf("#%d: ok = %v, want %v", i, ok, tt.wok) - } - } -} - -func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { - size := ent.Size() - if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { - 
return err - } - b, err := ent.Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { - var l uint64 - if err := binary.Read(r, binary.BigEndian, &l); err != nil { - return err - } - buf := make([]byte, int(l)) - if _, err := io.ReadFull(r, buf); err != nil { - return err - } - return ent.Unmarshal(buf) -} diff --git a/server/etcdserver/api/snap/db.go b/server/etcdserver/api/snap/db.go deleted file mode 100644 index 1d42557bcc0..00000000000 --- a/server/etcdserver/api/snap/db.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - - humanize "github.com/dustin/go-humanize" - "go.uber.org/zap" -) - -var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") - -// SaveDBFrom saves snapshot of the database from the given reader. It -// guarantees the save operation is atomic. 
-func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { - start := time.Now() - - f, err := os.CreateTemp(s.dir, "tmp") - if err != nil { - return 0, err - } - var n int64 - n, err = io.Copy(f, r) - if err == nil { - fsyncStart := time.Now() - err = fileutil.Fsync(f) - snapDBFsyncSec.Observe(time.Since(fsyncStart).Seconds()) - } - f.Close() - if err != nil { - os.Remove(f.Name()) - return n, err - } - fn := s.dbFilePath(id) - if fileutil.Exist(fn) { - os.Remove(f.Name()) - return n, nil - } - err = os.Rename(f.Name(), fn) - if err != nil { - os.Remove(f.Name()) - return n, err - } - - s.lg.Info( - "saved database snapshot to disk", - zap.String("path", fn), - zap.Int64("bytes", n), - zap.String("size", humanize.Bytes(uint64(n))), - ) - - snapDBSaveSec.Observe(time.Since(start).Seconds()) - return n, nil -} - -// DBFilePath returns the file path for the snapshot of the database with -// given id. If the snapshot does not exist, it returns error. -func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - if _, err := fileutil.ReadDir(s.dir); err != nil { - return "", err - } - fn := s.dbFilePath(id) - if fileutil.Exist(fn) { - return fn, nil - } - if s.lg != nil { - s.lg.Warn( - "failed to find [SNAPSHOT-INDEX].snap.db", - zap.Uint64("snapshot-index", id), - zap.String("snapshot-file-path", fn), - zap.Error(ErrNoDBSnapshot), - ) - } - return "", ErrNoDBSnapshot -} - -func (s *Snapshotter) dbFilePath(id uint64) string { - return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) -} diff --git a/server/etcdserver/api/snap/doc.go b/server/etcdserver/api/snap/doc.go deleted file mode 100644 index dcc5db57982..00000000000 --- a/server/etcdserver/api/snap/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package snap handles Raft nodes' states with snapshots. -// The snapshot logic is internal to etcd server and raft package. -package snap diff --git a/server/etcdserver/api/snap/metrics.go b/server/etcdserver/api/snap/metrics.go deleted file mode 100644 index 2affecf4726..00000000000 --- a/server/etcdserver/api/snap/metrics.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package snap - -import "github.com/prometheus/client_golang/prometheus" - -var ( - snapMarshallingSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "snap", - Name: "save_marshalling_duration_seconds", - Help: "The marshalling cost distributions of save called by snapshot.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - snapSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "snap", - Name: "save_total_duration_seconds", - Help: "The total latency distributions of save called by snapshot.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - snapFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "snap", - Name: "fsync_duration_seconds", - Help: "The latency distributions of fsync called by snap.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - snapDBSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "snap_db", - Name: "save_total_duration_seconds", - Help: "The total latency distributions of v3 snapshot save", - - // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2 - // highest bucket start of 0.1 sec * 2^9 == 51.2 sec - Buckets: prometheus.ExponentialBuckets(0.1, 2, 10), - }) - - snapDBFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "snap_db", - Name: "fsync_duration_seconds", - Help: "The latency distributions of fsyncing .snap.db file", - - // lowest bucket start of upper 
bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) -) - -func init() { - prometheus.MustRegister(snapMarshallingSec) - prometheus.MustRegister(snapSaveSec) - prometheus.MustRegister(snapFsyncSec) - prometheus.MustRegister(snapDBSaveSec) - prometheus.MustRegister(snapDBFsyncSec) -} diff --git a/server/etcdserver/api/snap/snappb/snap.pb.go b/server/etcdserver/api/snap/snappb/snap.pb.go deleted file mode 100644 index 6fd2b9c0089..00000000000 --- a/server/etcdserver/api/snap/snappb/snap.pb.go +++ /dev/null @@ -1,344 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: snap.proto - -package snappb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Snapshot struct { - Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_f2e3c045ebf84d00, []int{0} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") -} - -func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) } - -var fileDescriptor_f2e3c045ebf84d00 = []byte{ - // 126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, - 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, - 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, - 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, - 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 
0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24, - 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1, - 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, - 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00, -} - -func (m *Snapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintSnap(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - i = encodeVarintSnap(dAtA, i, uint64(m.Crc)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func encodeVarintSnap(dAtA []byte, offset int, v uint64) int { - offset -= sovSnap(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovSnap(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovSnap(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSnap(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSnap(x uint64) (n int) { - return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Snapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Crc |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnap - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnap - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnap(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnap - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnap(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSnap - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSnap - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSnap - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSnap = fmt.Errorf("proto: unexpected end of group") -) diff --git a/server/etcdserver/api/snap/snapshotter.go b/server/etcdserver/api/snap/snapshotter.go deleted file mode 100644 index 093ab6bc914..00000000000 --- a/server/etcdserver/api/snap/snapshotter.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 
(the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import ( - "errors" - "fmt" - "hash/crc32" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "go.etcd.io/etcd/client/pkg/v3/verify" - pioutil "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" - - "go.uber.org/zap" -) - -const snapSuffix = ".snap" - -var ( - ErrNoSnapshot = errors.New("snap: no available snapshot") - ErrEmptySnapshot = errors.New("snap: empty snapshot") - ErrCRCMismatch = errors.New("snap: crc mismatch") - crcTable = crc32.MakeTable(crc32.Castagnoli) - - // A map of valid files that can be present in the snap folder. 
- validFiles = map[string]bool{ - "db": true, - } -) - -type Snapshotter struct { - lg *zap.Logger - dir string -} - -func New(lg *zap.Logger, dir string) *Snapshotter { - if lg == nil { - lg = zap.NewNop() - } - return &Snapshotter{ - lg: lg, - dir: dir, - } -} - -func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error { - if raft.IsEmptySnap(snapshot) { - return nil - } - return s.save(&snapshot) -} - -func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { - start := time.Now() - - fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix) - b := pbutil.MustMarshal(snapshot) - crc := crc32.Update(0, crcTable, b) - snap := snappb.Snapshot{Crc: crc, Data: b} - d, err := snap.Marshal() - if err != nil { - return err - } - snapMarshallingSec.Observe(time.Since(start).Seconds()) - - spath := filepath.Join(s.dir, fname) - - fsyncStart := time.Now() - err = pioutil.WriteAndSyncFile(spath, d, 0666) - snapFsyncSec.Observe(time.Since(fsyncStart).Seconds()) - - if err != nil { - s.lg.Warn("failed to write a snap file", zap.String("path", spath), zap.Error(err)) - rerr := os.Remove(spath) - if rerr != nil { - s.lg.Warn("failed to remove a broken snap file", zap.String("path", spath), zap.Error(rerr)) - } - return err - } - - snapSaveSec.Observe(time.Since(start).Seconds()) - return nil -} - -// Load returns the newest snapshot. -func (s *Snapshotter) Load() (*raftpb.Snapshot, error) { - return s.loadMatching(func(*raftpb.Snapshot) bool { return true }) -} - -// LoadNewestAvailable loads the newest snapshot available that is in walSnaps. 
-func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) { - return s.loadMatching(func(snapshot *raftpb.Snapshot) bool { - m := snapshot.Metadata - for i := len(walSnaps) - 1; i >= 0; i-- { - if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index { - return true - } - } - return false - }) -} - -// loadMatching returns the newest snapshot where matchFn returns true. -func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) { - names, err := s.snapNames() - if err != nil { - return nil, err - } - var snap *raftpb.Snapshot - for _, name := range names { - if snap, err = s.loadSnap(name); err == nil && matchFn(snap) { - return snap, nil - } - } - return nil, ErrNoSnapshot -} - -func (s *Snapshotter) loadSnap(name string) (*raftpb.Snapshot, error) { - fpath := filepath.Join(s.dir, name) - snap, err := Read(s.lg, fpath) - if err != nil { - brokenPath := fpath + ".broken" - s.lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err)) - if rerr := os.Rename(fpath, brokenPath); rerr != nil { - s.lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr)) - } else { - s.lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath)) - } - } - return snap, err -} - -// Read reads the snapshot named by snapname and returns the snapshot. 
-func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) { - verify.Assert(lg != nil, "the logger should not be nil") - b, err := os.ReadFile(snapname) - if err != nil { - lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err)) - return nil, err - } - - if len(b) == 0 { - lg.Warn("failed to read empty snapshot file", zap.String("path", snapname)) - return nil, ErrEmptySnapshot - } - - var serializedSnap snappb.Snapshot - if err = serializedSnap.Unmarshal(b); err != nil { - lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err)) - return nil, err - } - - if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 { - lg.Warn("failed to read empty snapshot data", zap.String("path", snapname)) - return nil, ErrEmptySnapshot - } - - crc := crc32.Update(0, crcTable, serializedSnap.Data) - if crc != serializedSnap.Crc { - lg.Warn("snap file is corrupt", - zap.String("path", snapname), - zap.Uint32("prev-crc", serializedSnap.Crc), - zap.Uint32("new-crc", crc), - ) - return nil, ErrCRCMismatch - } - - var snap raftpb.Snapshot - if err = snap.Unmarshal(serializedSnap.Data); err != nil { - lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err)) - return nil, err - } - return &snap, nil -} - -// snapNames returns the filename of the snapshots in logical time order (from newest to oldest). -// If there is no available snapshots, an ErrNoSnapshot will be returned. 
-func (s *Snapshotter) snapNames() ([]string, error) { - dir, err := os.Open(s.dir) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - filenames, err := s.cleanupSnapdir(names) - if err != nil { - return nil, err - } - snaps := s.checkSuffix(filenames) - if len(snaps) == 0 { - return nil, ErrNoSnapshot - } - sort.Sort(sort.Reverse(sort.StringSlice(snaps))) - return snaps, nil -} - -func (s *Snapshotter) checkSuffix(names []string) []string { - var snaps []string - for i := range names { - if strings.HasSuffix(names[i], snapSuffix) { - snaps = append(snaps, names[i]) - } else { - // If we find a file which is not a snapshot then check if it's - // a valid file. If not throw out a warning. - if _, ok := validFiles[names[i]]; !ok { - s.lg.Warn("found unexpected non-snap file; skipping", zap.String("path", names[i])) - } - } - } - return snaps -} - -// cleanupSnapdir removes any files that should not be in the snapshot directory: -// - db.tmp prefixed files that can be orphaned by defragmentation -func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) { - names = make([]string, 0, len(filenames)) - for _, filename := range filenames { - if strings.HasPrefix(filename, "db.tmp") { - s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename)) - if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) { - return names, fmt.Errorf("failed to remove orphaned .snap.db file %s: %v", filename, rmErr) - } - } else { - names = append(names, filename) - } - } - return names, nil -} - -func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error { - dir, err := os.Open(s.dir) - if err != nil { - return err - } - defer dir.Close() - filenames, err := dir.Readdirnames(-1) - if err != nil { - return err - } - for _, filename := range filenames { - if strings.HasSuffix(filename, ".snap.db") { - 
hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db") - index, err := strconv.ParseUint(hexIndex, 16, 64) - if err != nil { - s.lg.Error("failed to parse index from filename", zap.String("path", filename), zap.String("error", err.Error())) - continue - } - if index < snap.Metadata.Index { - s.lg.Info("found orphaned .snap.db file; deleting", zap.String("path", filename)) - if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) { - s.lg.Error("failed to remove orphaned .snap.db file", zap.String("path", filename), zap.String("error", rmErr.Error())) - } - } - } - } - return nil -} diff --git a/server/etcdserver/api/snap/snapshotter_test.go b/server/etcdserver/api/snap/snapshotter_test.go deleted file mode 100644 index 6074d4c473b..00000000000 --- a/server/etcdserver/api/snap/snapshotter_test.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package snap - -import ( - "fmt" - "hash/crc32" - "os" - "path/filepath" - "reflect" - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" -) - -var testSnap = &raftpb.Snapshot{ - Data: []byte("some snapshot"), - Metadata: raftpb.SnapshotMetadata{ - ConfState: raftpb.ConfState{ - Voters: []uint64{1, 2, 3}, - }, - Index: 1, - Term: 1, - }, -} - -func TestSaveAndLoad(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - ss := New(zaptest.NewLogger(t), dir) - err = ss.save(testSnap) - if err != nil { - t.Fatal(err) - } - - g, err := ss.Load() - if err != nil { - t.Errorf("err = %v, want nil", err) - } - if !reflect.DeepEqual(g, testSnap) { - t.Errorf("snap = %#v, want %#v", g, testSnap) - } -} - -func TestBadCRC(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - ss := New(zaptest.NewLogger(t), dir) - err = ss.save(testSnap) - if err != nil { - t.Fatal(err) - } - defer func() { crcTable = crc32.MakeTable(crc32.Castagnoli) }() - // switch to use another crc table - // fake a crc mismatch - crcTable = crc32.MakeTable(crc32.Koopman) - - _, err = Read(zaptest.NewLogger(t), filepath.Join(dir, fmt.Sprintf("%016x-%016x.snap", 1, 1))) - if err == nil || err != ErrCRCMismatch { - t.Errorf("err = %v, want %v", err, ErrCRCMismatch) - } -} - -func TestFailback(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - large := fmt.Sprintf("%016x-%016x-%016x.snap", 0xFFFF, 0xFFFF, 0xFFFF) - err = os.WriteFile(filepath.Join(dir, large), []byte("bad data"), 0666) - if err != nil { - t.Fatal(err) - } - - ss := New(zaptest.NewLogger(t), dir) - err = 
ss.save(testSnap) - if err != nil { - t.Fatal(err) - } - - g, err := ss.Load() - if err != nil { - t.Errorf("err = %v, want nil", err) - } - if !reflect.DeepEqual(g, testSnap) { - t.Errorf("snap = %#v, want %#v", g, testSnap) - } - if f, err := os.Open(filepath.Join(dir, large) + ".broken"); err != nil { - t.Fatal("broken snapshot does not exist") - } else { - f.Close() - } -} - -func TestSnapNames(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - for i := 1; i <= 5; i++ { - var f *os.File - if f, err = os.Create(filepath.Join(dir, fmt.Sprintf("%d.snap", i))); err != nil { - t.Fatal(err) - } else { - f.Close() - } - } - ss := New(zaptest.NewLogger(t), dir) - names, err := ss.snapNames() - if err != nil { - t.Errorf("err = %v, want nil", err) - } - if len(names) != 5 { - t.Errorf("len = %d, want 10", len(names)) - } - w := []string{"5.snap", "4.snap", "3.snap", "2.snap", "1.snap"} - if !reflect.DeepEqual(names, w) { - t.Errorf("names = %v, want %v", names, w) - } -} - -func TestLoadNewestSnap(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - ss := New(zaptest.NewLogger(t), dir) - err = ss.save(testSnap) - if err != nil { - t.Fatal(err) - } - - newSnap := *testSnap - newSnap.Metadata.Index = 5 - err = ss.save(&newSnap) - if err != nil { - t.Fatal(err) - } - - cases := []struct { - name string - availableWalSnaps []walpb.Snapshot - expected *raftpb.Snapshot - }{ - { - name: "load-newest", - expected: &newSnap, - }, - { - name: "loadnewestavailable-newest", - availableWalSnaps: []walpb.Snapshot{{Index: 0, Term: 0}, {Index: 1, Term: 1}, {Index: 5, Term: 1}}, - expected: &newSnap, - }, - { - name: "loadnewestavailable-newest-unsorted", - availableWalSnaps: []walpb.Snapshot{{Index: 5, Term: 1}, {Index: 1, Term: 1}, {Index: 0, Term: 0}}, - expected: &newSnap, 
- }, - { - name: "loadnewestavailable-previous", - availableWalSnaps: []walpb.Snapshot{{Index: 0, Term: 0}, {Index: 1, Term: 1}}, - expected: testSnap, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - var err error - var g *raftpb.Snapshot - if tc.availableWalSnaps != nil { - g, err = ss.LoadNewestAvailable(tc.availableWalSnaps) - } else { - g, err = ss.Load() - } - if err != nil { - t.Errorf("err = %v, want nil", err) - } - if !reflect.DeepEqual(g, tc.expected) { - t.Errorf("snap = %#v, want %#v", g, tc.expected) - } - }) - } -} - -func TestNoSnapshot(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - ss := New(zaptest.NewLogger(t), dir) - _, err = ss.Load() - if err != ErrNoSnapshot { - t.Errorf("err = %v, want %v", err, ErrNoSnapshot) - } -} - -func TestEmptySnapshot(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - err = os.WriteFile(filepath.Join(dir, "1.snap"), []byte(""), 0x700) - if err != nil { - t.Fatal(err) - } - - _, err = Read(zaptest.NewLogger(t), filepath.Join(dir, "1.snap")) - if err != ErrEmptySnapshot { - t.Errorf("err = %v, want %v", err, ErrEmptySnapshot) - } -} - -// TestAllSnapshotBroken ensures snapshotter returns -// ErrNoSnapshot if all the snapshots are broken. 
-func TestAllSnapshotBroken(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - err = os.WriteFile(filepath.Join(dir, "1.snap"), []byte("bad"), 0x700) - if err != nil { - t.Fatal(err) - } - - ss := New(zaptest.NewLogger(t), dir) - _, err = ss.Load() - if err != ErrNoSnapshot { - t.Errorf("err = %v, want %v", err, ErrNoSnapshot) - } -} - -func TestReleaseSnapDBs(t *testing.T) { - dir := filepath.Join(os.TempDir(), "snapshot") - err := os.Mkdir(dir, 0700) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - snapIndices := []uint64{100, 200, 300, 400} - for _, index := range snapIndices { - filename := filepath.Join(dir, fmt.Sprintf("%016x.snap.db", index)) - if err := os.WriteFile(filename, []byte("snap file\n"), 0644); err != nil { - t.Fatal(err) - } - } - - ss := New(zaptest.NewLogger(t), dir) - - if err := ss.ReleaseSnapDBs(raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 300}}); err != nil { - t.Fatal(err) - } - - deleted := []uint64{100, 200} - for _, index := range deleted { - filename := filepath.Join(dir, fmt.Sprintf("%016x.snap.db", index)) - if fileutil.Exist(filename) { - t.Errorf("expected %s (index: %d) to be deleted, but it still exists", filename, index) - } - } - - retained := []uint64{300, 400} - for _, index := range retained { - filename := filepath.Join(dir, fmt.Sprintf("%016x.snap.db", index)) - if !fileutil.Exist(filename) { - t.Errorf("expected %s (index: %d) to be retained, but it no longer exists", filename, index) - } - } -} diff --git a/server/etcdserver/api/v2discovery/discovery.go b/server/etcdserver/api/v2discovery/discovery.go deleted file mode 100644 index 9f1bc0adf50..00000000000 --- a/server/etcdserver/api/v2discovery/discovery.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except 
in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v2discovery provides an implementation of the cluster discovery that -// is used by etcd with v2 client. -package v2discovery - -import ( - "context" - "errors" - "fmt" - "math" - "net/http" - "net/url" - "path" - "sort" - "strconv" - "strings" - "time" - - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/client/v2" - - "github.com/jonboulle/clockwork" - "go.uber.org/zap" -) - -var ( - ErrInvalidURL = errors.New("discovery: invalid URL") - ErrBadSizeKey = errors.New("discovery: size key is bad") - ErrSizeNotFound = errors.New("discovery: size key not found") - ErrTokenNotFound = errors.New("discovery: token not found") - ErrDuplicateID = errors.New("discovery: found duplicate id") - ErrDuplicateName = errors.New("discovery: found duplicate name") - ErrFullCluster = errors.New("discovery: cluster is full") - ErrTooManyRetries = errors.New("discovery: too many retries") - ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint") -) - -var ( - // Number of retries discovery will attempt before giving up and erroring out. 
- nRetries = uint(math.MaxUint32) - maxExpoentialRetries = uint(8) -) - -// JoinCluster will connect to the discovery service at the given url, and -// register the server represented by the given id and config to the cluster -func JoinCluster(lg *zap.Logger, durl, dproxyurl string, id types.ID, config string) (string, error) { - d, err := newDiscovery(lg, durl, dproxyurl, id) - if err != nil { - return "", err - } - return d.joinCluster(config) -} - -// GetCluster will connect to the discovery service at the given url and -// retrieve a string describing the cluster -func GetCluster(lg *zap.Logger, durl, dproxyurl string) (string, error) { - d, err := newDiscovery(lg, durl, dproxyurl, 0) - if err != nil { - return "", err - } - return d.getCluster() -} - -type discovery struct { - lg *zap.Logger - cluster string - id types.ID - c client.KeysAPI - retries uint - url *url.URL - - clock clockwork.Clock -} - -// newProxyFunc builds a proxy function from the given string, which should -// represent a URL that can be used as a proxy. It performs basic -// sanitization of the URL and returns any error encountered. -func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, error), error) { - if lg == nil { - lg = zap.NewNop() - } - if proxy == "" { - return nil, nil - } - // Do a small amount of URL sanitization to help the user - // Derived from net/http.ProxyFromEnvironment - proxyURL, err := url.Parse(proxy) - if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { - // proxy was bogus. Try prepending "http://" to it and - // see if that parses correctly. 
If not, we ignore the - // error and complain about the original one - var err2 error - proxyURL, err2 = url.Parse("http://" + proxy) - if err2 == nil { - err = nil - } - } - if err != nil { - return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) - } - - lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String())) - return http.ProxyURL(proxyURL), nil -} - -func newDiscovery(lg *zap.Logger, durl, dproxyurl string, id types.ID) (*discovery, error) { - if lg == nil { - lg = zap.NewNop() - } - u, err := url.Parse(durl) - if err != nil { - return nil, err - } - token := u.Path - u.Path = "" - pf, err := newProxyFunc(lg, dproxyurl) - if err != nil { - return nil, err - } - - // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early - tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second) - if err != nil { - return nil, err - } - tr.Proxy = pf - cfg := client.Config{ - Transport: tr, - Endpoints: []string{u.String()}, - } - c, err := client.New(cfg) - if err != nil { - return nil, err - } - dc := client.NewKeysAPIWithPrefix(c, "") - return &discovery{ - lg: lg, - cluster: token, - c: dc, - id: id, - url: u, - clock: clockwork.NewRealClock(), - }, nil -} - -func (d *discovery) joinCluster(config string) (string, error) { - // fast path: if the cluster is full, return the error - // do not need to register to the cluster in this case. - if _, _, _, err := d.checkCluster(); err != nil { - return "", err - } - - if err := d.createSelf(config); err != nil { - // Fails, even on a timeout, if createSelf times out. - // TODO(barakmich): Retrying the same node might want to succeed here - // (ie, createSelf should be idempotent for discovery). 
- return "", err - } - - nodes, size, index, err := d.checkCluster() - if err != nil { - return "", err - } - - all, err := d.waitNodes(nodes, size, index) - if err != nil { - return "", err - } - - return nodesToCluster(all, size) -} - -func (d *discovery) getCluster() (string, error) { - nodes, size, index, err := d.checkCluster() - if err != nil { - if err == ErrFullCluster { - return nodesToCluster(nodes, size) - } - return "", err - } - - all, err := d.waitNodes(nodes, size, index) - if err != nil { - return "", err - } - return nodesToCluster(all, size) -} - -func (d *discovery) createSelf(contents string) error { - ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) - resp, err := d.c.Create(ctx, d.selfKey(), contents) - cancel() - if err != nil { - if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist { - return ErrDuplicateID - } - return err - } - - // ensure self appears on the server we connected to - w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1}) - _, err = w.Next(context.Background()) - return err -} - -func (d *discovery) checkCluster() ([]*client.Node, uint64, uint64, error) { - configKey := path.Join("/", d.cluster, "_config") - ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) - // find cluster size - resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil) - cancel() - if err != nil { - if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound { - return nil, 0, 0, ErrSizeNotFound - } - if err == client.ErrInvalidJSON { - return nil, 0, 0, ErrBadDiscoveryEndpoint - } - if ce, ok := err.(*client.ClusterError); ok { - d.lg.Warn( - "failed to get from discovery server", - zap.String("discovery-url", d.url.String()), - zap.String("path", path.Join(configKey, "size")), - zap.Error(err), - zap.String("err-detail", ce.Detail()), - ) - return d.checkClusterRetry() - } - return nil, 
0, 0, err - } - size, err := strconv.ParseUint(resp.Node.Value, 10, 0) - if err != nil { - return nil, 0, 0, ErrBadSizeKey - } - - ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout) - resp, err = d.c.Get(ctx, d.cluster, nil) - cancel() - if err != nil { - if ce, ok := err.(*client.ClusterError); ok { - d.lg.Warn( - "failed to get from discovery server", - zap.String("discovery-url", d.url.String()), - zap.String("path", d.cluster), - zap.Error(err), - zap.String("err-detail", ce.Detail()), - ) - return d.checkClusterRetry() - } - return nil, 0, 0, err - } - var nodes []*client.Node - // append non-config keys to nodes - for _, n := range resp.Node.Nodes { - if path.Base(n.Key) != path.Base(configKey) { - nodes = append(nodes, n) - } - } - - snodes := sortableNodes{nodes} - sort.Sort(snodes) - - // find self position - for i := range nodes { - if path.Base(nodes[i].Key) == path.Base(d.selfKey()) { - break - } - if uint64(i) >= size-1 { - return nodes[:size], size, resp.Index, ErrFullCluster - } - } - return nodes, size, resp.Index, nil -} - -func (d *discovery) logAndBackoffForRetry(step string) { - d.retries++ - // logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward. 
- retries := d.retries - if retries > maxExpoentialRetries { - retries = maxExpoentialRetries - } - retryTimeInSecond := time.Duration(0x1< size { - nodes = nodes[:size] - } - // watch from the next index - w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true}) - all := make([]*client.Node, len(nodes)) - copy(all, nodes) - for _, n := range all { - if path.Base(n.Key) == path.Base(d.selfKey()) { - d.lg.Info( - "found self from discovery server", - zap.String("discovery-url", d.url.String()), - zap.String("self", path.Base(d.selfKey())), - ) - } else { - d.lg.Info( - "found peer from discovery server", - zap.String("discovery-url", d.url.String()), - zap.String("peer", path.Base(n.Key)), - ) - } - } - - // wait for others - for uint64(len(all)) < size { - d.lg.Info( - "found peers from discovery server; waiting for more", - zap.String("discovery-url", d.url.String()), - zap.Int("found-peers", len(all)), - zap.Int("needed-peers", int(size-uint64(len(all)))), - ) - resp, err := w.Next(context.Background()) - if err != nil { - if ce, ok := err.(*client.ClusterError); ok { - d.lg.Warn( - "error while waiting for peers", - zap.String("discovery-url", d.url.String()), - zap.Error(err), - zap.String("err-detail", ce.Detail()), - ) - return d.waitNodesRetry() - } - return nil, err - } - d.lg.Info( - "found peer from discovery server", - zap.String("discovery-url", d.url.String()), - zap.String("peer", path.Base(resp.Node.Key)), - ) - all = append(all, resp.Node) - } - d.lg.Info( - "found all needed peers from discovery server", - zap.String("discovery-url", d.url.String()), - zap.Int("found-peers", len(all)), - ) - return all, nil -} - -func (d *discovery) selfKey() string { - return path.Join("/", d.cluster, d.id.String()) -} - -func nodesToCluster(ns []*client.Node, size uint64) (string, error) { - s := make([]string, len(ns)) - for i, n := range ns { - s[i] = n.Value - } - us := strings.Join(s, ",") - m, err := types.NewURLsMap(us) - if 
err != nil { - return us, ErrInvalidURL - } - if uint64(m.Len()) != size { - return us, ErrDuplicateName - } - return us, nil -} - -type sortableNodes struct{ Nodes []*client.Node } - -func (ns sortableNodes) Len() int { return len(ns.Nodes) } -func (ns sortableNodes) Less(i, j int) bool { - return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex -} -func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] } diff --git a/server/etcdserver/api/v2discovery/discovery_test.go b/server/etcdserver/api/v2discovery/discovery_test.go deleted file mode 100644 index 7d42eb14ef0..00000000000 --- a/server/etcdserver/api/v2discovery/discovery_test.go +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2discovery - -import ( - "context" - "errors" - "math" - "math/rand" - "net/http" - "net/url" - "reflect" - "sort" - "strconv" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/client/v2" - - "github.com/jonboulle/clockwork" -) - -const ( - maxRetryInTest = 3 -) - -func TestNewProxyFuncUnset(t *testing.T) { - pf, err := newProxyFunc(zaptest.NewLogger(t), "") - if pf != nil { - t.Fatal("unexpected non-nil proxyFunc") - } - if err != nil { - t.Fatalf("unexpected non-nil err: %v", err) - } -} - -func TestNewProxyFuncBad(t *testing.T) { - tests := []string{ - "%%", - "http://foo.com/%1", - } - for i, in := range tests { - pf, err := newProxyFunc(zaptest.NewLogger(t), in) - if pf != nil { - t.Errorf("#%d: unexpected non-nil proxyFunc", i) - } - if err == nil { - t.Errorf("#%d: unexpected nil err", i) - } - } -} - -func TestNewProxyFunc(t *testing.T) { - tests := map[string]string{ - "bar.com": "http://bar.com", - "http://disco.foo.bar": "http://disco.foo.bar", - } - for in, w := range tests { - pf, err := newProxyFunc(zaptest.NewLogger(t), in) - if pf == nil { - t.Errorf("%s: unexpected nil proxyFunc", in) - continue - } - if err != nil { - t.Errorf("%s: unexpected non-nil err: %v", in, err) - continue - } - g, err := pf(&http.Request{}) - if err != nil { - t.Errorf("%s: unexpected non-nil err: %v", in, err) - } - if g.String() != w { - t.Errorf("%s: proxyURL=%q, want %q", in, g, w) - } - - } -} - -func TestCheckCluster(t *testing.T) { - cluster := "/prefix/1000" - self := "/1000/1" - - tests := []struct { - nodes []*client.Node - index uint64 - werr error - wsize int - }{ - { - // self is in the size range - []*client.Node{ - {Key: "/1000/_config/size", Value: "3", CreatedIndex: 1}, - {Key: "/1000/_config/"}, - {Key: self, CreatedIndex: 2}, - {Key: "/1000/2", CreatedIndex: 3}, - {Key: "/1000/3", CreatedIndex: 4}, - {Key: "/1000/4", CreatedIndex: 5}, - }, - 5, - nil, - 3, - }, - { - // self is in 
the size range - []*client.Node{ - {Key: "/1000/_config/size", Value: "3", CreatedIndex: 1}, - {Key: "/1000/_config/"}, - {Key: "/1000/2", CreatedIndex: 2}, - {Key: "/1000/3", CreatedIndex: 3}, - {Key: self, CreatedIndex: 4}, - {Key: "/1000/4", CreatedIndex: 5}, - }, - 5, - nil, - 3, - }, - { - // self is out of the size range - []*client.Node{ - {Key: "/1000/_config/size", Value: "3", CreatedIndex: 1}, - {Key: "/1000/_config/"}, - {Key: "/1000/2", CreatedIndex: 2}, - {Key: "/1000/3", CreatedIndex: 3}, - {Key: "/1000/4", CreatedIndex: 4}, - {Key: self, CreatedIndex: 5}, - }, - 5, - ErrFullCluster, - 3, - }, - { - // self is not in the cluster - []*client.Node{ - {Key: "/1000/_config/size", Value: "3", CreatedIndex: 1}, - {Key: "/1000/_config/"}, - {Key: "/1000/2", CreatedIndex: 2}, - {Key: "/1000/3", CreatedIndex: 3}, - }, - 3, - nil, - 3, - }, - { - []*client.Node{ - {Key: "/1000/_config/size", Value: "3", CreatedIndex: 1}, - {Key: "/1000/_config/"}, - {Key: "/1000/2", CreatedIndex: 2}, - {Key: "/1000/3", CreatedIndex: 3}, - {Key: "/1000/4", CreatedIndex: 4}, - }, - 3, - ErrFullCluster, - 3, - }, - { - // bad size key - []*client.Node{ - {Key: "/1000/_config/size", Value: "bad", CreatedIndex: 1}, - }, - 0, - ErrBadSizeKey, - 0, - }, - { - // no size key - []*client.Node{}, - 0, - ErrSizeNotFound, - 0, - }, - } - - for i, tt := range tests { - var rs []*client.Response - if len(tt.nodes) > 0 { - rs = append(rs, &client.Response{Node: tt.nodes[0], Index: tt.index}) - rs = append(rs, &client.Response{ - Node: &client.Node{ - Key: cluster, - Nodes: tt.nodes[1:], - }, - Index: tt.index, - }) - } - c := &clientWithResp{rs: rs} - dBase := newTestDiscovery(t, cluster, 1, c) - - cRetry := &clientWithRetry{failTimes: 3} - cRetry.rs = rs - fc := clockwork.NewFakeClock() - dRetry := newTestDiscoveryWithClock(t, cluster, 1, cRetry, fc) - - for _, d := range []*discovery{dBase, dRetry} { - go func() { - for i := uint(1); i <= maxRetryInTest; i++ { - fc.BlockUntil(1) - 
fc.Advance(time.Second * (0x1 << i)) - } - }() - ns, size, index, err := d.checkCluster() - if err != tt.werr { - t.Errorf("#%d: err = %v, want %v", i, err, tt.werr) - } - if reflect.DeepEqual(ns, tt.nodes) { - t.Errorf("#%d: nodes = %v, want %v", i, ns, tt.nodes) - } - if size != uint64(tt.wsize) { - t.Errorf("#%d: size = %v, want %d", i, size, tt.wsize) - } - if index != tt.index { - t.Errorf("#%d: index = %v, want %d", i, index, tt.index) - } - } - } -} - -func TestWaitNodes(t *testing.T) { - all := []*client.Node{ - 0: {Key: "/1000/1", CreatedIndex: 2}, - 1: {Key: "/1000/2", CreatedIndex: 3}, - 2: {Key: "/1000/3", CreatedIndex: 4}, - } - - tests := []struct { - nodes []*client.Node - rs []*client.Response - }{ - { - all, - []*client.Response{}, - }, - { - all[:1], - []*client.Response{ - {Node: &client.Node{Key: "/1000/2", CreatedIndex: 3}}, - {Node: &client.Node{Key: "/1000/3", CreatedIndex: 4}}, - }, - }, - { - all[:2], - []*client.Response{ - {Node: &client.Node{Key: "/1000/3", CreatedIndex: 4}}, - }, - }, - { - append(all, &client.Node{Key: "/1000/4", CreatedIndex: 5}), - []*client.Response{ - {Node: &client.Node{Key: "/1000/3", CreatedIndex: 4}}, - }, - }, - } - - for i, tt := range tests { - // Basic case - c := &clientWithResp{rs: nil, w: &watcherWithResp{rs: tt.rs}} - dBase := newTestDiscovery(t, "1000", 1, c) - - // Retry case - var retryScanResp []*client.Response - if len(tt.nodes) > 0 { - retryScanResp = append(retryScanResp, &client.Response{ - Node: &client.Node{ - Key: "1000", - Value: strconv.Itoa(3), - }, - }) - retryScanResp = append(retryScanResp, &client.Response{ - Node: &client.Node{ - Nodes: tt.nodes, - }, - }) - } - cRetry := &clientWithResp{ - rs: retryScanResp, - w: &watcherWithRetry{rs: tt.rs, failTimes: 2}, - } - fc := clockwork.NewFakeClock() - dRetry := newTestDiscoveryWithClock(t, "1000", 1, cRetry, fc) - - for _, d := range []*discovery{dBase, dRetry} { - go func() { - for i := uint(1); i <= maxRetryInTest; i++ { - 
fc.BlockUntil(1) - fc.Advance(time.Second * (0x1 << i)) - } - }() - g, err := d.waitNodes(tt.nodes, uint64(3), 0) // we do not care about index in this test - if err != nil { - t.Errorf("#%d: err = %v, want %v", i, err, nil) - } - if !reflect.DeepEqual(g, all) { - t.Errorf("#%d: all = %v, want %v", i, g, all) - } - } - } -} - -func TestCreateSelf(t *testing.T) { - rs := []*client.Response{{Node: &client.Node{Key: "1000/1", CreatedIndex: 2}}} - - w := &watcherWithResp{rs: rs} - errw := &watcherWithErr{err: errors.New("watch err")} - - c := &clientWithResp{rs: rs, w: w} - errc := &clientWithErr{err: errors.New("create err"), w: w} - errdupc := &clientWithErr{err: client.Error{Code: client.ErrorCodeNodeExist}} - errwc := &clientWithResp{rs: rs, w: errw} - - tests := []struct { - c client.KeysAPI - werr error - }{ - // no error - {c, nil}, - // client.create returns an error - {errc, errc.err}, - // watcher.next returns an error - {errwc, errw.err}, - // parse key exist error to duplicate ID error - {errdupc, ErrDuplicateID}, - } - - for i, tt := range tests { - d := newTestDiscovery(t, "1000", 1, tt.c) - if err := d.createSelf(""); err != tt.werr { - t.Errorf("#%d: err = %v, want %v", i, err, nil) - } - } -} - -func TestNodesToCluster(t *testing.T) { - tests := []struct { - nodes []*client.Node - size uint64 - wcluster string - werr error - }{ - { - []*client.Node{ - 0: {Key: "/1000/1", Value: "1=http://1.1.1.1:2380", CreatedIndex: 1}, - 1: {Key: "/1000/2", Value: "2=http://2.2.2.2:2380", CreatedIndex: 2}, - 2: {Key: "/1000/3", Value: "3=http://3.3.3.3:2380", CreatedIndex: 3}, - }, - 3, - "1=http://1.1.1.1:2380,2=http://2.2.2.2:2380,3=http://3.3.3.3:2380", - nil, - }, - { - []*client.Node{ - 0: {Key: "/1000/1", Value: "1=http://1.1.1.1:2380", CreatedIndex: 1}, - 1: {Key: "/1000/2", Value: "2=http://2.2.2.2:2380", CreatedIndex: 2}, - 2: {Key: "/1000/3", Value: "2=http://3.3.3.3:2380", CreatedIndex: 3}, - }, - 3, - 
"1=http://1.1.1.1:2380,2=http://2.2.2.2:2380,2=http://3.3.3.3:2380", - ErrDuplicateName, - }, - { - []*client.Node{ - 0: {Key: "/1000/1", Value: "1=1.1.1.1:2380", CreatedIndex: 1}, - 1: {Key: "/1000/2", Value: "2=http://2.2.2.2:2380", CreatedIndex: 2}, - 2: {Key: "/1000/3", Value: "2=http://3.3.3.3:2380", CreatedIndex: 3}, - }, - 3, - "1=1.1.1.1:2380,2=http://2.2.2.2:2380,2=http://3.3.3.3:2380", - ErrInvalidURL, - }, - } - - for i, tt := range tests { - cluster, err := nodesToCluster(tt.nodes, tt.size) - if err != tt.werr { - t.Errorf("#%d: err = %v, want %v", i, err, tt.werr) - } - if !reflect.DeepEqual(cluster, tt.wcluster) { - t.Errorf("#%d: cluster = %v, want %v", i, cluster, tt.wcluster) - } - } -} - -func TestSortableNodes(t *testing.T) { - ns := []*client.Node{ - 0: {CreatedIndex: 5}, - 1: {CreatedIndex: 1}, - 2: {CreatedIndex: 3}, - 3: {CreatedIndex: 4}, - } - // add some randomness - for i := 0; i < 10000; i++ { - ns = append(ns, &client.Node{CreatedIndex: uint64(rand.Int31())}) - } - sns := sortableNodes{ns} - sort.Sort(sns) - var cis []int - for _, n := range sns.Nodes { - cis = append(cis, int(n.CreatedIndex)) - } - if !sort.IntsAreSorted(cis) { - t.Errorf("isSorted = %v, want %v", sort.IntsAreSorted(cis), true) - } - cis = make([]int, 0) - for _, n := range ns { - cis = append(cis, int(n.CreatedIndex)) - } - if !sort.IntsAreSorted(cis) { - t.Errorf("isSorted = %v, want %v", sort.IntsAreSorted(cis), true) - } -} - -func TestRetryFailure(t *testing.T) { - nRetries = maxRetryInTest - defer func() { nRetries = math.MaxUint32 }() - - cluster := "1000" - c := &clientWithRetry{failTimes: 4} - fc := clockwork.NewFakeClock() - d := newTestDiscoveryWithClock(t, cluster, 1, c, fc) - go func() { - for i := uint(1); i <= maxRetryInTest; i++ { - fc.BlockUntil(1) - fc.Advance(time.Second * (0x1 << i)) - } - }() - if _, _, _, err := d.checkCluster(); err != ErrTooManyRetries { - t.Errorf("err = %v, want %v", err, ErrTooManyRetries) - } -} - -type clientWithResp struct 
{ - rs []*client.Response - w client.Watcher - client.KeysAPI -} - -func (c *clientWithResp) Create(ctx context.Context, key string, value string) (*client.Response, error) { - if len(c.rs) == 0 { - return &client.Response{}, nil - } - r := c.rs[0] - c.rs = c.rs[1:] - return r, nil -} - -func (c *clientWithResp) Get(ctx context.Context, key string, opts *client.GetOptions) (*client.Response, error) { - if len(c.rs) == 0 { - return &client.Response{}, &client.Error{Code: client.ErrorCodeKeyNotFound} - } - r := c.rs[0] - c.rs = append(c.rs[1:], r) - return r, nil -} - -func (c *clientWithResp) Watcher(key string, opts *client.WatcherOptions) client.Watcher { - return c.w -} - -type clientWithErr struct { - err error - w client.Watcher - client.KeysAPI -} - -func (c *clientWithErr) Create(ctx context.Context, key string, value string) (*client.Response, error) { - return &client.Response{}, c.err -} - -func (c *clientWithErr) Get(ctx context.Context, key string, opts *client.GetOptions) (*client.Response, error) { - return &client.Response{}, c.err -} - -func (c *clientWithErr) Watcher(key string, opts *client.WatcherOptions) client.Watcher { - return c.w -} - -type watcherWithResp struct { - client.KeysAPI - rs []*client.Response -} - -func (w *watcherWithResp) Next(context.Context) (*client.Response, error) { - if len(w.rs) == 0 { - return &client.Response{}, nil - } - r := w.rs[0] - w.rs = w.rs[1:] - return r, nil -} - -type watcherWithErr struct { - err error -} - -func (w *watcherWithErr) Next(context.Context) (*client.Response, error) { - return &client.Response{}, w.err -} - -// clientWithRetry will timeout all requests up to failTimes -type clientWithRetry struct { - clientWithResp - failCount int - failTimes int -} - -func (c *clientWithRetry) Create(ctx context.Context, key string, value string) (*client.Response, error) { - if c.failCount < c.failTimes { - c.failCount++ - return nil, &client.ClusterError{Errors: []error{context.DeadlineExceeded}} - } - 
return c.clientWithResp.Create(ctx, key, value) -} - -func (c *clientWithRetry) Get(ctx context.Context, key string, opts *client.GetOptions) (*client.Response, error) { - if c.failCount < c.failTimes { - c.failCount++ - return nil, &client.ClusterError{Errors: []error{context.DeadlineExceeded}} - } - return c.clientWithResp.Get(ctx, key, opts) -} - -// watcherWithRetry will timeout all requests up to failTimes -type watcherWithRetry struct { - rs []*client.Response - failCount int - failTimes int -} - -func (w *watcherWithRetry) Next(context.Context) (*client.Response, error) { - if w.failCount < w.failTimes { - w.failCount++ - return nil, &client.ClusterError{Errors: []error{context.DeadlineExceeded}} - } - if len(w.rs) == 0 { - return &client.Response{}, nil - } - r := w.rs[0] - w.rs = w.rs[1:] - return r, nil -} - -func newTestDiscovery(t *testing.T, cluster string, id types.ID, c client.KeysAPI) *discovery { - return &discovery{ - lg: zaptest.NewLogger(t), - cluster: cluster, - id: id, - c: c, - url: &url.URL{Scheme: "http", Host: "test.com"}, - } -} - -func newTestDiscoveryWithClock(t *testing.T, cluster string, id types.ID, c client.KeysAPI, clock clockwork.Clock) *discovery { - return &discovery{ - lg: zaptest.NewLogger(t), - cluster: cluster, - id: id, - c: c, - url: &url.URL{Scheme: "http", Host: "test.com"}, - clock: clock, - } -} diff --git a/server/etcdserver/api/v2error/error.go b/server/etcdserver/api/v2error/error.go deleted file mode 100644 index ab24757d761..00000000000 --- a/server/etcdserver/api/v2error/error.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v2error describes errors in etcd project. When any change happens, -// https://github.com/etcd-io/website/blob/main/content/docs/v2/errorcode.md -// needs to be updated correspondingly. -// To be deprecated in favor of v3 APIs. -package v2error - -import ( - "encoding/json" - "fmt" - "net/http" -) - -var errors = map[int]string{ - // command related errors - EcodeKeyNotFound: "Key not found", - EcodeTestFailed: "Compare failed", //test and set - EcodeNotFile: "Not a file", - ecodeNoMorePeer: "Reached the max number of peers in the cluster", - EcodeNotDir: "Not a directory", - EcodeNodeExist: "Key already exists", // create - ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd", - EcodeRootROnly: "Root is read only", - EcodeDirNotEmpty: "Directory not empty", - ecodeExistingPeerAddr: "Peer address has existed", - EcodeUnauthorized: "The request requires user authentication", - - // Post form related errors - ecodeValueRequired: "Value is Required in POST form", - EcodePrevValueRequired: "PrevValue is Required in POST form", - EcodeTTLNaN: "The given TTL in POST form is not a number", - EcodeIndexNaN: "The given index in POST form is not a number", - ecodeValueOrTTLRequired: "Value or TTL is required in POST form", - ecodeTimeoutNaN: "The given timeout in POST form is not a number", - ecodeNameRequired: "Name is required in POST form", - ecodeIndexOrValueRequired: "Index or value is required", - ecodeIndexValueMutex: "Index and value cannot both be specified", - EcodeInvalidField: "Invalid field", - EcodeInvalidForm: 
"Invalid POST form", - EcodeRefreshValue: "Value provided on refresh", - EcodeRefreshTTLRequired: "A TTL must be provided on refresh", - - // raft related errors - EcodeRaftInternal: "Raft Internal Error", - EcodeLeaderElect: "During Leader Election", - - // etcd related errors - EcodeWatcherCleared: "watcher is cleared due to etcd recovery", - EcodeEventIndexCleared: "The event in requested index is outdated and cleared", - ecodeStandbyInternal: "Standby Internal Error", - ecodeInvalidActiveSize: "Invalid active size", - ecodeInvalidRemoveDelay: "Standby remove delay", - - // client related errors - ecodeClientInternal: "Client Internal Error", -} - -var errorStatus = map[int]int{ - EcodeKeyNotFound: http.StatusNotFound, - EcodeNotFile: http.StatusForbidden, - EcodeDirNotEmpty: http.StatusForbidden, - EcodeUnauthorized: http.StatusUnauthorized, - EcodeTestFailed: http.StatusPreconditionFailed, - EcodeNodeExist: http.StatusPreconditionFailed, - EcodeRaftInternal: http.StatusInternalServerError, - EcodeLeaderElect: http.StatusInternalServerError, -} - -const ( - EcodeKeyNotFound = 100 - EcodeTestFailed = 101 - EcodeNotFile = 102 - ecodeNoMorePeer = 103 - EcodeNotDir = 104 - EcodeNodeExist = 105 - ecodeKeyIsPreserved = 106 - EcodeRootROnly = 107 - EcodeDirNotEmpty = 108 - ecodeExistingPeerAddr = 109 - EcodeUnauthorized = 110 - - ecodeValueRequired = 200 - EcodePrevValueRequired = 201 - EcodeTTLNaN = 202 - EcodeIndexNaN = 203 - ecodeValueOrTTLRequired = 204 - ecodeTimeoutNaN = 205 - ecodeNameRequired = 206 - ecodeIndexOrValueRequired = 207 - ecodeIndexValueMutex = 208 - EcodeInvalidField = 209 - EcodeInvalidForm = 210 - EcodeRefreshValue = 211 - EcodeRefreshTTLRequired = 212 - - EcodeRaftInternal = 300 - EcodeLeaderElect = 301 - - EcodeWatcherCleared = 400 - EcodeEventIndexCleared = 401 - ecodeStandbyInternal = 402 - ecodeInvalidActiveSize = 403 - ecodeInvalidRemoveDelay = 404 - - ecodeClientInternal = 500 -) - -type Error struct { - ErrorCode int `json:"errorCode"` - 
Message string `json:"message"` - Cause string `json:"cause,omitempty"` - Index uint64 `json:"index"` -} - -func NewError(errorCode int, cause string, index uint64) *Error { - return &Error{ - ErrorCode: errorCode, - Message: errors[errorCode], - Cause: cause, - Index: index, - } -} - -// Error is for the error interface -func (e Error) Error() string { - return e.Message + " (" + e.Cause + ")" -} - -func (e Error) toJsonString() string { - b, _ := json.Marshal(e) - return string(b) -} - -func (e Error) StatusCode() int { - status, ok := errorStatus[e.ErrorCode] - if !ok { - status = http.StatusBadRequest - } - return status -} - -func (e Error) WriteTo(w http.ResponseWriter) error { - w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(e.StatusCode()) - _, err := w.Write([]byte(e.toJsonString() + "\n")) - return err -} diff --git a/server/etcdserver/api/v2error/error_test.go b/server/etcdserver/api/v2error/error_test.go deleted file mode 100644 index 39b3cc0f6eb..00000000000 --- a/server/etcdserver/api/v2error/error_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2error - -import ( - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" -) - -func TestErrorWriteTo(t *testing.T) { - for k := range errors { - err := NewError(k, "", 1) - rr := httptest.NewRecorder() - err.WriteTo(rr) - - if err.StatusCode() != rr.Code { - t.Errorf("HTTP status code %d, want %d", rr.Code, err.StatusCode()) - } - - gbody := strings.TrimSuffix(rr.Body.String(), "\n") - if err.toJsonString() != gbody { - t.Errorf("HTTP body %q, want %q", gbody, err.toJsonString()) - } - - wheader := http.Header(map[string][]string{ - "Content-Type": {"application/json"}, - "X-Etcd-Index": {"1"}, - }) - - if !reflect.DeepEqual(wheader, rr.HeaderMap) { - t.Errorf("HTTP headers %v, want %v", rr.HeaderMap, wheader) - } - } - -} diff --git a/server/etcdserver/api/v2stats/queue.go b/server/etcdserver/api/v2stats/queue.go deleted file mode 100644 index 2c3dff3d0ff..00000000000 --- a/server/etcdserver/api/v2stats/queue.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2stats - -import ( - "sync" - "time" -) - -const ( - queueCapacity = 200 -) - -// RequestStats represent the stats for a request. -// It encapsulates the sending time and the size of the request. 
-type RequestStats struct { - SendingTime time.Time - Size int -} - -type statsQueue struct { - items [queueCapacity]*RequestStats - size int - front int - back int - totalReqSize int - rwl sync.RWMutex -} - -func (q *statsQueue) Len() int { - return q.size -} - -func (q *statsQueue) ReqSize() int { - return q.totalReqSize -} - -// FrontAndBack gets the front and back elements in the queue -// We must grab front and back together with the protection of the lock -func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) { - q.rwl.RLock() - defer q.rwl.RUnlock() - if q.size != 0 { - return q.items[q.front], q.items[q.back] - } - return nil, nil -} - -// Insert function insert a RequestStats into the queue and update the records -func (q *statsQueue) Insert(p *RequestStats) { - q.rwl.Lock() - defer q.rwl.Unlock() - - q.back = (q.back + 1) % queueCapacity - - if q.size == queueCapacity { //dequeue - q.totalReqSize -= q.items[q.front].Size - q.front = (q.back + 1) % queueCapacity - } else { - q.size++ - } - - q.items[q.back] = p - q.totalReqSize += q.items[q.back].Size - -} - -// Rate function returns the package rate and byte rate -func (q *statsQueue) Rate() (float64, float64) { - front, back := q.frontAndBack() - - if front == nil || back == nil { - return 0, 0 - } - - if time.Since(back.SendingTime) > time.Second { - q.Clear() - return 0, 0 - } - - sampleDuration := back.SendingTime.Sub(front.SendingTime) - - pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second) - - br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second) - - return pr, br -} - -// Clear function clear up the statsQueue -func (q *statsQueue) Clear() { - q.rwl.Lock() - defer q.rwl.Unlock() - q.back = -1 - q.front = 0 - q.size = 0 - q.totalReqSize = 0 -} diff --git a/server/etcdserver/api/v2stats/server.go b/server/etcdserver/api/v2stats/server.go deleted file mode 100644 index e8d218a7209..00000000000 --- a/server/etcdserver/api/v2stats/server.go +++ 
/dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2stats - -import ( - "encoding/json" - "log" - "sync" - "time" - - "go.etcd.io/raft/v3" -) - -// ServerStats encapsulates various statistics about an EtcdServer and its -// communication with other members of the cluster -type ServerStats struct { - serverStats - sync.Mutex -} - -func NewServerStats(name, id string) *ServerStats { - ss := &ServerStats{ - serverStats: serverStats{ - Name: name, - ID: id, - }, - } - now := time.Now() - ss.StartTime = now - ss.LeaderInfo.StartTime = now - ss.sendRateQueue = &statsQueue{back: -1} - ss.recvRateQueue = &statsQueue{back: -1} - return ss -} - -type serverStats struct { - Name string `json:"name"` - // ID is the raft ID of the node. - // TODO(jonboulle): use ID instead of name? 
- ID string `json:"id"` - State raft.StateType `json:"state"` - StartTime time.Time `json:"startTime"` - - LeaderInfo struct { - Name string `json:"leader"` - Uptime string `json:"uptime"` - StartTime time.Time `json:"startTime"` - } `json:"leaderInfo"` - - RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt"` - RecvingPkgRate float64 `json:"recvPkgRate,omitempty"` - RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"` - - SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"` - SendingPkgRate float64 `json:"sendPkgRate,omitempty"` - SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"` - - sendRateQueue *statsQueue - recvRateQueue *statsQueue -} - -func (ss *ServerStats) JSON() []byte { - ss.Lock() - stats := ss.serverStats - stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() - stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() - stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() - ss.Unlock() - b, err := json.Marshal(stats) - // TODO(jonboulle): appropriate error handling? 
- if err != nil { - log.Printf("stats: error marshalling server stats: %v", err) - } - return b -} - -// RecvAppendReq updates the ServerStats in response to an AppendRequest -// from the given leader being received -func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { - ss.Lock() - defer ss.Unlock() - - now := time.Now() - - ss.State = raft.StateFollower - if leader != ss.LeaderInfo.Name { - ss.LeaderInfo.Name = leader - ss.LeaderInfo.StartTime = now - } - - ss.recvRateQueue.Insert( - &RequestStats{ - SendingTime: now, - Size: reqSize, - }, - ) - ss.RecvAppendRequestCnt++ -} - -// SendAppendReq updates the ServerStats in response to an AppendRequest -// being sent by this server -func (ss *ServerStats) SendAppendReq(reqSize int) { - ss.Lock() - defer ss.Unlock() - - ss.becomeLeader() - - ss.sendRateQueue.Insert( - &RequestStats{ - SendingTime: time.Now(), - Size: reqSize, - }, - ) - - ss.SendAppendRequestCnt++ -} - -func (ss *ServerStats) BecomeLeader() { - ss.Lock() - defer ss.Unlock() - ss.becomeLeader() -} - -func (ss *ServerStats) becomeLeader() { - if ss.State != raft.StateLeader { - ss.State = raft.StateLeader - ss.LeaderInfo.Name = ss.ID - ss.LeaderInfo.StartTime = time.Now() - } -} diff --git a/server/etcdserver/api/v2store/doc.go b/server/etcdserver/api/v2store/doc.go deleted file mode 100644 index 1933e4cd5ac..00000000000 --- a/server/etcdserver/api/v2store/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v2store defines etcd's in-memory key/value store in v2 API. -// To be deprecated in favor of v3 storage. -package v2store diff --git a/server/etcdserver/api/v2store/event.go b/server/etcdserver/api/v2store/event.go deleted file mode 100644 index 33e901744d5..00000000000 --- a/server/etcdserver/api/v2store/event.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -const ( - Get = "get" - Create = "create" - Set = "set" - Update = "update" - Delete = "delete" - CompareAndSwap = "compareAndSwap" - CompareAndDelete = "compareAndDelete" - Expire = "expire" -) - -type Event struct { - Action string `json:"action"` - Node *NodeExtern `json:"node,omitempty"` - PrevNode *NodeExtern `json:"prevNode,omitempty"` - EtcdIndex uint64 `json:"-"` - Refresh bool `json:"refresh,omitempty"` -} - -func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event { - n := &NodeExtern{ - Key: key, - ModifiedIndex: modifiedIndex, - CreatedIndex: createdIndex, - } - - return &Event{ - Action: action, - Node: n, - } -} - -func (e *Event) IsCreated() bool { - if e.Action == Create { - return true - } - return e.Action == Set && e.PrevNode == nil -} - -func (e *Event) Index() uint64 { - return e.Node.ModifiedIndex -} - -func (e *Event) Clone() *Event { - return &Event{ - Action: e.Action, - EtcdIndex: e.EtcdIndex, - Node: e.Node.Clone(), - PrevNode: e.PrevNode.Clone(), - } -} - -func (e *Event) SetRefresh() { - e.Refresh = true -} diff --git a/server/etcdserver/api/v2store/event_test.go b/server/etcdserver/api/v2store/event_test.go deleted file mode 100644 index 6fc25fd74c9..00000000000 --- a/server/etcdserver/api/v2store/event_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import ( - "testing" - - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" -) - -// TestEventQueue tests a queue with capacity = 100 -// Add 200 events into that queue, and test if the -// previous 100 events have been swapped out. -func TestEventQueue(t *testing.T) { - - eh := newEventHistory(100) - - // Add - for i := 0; i < 200; i++ { - e := newEvent(Create, "/foo", uint64(i), uint64(i)) - eh.addEvent(e) - } - - // Test - j := 100 - i := eh.Queue.Front - n := eh.Queue.Size - for ; n > 0; n-- { - e := eh.Queue.Events[i] - if e.Index() != uint64(j) { - t.Fatalf("queue error!") - } - j++ - i = (i + 1) % eh.Queue.Capacity - } -} - -func TestScanHistory(t *testing.T) { - eh := newEventHistory(100) - - // Add - eh.addEvent(newEvent(Create, "/foo", 1, 1)) - eh.addEvent(newEvent(Create, "/foo/bar", 2, 2)) - eh.addEvent(newEvent(Create, "/foo/foo", 3, 3)) - eh.addEvent(newEvent(Create, "/foo/bar/bar", 4, 4)) - eh.addEvent(newEvent(Create, "/foo/foo/foo", 5, 5)) - - // Delete a dir - de := newEvent(Delete, "/foo", 6, 6) - de.PrevNode = newDir(nil, "/foo", 1, nil, Permanent).Repr(false, false, nil) - eh.addEvent(de) - - e, err := eh.scan("/foo", false, 1) - if err != nil || e.Index() != 1 { - t.Fatalf("scan error [/foo] [1] %d (%v)", e.Index(), err) - } - - e, err = eh.scan("/foo/bar", false, 1) - - if err != nil || e.Index() != 2 { - t.Fatalf("scan error [/foo/bar] [2] %d (%v)", e.Index(), err) - } - - e, err = eh.scan("/foo/bar", true, 3) - - if err != nil || e.Index() != 4 { - t.Fatalf("scan error [/foo/bar/bar] [4] %d (%v)", e.Index(), err) - } - - e, err = eh.scan("/foo/foo/foo", false, 6) - if err != nil || e.Index() != 6 { - t.Fatalf("scan error [/foo/foo/foo] [6] %d (%v)", e.Index(), err) - } - - e, _ = eh.scan("/foo/bar", true, 7) - if e != nil { - t.Fatalf("bad index shoud reuturn nil") - } -} - -func TestEventIndexHistoryCleared(t *testing.T) { - eh := newEventHistory(5) - - // Add - eh.addEvent(newEvent(Create, "/foo", 1, 1)) - 
eh.addEvent(newEvent(Create, "/foo/bar", 2, 2)) - eh.addEvent(newEvent(Create, "/foo/foo", 3, 3)) - eh.addEvent(newEvent(Create, "/foo/bar/bar", 4, 4)) - eh.addEvent(newEvent(Create, "/foo/foo/foo", 5, 5)) - - // Add a new event which will replace/de-queue the first entry - eh.addEvent(newEvent(Create, "/foo/bar/bar/bar", 6, 6)) - - // test for the event which has been replaced. - _, err := eh.scan("/foo", false, 1) - if err == nil || err.ErrorCode != v2error.EcodeEventIndexCleared { - t.Fatalf("scan error cleared index should return err with %d got (%v)", v2error.EcodeEventIndexCleared, err) - } -} - -// TestFullEventQueue tests a queue with capacity = 10 -// Add 1000 events into that queue, and test if scanning -// works still for previous events. -func TestFullEventQueue(t *testing.T) { - - eh := newEventHistory(10) - - // Add - for i := 0; i < 1000; i++ { - ce := newEvent(Create, "/foo", uint64(i), uint64(i)) - eh.addEvent(ce) - e, err := eh.scan("/foo", true, uint64(i-1)) - if i > 0 { - if e == nil || err != nil { - t.Fatalf("scan error [/foo] [%v] %v", i-1, i) - } - } - } -} - -func TestCloneEvent(t *testing.T) { - e1 := &Event{ - Action: Create, - EtcdIndex: 1, - Node: nil, - PrevNode: nil, - } - e2 := e1.Clone() - if e2.Action != Create { - t.Fatalf("Action=%q, want %q", e2.Action, Create) - } - if e2.EtcdIndex != e1.EtcdIndex { - t.Fatalf("EtcdIndex=%d, want %d", e2.EtcdIndex, e1.EtcdIndex) - } - // Changing the cloned node should not affect the original - e2.Action = Delete - e2.EtcdIndex = uint64(5) - if e1.Action != Create { - t.Fatalf("Action=%q, want %q", e1.Action, Create) - } - if e1.EtcdIndex != uint64(1) { - t.Fatalf("EtcdIndex=%d, want %d", e1.EtcdIndex, uint64(1)) - } - if e2.Action != Delete { - t.Fatalf("Action=%q, want %q", e2.Action, Delete) - } - if e2.EtcdIndex != uint64(5) { - t.Fatalf("EtcdIndex=%d, want %d", e2.EtcdIndex, uint64(5)) - } -} diff --git a/server/etcdserver/api/v2store/heap_test.go 
b/server/etcdserver/api/v2store/heap_test.go deleted file mode 100644 index 9c18e150d7c..00000000000 --- a/server/etcdserver/api/v2store/heap_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -import ( - "fmt" - "testing" - "time" -) - -func TestHeapPushPop(t *testing.T) { - h := newTtlKeyHeap() - - // add from older expire time to earlier expire time - // the path is equal to ttl from now - for i := 0; i < 10; i++ { - path := fmt.Sprintf("%v", 10-i) - m := time.Duration(10 - i) - n := newKV(nil, path, path, 0, nil, time.Now().Add(time.Second*m)) - h.push(n) - } - - min := time.Now() - - for i := 0; i < 10; i++ { - node := h.pop() - if node.ExpireTime.Before(min) { - t.Fatal("heap sort wrong!") - } - min = node.ExpireTime - } - -} - -func TestHeapUpdate(t *testing.T) { - h := newTtlKeyHeap() - - kvs := make([]*node, 10) - - // add from older expire time to earlier expire time - // the path is equal to ttl from now - for i := range kvs { - path := fmt.Sprintf("%v", 10-i) - m := time.Duration(10 - i) - n := newKV(nil, path, path, 0, nil, time.Now().Add(time.Second*m)) - kvs[i] = n - h.push(n) - } - - // Path 7 - kvs[3].ExpireTime = time.Now().Add(time.Second * 11) - - // Path 5 - kvs[5].ExpireTime = time.Now().Add(time.Second * 12) - - h.update(kvs[3]) - h.update(kvs[5]) - - min := time.Now() - - for i := 0; i < 10; i++ { - node := h.pop() - if 
node.ExpireTime.Before(min) { - t.Fatal("heap sort wrong!") - } - min = node.ExpireTime - - if i == 8 { - if node.Path != "7" { - t.Fatal("heap sort wrong!", node.Path) - } - } - - if i == 9 { - if node.Path != "5" { - t.Fatal("heap sort wrong!") - } - } - - } - -} diff --git a/server/etcdserver/api/v2store/metrics.go b/server/etcdserver/api/v2store/metrics.go deleted file mode 100644 index 5adea1efdd1..00000000000 --- a/server/etcdserver/api/v2store/metrics.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -import "github.com/prometheus/client_golang/prometheus" - -// Set of raw Prometheus metrics. -// Labels -// * action = declared in event.go -// * outcome = Outcome -// Do not increment directly, use Report* methods. -var ( - readCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "reads_total", - Help: "Total number of reads action by (get/getRecursive), local to this member.", - }, []string{"action"}) - - writeCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "writes_total", - Help: "Total number of writes (e.g. 
set/compareAndDelete) seen by this member.", - }, []string{"action"}) - - readFailedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "reads_failed_total", - Help: "Failed read actions by (get/getRecursive), local to this member.", - }, []string{"action"}) - - writeFailedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "writes_failed_total", - Help: "Failed write actions (e.g. set/compareAndDelete), seen by this member.", - }, []string{"action"}) - - expireCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "expires_total", - Help: "Total number of expired keys.", - }) - - watchRequests = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "watch_requests_total", - Help: "Total number of incoming watch requests (new or reestablished).", - }) - - watcherCount = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "watchers", - Help: "Count of currently active watchers.", - }) -) - -const ( - GetRecursive = "getRecursive" -) - -func init() { - if prometheus.Register(readCounter) != nil { - // Tests will try to double register since the tests use both - // store and store_test packages; ignore second attempts. 
- return - } - prometheus.MustRegister(writeCounter) - prometheus.MustRegister(expireCounter) - prometheus.MustRegister(watchRequests) - prometheus.MustRegister(watcherCount) -} - -func reportReadSuccess(readAction string) { - readCounter.WithLabelValues(readAction).Inc() -} - -func reportReadFailure(readAction string) { - readCounter.WithLabelValues(readAction).Inc() - readFailedCounter.WithLabelValues(readAction).Inc() -} - -func reportWriteSuccess(writeAction string) { - writeCounter.WithLabelValues(writeAction).Inc() -} - -func reportWriteFailure(writeAction string) { - writeCounter.WithLabelValues(writeAction).Inc() - writeFailedCounter.WithLabelValues(writeAction).Inc() -} - -func reportExpiredKey() { - expireCounter.Inc() -} - -func reportWatchRequest() { - watchRequests.Inc() -} - -func reportWatcherAdded() { - watcherCount.Inc() -} - -func reportWatcherRemoved() { - watcherCount.Dec() -} diff --git a/server/etcdserver/api/v2store/node.go b/server/etcdserver/api/v2store/node.go deleted file mode 100644 index 9fe6263e2e8..00000000000 --- a/server/etcdserver/api/v2store/node.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import ( - "path" - "sort" - "time" - - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - - "github.com/jonboulle/clockwork" -) - -// explanations of Compare function result -const ( - CompareMatch = iota - CompareIndexNotMatch - CompareValueNotMatch - CompareNotMatch -) - -var Permanent time.Time - -// node is the basic element in the store system. -// A key-value pair will have a string value -// A directory will have a children map -type node struct { - Path string - - CreatedIndex uint64 - ModifiedIndex uint64 - - Parent *node `json:"-"` // should not encode this field! avoid circular dependency. - - ExpireTime time.Time - Value string // for key-value pair - Children map[string]*node // for directory - - // A reference to the store this node is attached to. - store *store -} - -// newKV creates a Key-Value pair -func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node { - return &node{ - Path: nodePath, - CreatedIndex: createdIndex, - ModifiedIndex: createdIndex, - Parent: parent, - store: store, - ExpireTime: expireTime, - Value: value, - } -} - -// newDir creates a directory -func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node { - return &node{ - Path: nodePath, - CreatedIndex: createdIndex, - ModifiedIndex: createdIndex, - Parent: parent, - ExpireTime: expireTime, - Children: make(map[string]*node), - store: store, - } -} - -// IsHidden function checks if the node is a hidden node. A hidden node -// will begin with '_' -// A hidden node will not be shown via get command under a directory -// For example if we have /foo/_hidden and /foo/notHidden, get "/foo" -// will only return /foo/notHidden -func (n *node) IsHidden() bool { - _, name := path.Split(n.Path) - - return name[0] == '_' -} - -// IsPermanent function checks if the node is a permanent one. 
-func (n *node) IsPermanent() bool { - // we use a uninitialized time.Time to indicate the node is a - // permanent one. - // the uninitialized time.Time should equal zero. - return n.ExpireTime.IsZero() -} - -// IsDir function checks whether the node is a directory. -// If the node is a directory, the function will return true. -// Otherwise the function will return false. -func (n *node) IsDir() bool { - return n.Children != nil -} - -// Read function gets the value of the node. -// If the receiver node is not a key-value pair, a "Not A File" error will be returned. -func (n *node) Read() (string, *v2error.Error) { - if n.IsDir() { - return "", v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex) - } - - return n.Value, nil -} - -// Write function set the value of the node to the given value. -// If the receiver node is a directory, a "Not A File" error will be returned. -func (n *node) Write(value string, index uint64) *v2error.Error { - if n.IsDir() { - return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex) - } - - n.Value = value - n.ModifiedIndex = index - - return nil -} - -func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) { - if !n.IsPermanent() { - /* compute ttl as: - ceiling( (expireTime - timeNow) / nanosecondsPerSecond ) - which ranges from 1..n - rather than as: - ( (expireTime - timeNow) / nanosecondsPerSecond ) + 1 - which ranges 1..n+1 - */ - ttlN := n.ExpireTime.Sub(clock.Now()) - ttl := ttlN / time.Second - if (ttlN % time.Second) > 0 { - ttl++ - } - t := n.ExpireTime.UTC() - return &t, int64(ttl) - } - return nil, 0 -} - -// List function return a slice of nodes under the receiver node. -// If the receiver node is not a directory, a "Not A Directory" error will be returned. 
-func (n *node) List() ([]*node, *v2error.Error) { - if !n.IsDir() { - return nil, v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex) - } - - nodes := make([]*node, len(n.Children)) - - i := 0 - for _, node := range n.Children { - nodes[i] = node - i++ - } - - return nodes, nil -} - -// GetChild function returns the child node under the directory node. -// On success, it returns the file node -func (n *node) GetChild(name string) (*node, *v2error.Error) { - if !n.IsDir() { - return nil, v2error.NewError(v2error.EcodeNotDir, n.Path, n.store.CurrentIndex) - } - - child, ok := n.Children[name] - - if ok { - return child, nil - } - - return nil, nil -} - -// Add function adds a node to the receiver node. -// If the receiver is not a directory, a "Not A Directory" error will be returned. -// If there is an existing node with the same name under the directory, a "Already Exist" -// error will be returned -func (n *node) Add(child *node) *v2error.Error { - if !n.IsDir() { - return v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex) - } - - _, name := path.Split(child.Path) - - if _, ok := n.Children[name]; ok { - return v2error.NewError(v2error.EcodeNodeExist, "", n.store.CurrentIndex) - } - - n.Children[name] = child - - return nil -} - -// Remove function remove the node. 
-func (n *node) Remove(dir, recursive bool, callback func(path string)) *v2error.Error { - if !n.IsDir() { // key-value pair - _, name := path.Split(n.Path) - - // find its parent and remove the node from the map - if n.Parent != nil && n.Parent.Children[name] == n { - delete(n.Parent.Children, name) - } - - if callback != nil { - callback(n.Path) - } - - if !n.IsPermanent() { - n.store.ttlKeyHeap.remove(n) - } - - return nil - } - - if !dir { - // cannot delete a directory without dir set to true - return v2error.NewError(v2error.EcodeNotFile, n.Path, n.store.CurrentIndex) - } - - if len(n.Children) != 0 && !recursive { - // cannot delete a directory if it is not empty and the operation - // is not recursive - return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex) - } - - for _, child := range n.Children { // delete all children - child.Remove(true, true, callback) - } - - // delete self - _, name := path.Split(n.Path) - if n.Parent != nil && n.Parent.Children[name] == n { - delete(n.Parent.Children, name) - - if callback != nil { - callback(n.Path) - } - - if !n.IsPermanent() { - n.store.ttlKeyHeap.remove(n) - } - } - - return nil -} - -func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern { - if n.IsDir() { - node := &NodeExtern{ - Key: n.Path, - Dir: true, - ModifiedIndex: n.ModifiedIndex, - CreatedIndex: n.CreatedIndex, - } - node.Expiration, node.TTL = n.expirationAndTTL(clock) - - if !recursive { - return node - } - - children, _ := n.List() - node.Nodes = make(NodeExterns, len(children)) - - // we do not use the index in the children slice directly - // we need to skip the hidden one - i := 0 - - for _, child := range children { - - if child.IsHidden() { // get will not list hidden node - continue - } - - node.Nodes[i] = child.Repr(recursive, sorted, clock) - - i++ - } - - // eliminate hidden nodes - node.Nodes = node.Nodes[:i] - if sorted { - sort.Sort(node.Nodes) - } - - return node - } - - // since 
n.Value could be changed later, so we need to copy the value out - value := n.Value - node := &NodeExtern{ - Key: n.Path, - Value: &value, - ModifiedIndex: n.ModifiedIndex, - CreatedIndex: n.CreatedIndex, - } - node.Expiration, node.TTL = n.expirationAndTTL(clock) - return node -} - -func (n *node) UpdateTTL(expireTime time.Time) { - if !n.IsPermanent() { - if expireTime.IsZero() { - // from ttl to permanent - n.ExpireTime = expireTime - // remove from ttl heap - n.store.ttlKeyHeap.remove(n) - return - } - - // update ttl - n.ExpireTime = expireTime - // update ttl heap - n.store.ttlKeyHeap.update(n) - return - } - - if expireTime.IsZero() { - return - } - - // from permanent to ttl - n.ExpireTime = expireTime - // push into ttl heap - n.store.ttlKeyHeap.push(n) -} - -// Compare function compares node index and value with provided ones. -// second result value explains result and equals to one of Compare.. constants -func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) { - indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex - valueMatch := prevValue == "" || n.Value == prevValue - ok = valueMatch && indexMatch - switch { - case valueMatch && indexMatch: - which = CompareMatch - case indexMatch && !valueMatch: - which = CompareValueNotMatch - case valueMatch && !indexMatch: - which = CompareIndexNotMatch - default: - which = CompareNotMatch - } - return ok, which -} - -// Clone function clone the node recursively and return the new node. -// If the node is a directory, it will clone all the content under this directory. -// If the node is a key-value pair, it will clone the pair. 
-func (n *node) Clone() *node { - if !n.IsDir() { - newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime) - newkv.ModifiedIndex = n.ModifiedIndex - return newkv - } - - clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime) - clone.ModifiedIndex = n.ModifiedIndex - - for key, child := range n.Children { - clone.Children[key] = child.Clone() - } - - return clone -} - -// recoverAndclean function help to do recovery. -// Two things need to be done: 1. recovery structure; 2. delete expired nodes -// -// If the node is a directory, it will help recover children's parent pointer and recursively -// call this function on its children. -// We check the expire last since we need to recover the whole structure first and add all the -// notifications into the event history. -func (n *node) recoverAndclean() { - if n.IsDir() { - for _, child := range n.Children { - child.Parent = n - child.store = n.store - child.recoverAndclean() - } - } - - if !n.ExpireTime.IsZero() { - n.store.ttlKeyHeap.push(n) - } -} diff --git a/server/etcdserver/api/v2store/node_extern.go b/server/etcdserver/api/v2store/node_extern.go deleted file mode 100644 index b3bf5f3c976..00000000000 --- a/server/etcdserver/api/v2store/node_extern.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import ( - "sort" - "time" - - "github.com/jonboulle/clockwork" -) - -// NodeExtern is the external representation of the -// internal node with additional fields -// PrevValue is the previous value of the node -// TTL is time to live in second -type NodeExtern struct { - Key string `json:"key,omitempty"` - Value *string `json:"value,omitempty"` - Dir bool `json:"dir,omitempty"` - Expiration *time.Time `json:"expiration,omitempty"` - TTL int64 `json:"ttl,omitempty"` - Nodes NodeExterns `json:"nodes,omitempty"` - ModifiedIndex uint64 `json:"modifiedIndex,omitempty"` - CreatedIndex uint64 `json:"createdIndex,omitempty"` -} - -func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) { - if n.IsDir() { // node is a directory - eNode.Dir = true - - children, _ := n.List() - eNode.Nodes = make(NodeExterns, len(children)) - - // we do not use the index in the children slice directly - // we need to skip the hidden one - i := 0 - - for _, child := range children { - if child.IsHidden() { // get will not return hidden nodes - continue - } - - eNode.Nodes[i] = child.Repr(recursive, sorted, clock) - i++ - } - - // eliminate hidden nodes - eNode.Nodes = eNode.Nodes[:i] - - if sorted { - sort.Sort(eNode.Nodes) - } - - } else { // node is a file - value, _ := n.Read() - eNode.Value = &value - } - - eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock) -} - -func (eNode *NodeExtern) Clone() *NodeExtern { - if eNode == nil { - return nil - } - nn := &NodeExtern{ - Key: eNode.Key, - Dir: eNode.Dir, - TTL: eNode.TTL, - ModifiedIndex: eNode.ModifiedIndex, - CreatedIndex: eNode.CreatedIndex, - } - if eNode.Value != nil { - s := *eNode.Value - nn.Value = &s - } - if eNode.Expiration != nil { - t := *eNode.Expiration - nn.Expiration = &t - } - if eNode.Nodes != nil { - nn.Nodes = make(NodeExterns, len(eNode.Nodes)) - for i, n := range eNode.Nodes { - nn.Nodes[i] = n.Clone() - } - } - return nn -} - -type NodeExterns 
[]*NodeExtern - -// interfaces for sorting - -func (ns NodeExterns) Len() int { - return len(ns) -} - -func (ns NodeExterns) Less(i, j int) bool { - return ns[i].Key < ns[j].Key -} - -func (ns NodeExterns) Swap(i, j int) { - ns[i], ns[j] = ns[j], ns[i] -} diff --git a/server/etcdserver/api/v2store/node_extern_test.go b/server/etcdserver/api/v2store/node_extern_test.go deleted file mode 100644 index 2a73496d63a..00000000000 --- a/server/etcdserver/api/v2store/node_extern_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import ( - "reflect" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestNodeExternClone(t *testing.T) { - var eNode *NodeExtern - if g := eNode.Clone(); g != nil { - t.Fatalf("nil.Clone=%v, want nil", g) - } - - const ( - key string = "/foo/bar" - ttl int64 = 123456789 - ci uint64 = 123 - mi uint64 = 321 - ) - var ( - val = "some_data" - valp = &val - exp = time.Unix(12345, 67890) - expp = &exp - child = NodeExtern{} - childp = &child - childs = []*NodeExtern{childp} - ) - - eNode = &NodeExtern{ - Key: key, - TTL: ttl, - CreatedIndex: ci, - ModifiedIndex: mi, - Value: valp, - Expiration: expp, - Nodes: childs, - } - - gNode := eNode.Clone() - // Check the clone is as expected - assert.Equal(t, gNode.Key, key) - assert.Equal(t, gNode.TTL, ttl) - assert.Equal(t, gNode.CreatedIndex, ci) - assert.Equal(t, gNode.ModifiedIndex, mi) - // values should be the same - assert.Equal(t, *gNode.Value, val) - assert.Equal(t, *gNode.Expiration, exp) - assert.Equal(t, len(gNode.Nodes), len(childs)) - assert.Equal(t, *gNode.Nodes[0], child) - // but pointers should differ - if gNode.Value == eNode.Value { - t.Fatalf("expected value pointers to differ, but got same!") - } - if gNode.Expiration == eNode.Expiration { - t.Fatalf("expected expiration pointers to differ, but got same!") - } - if sameSlice(gNode.Nodes, eNode.Nodes) { - t.Fatalf("expected nodes pointers to differ, but got same!") - } - // Original should be the same - assert.Equal(t, eNode.Key, key) - assert.Equal(t, eNode.TTL, ttl) - assert.Equal(t, eNode.CreatedIndex, ci) - assert.Equal(t, eNode.ModifiedIndex, mi) - assert.Equal(t, eNode.Value, valp) - assert.Equal(t, eNode.Expiration, expp) - if !sameSlice(eNode.Nodes, childs) { - t.Fatalf("expected nodes pointer to same, but got different!") - } - // Change the clone and ensure the original is not affected - gNode.Key = "/baz" - gNode.TTL = 0 - gNode.Nodes[0].Key = "uno" - assert.Equal(t, eNode.Key, key) - assert.Equal(t, 
eNode.TTL, ttl) - assert.Equal(t, eNode.CreatedIndex, ci) - assert.Equal(t, eNode.ModifiedIndex, mi) - assert.Equal(t, *eNode.Nodes[0], child) - // Change the original and ensure the clone is not affected - eNode.Key = "/wuf" - assert.Equal(t, eNode.Key, "/wuf") - assert.Equal(t, gNode.Key, "/baz") -} - -func sameSlice(a, b []*NodeExtern) bool { - va := reflect.ValueOf(a) - vb := reflect.ValueOf(b) - return va.Len() == vb.Len() && va.Pointer() == vb.Pointer() -} diff --git a/server/etcdserver/api/v2store/node_test.go b/server/etcdserver/api/v2store/node_test.go deleted file mode 100644 index ad5dd9ac54b..00000000000 --- a/server/etcdserver/api/v2store/node_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import ( - "testing" - "time" - - "github.com/jonboulle/clockwork" -) - -var ( - key, val = "foo", "bar" - val1, val2 = "bar1", "bar2" - expiration = time.Minute -) - -func TestNewKVIs(t *testing.T) { - nd := newTestNode() - - if nd.IsHidden() { - t.Errorf("nd.Hidden() = %v, want = false", nd.IsHidden()) - } - - if nd.IsPermanent() { - t.Errorf("nd.IsPermanent() = %v, want = false", nd.IsPermanent()) - } - - if nd.IsDir() { - t.Errorf("nd.IsDir() = %v, want = false", nd.IsDir()) - } -} - -func TestNewKVReadWriteCompare(t *testing.T) { - nd := newTestNode() - - if v, err := nd.Read(); v != val || err != nil { - t.Errorf("value = %s and err = %v, want value = %s and err = nil", v, err, val) - } - - if err := nd.Write(val1, nd.CreatedIndex+1); err != nil { - t.Errorf("nd.Write error = %v, want = nil", err) - } else { - if v, err := nd.Read(); v != val1 || err != nil { - t.Errorf("value = %s and err = %v, want value = %s and err = nil", v, err, val1) - } - } - if err := nd.Write(val2, nd.CreatedIndex+2); err != nil { - t.Errorf("nd.Write error = %v, want = nil", err) - } else { - if v, err := nd.Read(); v != val2 || err != nil { - t.Errorf("value = %s and err = %v, want value = %s and err = nil", v, err, val2) - } - } - - if ok, which := nd.Compare(val2, 2); !ok || which != 0 { - t.Errorf("ok = %v and which = %d, want ok = true and which = 0", ok, which) - } -} - -func TestNewKVExpiration(t *testing.T) { - nd := newTestNode() - - if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > expiration.Nanoseconds() { - t.Errorf("ttl = %d, want %d < %d", ttl, ttl, expiration.Nanoseconds()) - } - - newExpiration := time.Hour - nd.UpdateTTL(time.Now().Add(newExpiration)) - if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > newExpiration.Nanoseconds() { - t.Errorf("ttl = %d, want %d < %d", ttl, ttl, newExpiration.Nanoseconds()) - } - if ns, err := nd.List(); ns != nil || err == nil { - t.Errorf("nodes = %v and err = %v, want nodes = 
nil and err != nil", ns, err) - } - - en := nd.Repr(false, false, clockwork.NewFakeClock()) - if en.Key != nd.Path { - t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path) - } - if *(en.Value) != nd.Value { - t.Errorf("*(en.Key) = %s, want = %s", *(en.Value), nd.Value) - } -} - -func TestNewKVListReprCompareClone(t *testing.T) { - nd := newTestNode() - - if ns, err := nd.List(); ns != nil || err == nil { - t.Errorf("nodes = %v and err = %v, want nodes = nil and err != nil", ns, err) - } - - en := nd.Repr(false, false, clockwork.NewFakeClock()) - if en.Key != nd.Path { - t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path) - } - if *(en.Value) != nd.Value { - t.Errorf("*(en.Key) = %s, want = %s", *(en.Value), nd.Value) - } - - cn := nd.Clone() - if cn.Path != nd.Path { - t.Errorf("cn.Path = %s, want = %s", cn.Path, nd.Path) - } - if cn.Value != nd.Value { - t.Errorf("cn.Value = %s, want = %s", cn.Value, nd.Value) - } -} - -func TestNewKVRemove(t *testing.T) { - nd := newTestNode() - - if v, err := nd.Read(); v != val || err != nil { - t.Errorf("value = %s and err = %v, want value = %s and err = nil", v, err, val) - } - - if err := nd.Write(val1, nd.CreatedIndex+1); err != nil { - t.Errorf("nd.Write error = %v, want = nil", err) - } else { - if v, err := nd.Read(); v != val1 || err != nil { - t.Errorf("value = %s and err = %v, want value = %s and err = nil", v, err, val1) - } - } - if err := nd.Write(val2, nd.CreatedIndex+2); err != nil { - t.Errorf("nd.Write error = %v, want = nil", err) - } else { - if v, err := nd.Read(); v != val2 || err != nil { - t.Errorf("value = %s and err = %v, want value = %s and err = nil", v, err, val2) - } - } - - if err := nd.Remove(false, false, nil); err != nil { - t.Errorf("nd.Remove err = %v, want = nil", err) - } else { - // still readable - if v, err := nd.Read(); v != val2 || err != nil { - t.Errorf("value = %s and err = %v, want value = %s and err = nil", v, err, val2) - } - if len(nd.store.ttlKeyHeap.array) != 0 { - 
t.Errorf("len(nd.store.ttlKeyHeap.array) = %d, want = 0", len(nd.store.ttlKeyHeap.array)) - } - if len(nd.store.ttlKeyHeap.keyMap) != 0 { - t.Errorf("len(nd.store.ttlKeyHeap.keyMap) = %d, want = 0", len(nd.store.ttlKeyHeap.keyMap)) - } - } -} - -func TestNewDirIs(t *testing.T) { - nd, _ := newTestNodeDir() - if nd.IsHidden() { - t.Errorf("nd.Hidden() = %v, want = false", nd.IsHidden()) - } - - if nd.IsPermanent() { - t.Errorf("nd.IsPermanent() = %v, want = false", nd.IsPermanent()) - } - - if !nd.IsDir() { - t.Errorf("nd.IsDir() = %v, want = true", nd.IsDir()) - } -} - -func TestNewDirReadWriteListReprClone(t *testing.T) { - nd, _ := newTestNodeDir() - - if _, err := nd.Read(); err == nil { - t.Errorf("err = %v, want err != nil", err) - } - - if err := nd.Write(val, nd.CreatedIndex+1); err == nil { - t.Errorf("err = %v, want err != nil", err) - } - - if ns, err := nd.List(); ns == nil && err != nil { - t.Errorf("nodes = %v and err = %v, want nodes = nil and err == nil", ns, err) - } - - en := nd.Repr(false, false, clockwork.NewFakeClock()) - if en.Key != nd.Path { - t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path) - } - - cn := nd.Clone() - if cn.Path != nd.Path { - t.Errorf("cn.Path = %s, want = %s", cn.Path, nd.Path) - } -} - -func TestNewDirExpirationTTL(t *testing.T) { - nd, _ := newTestNodeDir() - - if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > expiration.Nanoseconds() { - t.Errorf("ttl = %d, want %d < %d", ttl, ttl, expiration.Nanoseconds()) - } - - newExpiration := time.Hour - nd.UpdateTTL(time.Now().Add(newExpiration)) - if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > newExpiration.Nanoseconds() { - t.Errorf("ttl = %d, want %d < %d", ttl, ttl, newExpiration.Nanoseconds()) - } -} - -func TestNewDirChild(t *testing.T) { - nd, child := newTestNodeDir() - - if err := nd.Add(child); err != nil { - t.Errorf("nd.Add(child) err = %v, want = nil", err) - } else { - if len(nd.Children) == 0 { - t.Errorf("len(nd.Children) = %d, 
want = 1", len(nd.Children)) - } - } - - if err := child.Remove(true, true, nil); err != nil { - t.Errorf("child.Remove err = %v, want = nil", err) - } else { - if len(nd.Children) != 0 { - t.Errorf("len(nd.Children) = %d, want = 0", len(nd.Children)) - } - } -} - -func newTestNode() *node { - nd := newKV(newStore(), key, val, 0, nil, time.Now().Add(expiration)) - return nd -} - -func newTestNodeDir() (*node, *node) { - s := newStore() - nd := newDir(s, key, 0, nil, time.Now().Add(expiration)) - cKey, cVal := "hello", "world" - child := newKV(s, cKey, cVal, 0, nd, time.Now().Add(expiration)) - return nd, child -} diff --git a/server/etcdserver/api/v2store/stats.go b/server/etcdserver/api/v2store/stats.go deleted file mode 100644 index 9151799da7b..00000000000 --- a/server/etcdserver/api/v2store/stats.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import ( - "encoding/json" - "sync/atomic" -) - -const ( - SetSuccess = iota - SetFail - DeleteSuccess - DeleteFail - CreateSuccess - CreateFail - UpdateSuccess - UpdateFail - CompareAndSwapSuccess - CompareAndSwapFail - GetSuccess - GetFail - ExpireCount - CompareAndDeleteSuccess - CompareAndDeleteFail -) - -type Stats struct { - // Number of get requests - - GetSuccess uint64 `json:"getsSuccess"` - GetFail uint64 `json:"getsFail"` - - // Number of sets requests - - SetSuccess uint64 `json:"setsSuccess"` - SetFail uint64 `json:"setsFail"` - - // Number of delete requests - - DeleteSuccess uint64 `json:"deleteSuccess"` - DeleteFail uint64 `json:"deleteFail"` - - // Number of update requests - - UpdateSuccess uint64 `json:"updateSuccess"` - UpdateFail uint64 `json:"updateFail"` - - // Number of create requests - - CreateSuccess uint64 `json:"createSuccess"` - CreateFail uint64 `json:"createFail"` - - // Number of testAndSet requests - - CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"` - CompareAndSwapFail uint64 `json:"compareAndSwapFail"` - - // Number of compareAndDelete requests - - CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"` - CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"` - - ExpireCount uint64 `json:"expireCount"` - - Watchers uint64 `json:"watchers"` -} - -func newStats() *Stats { - s := new(Stats) - return s -} - -func (s *Stats) clone() *Stats { - return &Stats{ - GetSuccess: atomic.LoadUint64(&s.GetSuccess), - GetFail: atomic.LoadUint64(&s.GetFail), - SetSuccess: atomic.LoadUint64(&s.SetSuccess), - SetFail: atomic.LoadUint64(&s.SetFail), - DeleteSuccess: atomic.LoadUint64(&s.DeleteSuccess), - DeleteFail: atomic.LoadUint64(&s.DeleteFail), - UpdateSuccess: atomic.LoadUint64(&s.UpdateSuccess), - UpdateFail: atomic.LoadUint64(&s.UpdateFail), - CreateSuccess: atomic.LoadUint64(&s.CreateSuccess), - CreateFail: atomic.LoadUint64(&s.CreateFail), - CompareAndSwapSuccess: 
atomic.LoadUint64(&s.CompareAndSwapSuccess), - CompareAndSwapFail: atomic.LoadUint64(&s.CompareAndSwapFail), - CompareAndDeleteSuccess: atomic.LoadUint64(&s.CompareAndDeleteSuccess), - CompareAndDeleteFail: atomic.LoadUint64(&s.CompareAndDeleteFail), - ExpireCount: atomic.LoadUint64(&s.ExpireCount), - Watchers: atomic.LoadUint64(&s.Watchers), - } -} - -func (s *Stats) toJson() []byte { - b, _ := json.Marshal(s) - return b -} - -func (s *Stats) Inc(field int) { - switch field { - case SetSuccess: - atomic.AddUint64(&s.SetSuccess, 1) - case SetFail: - atomic.AddUint64(&s.SetFail, 1) - case CreateSuccess: - atomic.AddUint64(&s.CreateSuccess, 1) - case CreateFail: - atomic.AddUint64(&s.CreateFail, 1) - case DeleteSuccess: - atomic.AddUint64(&s.DeleteSuccess, 1) - case DeleteFail: - atomic.AddUint64(&s.DeleteFail, 1) - case GetSuccess: - atomic.AddUint64(&s.GetSuccess, 1) - case GetFail: - atomic.AddUint64(&s.GetFail, 1) - case UpdateSuccess: - atomic.AddUint64(&s.UpdateSuccess, 1) - case UpdateFail: - atomic.AddUint64(&s.UpdateFail, 1) - case CompareAndSwapSuccess: - atomic.AddUint64(&s.CompareAndSwapSuccess, 1) - case CompareAndSwapFail: - atomic.AddUint64(&s.CompareAndSwapFail, 1) - case CompareAndDeleteSuccess: - atomic.AddUint64(&s.CompareAndDeleteSuccess, 1) - case CompareAndDeleteFail: - atomic.AddUint64(&s.CompareAndDeleteFail, 1) - case ExpireCount: - atomic.AddUint64(&s.ExpireCount, 1) - } -} diff --git a/server/etcdserver/api/v2store/stats_test.go b/server/etcdserver/api/v2store/stats_test.go deleted file mode 100644 index ba684fa2376..00000000000 --- a/server/etcdserver/api/v2store/stats_test.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -// TestStoreStatsGetSuccess ensures that a successful Get is recorded in the stats. -func TestStoreStatsGetSuccess(t *testing.T) { - s := newStore() - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - s.Get("/foo", false, false) - assert.Equal(t, uint64(1), s.Stats.GetSuccess, "") -} - -// TestStoreStatsGetFail ensures that a failed Get is recorded in the stats. -func TestStoreStatsGetFail(t *testing.T) { - s := newStore() - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - s.Get("/no_such_key", false, false) - assert.Equal(t, uint64(1), s.Stats.GetFail, "") -} - -// TestStoreStatsCreateSuccess ensures that a successful Create is recorded in the stats. -func TestStoreStatsCreateSuccess(t *testing.T) { - s := newStore() - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CreateSuccess, "") -} - -// TestStoreStatsCreateFail ensures that a failed Create is recorded in the stats. -func TestStoreStatsCreateFail(t *testing.T) { - s := newStore() - s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CreateFail, "") -} - -// TestStoreStatsUpdateSuccess ensures that a successful Update is recorded in the stats. 
-func TestStoreStatsUpdateSuccess(t *testing.T) { - s := newStore() - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - s.Update("/foo", "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.UpdateSuccess, "") -} - -// TestStoreStatsUpdateFail ensures that a failed Update is recorded in the stats. -func TestStoreStatsUpdateFail(t *testing.T) { - s := newStore() - s.Update("/foo", "bar", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.UpdateFail, "") -} - -// TestStoreStatsCompareAndSwapSuccess ensures that a successful CAS is recorded in the stats. -func TestStoreStatsCompareAndSwapSuccess(t *testing.T) { - s := newStore() - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - s.CompareAndSwap("/foo", "bar", 0, "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CompareAndSwapSuccess, "") -} - -// TestStoreStatsCompareAndSwapFail ensures that a failed CAS is recorded in the stats. -func TestStoreStatsCompareAndSwapFail(t *testing.T) { - s := newStore() - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - s.CompareAndSwap("/foo", "wrong_value", 0, "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CompareAndSwapFail, "") -} - -// TestStoreStatsDeleteSuccess ensures that a successful Delete is recorded in the stats. -func TestStoreStatsDeleteSuccess(t *testing.T) { - s := newStore() - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - s.Delete("/foo", false, false) - assert.Equal(t, uint64(1), s.Stats.DeleteSuccess, "") -} - -// TestStoreStatsDeleteFail ensures that a failed Delete is recorded in the stats. -func TestStoreStatsDeleteFail(t *testing.T) { - s := newStore() - s.Delete("/foo", false, false) - assert.Equal(t, uint64(1), s.Stats.DeleteFail, "") -} - -// TestStoreStatsExpireCount ensures that the number of expirations is recorded in the stats. 
-func TestStoreStatsExpireCount(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - assert.Equal(t, uint64(0), s.Stats.ExpireCount, "") - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - assert.Equal(t, uint64(1), s.Stats.ExpireCount, "") -} diff --git a/server/etcdserver/api/v2store/store.go b/server/etcdserver/api/v2store/store.go deleted file mode 100644 index 32cb26ad964..00000000000 --- a/server/etcdserver/api/v2store/store.go +++ /dev/null @@ -1,799 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -import ( - "encoding/json" - "fmt" - "path" - "strconv" - "strings" - "sync" - "time" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - - "github.com/jonboulle/clockwork" -) - -// The default version to set when the store is first initialized. 
-const defaultVersion = 2 - -var minExpireTime time.Time - -func init() { - minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z") -} - -type Store interface { - Version() int - Index() uint64 - - Get(nodePath string, recursive, sorted bool) (*Event, error) - Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) - Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) - Create(nodePath string, dir bool, value string, unique bool, - expireOpts TTLOptionSet) (*Event, error) - CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, - value string, expireOpts TTLOptionSet) (*Event, error) - Delete(nodePath string, dir, recursive bool) (*Event, error) - CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) - - Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error) - - Save() ([]byte, error) - Recovery(state []byte) error - - Clone() Store - SaveNoCopy() ([]byte, error) - - JsonStats() []byte - DeleteExpiredKeys(cutoff time.Time) - - HasTTLKeys() bool -} - -type TTLOptionSet struct { - ExpireTime time.Time - Refresh bool -} - -type store struct { - Root *node - WatcherHub *watcherHub - CurrentIndex uint64 - Stats *Stats - CurrentVersion int - ttlKeyHeap *ttlKeyHeap // need to recovery manually - worldLock sync.RWMutex // stop the world lock - clock clockwork.Clock - readonlySet types.Set -} - -// New creates a store where the given namespaces will be created as initial directories. -func New(namespaces ...string) Store { - s := newStore(namespaces...) 
- s.clock = clockwork.NewRealClock() - return s -} - -func newStore(namespaces ...string) *store { - s := new(store) - s.CurrentVersion = defaultVersion - s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent) - for _, namespace := range namespaces { - s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent)) - } - s.Stats = newStats() - s.WatcherHub = newWatchHub(1000) - s.ttlKeyHeap = newTtlKeyHeap() - s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...) - return s -} - -// Version retrieves current version of the store. -func (s *store) Version() int { - return s.CurrentVersion -} - -// Index retrieves the current index of the store. -func (s *store) Index() uint64 { - s.worldLock.RLock() - defer s.worldLock.RUnlock() - return s.CurrentIndex -} - -// Get returns a get event. -// If recursive is true, it will return all the content under the node path. -// If sorted is true, it will sort the content by keys. -func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) { - var err *v2error.Error - - s.worldLock.RLock() - defer s.worldLock.RUnlock() - - defer func() { - if err == nil { - s.Stats.Inc(GetSuccess) - if recursive { - reportReadSuccess(GetRecursive) - } else { - reportReadSuccess(Get) - } - return - } - - s.Stats.Inc(GetFail) - if recursive { - reportReadFailure(GetRecursive) - } else { - reportReadFailure(Get) - } - }() - - n, err := s.internalGet(nodePath) - if err != nil { - return nil, err - } - - e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.Node.loadInternalNode(n, recursive, sorted, s.clock) - - return e, nil -} - -// Create creates the node at nodePath. Create will help to create intermediate directories with no ttl. -// If the node has already existed, create will fail. -// If any node on the path is a file, create will fail. 
-func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) { - var err *v2error.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(CreateSuccess) - reportWriteSuccess(Create) - return - } - - s.Stats.Inc(CreateFail) - reportWriteFailure(Create) - }() - - e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create) - if err != nil { - return nil, err - } - - e.EtcdIndex = s.CurrentIndex - s.WatcherHub.notify(e) - - return e, nil -} - -// Set creates or replace the node at nodePath. -func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) { - var err *v2error.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(SetSuccess) - reportWriteSuccess(Set) - return - } - - s.Stats.Inc(SetFail) - reportWriteFailure(Set) - }() - - // Get prevNode value - n, getErr := s.internalGet(nodePath) - if getErr != nil && getErr.ErrorCode != v2error.EcodeKeyNotFound { - err = getErr - return nil, err - } - - if expireOpts.Refresh { - if getErr != nil { - err = getErr - return nil, err - } - value = n.Value - } - - // Set new value - e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set) - if err != nil { - return nil, err - } - e.EtcdIndex = s.CurrentIndex - - // Put prevNode into event - if getErr == nil { - prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) - prev.Node.loadInternalNode(n, false, false, s.clock) - e.PrevNode = prev.Node - } - - if !expireOpts.Refresh { - s.WatcherHub.notify(e) - } else { - e.SetRefresh() - s.WatcherHub.add(e) - } - - return e, nil -} - -// returns user-readable cause of failed comparison -func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string { - switch which { - case CompareIndexNotMatch: - return fmt.Sprintf("[%v != 
%v]", prevIndex, n.ModifiedIndex) - case CompareValueNotMatch: - return fmt.Sprintf("[%v != %v]", prevValue, n.Value) - default: - return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex) - } -} - -func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, - value string, expireOpts TTLOptionSet) (*Event, error) { - - var err *v2error.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(CompareAndSwapSuccess) - reportWriteSuccess(CompareAndSwap) - return - } - - s.Stats.Inc(CompareAndSwapFail) - reportWriteFailure(CompareAndSwap) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) - } - - n, err := s.internalGet(nodePath) - if err != nil { - return nil, err - } - if n.IsDir() { // can only compare and swap file - err = v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex) - return nil, err - } - - // If both of the prevValue and prevIndex are given, we will test both of them. - // Command will be executed, only if both of the tests are successful. 
- if ok, which := n.Compare(prevValue, prevIndex); !ok { - cause := getCompareFailCause(n, which, prevValue, prevIndex) - err = v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex) - return nil, err - } - - if expireOpts.Refresh { - value = n.Value - } - - // update etcd index - s.CurrentIndex++ - - e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.PrevNode = n.Repr(false, false, s.clock) - eNode := e.Node - - // if test succeed, write the value - if err := n.Write(value, s.CurrentIndex); err != nil { - return nil, err - } - n.UpdateTTL(expireOpts.ExpireTime) - - // copy the value for safety - valueCopy := value - eNode.Value = &valueCopy - eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) - - if !expireOpts.Refresh { - s.WatcherHub.notify(e) - } else { - e.SetRefresh() - s.WatcherHub.add(e) - } - - return e, nil -} - -// Delete deletes the node at the given path. -// If the node is a directory, recursive must be true to delete it. 
-func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) { - var err *v2error.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(DeleteSuccess) - reportWriteSuccess(Delete) - return - } - - s.Stats.Inc(DeleteFail) - reportWriteFailure(Delete) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) - } - - // recursive implies dir - if recursive { - dir = true - } - - n, err := s.internalGet(nodePath) - if err != nil { // if the node does not exist, return error - return nil, err - } - - nextIndex := s.CurrentIndex + 1 - e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex) - e.EtcdIndex = nextIndex - e.PrevNode = n.Repr(false, false, s.clock) - eNode := e.Node - - if n.IsDir() { - eNode.Dir = true - } - - callback := func(path string) { // notify function - // notify the watchers with deleted set true - s.WatcherHub.notifyWatchers(e, path, true) - } - - err = n.Remove(dir, recursive, callback) - if err != nil { - return nil, err - } - - // update etcd index - s.CurrentIndex++ - - s.WatcherHub.notify(e) - - return e, nil -} - -func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) { - var err *v2error.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(CompareAndDeleteSuccess) - reportWriteSuccess(CompareAndDelete) - return - } - - s.Stats.Inc(CompareAndDeleteFail) - reportWriteFailure(CompareAndDelete) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - - n, err := s.internalGet(nodePath) - if err != nil { // if the node does not exist, return error - return nil, err - } - if n.IsDir() { // can only compare and delete file - return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex) - } - - // If 
both of the prevValue and prevIndex are given, we will test both of them. - // Command will be executed, only if both of the tests are successful. - if ok, which := n.Compare(prevValue, prevIndex); !ok { - cause := getCompareFailCause(n, which, prevValue, prevIndex) - return nil, v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex) - } - - // update etcd index - s.CurrentIndex++ - - e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.PrevNode = n.Repr(false, false, s.clock) - - callback := func(path string) { // notify function - // notify the watchers with deleted set true - s.WatcherHub.notifyWatchers(e, path, true) - } - - err = n.Remove(false, false, callback) - if err != nil { - return nil, err - } - - s.WatcherHub.notify(e) - - return e, nil -} - -func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) { - s.worldLock.RLock() - defer s.worldLock.RUnlock() - - key = path.Clean(path.Join("/", key)) - if sinceIndex == 0 { - sinceIndex = s.CurrentIndex + 1 - } - // WatcherHub does not know about the current index, so we need to pass it in - w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex) - if err != nil { - return nil, err - } - - return w, nil -} - -// walk walks all the nodePath and apply the walkFunc on each directory -func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *v2error.Error)) (*node, *v2error.Error) { - components := strings.Split(nodePath, "/") - - curr := s.Root - var err *v2error.Error - - for i := 1; i < len(components); i++ { - if len(components[i]) == 0 { // ignore empty string - return curr, nil - } - - curr, err = walkFunc(curr, components[i]) - if err != nil { - return nil, err - } - } - - return curr, nil -} - -// Update updates the value/ttl of the node. -// If the node is a file, the value and the ttl can be updated. 
-// If the node is a directory, only the ttl can be updated. -func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) { - var err *v2error.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(UpdateSuccess) - reportWriteSuccess(Update) - return - } - - s.Stats.Inc(UpdateFail) - reportWriteFailure(Update) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) - } - - currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 - - n, err := s.internalGet(nodePath) - if err != nil { // if the node does not exist, return error - return nil, err - } - if n.IsDir() && len(newValue) != 0 { - // if the node is a directory, we cannot update value to non-empty - return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex) - } - - if expireOpts.Refresh { - newValue = n.Value - } - - e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex) - e.EtcdIndex = nextIndex - e.PrevNode = n.Repr(false, false, s.clock) - eNode := e.Node - - if err := n.Write(newValue, nextIndex); err != nil { - return nil, fmt.Errorf("nodePath %v : %v", nodePath, err) - } - - if n.IsDir() { - eNode.Dir = true - } else { - // copy the value for safety - newValueCopy := newValue - eNode.Value = &newValueCopy - } - - // update ttl - n.UpdateTTL(expireOpts.ExpireTime) - - eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) - - if !expireOpts.Refresh { - s.WatcherHub.notify(e) - } else { - e.SetRefresh() - s.WatcherHub.add(e) - } - - s.CurrentIndex = nextIndex - - return e, nil -} - -func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool, - expireTime time.Time, action string) (*Event, *v2error.Error) { - - currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 - - if unique { // append unique 
item under the node path - nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10)) - } - - nodePath = path.Clean(path.Join("/", nodePath)) - - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, "/", currIndex) - } - - // Assume expire times that are way in the past are - // This can occur when the time is serialized to JS - if expireTime.Before(minExpireTime) { - expireTime = Permanent - } - - dirName, nodeName := path.Split(nodePath) - - // walk through the nodePath, create dirs and get the last directory node - d, err := s.walk(dirName, s.checkDir) - - if err != nil { - s.Stats.Inc(SetFail) - reportWriteFailure(action) - err.Index = currIndex - return nil, err - } - - e := newEvent(action, nodePath, nextIndex, nextIndex) - eNode := e.Node - - n, _ := d.GetChild(nodeName) - - // force will try to replace an existing file - if n != nil { - if replace { - if n.IsDir() { - return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex) - } - e.PrevNode = n.Repr(false, false, s.clock) - - if err := n.Remove(false, false, nil); err != nil { - return nil, err - } - } else { - return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, currIndex) - } - } - - if !dir { // create file - // copy the value for safety - valueCopy := value - eNode.Value = &valueCopy - - n = newKV(s, nodePath, value, nextIndex, d, expireTime) - - } else { // create directory - eNode.Dir = true - - n = newDir(s, nodePath, nextIndex, d, expireTime) - } - - // we are sure d is a directory and does not have the children with name n.Name - if err := d.Add(n); err != nil { - return nil, err - } - - // node with TTL - if !n.IsPermanent() { - s.ttlKeyHeap.push(n) - - eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) - } - - s.CurrentIndex = nextIndex - - return e, nil -} - -// InternalGet gets the node of the given nodePath. 
-func (s *store) internalGet(nodePath string) (*node, *v2error.Error) { - nodePath = path.Clean(path.Join("/", nodePath)) - - walkFunc := func(parent *node, name string) (*node, *v2error.Error) { - - if !parent.IsDir() { - err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex) - return nil, err - } - - child, ok := parent.Children[name] - if ok { - return child, nil - } - - return nil, v2error.NewError(v2error.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex) - } - - f, err := s.walk(nodePath, walkFunc) - - if err != nil { - return nil, err - } - return f, nil -} - -// DeleteExpiredKeys will delete all expired keys -func (s *store) DeleteExpiredKeys(cutoff time.Time) { - s.worldLock.Lock() - defer s.worldLock.Unlock() - - for { - node := s.ttlKeyHeap.top() - if node == nil || node.ExpireTime.After(cutoff) { - break - } - - s.CurrentIndex++ - e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.PrevNode = node.Repr(false, false, s.clock) - if node.IsDir() { - e.Node.Dir = true - } - - callback := func(path string) { // notify function - // notify the watchers with deleted set true - s.WatcherHub.notifyWatchers(e, path, true) - } - - s.ttlKeyHeap.pop() - node.Remove(true, true, callback) - - reportExpiredKey() - s.Stats.Inc(ExpireCount) - - s.WatcherHub.notify(e) - } - -} - -// checkDir will check whether the component is a directory under parent node. -// If it is a directory, this function will return the pointer to that node. -// If it does not exist, this function will create a new directory and return the pointer to that node. -// If it is a file, this function will return error. 
-func (s *store) checkDir(parent *node, dirName string) (*node, *v2error.Error) { - node, ok := parent.Children[dirName] - - if ok { - if node.IsDir() { - return node, nil - } - - return nil, v2error.NewError(v2error.EcodeNotDir, node.Path, s.CurrentIndex) - } - - n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent) - - parent.Children[dirName] = n - - return n, nil -} - -// Save saves the static state of the store system. -// It will not be able to save the state of watchers. -// It will not save the parent field of the node. Or there will -// be cyclic dependencies issue for the json package. -func (s *store) Save() ([]byte, error) { - b, err := json.Marshal(s.Clone()) - if err != nil { - return nil, err - } - - return b, nil -} - -func (s *store) SaveNoCopy() ([]byte, error) { - b, err := json.Marshal(s) - if err != nil { - return nil, err - } - - return b, nil -} - -func (s *store) Clone() Store { - s.worldLock.RLock() - - clonedStore := newStore() - clonedStore.CurrentIndex = s.CurrentIndex - clonedStore.Root = s.Root.Clone() - clonedStore.WatcherHub = s.WatcherHub.clone() - clonedStore.Stats = s.Stats.clone() - clonedStore.CurrentVersion = s.CurrentVersion - - s.worldLock.RUnlock() - return clonedStore -} - -// Recovery recovers the store system from a static state -// It needs to recover the parent field of the nodes. -// It needs to delete the expired nodes since the saved time and also -// needs to create monitoring goroutines. 
-func (s *store) Recovery(state []byte) error { - s.worldLock.Lock() - defer s.worldLock.Unlock() - err := json.Unmarshal(state, s) - - if err != nil { - return err - } - - s.ttlKeyHeap = newTtlKeyHeap() - - s.Root.recoverAndclean() - return nil -} - -func (s *store) JsonStats() []byte { - s.Stats.Watchers = uint64(s.WatcherHub.count) - return s.Stats.toJson() -} - -func (s *store) HasTTLKeys() bool { - s.worldLock.RLock() - defer s.worldLock.RUnlock() - return s.ttlKeyHeap.Len() != 0 -} diff --git a/server/etcdserver/api/v2store/store_bench_test.go b/server/etcdserver/api/v2store/store_bench_test.go deleted file mode 100644 index f8f939aff19..00000000000 --- a/server/etcdserver/api/v2store/store_bench_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import ( - "encoding/json" - "fmt" - "runtime" - "testing" -) - -func BenchmarkStoreSet128Bytes(b *testing.B) { - benchStoreSet(b, 128, nil) -} - -func BenchmarkStoreSet1024Bytes(b *testing.B) { - benchStoreSet(b, 1024, nil) -} - -func BenchmarkStoreSet4096Bytes(b *testing.B) { - benchStoreSet(b, 4096, nil) -} - -func BenchmarkStoreSetWithJson128Bytes(b *testing.B) { - benchStoreSet(b, 128, json.Marshal) -} - -func BenchmarkStoreSetWithJson1024Bytes(b *testing.B) { - benchStoreSet(b, 1024, json.Marshal) -} - -func BenchmarkStoreSetWithJson4096Bytes(b *testing.B) { - benchStoreSet(b, 4096, json.Marshal) -} - -func BenchmarkStoreDelete(b *testing.B) { - b.StopTimer() - - s := newStore() - kvs, _ := generateNRandomKV(b.N, 128) - - memStats := new(runtime.MemStats) - runtime.GC() - runtime.ReadMemStats(memStats) - - for i := 0; i < b.N; i++ { - _, err := s.Set(kvs[i][0], false, kvs[i][1], TTLOptionSet{ExpireTime: Permanent}) - if err != nil { - panic(err) - } - } - - setMemStats := new(runtime.MemStats) - runtime.GC() - runtime.ReadMemStats(setMemStats) - - b.StartTimer() - - for i := range kvs { - s.Delete(kvs[i][0], false, false) - } - - b.StopTimer() - - // clean up - e, err := s.Get("/", false, false) - if err != nil { - panic(err) - } - - for _, n := range e.Node.Nodes { - _, err := s.Delete(n.Key, true, true) - if err != nil { - panic(err) - } - } - s.WatcherHub.EventHistory = nil - - deleteMemStats := new(runtime.MemStats) - runtime.GC() - runtime.ReadMemStats(deleteMemStats) - - fmt.Printf("\nBefore set Alloc: %v; After set Alloc: %v, After delete Alloc: %v\n", - memStats.Alloc/1000, setMemStats.Alloc/1000, deleteMemStats.Alloc/1000) -} - -func BenchmarkWatch(b *testing.B) { - b.StopTimer() - s := newStore() - kvs, _ := generateNRandomKV(b.N, 128) - b.StartTimer() - - memStats := new(runtime.MemStats) - runtime.GC() - runtime.ReadMemStats(memStats) - - for i := 0; i < b.N; i++ { - w, _ := s.Watch(kvs[i][0], false, false, 0) - - e := 
newEvent("set", kvs[i][0], uint64(i+1), uint64(i+1)) - s.WatcherHub.notify(e) - <-w.EventChan() - s.CurrentIndex++ - } - - s.WatcherHub.EventHistory = nil - afterMemStats := new(runtime.MemStats) - runtime.GC() - runtime.ReadMemStats(afterMemStats) - fmt.Printf("\nBefore Alloc: %v; After Alloc: %v\n", - memStats.Alloc/1000, afterMemStats.Alloc/1000) -} - -func BenchmarkWatchWithSet(b *testing.B) { - b.StopTimer() - s := newStore() - kvs, _ := generateNRandomKV(b.N, 128) - b.StartTimer() - - for i := 0; i < b.N; i++ { - w, _ := s.Watch(kvs[i][0], false, false, 0) - - s.Set(kvs[i][0], false, "test", TTLOptionSet{ExpireTime: Permanent}) - <-w.EventChan() - } -} - -func BenchmarkWatchWithSetBatch(b *testing.B) { - b.StopTimer() - s := newStore() - kvs, _ := generateNRandomKV(b.N, 128) - b.StartTimer() - - watchers := make([]Watcher, b.N) - - for i := 0; i < b.N; i++ { - watchers[i], _ = s.Watch(kvs[i][0], false, false, 0) - } - - for i := 0; i < b.N; i++ { - s.Set(kvs[i][0], false, "test", TTLOptionSet{ExpireTime: Permanent}) - } - - for i := 0; i < b.N; i++ { - <-watchers[i].EventChan() - } - -} - -func BenchmarkWatchOneKey(b *testing.B) { - s := newStore() - watchers := make([]Watcher, b.N) - - for i := 0; i < b.N; i++ { - watchers[i], _ = s.Watch("/foo", false, false, 0) - } - - s.Set("/foo", false, "", TTLOptionSet{ExpireTime: Permanent}) - - for i := 0; i < b.N; i++ { - <-watchers[i].EventChan() - } -} - -func benchStoreSet(b *testing.B, valueSize int, process func(interface{}) ([]byte, error)) { - s := newStore() - b.StopTimer() - kvs, size := generateNRandomKV(b.N, valueSize) - b.StartTimer() - - for i := 0; i < b.N; i++ { - resp, err := s.Set(kvs[i][0], false, kvs[i][1], TTLOptionSet{ExpireTime: Permanent}) - if err != nil { - panic(err) - } - - if process != nil { - _, err = process(resp) - if err != nil { - panic(err) - } - } - } - - b.StopTimer() - memStats := new(runtime.MemStats) - runtime.GC() - runtime.ReadMemStats(memStats) - fmt.Printf("\nAlloc: %vKB; 
Data: %vKB; Kvs: %v; Alloc/Data:%v\n", - memStats.Alloc/1000, size/1000, b.N, memStats.Alloc/size) -} - -func generateNRandomKV(n int, valueSize int) ([][]string, uint64) { - var size uint64 - kvs := make([][]string, n) - bytes := make([]byte, valueSize) - - for i := 0; i < n; i++ { - kvs[i] = make([]string, 2) - kvs[i][0] = fmt.Sprintf("/%010d/%010d/%010d", n, n, n) - kvs[i][1] = string(bytes) - size = size + uint64(len(kvs[i][0])) + uint64(len(kvs[i][1])) - } - - return kvs, size -} diff --git a/server/etcdserver/api/v2store/store_ttl_test.go b/server/etcdserver/api/v2store/store_ttl_test.go deleted file mode 100644 index 7da08f229c8..00000000000 --- a/server/etcdserver/api/v2store/store_ttl_test.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - - "github.com/jonboulle/clockwork" -) - -// TestMinExpireTime ensures that any TTL <= minExpireTime becomes Permanent -func TestMinExpireTime(t *testing.T) { - s := newStore() - fc := clockwork.NewFakeClock() - s.clock = fc - // FakeClock starts at 0, so minExpireTime should be far in the future.. 
but just in case - testutil.AssertTrue(t, minExpireTime.After(fc.Now()), "minExpireTime should be ahead of FakeClock!") - s.Create("/foo", false, "Y", false, TTLOptionSet{ExpireTime: fc.Now().Add(3 * time.Second)}) - fc.Advance(5 * time.Second) - // Ensure it hasn't expired - s.DeleteExpiredKeys(fc.Now()) - var eidx uint64 = 1 - e, err := s.Get("/foo", true, false) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "get") - assert.Equal(t, e.Node.Key, "/foo") - assert.Equal(t, e.Node.TTL, int64(0)) -} - -// TestStoreGetDirectory ensures that the store can recursively retrieve a directory listing. -// Note that hidden files should not be returned. -func TestStoreGetDirectory(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/bar", false, "X", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/_hidden", false, "*", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/baz", true, "", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/baz/bat", false, "Y", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/baz/_hidden", false, "*", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/baz/ttl", false, "Y", false, TTLOptionSet{ExpireTime: fc.Now().Add(time.Second * 3)}) - var eidx uint64 = 7 - e, err := s.Get("/foo", true, false) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "get") - assert.Equal(t, e.Node.Key, "/foo") - assert.Equal(t, len(e.Node.Nodes), 2) - var bazNodes NodeExterns - for _, node := range e.Node.Nodes { - switch node.Key { - case "/foo/bar": - assert.Equal(t, *node.Value, "X") - assert.Equal(t, node.Dir, false) - case "/foo/baz": - assert.Equal(t, node.Dir, true) - assert.Equal(t, len(node.Nodes), 2) - bazNodes = node.Nodes - default: - t.Errorf("key = %s, not matched", node.Key) - } - } - for _, node := 
range bazNodes { - switch node.Key { - case "/foo/baz/bat": - assert.Equal(t, *node.Value, "Y") - assert.Equal(t, node.Dir, false) - case "/foo/baz/ttl": - assert.Equal(t, *node.Value, "Y") - assert.Equal(t, node.Dir, false) - assert.Equal(t, node.TTL, int64(3)) - default: - t.Errorf("key = %s, not matched", node.Key) - } - } -} - -// TestStoreUpdateValueTTL ensures that the store can update the TTL on a value. -func TestStoreUpdateValueTTL(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - var eidx uint64 = 2 - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - _, err := s.Update("/foo", "baz", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - testutil.AssertNil(t, err) - e, _ := s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz") - assert.Equal(t, e.EtcdIndex, eidx) - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - e, err = s.Get("/foo", false, false) - testutil.AssertNil(t, e) - assert.Equal(t, err.(*v2error.Error).ErrorCode, v2error.EcodeKeyNotFound) -} - -// TestStoreUpdateDirTTL ensures that the store can update the TTL on a directory. 
-func TestStoreUpdateDirTTL(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - var eidx uint64 = 3 - _, err := s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) - testutil.AssertNil(t, err) - _, err = s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) - testutil.AssertNil(t, err) - e, err := s.Update("/foo/bar", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - testutil.AssertNil(t, err) - assert.Equal(t, e.Node.Dir, false) - assert.Equal(t, e.EtcdIndex, eidx) - e, _ = s.Get("/foo/bar", false, false) - assert.Equal(t, *e.Node.Value, "") - assert.Equal(t, e.EtcdIndex, eidx) - - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - e, err = s.Get("/foo/bar", false, false) - testutil.AssertNil(t, e) - assert.Equal(t, err.(*v2error.Error).ErrorCode, v2error.EcodeKeyNotFound) -} - -// TestStoreWatchExpire ensures that the store can watch for key expiration. -func TestStoreWatchExpire(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - var eidx uint64 = 3 - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(400 * time.Millisecond)}) - s.Create("/foofoo", false, "barbarbar", false, TTLOptionSet{ExpireTime: fc.Now().Add(450 * time.Millisecond)}) - s.Create("/foodir", true, "", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - - w, _ := s.Watch("/", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx) - c := w.EventChan() - e := nbselect(c) - testutil.AssertNil(t, e) - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - eidx = 4 - e = nbselect(c) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "expire") - assert.Equal(t, e.Node.Key, "/foo") - w, _ = s.Watch("/", true, false, 5) - eidx = 6 - assert.Equal(t, w.StartIndex(), eidx) - e = nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "expire") - assert.Equal(t, e.Node.Key, 
"/foofoo") - w, _ = s.Watch("/", true, false, 6) - e = nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "expire") - assert.Equal(t, e.Node.Key, "/foodir") - assert.Equal(t, e.Node.Dir, true) -} - -// TestStoreWatchExpireRefresh ensures that the store can watch for key expiration when refreshing. -func TestStoreWatchExpireRefresh(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - var eidx uint64 = 2 - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - s.Create("/foofoo", false, "barbarbar", false, TTLOptionSet{ExpireTime: fc.Now().Add(1200 * time.Millisecond), Refresh: true}) - - // Make sure we set watch updates when Refresh is true for newly created keys - w, _ := s.Watch("/", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx) - c := w.EventChan() - e := nbselect(c) - testutil.AssertNil(t, e) - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - eidx = 3 - e = nbselect(c) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "expire") - assert.Equal(t, e.Node.Key, "/foo") - - s.Update("/foofoo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - w, _ = s.Watch("/", true, false, 4) - fc.Advance(700 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - eidx = 5 // We should skip 4 because a TTL update should occur with no watch notification if set `TTLOptionSet.Refresh` to true - assert.Equal(t, w.StartIndex(), eidx-1) - e = nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "expire") - assert.Equal(t, e.Node.Key, "/foofoo") -} - -// TestStoreWatchExpireEmptyRefresh ensures that the store can watch for key expiration when refreshing with an empty value. 
-func TestStoreWatchExpireEmptyRefresh(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - var eidx uint64 - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - // Should be no-op - fc.Advance(200 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - - s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - w, _ := s.Watch("/", true, false, 2) - fc.Advance(700 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - eidx = 3 // We should skip 2 because a TTL update should occur with no watch notification if set `TTLOptionSet.Refresh` to true - assert.Equal(t, w.StartIndex(), eidx-1) - e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "expire") - assert.Equal(t, e.Node.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") -} - -// TestStoreWatchNoRefresh updates TTL of a key (set TTLOptionSet.Refresh to false) and send notification -func TestStoreWatchNoRefresh(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - var eidx uint64 - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - // Should be no-op - fc.Advance(200 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - - // Update key's TTL with setting `TTLOptionSet.Refresh` to false will cause an update event - s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: false}) - w, _ := s.Watch("/", true, false, 2) - fc.Advance(700 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - eidx = 2 - assert.Equal(t, w.StartIndex(), eidx) - e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "update") - assert.Equal(t, e.Node.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") -} - -// TestStoreRefresh ensures that the store can update the TTL on a value with refresh. 
-func TestStoreRefresh(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - s.Create("/bar", true, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - s.Create("/bar/z", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - _, err := s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - testutil.AssertNil(t, err) - - _, err = s.Set("/foo", false, "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - testutil.AssertNil(t, err) - - _, err = s.Update("/bar/z", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - testutil.AssertNil(t, err) - - _, err = s.CompareAndSwap("/foo", "bar", 0, "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - testutil.AssertNil(t, err) -} - -// TestStoreRecoverWithExpiration ensures that the store can recover from a previously saved state that includes an expiring key. 
-func TestStoreRecoverWithExpiration(t *testing.T) { - s := newStore() - s.clock = newFakeClock() - - fc := newFakeClock() - - var eidx uint64 = 4 - s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/x", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: fc.Now().Add(5 * time.Millisecond)}) - b, err := s.Save() - testutil.AssertNil(t, err) - - time.Sleep(10 * time.Millisecond) - - s2 := newStore() - s2.clock = fc - - s2.Recovery(b) - - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - - e, err := s.Get("/foo/x", false, false) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, *e.Node.Value, "bar") - - e, err = s.Get("/foo/y", false, false) - testutil.AssertNotNil(t, err) - testutil.AssertNil(t, e) -} - -// TestStoreWatchExpireWithHiddenKey ensures that the store doesn't see expirations of hidden keys. -func TestStoreWatchExpireWithHiddenKey(t *testing.T) { - s := newStore() - fc := newFakeClock() - s.clock = fc - - s.Create("/_foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - s.Create("/foofoo", false, "barbarbar", false, TTLOptionSet{ExpireTime: fc.Now().Add(time.Second)}) - - w, _ := s.Watch("/", true, false, 0) - c := w.EventChan() - e := nbselect(c) - testutil.AssertNil(t, e) - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - e = nbselect(c) - testutil.AssertNil(t, e) - fc.Advance(600 * time.Millisecond) - s.DeleteExpiredKeys(fc.Now()) - e = nbselect(c) - assert.Equal(t, e.Action, "expire") - assert.Equal(t, e.Node.Key, "/foofoo") -} - -// newFakeClock creates a new FakeClock that has been advanced to at least minExpireTime -func newFakeClock() clockwork.FakeClock { - fc := clockwork.NewFakeClock() - for minExpireTime.After(fc.Now()) { - fc.Advance((0x1 << 62) * time.Nanosecond) - } - return fc -} - -// Performs a non-blocking select on an 
event channel. -func nbselect(c <-chan *Event) *Event { - select { - case e := <-c: - return e - default: - return nil - } -} diff --git a/server/etcdserver/api/v2store/watcher.go b/server/etcdserver/api/v2store/watcher.go deleted file mode 100644 index 4b1e846a2f9..00000000000 --- a/server/etcdserver/api/v2store/watcher.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -type Watcher interface { - EventChan() chan *Event - StartIndex() uint64 // The EtcdIndex at which the Watcher was created - Remove() -} - -type watcher struct { - eventChan chan *Event - stream bool - recursive bool - sinceIndex uint64 - startIndex uint64 - hub *watcherHub - removed bool - remove func() -} - -func (w *watcher) EventChan() chan *Event { - return w.eventChan -} - -func (w *watcher) StartIndex() uint64 { - return w.startIndex -} - -// notify function notifies the watcher. If the watcher interests in the given path, -// the function will return true. -func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool { - // watcher is interested the path in three cases and under one condition - // the condition is that the event happens after the watcher's sinceIndex - - // 1. the path at which the event happens is the path the watcher is watching at. 
- // For example if the watcher is watching at "/foo" and the event happens at "/foo", - // the watcher must be interested in that event. - - // 2. the watcher is a recursive watcher, it interests in the event happens after - // its watching path. For example if watcher A watches at "/foo" and it is a recursive - // one, it will interest in the event happens at "/foo/bar". - - // 3. when we delete a directory, we need to force notify all the watchers who watches - // at the file we need to delete. - // For example a watcher is watching at "/foo/bar". And we deletes "/foo". The watcher - // should get notified even if "/foo" is not the path it is watching. - if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex { - // We cannot block here if the eventChan capacity is full, otherwise - // etcd will hang. eventChan capacity is full when the rate of - // notifications are higher than our send rate. - // If this happens, we close the channel. - select { - case w.eventChan <- e: - default: - // We have missed a notification. Remove the watcher. - // Removing the watcher also closes the eventChan. - w.remove() - } - return true - } - return false -} - -// Remove removes the watcher from watcherHub -// The actual remove function is guaranteed to only be executed once -func (w *watcher) Remove() { - w.hub.mutex.Lock() - defer w.hub.mutex.Unlock() - - close(w.eventChan) - if w.remove != nil { - w.remove() - } -} - -// nopWatcher is a watcher that receives nothing, always blocking. 
-type nopWatcher struct{} - -func NewNopWatcher() Watcher { return &nopWatcher{} } -func (w *nopWatcher) EventChan() chan *Event { return nil } -func (w *nopWatcher) StartIndex() uint64 { return 0 } -func (w *nopWatcher) Remove() {} diff --git a/server/etcdserver/api/v2store/watcher_hub.go b/server/etcdserver/api/v2store/watcher_hub.go deleted file mode 100644 index dc5c8f2bb57..00000000000 --- a/server/etcdserver/api/v2store/watcher_hub.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -import ( - "container/list" - "path" - "strings" - "sync" - "sync/atomic" - - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" -) - -// A watcherHub contains all subscribed watchers -// watchers is a map with watched path as key and watcher as value -// EventHistory keeps the old events for watcherHub. It is used to help -// watcher to get a continuous event history. Or a watcher might miss the -// event happens between the end of the first watch command and the start -// of the second command. -type watcherHub struct { - // count must be the first element to keep 64-bit alignment for atomic - // access - - count int64 // current number of watchers. - - mutex sync.Mutex - watchers map[string]*list.List - EventHistory *EventHistory -} - -// newWatchHub creates a watcherHub. The capacity determines how many events we will -// keep in the eventHistory. 
-// Typically, we only need to keep a small size of history[smaller than 20K]. -// Ideally, it should smaller than 20K/s[max throughput] * 2 * 50ms[RTT] = 2000 -func newWatchHub(capacity int) *watcherHub { - return &watcherHub{ - watchers: make(map[string]*list.List), - EventHistory: newEventHistory(capacity), - } -} - -// Watch function returns a Watcher. -// If recursive is true, the first change after index under key will be sent to the event channel of the watcher. -// If recursive is false, the first change after index at key will be sent to the event channel of the watcher. -// If index is zero, watch will start from the current index + 1. -func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) { - reportWatchRequest() - event, err := wh.EventHistory.scan(key, recursive, index) - - if err != nil { - err.Index = storeIndex - return nil, err - } - - w := &watcher{ - eventChan: make(chan *Event, 100), // use a buffered channel - recursive: recursive, - stream: stream, - sinceIndex: index, - startIndex: storeIndex, - hub: wh, - } - - wh.mutex.Lock() - defer wh.mutex.Unlock() - // If the event exists in the known history, append the EtcdIndex and return immediately - if event != nil { - ne := event.Clone() - ne.EtcdIndex = storeIndex - w.eventChan <- ne - return w, nil - } - - l, ok := wh.watchers[key] - - var elem *list.Element - - if ok { // add the new watcher to the back of the list - elem = l.PushBack(w) - } else { // create a new list and add the new watcher - l = list.New() - elem = l.PushBack(w) - wh.watchers[key] = l - } - - w.remove = func() { - if w.removed { // avoid removing it twice - return - } - w.removed = true - l.Remove(elem) - atomic.AddInt64(&wh.count, -1) - reportWatcherRemoved() - if l.Len() == 0 { - delete(wh.watchers, key) - } - } - - atomic.AddInt64(&wh.count, 1) - reportWatcherAdded() - - return w, nil -} - -func (wh *watcherHub) add(e *Event) { - wh.EventHistory.addEvent(e) -} 
- -// notify function accepts an event and notify to the watchers. -func (wh *watcherHub) notify(e *Event) { - e = wh.EventHistory.addEvent(e) // add event into the eventHistory - - segments := strings.Split(e.Node.Key, "/") - - currPath := "/" - - // walk through all the segments of the path and notify the watchers - // if the path is "/foo/bar", it will notify watchers with path "/", - // "/foo" and "/foo/bar" - - for _, segment := range segments { - currPath = path.Join(currPath, segment) - // notify the watchers who interests in the changes of current path - wh.notifyWatchers(e, currPath, false) - } -} - -func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) { - wh.mutex.Lock() - defer wh.mutex.Unlock() - - l, ok := wh.watchers[nodePath] - if ok { - curr := l.Front() - - for curr != nil { - next := curr.Next() // save reference to the next one in the list - - w, _ := curr.Value.(*watcher) - - originalPath := e.Node.Key == nodePath - if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) { - if !w.stream { // do not remove the stream watcher - // if we successfully notify a watcher - // we need to remove the watcher from the list - // and decrease the counter - w.removed = true - l.Remove(curr) - atomic.AddInt64(&wh.count, -1) - reportWatcherRemoved() - } - } - - curr = next // update current to the next element in the list - } - - if l.Len() == 0 { - // if we have notified all watcher in the list - // we can delete the list - delete(wh.watchers, nodePath) - } - } -} - -// clone function clones the watcherHub and return the cloned one. -// only clone the static content. do not clone the current watchers. -func (wh *watcherHub) clone() *watcherHub { - clonedHistory := wh.EventHistory.clone() - - return &watcherHub{ - EventHistory: clonedHistory, - } -} - -// isHidden checks to see if key path is considered hidden to watch path i.e. 
the -// last element is hidden or it's within a hidden directory -func isHidden(watchPath, keyPath string) bool { - // When deleting a directory, watchPath might be deeper than the actual keyPath - // For example, when deleting /foo we also need to notify watchers on /foo/bar. - if len(watchPath) > len(keyPath) { - return false - } - // if watch path is just a "/", after path will start without "/" - // add a "/" to deal with the special case when watchPath is "/" - afterPath := path.Clean("/" + keyPath[len(watchPath):]) - return strings.Contains(afterPath, "/_") -} diff --git a/server/etcdserver/api/v2store/watcher_hub_test.go b/server/etcdserver/api/v2store/watcher_hub_test.go deleted file mode 100644 index 61d3f79dcbe..00000000000 --- a/server/etcdserver/api/v2store/watcher_hub_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store - -import "testing" - -// TestIsHidden tests isHidden functions. 
-func TestIsHidden(t *testing.T) { - // watch at "/" - // key is "/_foo", hidden to "/" - // expected: hidden = true - watch := "/" - key := "/_foo" - hidden := isHidden(watch, key) - if !hidden { - t.Fatalf("%v should be hidden to %v\n", key, watch) - } - - // watch at "/_foo" - // key is "/_foo", not hidden to "/_foo" - // expected: hidden = false - watch = "/_foo" - hidden = isHidden(watch, key) - if hidden { - t.Fatalf("%v should not be hidden to %v\n", key, watch) - } - - // watch at "/_foo/" - // key is "/_foo/foo", not hidden to "/_foo" - key = "/_foo/foo" - hidden = isHidden(watch, key) - if hidden { - t.Fatalf("%v should not be hidden to %v\n", key, watch) - } - - // watch at "/_foo/" - // key is "/_foo/_foo", hidden to "/_foo" - key = "/_foo/_foo" - hidden = isHidden(watch, key) - if !hidden { - t.Fatalf("%v should be hidden to %v\n", key, watch) - } - - // watch at "/_foo/foo" - // key is "/_foo" - watch = "_foo/foo" - key = "/_foo/" - hidden = isHidden(watch, key) - if hidden { - t.Fatalf("%v should not be hidden to %v\n", key, watch) - } -} diff --git a/server/etcdserver/api/v2store/watcher_test.go b/server/etcdserver/api/v2store/watcher_test.go deleted file mode 100644 index e0901028fba..00000000000 --- a/server/etcdserver/api/v2store/watcher_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store - -import "testing" - -func TestWatcher(t *testing.T) { - s := newStore() - wh := s.WatcherHub - w, err := wh.watch("/foo", true, false, 1, 1) - if err != nil { - t.Fatalf("%v", err) - } - c := w.EventChan() - - select { - case <-c: - t.Fatal("should not receive from channel before send the event") - default: - // do nothing - } - - e := newEvent(Create, "/foo/bar", 1, 1) - - wh.notify(e) - - re := <-c - - if e != re { - t.Fatal("recv != send") - } - - w, _ = wh.watch("/foo", false, false, 2, 1) - c = w.EventChan() - - e = newEvent(Create, "/foo/bar", 2, 2) - - wh.notify(e) - - select { - case re = <-c: - t.Fatal("should not receive from channel if not recursive ", re) - default: - // do nothing - } - - e = newEvent(Create, "/foo", 3, 3) - - wh.notify(e) - - re = <-c - - if e != re { - t.Fatal("recv != send") - } - - // ensure we are doing exact matching rather than prefix matching - w, _ = wh.watch("/fo", true, false, 1, 1) - c = w.EventChan() - - select { - case re = <-c: - t.Fatal("should not receive from channel:", re) - default: - // do nothing - } - - e = newEvent(Create, "/fo/bar", 3, 3) - - wh.notify(e) - - re = <-c - - if e != re { - t.Fatal("recv != send") - } - -} diff --git a/server/etcdserver/api/v3alarm/alarms.go b/server/etcdserver/api/v3alarm/alarms.go deleted file mode 100644 index 6dfcfd11797..00000000000 --- a/server/etcdserver/api/v3alarm/alarms.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3alarm manages health status alarms in etcd. -package v3alarm - -import ( - "sync" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/storage/backend" - - "go.uber.org/zap" -) - -type BackendGetter interface { - Backend() backend.Backend -} - -type AlarmBackend interface { - CreateAlarmBucket() - MustPutAlarm(member *pb.AlarmMember) - MustDeleteAlarm(alarm *pb.AlarmMember) - GetAllAlarms() ([]*pb.AlarmMember, error) - ForceCommit() -} - -type alarmSet map[types.ID]*pb.AlarmMember - -// AlarmStore persists alarms to the backend. -type AlarmStore struct { - lg *zap.Logger - mu sync.Mutex - types map[pb.AlarmType]alarmSet - - be AlarmBackend -} - -func NewAlarmStore(lg *zap.Logger, be AlarmBackend) (*AlarmStore, error) { - if lg == nil { - lg = zap.NewNop() - } - ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), be: be} - err := ret.restore() - return ret, err -} - -func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember { - a.mu.Lock() - defer a.mu.Unlock() - - newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at} - if m := a.addToMap(newAlarm); m != newAlarm { - return m - } - - a.be.MustPutAlarm(newAlarm) - return newAlarm -} - -func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember { - a.mu.Lock() - defer a.mu.Unlock() - - t := a.types[at] - if t == nil { - t = make(alarmSet) - a.types[at] = t - } - m := t[id] - if m == nil { - return nil - } - - delete(t, id) - - a.be.MustDeleteAlarm(m) - return m -} - -func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) { - a.mu.Lock() - defer a.mu.Unlock() - if at == pb.AlarmType_NONE { - for _, t := range a.types { - for _, m := range t { - ret = append(ret, m) - } - } - return ret - } - for _, m := range a.types[at] { - ret = append(ret, m) - } - return ret -} - 
-func (a *AlarmStore) restore() error { - a.be.CreateAlarmBucket() - ms, err := a.be.GetAllAlarms() - if err != nil { - return err - } - for _, m := range ms { - a.addToMap(m) - } - a.be.ForceCommit() - return err -} - -func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember { - t := a.types[newAlarm.Alarm] - if t == nil { - t = make(alarmSet) - a.types[newAlarm.Alarm] = t - } - m := t[types.ID(newAlarm.MemberID)] - if m != nil { - return m - } - t[types.ID(newAlarm.MemberID)] = newAlarm - return newAlarm -} diff --git a/server/etcdserver/api/v3client/doc.go b/server/etcdserver/api/v3client/doc.go deleted file mode 100644 index a6a4d7edfa9..00000000000 --- a/server/etcdserver/api/v3client/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3client provides clientv3 interfaces from an etcdserver. -// -// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New: -// -// import ( -// "context" -// -// "go.etcd.io/etcd/server/v3/embed" -// "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" -// ) -// -// ... -// -// // create an embedded EtcdServer from the default configuration -// cfg := embed.NewConfig() -// cfg.Dir = "default.etcd" -// e, err := embed.StartEtcd(cfg) -// if err != nil { -// // handle error! 
-// } -// -// // wrap the EtcdServer with v3client -// cli := v3client.New(e.Server) -// -// // use like an ordinary clientv3 -// resp, err := cli.Put(context.TODO(), "some-key", "it works!") -// if err != nil { -// // handle error! -// } -package v3client diff --git a/server/etcdserver/api/v3compactor/compactor_test.go b/server/etcdserver/api/v3compactor/compactor_test.go deleted file mode 100644 index c7b4252d1c3..00000000000 --- a/server/etcdserver/api/v3compactor/compactor_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3compactor - -import ( - "context" - "sync/atomic" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -type fakeCompactable struct { - testutil.Recorder -} - -func (fc *fakeCompactable) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - fc.Record(testutil.Action{Name: "c", Params: []interface{}{r}}) - return &pb.CompactionResponse{}, nil -} - -type fakeRevGetter struct { - testutil.Recorder - rev int64 -} - -func (fr *fakeRevGetter) Rev() int64 { - fr.Record(testutil.Action{Name: "g"}) - rev := atomic.AddInt64(&fr.rev, 1) - return rev -} - -func (fr *fakeRevGetter) SetRev(rev int64) { - atomic.StoreInt64(&fr.rev, rev) -} diff --git a/server/etcdserver/api/v3compactor/periodic_test.go b/server/etcdserver/api/v3compactor/periodic_test.go deleted file mode 100644 index 1da7f4c4d1d..00000000000 --- a/server/etcdserver/api/v3compactor/periodic_test.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3compactor - -import ( - "errors" - "reflect" - "testing" - "time" - - "github.com/jonboulle/clockwork" - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestPeriodicHourly(t *testing.T) { - retentionHours := 2 - retentionDuration := time.Duration(retentionHours) * time.Hour - - fc := clockwork.NewFakeClock() - // TODO: Do not depand or real time (Recorder.Wait) in unit tests. - rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0} - compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)} - tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable) - - tb.Run() - defer tb.Stop() - - initialIntervals, intervalsPerPeriod := tb.getRetentions(), 10 - - // compaction doesn't happen til 2 hours elapse - for i := 0; i < initialIntervals; i++ { - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - - // very first compaction - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - expectedRevision := int64(1) - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } - - // simulate 3 hours - // now compactor kicks in, every hour - for i := 0; i < 3; i++ { - // advance one hour, one revision for each interval - for j := 0; j < intervalsPerPeriod; j++ { - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - - a, err = compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - - expectedRevision = int64((i + 1) * 10) - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } - } -} - -func TestPeriodicMinutes(t *testing.T) { - retentionMinutes := 5 - retentionDuration := 
time.Duration(retentionMinutes) * time.Minute - - fc := clockwork.NewFakeClock() - rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0} - compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)} - tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable) - - tb.Run() - defer tb.Stop() - - initialIntervals, intervalsPerPeriod := tb.getRetentions(), 10 - - // compaction doesn't happen til 5 minutes elapse - for i := 0; i < initialIntervals; i++ { - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - - // very first compaction - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - expectedRevision := int64(1) - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } - - // compaction happens at every interval - for i := 0; i < 5; i++ { - // advance 5-minute, one revision for each interval - for j := 0; j < intervalsPerPeriod; j++ { - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - - expectedRevision = int64((i + 1) * 10) - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } - } -} - -func TestPeriodicPause(t *testing.T) { - fc := clockwork.NewFakeClock() - retentionDuration := time.Hour - rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0} - compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)} - tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable) - - tb.Run() - tb.Pause() - - n := tb.getRetentions() - - // tb will collect 3 hours of revisions but not compact since paused - 
for i := 0; i < n*3; i++ { - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - // t.revs = [21 22 23 24 25 26 27 28 29 30] - - select { - case a := <-compactable.Chan(): - t.Fatalf("unexpected action %v", a) - case <-time.After(10 * time.Millisecond): - } - - // tb resumes to being blocked on the clock - tb.Resume() - rg.Wait(1) - - // unblock clock, will kick off a compaction at T=3h6m by retry - fc.Advance(tb.getRetryInterval()) - - // T=3h6m - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - - // compact the revision from hour 2:06 - wreq := &pb.CompactionRequest{Revision: int64(1 + 2*n + 1)} - if !reflect.DeepEqual(a[0].Params[0], wreq) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision) - } -} - -func TestPeriodicSkipRevNotChange(t *testing.T) { - retentionMinutes := 5 - retentionDuration := time.Duration(retentionMinutes) * time.Minute - - fc := clockwork.NewFakeClock() - rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0} - compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)} - tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable) - - tb.Run() - defer tb.Stop() - - initialIntervals, intervalsPerPeriod := tb.getRetentions(), 10 - - // first compaction happens til 5 minutes elapsed - for i := 0; i < initialIntervals; i++ { - // every time set the same revision with 100 - rg.SetRev(int64(100)) - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - - // very first compaction - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - - // first compaction the compact revision will be 100+1 - expectedRevision := int64(100 + 1) - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } - - // compaction doesn't happens at every interval since revision not 
change - for i := 0; i < 5; i++ { - for j := 0; j < intervalsPerPeriod; j++ { - rg.SetRev(int64(100)) - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - - _, err := compactable.Wait(1) - if err == nil { - t.Fatal(errors.New("should not compact since the revision not change")) - } - } - - // when revision changed, compaction is normally - for i := 0; i < initialIntervals; i++ { - rg.Wait(1) - fc.Advance(tb.getRetryInterval()) - } - - a, err = compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - - expectedRevision = int64(100 + 2) - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } -} diff --git a/server/etcdserver/api/v3compactor/revision.go b/server/etcdserver/api/v3compactor/revision.go deleted file mode 100644 index 37492f2b4d5..00000000000 --- a/server/etcdserver/api/v3compactor/revision.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3compactor - -import ( - "context" - "sync" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/storage/mvcc" - - "github.com/jonboulle/clockwork" - "go.uber.org/zap" -) - -// Revision compacts the log by purging revisions older than -// the configured reivison number. Compaction happens every 5 minutes. 
-type Revision struct { - lg *zap.Logger - - clock clockwork.Clock - retention int64 - - rg RevGetter - c Compactable - - ctx context.Context - cancel context.CancelFunc - - mu sync.Mutex - paused bool -} - -// newRevision creates a new instance of Revisonal compactor that purges -// the log older than retention revisions from the current revision. -func newRevision(lg *zap.Logger, clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision { - rc := &Revision{ - lg: lg, - clock: clock, - retention: retention, - rg: rg, - c: c, - } - rc.ctx, rc.cancel = context.WithCancel(context.Background()) - return rc -} - -const revInterval = 5 * time.Minute - -// Run runs revision-based compactor. -func (rc *Revision) Run() { - prev := int64(0) - go func() { - for { - select { - case <-rc.ctx.Done(): - return - case <-rc.clock.After(revInterval): - rc.mu.Lock() - p := rc.paused - rc.mu.Unlock() - if p { - continue - } - } - - rev := rc.rg.Rev() - rc.retention - if rev <= 0 || rev == prev { - continue - } - - now := time.Now() - rc.lg.Info( - "starting auto revision compaction", - zap.Int64("revision", rev), - zap.Int64("revision-compaction-retention", rc.retention), - ) - _, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev}) - if err == nil || err == mvcc.ErrCompacted { - prev = rev - rc.lg.Info( - "completed auto revision compaction", - zap.Int64("revision", rev), - zap.Int64("revision-compaction-retention", rc.retention), - zap.Duration("took", time.Since(now)), - ) - } else { - rc.lg.Warn( - "failed auto revision compaction", - zap.Int64("revision", rev), - zap.Int64("revision-compaction-retention", rc.retention), - zap.Duration("retry-interval", revInterval), - zap.Error(err), - ) - } - } - }() -} - -// Stop stops revision-based compactor. -func (rc *Revision) Stop() { - rc.cancel() -} - -// Pause pauses revision-based compactor. 
-func (rc *Revision) Pause() { - rc.mu.Lock() - rc.paused = true - rc.mu.Unlock() -} - -// Resume resumes revision-based compactor. -func (rc *Revision) Resume() { - rc.mu.Lock() - rc.paused = false - rc.mu.Unlock() -} diff --git a/server/etcdserver/api/v3compactor/revision_test.go b/server/etcdserver/api/v3compactor/revision_test.go deleted file mode 100644 index e66cfc0859d..00000000000 --- a/server/etcdserver/api/v3compactor/revision_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3compactor - -import ( - "reflect" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" - - "github.com/jonboulle/clockwork" -) - -func TestRevision(t *testing.T) { - fc := clockwork.NewFakeClock() - rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0} - compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)} - tb := newRevision(zaptest.NewLogger(t), fc, 10, rg, compactable) - - tb.Run() - defer tb.Stop() - - fc.Advance(revInterval) - rg.Wait(1) - // nothing happens - - rg.SetRev(99) // will be 100 - expectedRevision := int64(90) - fc.Advance(revInterval) - rg.Wait(1) - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } - - // skip the same revision - rg.SetRev(99) // will be 100 - rg.Wait(1) - // nothing happens - - rg.SetRev(199) // will be 200 - expectedRevision = int64(190) - fc.Advance(revInterval) - rg.Wait(1) - a, err = compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) - } -} - -func TestRevisionPause(t *testing.T) { - fc := clockwork.NewFakeClock() - rg := &fakeRevGetter{testutil.NewRecorderStream(), 99} // will be 100 - compactable := &fakeCompactable{testutil.NewRecorderStream()} - tb := newRevision(zaptest.NewLogger(t), fc, 10, rg, compactable) - - tb.Run() - tb.Pause() - - // tb will collect 3 hours of revisions but not compact since paused - n := int(time.Hour / revInterval) - for i := 0; i < 3*n; i++ { - fc.Advance(revInterval) - } - // tb ends up 
waiting for the clock - - select { - case a := <-compactable.Chan(): - t.Fatalf("unexpected action %v", a) - case <-time.After(10 * time.Millisecond): - } - - // tb resumes to being blocked on the clock - tb.Resume() - - // unblock clock, will kick off a compaction at hour 3:05 - fc.Advance(revInterval) - rg.Wait(1) - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - wreq := &pb.CompactionRequest{Revision: int64(90)} - if !reflect.DeepEqual(a[0].Params[0], wreq) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision) - } -} diff --git a/server/etcdserver/api/v3discovery/discovery.go b/server/etcdserver/api/v3discovery/discovery.go deleted file mode 100644 index 9d6f03cec71..00000000000 --- a/server/etcdserver/api/v3discovery/discovery.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3discovery provides an implementation of the cluster discovery that -// is used by etcd with v3 client. 
-package v3discovery - -import ( - "context" - "errors" - "math" - "path" - "sort" - "strconv" - "strings" - "time" - - "github.com/jonboulle/clockwork" - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/types" - clientv3 "go.etcd.io/etcd/client/v3" -) - -const ( - discoveryPrefix = "/_etcd/registry" -) - -var ( - ErrInvalidURL = errors.New("discovery: invalid peer URL") - ErrBadSizeKey = errors.New("discovery: size key is bad") - ErrSizeNotFound = errors.New("discovery: size key not found") - ErrFullCluster = errors.New("discovery: cluster is full") - ErrTooManyRetries = errors.New("discovery: too many retries") -) - -var ( - // Number of retries discovery will attempt before giving up and error out. - nRetries = uint(math.MaxUint32) - maxExponentialRetries = uint(8) -) - -type DiscoveryConfig struct { - clientv3.ConfigSpec `json:"client"` - Token string `json:"token"` -} - -type memberInfo struct { - // peerRegKey is the key used by the member when registering in the - // discovery service. - // Format: "/_etcd/registry//members/". - peerRegKey string - // peerURLsMap format: "peerName=peerURLs", i.e., "member1=http://127.0.0.1:2380". - peerURLsMap string - // createRev is the member's CreateRevision in the etcd cluster backing - // the discovery service. - createRev int64 -} - -type clusterInfo struct { - clusterToken string - members []memberInfo -} - -// key prefix for each cluster: "/_etcd/registry/". -func getClusterKeyPrefix(cluster string) string { - return path.Join(discoveryPrefix, cluster) -} - -// key format for cluster size: "/_etcd/registry//_config/size". -func getClusterSizeKey(cluster string) string { - return path.Join(getClusterKeyPrefix(cluster), "_config/size") -} - -// key prefix for each member: "/_etcd/registry//members". -func getMemberKeyPrefix(clusterToken string) string { - return path.Join(getClusterKeyPrefix(clusterToken), "members") -} - -// key format for each member: "/_etcd/registry//members/". 
-func getMemberKey(cluster, memberId string) string { - return path.Join(getMemberKeyPrefix(cluster), memberId) -} - -// GetCluster will connect to the discovery service at the given endpoints and -// retrieve a string describing the cluster -func GetCluster(lg *zap.Logger, cfg *DiscoveryConfig) (cs string, rerr error) { - d, err := newDiscovery(lg, cfg, 0) - if err != nil { - return "", err - } - - defer d.close() - defer func() { - if rerr != nil { - d.lg.Error( - "discovery failed to get cluster", - zap.String("cluster", cs), - zap.Error(rerr), - ) - } else { - d.lg.Info( - "discovery got cluster successfully", - zap.String("cluster", cs), - ) - } - }() - - return d.getCluster() -} - -// JoinCluster will connect to the discovery service at the endpoints, and -// register the server represented by the given id and config to the cluster. -// The parameter `config` is supposed to be in the format "memberName=peerURLs", -// such as "member1=http://127.0.0.1:2380". -// -// The final returned string has the same format as "--initial-cluster", such as -// "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380". 
-func JoinCluster(lg *zap.Logger, cfg *DiscoveryConfig, id types.ID, config string) (cs string, rerr error) { - d, err := newDiscovery(lg, cfg, id) - if err != nil { - return "", err - } - - defer d.close() - defer func() { - if rerr != nil { - d.lg.Error( - "discovery failed to join cluster", - zap.String("cluster", cs), - zap.Error(rerr), - ) - } else { - d.lg.Info( - "discovery joined cluster successfully", - zap.String("cluster", cs), - ) - } - }() - - return d.joinCluster(config) -} - -type discovery struct { - lg *zap.Logger - clusterToken string - memberId types.ID - c *clientv3.Client - retries uint - - cfg *DiscoveryConfig - - clock clockwork.Clock -} - -func newDiscovery(lg *zap.Logger, dcfg *DiscoveryConfig, id types.ID) (*discovery, error) { - if lg == nil { - lg = zap.NewNop() - } - - lg = lg.With(zap.String("discovery-token", dcfg.Token), zap.String("discovery-endpoints", strings.Join(dcfg.Endpoints, ","))) - cfg, err := clientv3.NewClientConfig(&dcfg.ConfigSpec, lg) - if err != nil { - return nil, err - } - - c, err := clientv3.New(*cfg) - if err != nil { - return nil, err - } - return &discovery{ - lg: lg, - clusterToken: dcfg.Token, - memberId: id, - c: c, - cfg: dcfg, - clock: clockwork.NewRealClock(), - }, nil -} - -func (d *discovery) getCluster() (string, error) { - cls, clusterSize, rev, err := d.checkCluster() - if err != nil { - if err == ErrFullCluster { - return cls.getInitClusterStr(clusterSize) - } - return "", err - } - - for cls.Len() < clusterSize { - d.waitPeers(cls, clusterSize, rev) - } - - return cls.getInitClusterStr(clusterSize) -} - -func (d *discovery) joinCluster(config string) (string, error) { - _, _, _, err := d.checkCluster() - if err != nil { - return "", err - } - - if err := d.registerSelf(config); err != nil { - return "", err - } - - cls, clusterSize, rev, err := d.checkCluster() - if err != nil { - return "", err - } - - for cls.Len() < clusterSize { - d.waitPeers(cls, clusterSize, rev) - } - - return 
cls.getInitClusterStr(clusterSize) -} - -func (d *discovery) getClusterSize() (int, error) { - configKey := getClusterSizeKey(d.clusterToken) - ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout) - defer cancel() - - resp, err := d.c.Get(ctx, configKey) - if err != nil { - d.lg.Warn( - "failed to get cluster size from discovery service", - zap.String("clusterSizeKey", configKey), - zap.Error(err), - ) - return 0, err - } - - if len(resp.Kvs) == 0 { - return 0, ErrSizeNotFound - } - - clusterSize, err := strconv.ParseInt(string(resp.Kvs[0].Value), 10, 0) - if err != nil || clusterSize <= 0 { - return 0, ErrBadSizeKey - } - - return int(clusterSize), nil -} - -func (d *discovery) getClusterMembers() (*clusterInfo, int64, error) { - membersKeyPrefix := getMemberKeyPrefix(d.clusterToken) - ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout) - defer cancel() - - resp, err := d.c.Get(ctx, membersKeyPrefix, clientv3.WithPrefix()) - if err != nil { - d.lg.Warn( - "failed to get cluster members from discovery service", - zap.String("membersKeyPrefix", membersKeyPrefix), - zap.Error(err), - ) - return nil, 0, err - } - - cls := &clusterInfo{clusterToken: d.clusterToken} - for _, kv := range resp.Kvs { - mKey := strings.TrimSpace(string(kv.Key)) - mValue := strings.TrimSpace(string(kv.Value)) - - if err := cls.add(mKey, mValue, kv.CreateRevision); err != nil { - d.lg.Warn( - err.Error(), - zap.String("memberKey", mKey), - zap.String("memberInfo", mValue), - ) - } else { - d.lg.Info( - "found peer from discovery service", - zap.String("memberKey", mKey), - zap.String("memberInfo", mValue), - ) - } - } - - return cls, resp.Header.Revision, nil -} - -func (d *discovery) checkClusterRetry() (*clusterInfo, int, int64, error) { - if d.retries < nRetries { - d.logAndBackoffForRetry("cluster status check") - return d.checkCluster() - } - return nil, 0, 0, ErrTooManyRetries -} - -func (d *discovery) checkCluster() (*clusterInfo, 
int, int64, error) { - clusterSize, err := d.getClusterSize() - if err != nil { - if err == ErrSizeNotFound || err == ErrBadSizeKey { - return nil, 0, 0, err - } - - return d.checkClusterRetry() - } - - cls, rev, err := d.getClusterMembers() - if err != nil { - return d.checkClusterRetry() - } - d.retries = 0 - - // find self position - memberSelfId := getMemberKey(d.clusterToken, d.memberId.String()) - idx := 0 - for _, m := range cls.members { - if m.peerRegKey == memberSelfId { - break - } - if idx >= clusterSize-1 { - return cls, clusterSize, rev, ErrFullCluster - } - idx++ - } - return cls, clusterSize, rev, nil -} - -func (d *discovery) registerSelfRetry(contents string) error { - if d.retries < nRetries { - d.logAndBackoffForRetry("register member itself") - return d.registerSelf(contents) - } - return ErrTooManyRetries -} - -func (d *discovery) registerSelf(contents string) error { - ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout) - memberKey := getMemberKey(d.clusterToken, d.memberId.String()) - _, err := d.c.Put(ctx, memberKey, contents) - cancel() - - if err != nil { - d.lg.Warn( - "failed to register members itself to the discovery service", - zap.String("memberKey", memberKey), - zap.Error(err), - ) - return d.registerSelfRetry(contents) - } - d.retries = 0 - - d.lg.Info( - "register member itself successfully", - zap.String("memberKey", memberKey), - zap.String("memberInfo", contents), - ) - - return nil -} - -func (d *discovery) waitPeers(cls *clusterInfo, clusterSize int, rev int64) { - // watch from the next revision - membersKeyPrefix := getMemberKeyPrefix(d.clusterToken) - w := d.c.Watch(context.Background(), membersKeyPrefix, clientv3.WithPrefix(), clientv3.WithRev(rev+1)) - - d.lg.Info( - "waiting for peers from discovery service", - zap.Int("clusterSize", clusterSize), - zap.Int("found-peers", cls.Len()), - ) - - // waiting for peers until all needed peers are returned - for wresp := range w { - for _, ev := 
range wresp.Events { - mKey := strings.TrimSpace(string(ev.Kv.Key)) - mValue := strings.TrimSpace(string(ev.Kv.Value)) - - if err := cls.add(mKey, mValue, ev.Kv.CreateRevision); err != nil { - d.lg.Warn( - err.Error(), - zap.String("memberKey", mKey), - zap.String("memberInfo", mValue), - ) - } else { - d.lg.Info( - "found peer from discovery service", - zap.String("memberKey", mKey), - zap.String("memberInfo", mValue), - ) - } - } - - if cls.Len() >= clusterSize { - break - } - } - - d.lg.Info( - "found all needed peers from discovery service", - zap.Int("clusterSize", clusterSize), - zap.Int("found-peers", cls.Len()), - ) -} - -func (d *discovery) logAndBackoffForRetry(step string) { - d.retries++ - // logAndBackoffForRetry stops exponential backoff when the retries are - // more than maxExpoentialRetries and is set to a constant backoff afterward. - retries := d.retries - if retries > maxExponentialRetries { - retries = maxExponentialRetries - } - retryTimeInSecond := time.Duration(0x1< clusterSize { - peerURLs = peerURLs[:clusterSize] - } - - us := strings.Join(peerURLs, ",") - _, err := types.NewURLsMap(us) - if err != nil { - return us, ErrInvalidURL - } - - return us, nil -} - -func (cls *clusterInfo) getPeerURLs() []string { - var peerURLs []string - for _, peer := range cls.members { - peerURLs = append(peerURLs, peer.peerURLsMap) - } - return peerURLs -} diff --git a/server/etcdserver/api/v3discovery/discovery_test.go b/server/etcdserver/api/v3discovery/discovery_test.go deleted file mode 100644 index 79df7378419..00000000000 --- a/server/etcdserver/api/v3discovery/discovery_test.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3discovery - -import ( - "context" - "errors" - "fmt" - "testing" - - "github.com/jonboulle/clockwork" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/pkg/v3/types" - clientv3 "go.etcd.io/etcd/client/v3" -) - -// fakeKVForClusterSize is used to test getClusterSize. -type fakeKVForClusterSize struct { - *fakeBaseKV - clusterSizeStr string -} - -// Get when we only need to overwrite the method `Get`. -func (fkv *fakeKVForClusterSize) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { - if fkv.clusterSizeStr == "" { - // cluster size isn't configured in this case. 
- return &clientv3.GetResponse{}, nil - } - - return &clientv3.GetResponse{ - Kvs: []*mvccpb.KeyValue{ - { - Value: []byte(fkv.clusterSizeStr), - }, - }, - }, nil -} - -func TestGetClusterSize(t *testing.T) { - cases := []struct { - name string - clusterSizeStr string - expectedErr error - expectedSize int - }{ - { - name: "cluster size not defined", - clusterSizeStr: "", - expectedErr: ErrSizeNotFound, - }, - { - name: "invalid cluster size", - clusterSizeStr: "invalidSize", - expectedErr: ErrBadSizeKey, - }, - { - name: "valid cluster size", - clusterSizeStr: "3", - expectedErr: nil, - expectedSize: 3, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - d := &discovery{ - lg: lg, - c: &clientv3.Client{ - KV: &fakeKVForClusterSize{ - fakeBaseKV: &fakeBaseKV{}, - clusterSizeStr: tc.clusterSizeStr, - }, - }, - cfg: &DiscoveryConfig{}, - clusterToken: "fakeToken", - } - - if cs, err := d.getClusterSize(); err != tc.expectedErr { - t.Errorf("Unexpected error, expected: %v got: %v", tc.expectedErr, err) - } else { - if err == nil && cs != tc.expectedSize { - t.Errorf("Unexpected cluster size, expected: %d got: %d", tc.expectedSize, cs) - } - } - }) - } -} - -// fakeKVForClusterMembers is used to test getClusterMembers. -type fakeKVForClusterMembers struct { - *fakeBaseKV - members []memberInfo -} - -// Get when we only need to overwrite method `Get`. 
-func (fkv *fakeKVForClusterMembers) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { - kvs := memberInfoToKeyValues(fkv.members) - - return &clientv3.GetResponse{ - Header: &etcdserverpb.ResponseHeader{ - Revision: 10, - }, - Kvs: kvs, - }, nil -} - -func memberInfoToKeyValues(members []memberInfo) []*mvccpb.KeyValue { - kvs := make([]*mvccpb.KeyValue, 0) - for _, mi := range members { - kvs = append(kvs, &mvccpb.KeyValue{ - Key: []byte(mi.peerRegKey), - Value: []byte(mi.peerURLsMap), - CreateRevision: mi.createRev, - }) - } - - return kvs -} - -func TestGetClusterMembers(t *testing.T) { - actualMemberInfo := []memberInfo{ - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 8, - }, - { - // invalid peer registry key - peerRegKey: "/invalidPrefix/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - // invalid peer info format - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(), - peerURLsMap: "infra3=http://192.168.0.103:2380", - createRev: 7, - }, - { - // duplicate peer - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 2, - }, - } - - // sort by CreateRevision - expectedMemberInfo := []memberInfo{ - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(), - peerURLsMap: 
"infra3=http://192.168.0.103:2380", - createRev: 7, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 8, - }, - } - - lg := zaptest.NewLogger(t) - - d := &discovery{ - lg: lg, - c: &clientv3.Client{ - KV: &fakeKVForClusterMembers{ - fakeBaseKV: &fakeBaseKV{}, - members: actualMemberInfo, - }, - }, - cfg: &DiscoveryConfig{}, - clusterToken: "fakeToken", - } - - clsInfo, _, err := d.getClusterMembers() - if err != nil { - t.Errorf("Failed to get cluster members, error: %v", err) - } - - if clsInfo.Len() != len(expectedMemberInfo) { - t.Errorf("unexpected member count, expected: %d, got: %d", len(expectedMemberInfo), clsInfo.Len()) - } - - for i, m := range clsInfo.members { - if m != expectedMemberInfo[i] { - t.Errorf("unexpected member[%d], expected: %v, got: %v", i, expectedMemberInfo[i], m) - } - } -} - -// fakeKVForCheckCluster is used to test checkCluster. -type fakeKVForCheckCluster struct { - *fakeBaseKV - t *testing.T - token string - clusterSizeStr string - members []memberInfo - getSizeRetries int - getMembersRetries int -} - -// Get when we only need to overwrite method `Get`. -func (fkv *fakeKVForCheckCluster) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { - clusterSizeKey := fmt.Sprintf("/_etcd/registry/%s/_config/size", fkv.token) - clusterMembersKey := fmt.Sprintf("/_etcd/registry/%s/members", fkv.token) - - if key == clusterSizeKey { - if fkv.getSizeRetries > 0 { - fkv.getSizeRetries-- - // discovery client should retry on error. - return nil, errors.New("get cluster size failed") - } - return &clientv3.GetResponse{ - Kvs: []*mvccpb.KeyValue{ - { - Value: []byte(fkv.clusterSizeStr), - }, - }, - }, nil - - } else if key == clusterMembersKey { - if fkv.getMembersRetries > 0 { - fkv.getMembersRetries-- - // discovery client should retry on error. 
- return nil, errors.New("get cluster members failed") - } - kvs := memberInfoToKeyValues(fkv.members) - - return &clientv3.GetResponse{ - Header: &etcdserverpb.ResponseHeader{ - Revision: 10, - }, - Kvs: kvs, - }, nil - } else { - fkv.t.Errorf("unexpected key: %s", key) - return nil, fmt.Errorf("unexpected key: %s", key) - } -} - -func TestCheckCluster(t *testing.T) { - actualMemberInfo := []memberInfo{ - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 8, - }, - { - // invalid peer registry key - peerRegKey: "/invalidPrefix/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - // invalid peer info format - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(), - peerURLsMap: "infra3=http://192.168.0.103:2380", - createRev: 7, - }, - { - // duplicate peer - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 2, - }, - } - - // sort by CreateRevision - expectedMemberInfo := []memberInfo{ - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(), - peerURLsMap: "infra3=http://192.168.0.103:2380", - createRev: 7, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 8, - }, - } - - cases := []struct { - name string - memberId types.ID - 
getSizeRetries int - getMembersRetries int - expectedError error - }{ - { - name: "no retries", - memberId: 101, - getSizeRetries: 0, - getMembersRetries: 0, - expectedError: nil, - }, - { - name: "2 retries for getClusterSize", - memberId: 102, - getSizeRetries: 2, - getMembersRetries: 0, - expectedError: nil, - }, - { - name: "2 retries for getClusterMembers", - memberId: 103, - getSizeRetries: 0, - getMembersRetries: 2, - expectedError: nil, - }, - { - name: "error due to cluster full", - memberId: 104, - getSizeRetries: 0, - getMembersRetries: 0, - expectedError: ErrFullCluster, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - - fkv := &fakeKVForCheckCluster{ - fakeBaseKV: &fakeBaseKV{}, - t: t, - token: "fakeToken", - clusterSizeStr: "3", - members: actualMemberInfo, - getSizeRetries: tc.getSizeRetries, - getMembersRetries: tc.getMembersRetries, - } - - d := &discovery{ - lg: lg, - c: &clientv3.Client{ - KV: fkv, - }, - cfg: &DiscoveryConfig{}, - clusterToken: "fakeToken", - memberId: tc.memberId, - clock: clockwork.NewRealClock(), - } - - clsInfo, _, _, err := d.checkCluster() - if err != tc.expectedError { - t.Errorf("Unexpected error, expected: %v, got: %v", tc.expectedError, err) - } - - if err == nil { - if fkv.getSizeRetries != 0 || fkv.getMembersRetries != 0 { - t.Errorf("Discovery client did not retry checking cluster on error, remaining etries: (%d, %d)", fkv.getSizeRetries, fkv.getMembersRetries) - } - - if clsInfo.Len() != len(expectedMemberInfo) { - t.Errorf("Unexpected member count, expected: %d, got: %d", len(expectedMemberInfo), clsInfo.Len()) - } - - for mIdx, m := range clsInfo.members { - if m != expectedMemberInfo[mIdx] { - t.Errorf("Unexpected member[%d], expected: %v, got: %v", mIdx, expectedMemberInfo[mIdx], m) - } - } - } - }) - } -} - -// fakeKVForRegisterSelf is used to test registerSelf. 
-type fakeKVForRegisterSelf struct { - *fakeBaseKV - t *testing.T - expectedRegKey string - expectedRegValue string - retries int -} - -// Put when we only need to overwrite method `Put`. -func (fkv *fakeKVForRegisterSelf) Put(ctx context.Context, key string, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) { - if key != fkv.expectedRegKey { - fkv.t.Errorf("unexpected register key, expected: %s, got: %s", fkv.expectedRegKey, key) - } - - if val != fkv.expectedRegValue { - fkv.t.Errorf("unexpected register value, expected: %s, got: %s", fkv.expectedRegValue, val) - } - - if fkv.retries > 0 { - fkv.retries-- - // discovery client should retry on error. - return nil, errors.New("register self failed") - } - - return nil, nil -} - -func TestRegisterSelf(t *testing.T) { - cases := []struct { - name string - token string - memberId types.ID - expectedRegKey string - expectedRegValue string - retries int // when retries > 0, then return an error on Put request. - }{ - { - name: "no retry with token1", - token: "token1", - memberId: 101, - expectedRegKey: "/_etcd/registry/token1/members/" + types.ID(101).String(), - expectedRegValue: "infra=http://127.0.0.1:2380", - retries: 0, - }, - { - name: "no retry with token2", - token: "token2", - memberId: 102, - expectedRegKey: "/_etcd/registry/token2/members/" + types.ID(102).String(), - expectedRegValue: "infra=http://127.0.0.1:2380", - retries: 0, - }, - { - name: "2 retries", - token: "token3", - memberId: 103, - expectedRegKey: "/_etcd/registry/token3/members/" + types.ID(103).String(), - expectedRegValue: "infra=http://127.0.0.1:2380", - retries: 2, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - fkv := &fakeKVForRegisterSelf{ - fakeBaseKV: &fakeBaseKV{}, - t: t, - expectedRegKey: tc.expectedRegKey, - expectedRegValue: tc.expectedRegValue, - retries: tc.retries, - } - - d := &discovery{ - lg: lg, - clusterToken: tc.token, - memberId: 
tc.memberId, - cfg: &DiscoveryConfig{}, - c: &clientv3.Client{ - KV: fkv, - }, - clock: clockwork.NewRealClock(), - } - - if err := d.registerSelf(tc.expectedRegValue); err != nil { - t.Errorf("Error occuring on register member self: %v", err) - } - - if fkv.retries != 0 { - t.Errorf("Discovery client did not retry registering itself on error, remaining retries: %d", fkv.retries) - } - }) - } -} - -// fakeWatcherForWaitPeers is used to test waitPeers. -type fakeWatcherForWaitPeers struct { - *fakeBaseWatcher - t *testing.T - token string - members []memberInfo -} - -// Watch we only need to overwrite method `Watch`. -func (fw *fakeWatcherForWaitPeers) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { - expectedWatchKey := fmt.Sprintf("/_etcd/registry/%s/members", fw.token) - if key != expectedWatchKey { - fw.t.Errorf("unexpected watch key, expected: %s, got: %s", expectedWatchKey, key) - } - - ch := make(chan clientv3.WatchResponse, 1) - go func() { - for _, mi := range fw.members { - ch <- clientv3.WatchResponse{ - Events: []*clientv3.Event{ - { - Kv: &mvccpb.KeyValue{ - Key: []byte(mi.peerRegKey), - Value: []byte(mi.peerURLsMap), - CreateRevision: mi.createRev, - }, - }, - }, - } - } - close(ch) - }() - return ch -} - -func TestWaitPeers(t *testing.T) { - actualMemberInfo := []memberInfo{ - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 8, - }, - { - // invalid peer registry key - peerRegKey: "/invalidPrefix/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - // invalid peer info format - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "http://192.168.0.102:2380", - 
createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(), - peerURLsMap: "infra3=http://192.168.0.103:2380", - createRev: 7, - }, - { - // duplicate peer - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 2, - }, - } - - // sort by CreateRevision - expectedMemberInfo := []memberInfo{ - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(102).String(), - peerURLsMap: "infra2=http://192.168.0.102:2380", - createRev: 6, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(103).String(), - peerURLsMap: "infra3=http://192.168.0.103:2380", - createRev: 7, - }, - { - peerRegKey: "/_etcd/registry/fakeToken/members/" + types.ID(101).String(), - peerURLsMap: "infra1=http://192.168.0.100:2380", - createRev: 8, - }, - } - - lg := zaptest.NewLogger(t) - - d := &discovery{ - lg: lg, - c: &clientv3.Client{ - KV: &fakeBaseKV{}, - Watcher: &fakeWatcherForWaitPeers{ - fakeBaseWatcher: &fakeBaseWatcher{}, - t: t, - token: "fakeToken", - members: actualMemberInfo, - }, - }, - cfg: &DiscoveryConfig{}, - clusterToken: "fakeToken", - } - - cls := clusterInfo{ - clusterToken: "fakeToken", - } - - d.waitPeers(&cls, 3, 0) - - if cls.Len() != len(expectedMemberInfo) { - t.Errorf("unexpected member number returned by watch, expected: %d, got: %d", len(expectedMemberInfo), cls.Len()) - } - - for i, m := range cls.members { - if m != expectedMemberInfo[i] { - t.Errorf("unexpected member[%d] returned by watch, expected: %v, got: %v", i, expectedMemberInfo[i], m) - } - } -} - -func TestGetInitClusterStr(t *testing.T) { - cases := []struct { - name string - members []memberInfo - clusterSize int - expectedResult string - expectedError error - }{ - { - name: "1 member", - members: []memberInfo{ - { - peerURLsMap: "infra2=http://192.168.0.102:2380", - }, - }, - clusterSize: 1, - expectedResult: "infra2=http://192.168.0.102:2380", - 
expectedError: nil, - }, - { - name: "2 members", - members: []memberInfo{ - { - peerURLsMap: "infra2=http://192.168.0.102:2380", - }, - { - peerURLsMap: "infra3=http://192.168.0.103:2380", - }, - }, - clusterSize: 2, - expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380", - expectedError: nil, - }, - { - name: "3 members", - members: []memberInfo{ - { - peerURLsMap: "infra2=http://192.168.0.102:2380", - }, - { - peerURLsMap: "infra3=http://192.168.0.103:2380", - }, - { - peerURLsMap: "infra1=http://192.168.0.100:2380", - }, - }, - clusterSize: 3, - expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380,infra1=http://192.168.0.100:2380", - expectedError: nil, - }, - { - name: "should ignore redundant member", - members: []memberInfo{ - { - peerURLsMap: "infra2=http://192.168.0.102:2380", - }, - { - peerURLsMap: "infra3=http://192.168.0.103:2380", - }, - { - peerURLsMap: "infra1=http://192.168.0.100:2380", - }, - { - peerURLsMap: "infra4=http://192.168.0.104:2380", - }, - }, - clusterSize: 3, - expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380,infra1=http://192.168.0.100:2380", - expectedError: nil, - }, - { - name: "invalid_peer_url", - members: []memberInfo{ - { - peerURLsMap: "infra2=http://192.168.0.102:2380", - }, - { - peerURLsMap: "infra3=http://192.168.0.103", //not host:port - }, - }, - clusterSize: 2, - expectedResult: "infra2=http://192.168.0.102:2380,infra3=http://192.168.0.103:2380", - expectedError: ErrInvalidURL, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - clsInfo := &clusterInfo{ - members: tc.members, - } - - retStr, err := clsInfo.getInitClusterStr(tc.clusterSize) - if err != tc.expectedError { - t.Errorf("Unexpected error, expected: %v, got: %v", tc.expectedError, err) - } - - if err == nil { - if retStr != tc.expectedResult { - t.Errorf("Unexpected result, expected: %s, got: %s", tc.expectedResult, retStr) - } - } - }) - 
} -} - -// fakeBaseKV is the base struct implementing the interface `clientv3.KV`. -type fakeBaseKV struct{} - -func (fkv *fakeBaseKV) Put(ctx context.Context, key string, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) { - return nil, nil -} - -func (fkv *fakeBaseKV) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { - return nil, nil -} - -func (fkv *fakeBaseKV) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) { - return nil, nil -} - -func (fkv *fakeBaseKV) Compact(ctx context.Context, rev int64, opts ...clientv3.CompactOption) (*clientv3.CompactResponse, error) { - return nil, nil -} - -func (fkv *fakeBaseKV) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) { - return clientv3.OpResponse{}, nil -} - -func (fkv *fakeBaseKV) Txn(ctx context.Context) clientv3.Txn { - return nil -} - -// fakeBaseWatcher is the base struct implementing the interface `clientv3.Watcher`. -type fakeBaseWatcher struct{} - -func (fw *fakeBaseWatcher) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { - return nil -} - -func (fw *fakeBaseWatcher) RequestProgress(ctx context.Context) error { - return nil -} - -func (fw *fakeBaseWatcher) Close() error { - return nil -} diff --git a/server/etcdserver/api/v3election/election.go b/server/etcdserver/api/v3election/election.go deleted file mode 100644 index 77a9c4bcb42..00000000000 --- a/server/etcdserver/api/v3election/election.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3election - -import ( - "context" - "errors" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" -) - -// ErrMissingLeaderKey is returned when election API request -// is missing the "leader" field. -var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`) - -type electionServer struct { - c *clientv3.Client -} - -func NewElectionServer(c *clientv3.Client) epb.ElectionServer { - return &electionServer{c} -} - -func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) { - s, err := es.session(ctx, req.Lease) - if err != nil { - return nil, err - } - e := concurrency.NewElection(s, string(req.Name)) - if err = e.Campaign(ctx, string(req.Value)); err != nil { - return nil, err - } - return &epb.CampaignResponse{ - Header: e.Header(), - Leader: &epb.LeaderKey{ - Name: req.Name, - Key: []byte(e.Key()), - Rev: e.Rev(), - Lease: int64(s.Lease()), - }, - }, nil -} - -func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) { - if req.Leader == nil { - return nil, ErrMissingLeaderKey - } - s, err := es.session(ctx, req.Leader.Lease) - if err != nil { - return nil, err - } - e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) - if err := e.Proclaim(ctx, string(req.Value)); err != nil { - return nil, err - } - return &epb.ProclaimResponse{Header: e.Header()}, nil -} - -func 
(es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error { - s, err := es.session(stream.Context(), -1) - if err != nil { - return err - } - e := concurrency.NewElection(s, string(req.Name)) - ch := e.Observe(stream.Context()) - for stream.Context().Err() == nil { - select { - case <-stream.Context().Done(): - case resp, ok := <-ch: - if !ok { - return nil - } - lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]} - if err := stream.Send(lresp); err != nil { - return err - } - } - } - return stream.Context().Err() -} - -func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) { - s, err := es.session(ctx, -1) - if err != nil { - return nil, err - } - l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx) - if lerr != nil { - return nil, lerr - } - return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil -} - -func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) { - if req.Leader == nil { - return nil, ErrMissingLeaderKey - } - s, err := es.session(ctx, req.Leader.Lease) - if err != nil { - return nil, err - } - e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) - if err := e.Resign(ctx); err != nil { - return nil, err - } - return &epb.ResignResponse{Header: e.Header()}, nil -} - -func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) { - s, err := concurrency.NewSession( - es.c, - concurrency.WithLease(clientv3.LeaseID(lease)), - concurrency.WithContext(ctx), - ) - if err != nil { - return nil, err - } - s.Orphan() - return s, nil -} diff --git a/server/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/server/etcdserver/api/v3election/v3electionpb/v3election.pb.go deleted file mode 100644 index 81cf59b9b25..00000000000 --- a/server/etcdserver/api/v3election/v3electionpb/v3election.pb.go +++ 
/dev/null @@ -1,2541 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: v3election.proto - -package v3electionpb - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - etcdserverpb "go.etcd.io/etcd/api/v3/etcdserverpb" - mvccpb "go.etcd.io/etcd/api/v3/mvccpb" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type CampaignRequest struct { - // name is the election's identifier for the campaign. - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // lease is the ID of the lease attached to leadership of the election. If the - // lease expires or is revoked before resigning leadership, then the - // leadership is transferred to the next campaigner, if any. - Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` - // value is the initial proclaimed value set when the campaigner wins the - // election. 
- Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CampaignRequest) Reset() { *m = CampaignRequest{} } -func (m *CampaignRequest) String() string { return proto.CompactTextString(m) } -func (*CampaignRequest) ProtoMessage() {} -func (*CampaignRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{0} -} -func (m *CampaignRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CampaignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CampaignRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CampaignRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CampaignRequest.Merge(m, src) -} -func (m *CampaignRequest) XXX_Size() int { - return m.Size() -} -func (m *CampaignRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CampaignRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CampaignRequest proto.InternalMessageInfo - -func (m *CampaignRequest) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *CampaignRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -func (m *CampaignRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type CampaignResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // leader describes the resources used for holding leadereship of the election. 
- Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CampaignResponse) Reset() { *m = CampaignResponse{} } -func (m *CampaignResponse) String() string { return proto.CompactTextString(m) } -func (*CampaignResponse) ProtoMessage() {} -func (*CampaignResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{1} -} -func (m *CampaignResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CampaignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CampaignResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CampaignResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CampaignResponse.Merge(m, src) -} -func (m *CampaignResponse) XXX_Size() int { - return m.Size() -} -func (m *CampaignResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CampaignResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CampaignResponse proto.InternalMessageInfo - -func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *CampaignResponse) GetLeader() *LeaderKey { - if m != nil { - return m.Leader - } - return nil -} - -type LeaderKey struct { - // name is the election identifier that correponds to the leadership key. - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // key is an opaque key representing the ownership of the election. If the key - // is deleted, then leadership is lost. - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - // rev is the creation revision of the key. 
It can be used to test for ownership - // of an election during transactions by testing the key's creation revision - // matches rev. - Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"` - // lease is the lease ID of the election leader. - Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaderKey) Reset() { *m = LeaderKey{} } -func (m *LeaderKey) String() string { return proto.CompactTextString(m) } -func (*LeaderKey) ProtoMessage() {} -func (*LeaderKey) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{2} -} -func (m *LeaderKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaderKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaderKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaderKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaderKey.Merge(m, src) -} -func (m *LeaderKey) XXX_Size() int { - return m.Size() -} -func (m *LeaderKey) XXX_DiscardUnknown() { - xxx_messageInfo_LeaderKey.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaderKey proto.InternalMessageInfo - -func (m *LeaderKey) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *LeaderKey) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *LeaderKey) GetRev() int64 { - if m != nil { - return m.Rev - } - return 0 -} - -func (m *LeaderKey) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -type LeaderRequest struct { - // name is the election identifier for the leadership information. 
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } -func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } -func (*LeaderRequest) ProtoMessage() {} -func (*LeaderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{3} -} -func (m *LeaderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaderRequest.Merge(m, src) -} -func (m *LeaderRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaderRequest proto.InternalMessageInfo - -func (m *LeaderRequest) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -type LeaderResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // kv is the key-value pair representing the latest leader update. 
- Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } -func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } -func (*LeaderResponse) ProtoMessage() {} -func (*LeaderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{4} -} -func (m *LeaderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaderResponse.Merge(m, src) -} -func (m *LeaderResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaderResponse proto.InternalMessageInfo - -func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LeaderResponse) GetKv() *mvccpb.KeyValue { - if m != nil { - return m.Kv - } - return nil -} - -type ResignRequest struct { - // leader is the leadership to relinquish by resignation. 
- Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResignRequest) Reset() { *m = ResignRequest{} } -func (m *ResignRequest) String() string { return proto.CompactTextString(m) } -func (*ResignRequest) ProtoMessage() {} -func (*ResignRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{5} -} -func (m *ResignRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResignRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResignRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResignRequest.Merge(m, src) -} -func (m *ResignRequest) XXX_Size() int { - return m.Size() -} -func (m *ResignRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResignRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResignRequest proto.InternalMessageInfo - -func (m *ResignRequest) GetLeader() *LeaderKey { - if m != nil { - return m.Leader - } - return nil -} - -type ResignResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResignResponse) Reset() { *m = ResignResponse{} } -func (m *ResignResponse) String() string { return proto.CompactTextString(m) } -func (*ResignResponse) ProtoMessage() {} -func (*ResignResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{6} -} -func (m *ResignResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResignResponse) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResignResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResignResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResignResponse.Merge(m, src) -} -func (m *ResignResponse) XXX_Size() int { - return m.Size() -} -func (m *ResignResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ResignResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ResignResponse proto.InternalMessageInfo - -func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type ProclaimRequest struct { - // leader is the leadership hold on the election. - Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` - // value is an update meant to overwrite the leader's current value. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } -func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } -func (*ProclaimRequest) ProtoMessage() {} -func (*ProclaimRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{7} -} -func (m *ProclaimRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProclaimRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProclaimRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProclaimRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProclaimRequest.Merge(m, src) -} -func (m 
*ProclaimRequest) XXX_Size() int { - return m.Size() -} -func (m *ProclaimRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ProclaimRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ProclaimRequest proto.InternalMessageInfo - -func (m *ProclaimRequest) GetLeader() *LeaderKey { - if m != nil { - return m.Leader - } - return nil -} - -func (m *ProclaimRequest) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type ProclaimResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } -func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) } -func (*ProclaimResponse) ProtoMessage() {} -func (*ProclaimResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{8} -} -func (m *ProclaimResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProclaimResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProclaimResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProclaimResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProclaimResponse.Merge(m, src) -} -func (m *ProclaimResponse) XXX_Size() int { - return m.Size() -} -func (m *ProclaimResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ProclaimResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ProclaimResponse proto.InternalMessageInfo - -func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest") - 
proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse") - proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey") - proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest") - proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse") - proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest") - proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse") - proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest") - proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse") -} - -func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) } - -var fileDescriptor_c9b1f26cc432a035 = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0x59, 0x27, 0x84, 0x32, 0xa4, 0xad, 0x65, 0x82, 0x08, 0x21, 0xb8, 0xd1, 0x72, 0xa9, - 0x72, 0xb0, 0x51, 0xc3, 0x29, 0x27, 0x04, 0x02, 0x55, 0x2a, 0x12, 0xe0, 0x03, 0x82, 0xe3, 0xda, - 0x1d, 0xb9, 0x91, 0x1d, 0xaf, 0xb1, 0x5d, 0x4b, 0xb9, 0xf2, 0x0a, 0x1c, 0xe0, 0x91, 0x38, 0x22, - 0xf1, 0x02, 0x28, 0xf0, 0x20, 0x68, 0x77, 0xed, 0xfa, 0x8f, 0x12, 0x84, 0x9a, 0xdb, 0x78, 0xe7, - 0xdb, 0xf9, 0xcd, 0x37, 0x3b, 0x09, 0xe8, 0xf9, 0x0c, 0x43, 0xf4, 0xb2, 0x05, 0x8f, 0xac, 0x38, - 0xe1, 0x19, 0x37, 0xfa, 0xd5, 0x49, 0xec, 0x8e, 0x06, 0x3e, 0xf7, 0xb9, 0x4c, 0xd8, 0x22, 0x52, - 0x9a, 0xd1, 0x11, 0x66, 0xde, 0xb9, 0xcd, 0xe2, 0x85, 0x2d, 0x82, 0x14, 0x93, 0x1c, 0x93, 0xd8, - 0xb5, 0x93, 0xd8, 0x2b, 0x04, 0xc3, 0x2b, 0xc1, 0x32, 0xf7, 0xbc, 0xd8, 0xb5, 0x83, 0xbc, 0xc8, - 0x8c, 0x7d, 0xce, 0xfd, 0x10, 0x65, 0x8e, 0x45, 0x11, 0xcf, 0x98, 0x20, 0xa5, 0x2a, 0x4b, 0xdf, - 0xc1, 0xe1, 0x0b, 0xb6, 0x8c, 0xd9, 0xc2, 0x8f, 0x1c, 0xfc, 0x74, 0x89, 0x69, 0x66, 0x18, 0xd0, - 0x8d, 0xd8, 0x12, 0x87, 0x64, 0x42, 0x8e, 0xfb, 0x8e, 0x8c, 0x8d, 0x01, 0xdc, 0x0c, 0x91, 0xa5, - 0x38, 0xd4, 0x26, 0xe4, 
0xb8, 0xe3, 0xa8, 0x0f, 0x71, 0x9a, 0xb3, 0xf0, 0x12, 0x87, 0x1d, 0x29, - 0x55, 0x1f, 0x74, 0x05, 0x7a, 0x55, 0x32, 0x8d, 0x79, 0x94, 0xa2, 0xf1, 0x14, 0x7a, 0x17, 0xc8, - 0xce, 0x31, 0x91, 0x55, 0xef, 0x9c, 0x8c, 0xad, 0xba, 0x0f, 0xab, 0xd4, 0x9d, 0x4a, 0x8d, 0x53, - 0x68, 0x0d, 0x1b, 0x7a, 0xa1, 0xba, 0xa5, 0xc9, 0x5b, 0xf7, 0xad, 0xfa, 0xa8, 0xac, 0xd7, 0x32, - 0x77, 0x86, 0x2b, 0xa7, 0x90, 0xd1, 0x8f, 0x70, 0xfb, 0xea, 0x70, 0xa3, 0x0f, 0x1d, 0x3a, 0x01, - 0xae, 0x64, 0xb9, 0xbe, 0x23, 0x42, 0x71, 0x92, 0x60, 0x2e, 0x1d, 0x74, 0x1c, 0x11, 0x56, 0x5e, - 0xbb, 0x35, 0xaf, 0xf4, 0x31, 0xec, 0xab, 0xd2, 0xff, 0x18, 0x13, 0xbd, 0x80, 0x83, 0x52, 0xb4, - 0x93, 0xf1, 0x09, 0x68, 0x41, 0x5e, 0x98, 0xd6, 0x2d, 0xf5, 0xa2, 0xd6, 0x19, 0xae, 0xde, 0x8b, - 0x01, 0x3b, 0x5a, 0x90, 0xd3, 0x67, 0xb0, 0xef, 0x60, 0x5a, 0x7b, 0xb5, 0x6a, 0x56, 0xe4, 0xff, - 0x66, 0xf5, 0x0a, 0x0e, 0xca, 0x0a, 0xbb, 0xf4, 0x4a, 0x3f, 0xc0, 0xe1, 0xdb, 0x84, 0x7b, 0x21, - 0x5b, 0x2c, 0xaf, 0xdb, 0x4b, 0xb5, 0x48, 0x5a, 0x7d, 0x91, 0x4e, 0x41, 0xaf, 0x2a, 0xef, 0xd2, - 0xe3, 0xc9, 0xd7, 0x2e, 0xec, 0xbd, 0x2c, 0x1a, 0x30, 0x02, 0xd8, 0x2b, 0xf7, 0xd3, 0x78, 0xd4, - 0xec, 0xac, 0xf5, 0x53, 0x18, 0x99, 0xdb, 0xd2, 0x8a, 0x42, 0x27, 0x9f, 0x7f, 0xfe, 0xf9, 0xa2, - 0x8d, 0xe8, 0x3d, 0x3b, 0x9f, 0xd9, 0xa5, 0xd0, 0xf6, 0x0a, 0xd9, 0x9c, 0x4c, 0x05, 0xac, 0xf4, - 0xd0, 0x86, 0xb5, 0xa6, 0xd6, 0x86, 0xb5, 0xad, 0x6f, 0x81, 0xc5, 0x85, 0x4c, 0xc0, 0x3c, 0xe8, - 0xa9, 0xd9, 0x1a, 0x0f, 0x37, 0x4d, 0xbc, 0x04, 0x8d, 0x37, 0x27, 0x0b, 0x8c, 0x29, 0x31, 0x43, - 0x7a, 0xb7, 0x81, 0x51, 0x0f, 0x25, 0x20, 0x3e, 0xdc, 0x7a, 0xe3, 0xca, 0x81, 0xef, 0x42, 0x39, - 0x92, 0x94, 0x07, 0x74, 0xd0, 0xa0, 0x70, 0x55, 0x78, 0x4e, 0xa6, 0x4f, 0x88, 0x70, 0xa3, 0x16, - 0xb4, 0xcd, 0x69, 0x2c, 0x7e, 0x9b, 0xd3, 0xdc, 0xe9, 0x2d, 0x6e, 0x12, 0x29, 0x9a, 0x93, 0xe9, - 0x73, 0xfd, 0xfb, 0xda, 0x24, 0x3f, 0xd6, 0x26, 0xf9, 0xb5, 0x36, 0xc9, 0xb7, 0xdf, 0xe6, 0x0d, - 0xb7, 0x27, 0xff, 0x18, 0x67, 0x7f, 0x03, 0x00, 0x00, 0xff, 
0xff, 0x9c, 0xe6, 0x7c, 0x66, 0xa9, - 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ElectionClient is the client API for Election service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ElectionClient interface { - // Campaign waits to acquire leadership in an election, returning a LeaderKey - // representing the leadership if successful. The LeaderKey can then be used - // to issue new values on the election, transactionally guard API requests on - // leadership still being held, and resign from the election. - Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) - // Proclaim updates the leader's posted value with a new value. - Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) - // Leader returns the current election proclamation, if any. - Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) - // Observe streams election proclamations in-order as made by the election's - // elected leaders. - Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) - // Resign releases election leadership so other campaigners may acquire - // leadership on the election. 
- Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) -} - -type electionClient struct { - cc *grpc.ClientConn -} - -func NewElectionClient(cc *grpc.ClientConn) ElectionClient { - return &electionClient{cc} -} - -func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) { - out := new(CampaignResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) { - out := new(ProclaimResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) { - out := new(LeaderResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) { - stream, err := c.cc.NewStream(ctx, &_Election_serviceDesc.Streams[0], "/v3electionpb.Election/Observe", opts...) 
- if err != nil { - return nil, err - } - x := &electionObserveClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Election_ObserveClient interface { - Recv() (*LeaderResponse, error) - grpc.ClientStream -} - -type electionObserveClient struct { - grpc.ClientStream -} - -func (x *electionObserveClient) Recv() (*LeaderResponse, error) { - m := new(LeaderResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) { - out := new(ResignResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ElectionServer is the server API for Election service. -type ElectionServer interface { - // Campaign waits to acquire leadership in an election, returning a LeaderKey - // representing the leadership if successful. The LeaderKey can then be used - // to issue new values on the election, transactionally guard API requests on - // leadership still being held, and resign from the election. - Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error) - // Proclaim updates the leader's posted value with a new value. - Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error) - // Leader returns the current election proclamation, if any. - Leader(context.Context, *LeaderRequest) (*LeaderResponse, error) - // Observe streams election proclamations in-order as made by the election's - // elected leaders. - Observe(*LeaderRequest, Election_ObserveServer) error - // Resign releases election leadership so other campaigners may acquire - // leadership on the election. 
- Resign(context.Context, *ResignRequest) (*ResignResponse, error) -} - -// UnimplementedElectionServer can be embedded to have forward compatible implementations. -type UnimplementedElectionServer struct { -} - -func (*UnimplementedElectionServer) Campaign(ctx context.Context, req *CampaignRequest) (*CampaignResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Campaign not implemented") -} -func (*UnimplementedElectionServer) Proclaim(ctx context.Context, req *ProclaimRequest) (*ProclaimResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Proclaim not implemented") -} -func (*UnimplementedElectionServer) Leader(ctx context.Context, req *LeaderRequest) (*LeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Leader not implemented") -} -func (*UnimplementedElectionServer) Observe(req *LeaderRequest, srv Election_ObserveServer) error { - return status.Errorf(codes.Unimplemented, "method Observe not implemented") -} -func (*UnimplementedElectionServer) Resign(ctx context.Context, req *ResignRequest) (*ResignResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Resign not implemented") -} - -func RegisterElectionServer(s *grpc.Server, srv ElectionServer) { - s.RegisterService(&_Election_serviceDesc, srv) -} - -func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CampaignRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElectionServer).Campaign(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Campaign", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Election_Proclaim_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ProclaimRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElectionServer).Proclaim(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Proclaim", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElectionServer).Leader(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Leader", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Leader(ctx, req.(*LeaderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(LeaderRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ElectionServer).Observe(m, &electionObserveServer{stream}) -} - -type Election_ObserveServer interface { - Send(*LeaderResponse) error - grpc.ServerStream -} - -type electionObserveServer struct { - grpc.ServerStream -} - -func (x *electionObserveServer) Send(m *LeaderResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResignRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(ElectionServer).Resign(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3electionpb.Election/Resign", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Election_serviceDesc = grpc.ServiceDesc{ - ServiceName: "v3electionpb.Election", - HandlerType: (*ElectionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Campaign", - Handler: _Election_Campaign_Handler, - }, - { - MethodName: "Proclaim", - Handler: _Election_Proclaim_Handler, - }, - { - MethodName: "Leader", - Handler: _Election_Leader_Handler, - }, - { - MethodName: "Resign", - Handler: _Election_Resign_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Observe", - Handler: _Election_Observe_Handler, - ServerStreams: true, - }, - }, - Metadata: "v3election.proto", -} - -func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CampaignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - } - if m.Lease != 0 { - i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*CampaignResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CampaignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Leader != nil { - { - size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaderKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaderKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Lease != 0 { - i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x20 - } - if m.Rev != 0 { - i = encodeVarintV3Election(dAtA, i, uint64(m.Rev)) - i-- - dAtA[i] = 0x18 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= 
len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Kv != nil { - { - size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResignRequest) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Leader != nil { - { - size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResignResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ProclaimRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if m.Leader != nil { - { - size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProclaimResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int { - offset -= sovV3Election(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CampaignRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovV3Election(uint64(m.Lease)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + 
sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CampaignResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Leader != nil { - l = m.Leader.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaderKey) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Rev != 0 { - n += 1 + sovV3Election(uint64(m.Rev)) - } - if m.Lease != 0 { - n += 1 + sovV3Election(uint64(m.Lease)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.Kv != nil { - l = m.Kv.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResignRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Leader != nil { - l = m.Leader.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResignResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != 
nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ProclaimRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Leader != nil { - l = m.Leader.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ProclaimResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Election(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovV3Election(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozV3Election(x uint64) (n int) { - return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CampaignRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := 
iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CampaignResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Leader == nil { - m.Leader = &LeaderKey{} - } - if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaderKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
- if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType) - } - m.Rev = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Rev |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex 
< 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kv == nil { - m.Kv = &mvccpb.KeyValue{} - } - if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResignRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Leader == nil { - m.Leader = &LeaderKey{} - } - if err 
:= m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResignResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, 
err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Leader == nil { - m.Leader = &LeaderKey{} - } - if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Election - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - if msglen < 0 { - return ErrInvalidLengthV3Election - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Election(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Election - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipV3Election(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Election - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Election - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Election - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthV3Election - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupV3Election - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthV3Election - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupV3Election = fmt.Errorf("proto: unexpected end of group") -) diff --git a/server/etcdserver/api/v3lock/lock.go b/server/etcdserver/api/v3lock/lock.go deleted file mode 100644 index c8ef56ebaeb..00000000000 --- a/server/etcdserver/api/v3lock/lock.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3lock - -import ( - "context" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" -) - -type lockServer struct { - c *clientv3.Client -} - -func NewLockServer(c *clientv3.Client) v3lockpb.LockServer { - return &lockServer{c} -} - -func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { - s, err := concurrency.NewSession( - ls.c, - concurrency.WithLease(clientv3.LeaseID(req.Lease)), - concurrency.WithContext(ctx), - ) - if err != nil { - return nil, err - } - s.Orphan() - m := concurrency.NewMutex(s, string(req.Name)) - if err = m.Lock(ctx); err != nil { - return nil, err - } - return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil -} - -func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { - resp, err := ls.c.Delete(ctx, string(req.Key)) - if err != nil { - return nil, err - } - return &v3lockpb.UnlockResponse{Header: resp.Header}, nil -} diff --git a/server/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/server/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go deleted file mode 100644 index 4282ddc85e0..00000000000 --- a/server/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go +++ /dev/null @@ -1,1141 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: v3lock.proto - -package v3lockpb - -import ( - context "context" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - etcdserverpb "go.etcd.io/etcd/api/v3/etcdserverpb" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type LockRequest struct { - // name is the identifier for the distributed shared lock to be acquired. - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // lease is the ID of the lease that will be attached to ownership of the - // lock. If the lease expires or is revoked and currently holds the lock, - // the lock is automatically released. Calls to Lock with the same lease will - // be treated as a single acquisition; locking twice with the same lease is a - // no-op. - Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LockRequest) Reset() { *m = LockRequest{} } -func (m *LockRequest) String() string { return proto.CompactTextString(m) } -func (*LockRequest) ProtoMessage() {} -func (*LockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{0} -} -func (m *LockRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LockRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LockRequest.Merge(m, src) -} -func (m *LockRequest) XXX_Size() int { - return m.Size() -} -func (m *LockRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_LockRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LockRequest proto.InternalMessageInfo - -func (m *LockRequest) GetName() []byte { - if m != nil { - return m.Name - } - return nil -} - -func (m *LockRequest) GetLease() int64 { - if m != nil { - return m.Lease - } - return 0 -} - -type LockResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - // key is a key that will exist on etcd for the duration that the Lock caller - // owns the lock. Users should not modify this key or the lock may exhibit - // undefined behavior. - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LockResponse) Reset() { *m = LockResponse{} } -func (m *LockResponse) String() string { return proto.CompactTextString(m) } -func (*LockResponse) ProtoMessage() {} -func (*LockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{1} -} -func (m *LockResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LockResponse.Merge(m, src) -} -func (m *LockResponse) XXX_Size() int { - return m.Size() -} -func (m *LockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LockResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LockResponse proto.InternalMessageInfo - -func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *LockResponse) GetKey() []byte { - if m != nil { - return 
m.Key - } - return nil -} - -type UnlockRequest struct { - // key is the lock ownership key granted by Lock. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } -func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } -func (*UnlockRequest) ProtoMessage() {} -func (*UnlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{2} -} -func (m *UnlockRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UnlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UnlockRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnlockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnlockRequest.Merge(m, src) -} -func (m *UnlockRequest) XXX_Size() int { - return m.Size() -} -func (m *UnlockRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UnlockRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UnlockRequest proto.InternalMessageInfo - -func (m *UnlockRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -type UnlockResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } -func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } -func (*UnlockResponse) ProtoMessage() {} -func (*UnlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{3} -} -func (m *UnlockResponse) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *UnlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UnlockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnlockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnlockResponse.Merge(m, src) -} -func (m *UnlockResponse) XXX_Size() int { - return m.Size() -} -func (m *UnlockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UnlockResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UnlockResponse proto.InternalMessageInfo - -func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest") - proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse") - proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest") - proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse") -} - -func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) } - -var fileDescriptor_52389b3e2f253201 = []byte{ - // 330 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, - 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, - 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x3e, 0xb5, 0x24, 0x39, - 0x45, 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2, - 0x2f, 0x2a, 0x48, 0x86, 0x2a, 0x90, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x2b, 0x49, 0xcc, - 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, 0xc8, 0x2a, 0x99, 0x73, 0x71, 0xfb, - 0xe4, 0x27, 0x67, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25, - 0xe6, 0xa6, 
0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x39, - 0xa9, 0x89, 0xc5, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x10, 0x8e, 0x52, 0x18, 0x17, - 0x0f, 0x44, 0x63, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x09, 0x17, 0x5b, 0x46, 0x6a, 0x62, - 0x4a, 0x6a, 0x11, 0x58, 0x2f, 0xb7, 0x91, 0x8c, 0x1e, 0xb2, 0x7b, 0xf4, 0x60, 0xea, 0x3c, 0xc0, - 0x6a, 0x82, 0xa0, 0x6a, 0x85, 0x04, 0xb8, 0x98, 0xb3, 0x53, 0x2b, 0xc1, 0x26, 0xf3, 0x04, 0x81, - 0x98, 0x4a, 0x8a, 0x5c, 0xbc, 0xa1, 0x79, 0x39, 0x48, 0x4e, 0x82, 0x2a, 0x61, 0x44, 0x28, 0x71, - 0xe3, 0xe2, 0x83, 0x29, 0xa1, 0xc4, 0x72, 0xa3, 0x0d, 0x8c, 0x5c, 0x2c, 0x20, 0x3f, 0x08, 0xf9, - 0x43, 0x69, 0x51, 0x3d, 0x58, 0x60, 0xeb, 0x21, 0x05, 0x8a, 0x94, 0x18, 0xba, 0x30, 0xc4, 0x34, - 0x25, 0x89, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x09, 0x29, 0xf1, 0xea, 0x97, 0x19, 0xeb, 0x83, 0x14, - 0x80, 0x09, 0x2b, 0x46, 0x2d, 0xa1, 0x70, 0x2e, 0x36, 0x88, 0x0b, 0x85, 0xc4, 0x11, 0x7a, 0x51, - 0xbc, 0x25, 0x25, 0x81, 0x29, 0x01, 0x35, 0x56, 0x0a, 0x6c, 0xac, 0x88, 0x12, 0x3f, 0xdc, 0xd8, - 0xd2, 0x3c, 0xa8, 0xc1, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, - 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, 0x78, 0x34, 0x06, 0x04, 0x00, 0x00, - 0xff, 0xff, 0x4a, 0x4d, 0xca, 0xbb, 0x36, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LockClient is the client API for Lock service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LockClient interface { - // Lock acquires a distributed shared lock on a given named lock. 
- // On success, it will return a unique key that exists so long as the - // lock is held by the caller. This key can be used in conjunction with - // transactions to safely ensure updates to etcd only occur while holding - // lock ownership. The lock is held until Unlock is called on the key or the - // lease associate with the owner expires. - Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) - // Unlock takes a key returned by Lock and releases the hold on lock. The - // next Lock caller waiting for the lock will then be woken up and given - // ownership of the lock. - Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) -} - -type lockClient struct { - cc *grpc.ClientConn -} - -func NewLockClient(cc *grpc.ClientConn) LockClient { - return &lockClient{cc} -} - -func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { - out := new(LockResponse) - err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { - out := new(UnlockResponse) - err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// LockServer is the server API for Lock service. -type LockServer interface { - // Lock acquires a distributed shared lock on a given named lock. - // On success, it will return a unique key that exists so long as the - // lock is held by the caller. This key can be used in conjunction with - // transactions to safely ensure updates to etcd only occur while holding - // lock ownership. The lock is held until Unlock is called on the key or the - // lease associate with the owner expires. 
- Lock(context.Context, *LockRequest) (*LockResponse, error) - // Unlock takes a key returned by Lock and releases the hold on lock. The - // next Lock caller waiting for the lock will then be woken up and given - // ownership of the lock. - Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) -} - -// UnimplementedLockServer can be embedded to have forward compatible implementations. -type UnimplementedLockServer struct { -} - -func (*UnimplementedLockServer) Lock(ctx context.Context, req *LockRequest) (*LockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Lock not implemented") -} -func (*UnimplementedLockServer) Unlock(ctx context.Context, req *UnlockRequest) (*UnlockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Unlock not implemented") -} - -func RegisterLockServer(s *grpc.Server, srv LockServer) { - s.RegisterService(&_Lock_serviceDesc, srv) -} - -func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LockServer).Lock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3lockpb.Lock/Lock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LockServer).Lock(ctx, req.(*LockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UnlockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LockServer).Unlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v3lockpb.Lock/Unlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ - return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Lock_serviceDesc = grpc.ServiceDesc{ - ServiceName: "v3lockpb.Lock", - HandlerType: (*LockServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Lock", - Handler: _Lock_Lock_Handler, - }, - { - MethodName: "Unlock", - Handler: _Lock_Unlock_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "v3lock.proto", -} - -func (m *LockRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Lease != 0 { - i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LockResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintV3Lock(dAtA, i, 
uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Lock(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Lock(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int { - offset -= sovV3Lock(v) - base := offset - for v >= 1<<7 
{ - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *LockRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovV3Lock(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovV3Lock(uint64(m.Lease)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LockResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Lock(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovV3Lock(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UnlockRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovV3Lock(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UnlockResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovV3Lock(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovV3Lock(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozV3Lock(x uint64) (n int) { - return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LockRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LockRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { 
- return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LockResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LockResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return 
ErrInvalidLengthV3Lock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UnlockRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UnlockResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowV3Lock - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthV3Lock - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &etcdserverpb.ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipV3Lock(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthV3Lock - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipV3Lock(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Lock - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Lock - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowV3Lock - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthV3Lock - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupV3Lock - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthV3Lock - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupV3Lock = fmt.Errorf("proto: unexpected end of group") -) diff --git a/server/etcdserver/api/v3rpc/auth.go 
b/server/etcdserver/api/v3rpc/auth.go deleted file mode 100644 index 6c5db76cb8e..00000000000 --- a/server/etcdserver/api/v3rpc/auth.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver" -) - -type AuthServer struct { - authenticator etcdserver.Authenticator -} - -func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer { - return &AuthServer{authenticator: s} -} - -func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := as.authenticator.AuthEnable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := as.authenticator.AuthDisable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) { - resp, err := as.authenticator.AuthStatus(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - resp, err := 
as.authenticator.Authenticate(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := as.authenticator.RoleAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := as.authenticator.RoleDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := as.authenticator.RoleGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := as.authenticator.RoleList(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := as.authenticator.RoleRevokePermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := as.authenticator.RoleGrantPermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := as.authenticator.UserAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) 
(*pb.AuthUserDeleteResponse, error) { - resp, err := as.authenticator.UserDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := as.authenticator.UserGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := as.authenticator.UserList(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := as.authenticator.UserGrantRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := as.authenticator.UserRevokeRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := as.authenticator.UserChangePassword(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -type AuthGetter interface { - AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) - AuthStore() auth.AuthStore -} - -type AuthAdmin struct { - ag AuthGetter -} - -// isPermitted verifies the user has admin privilege. -// Only users with "root" role are permitted. 
-func (aa *AuthAdmin) isPermitted(ctx context.Context) error { - authInfo, err := aa.ag.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return aa.ag.AuthStore().IsAdminPermitted(authInfo) -} diff --git a/server/etcdserver/api/v3rpc/grpc.go b/server/etcdserver/api/v3rpc/grpc.go deleted file mode 100644 index 349ebea4007..00000000000 --- a/server/etcdserver/api/v3rpc/grpc.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "crypto/tls" - "math" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/v3/credentials" - "go.etcd.io/etcd/server/v3/etcdserver" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" -) - -const ( - grpcOverheadBytes = 512 * 1024 - maxSendBytes = math.MaxInt32 -) - -func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnaryServerInterceptor, gopts ...grpc.ServerOption) *grpc.Server { - var opts []grpc.ServerOption - opts = append(opts, grpc.CustomCodec(&codec{})) - if tls != nil { - bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls}) - opts = append(opts, grpc.Creds(bundle.TransportCredentials())) - } - chainUnaryInterceptors := []grpc.UnaryServerInterceptor{ - newLogUnaryInterceptor(s), - newUnaryInterceptor(s), - grpc_prometheus.UnaryServerInterceptor, - } - if interceptor != nil { - chainUnaryInterceptors = append(chainUnaryInterceptors, interceptor) - } - - chainStreamInterceptors := []grpc.StreamServerInterceptor{ - newStreamInterceptor(s), - grpc_prometheus.StreamServerInterceptor, - } - - if s.Cfg.ExperimentalEnableDistributedTracing { - chainUnaryInterceptors = append(chainUnaryInterceptors, otelgrpc.UnaryServerInterceptor(s.Cfg.ExperimentalTracerOptions...)) - chainStreamInterceptors = append(chainStreamInterceptors, otelgrpc.StreamServerInterceptor(s.Cfg.ExperimentalTracerOptions...)) - - } - - opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(chainUnaryInterceptors...))) - opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(chainStreamInterceptors...))) - - opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) - 
opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) - opts = append(opts, grpc.MaxConcurrentStreams(s.Cfg.MaxConcurrentStreams)) - - grpcServer := grpc.NewServer(append(opts, gopts...)...) - - pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) - pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) - pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) - pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) - pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) - pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) - - // server should register all the services manually - // use empty service name for all etcd services' health status, - // see https://github.com/grpc/grpc/blob/master/doc/health-checking.md for more - hsrv := health.NewServer() - hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - healthpb.RegisterHealthServer(grpcServer, hsrv) - - // set zero values for metrics registered for this grpc server - grpc_prometheus.Register(grpcServer) - - return grpcServer -} diff --git a/server/etcdserver/api/v3rpc/interceptor.go b/server/etcdserver/api/v3rpc/interceptor.go deleted file mode 100644 index c7d1c6bdcdf..00000000000 --- a/server/etcdserver/api/v3rpc/interceptor.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "context" - "sync" - "time" - "unicode/utf8" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/raft/v3" - - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -const ( - maxNoLeaderCnt = 3 - snapshotMethod = "/etcdserverpb.Maintenance/Snapshot" -) - -type streamsMap struct { - mu sync.Mutex - streams map[grpc.ServerStream]struct{} -} - -func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return nil, rpctypes.ErrGRPCNotCapable - } - - if s.IsMemberExist(s.MemberId()) && s.IsLearner() && !isRPCSupportedForLearner(req) { - return nil, rpctypes.ErrGRPCNotSupportedForLearner - } - - md, ok := metadata.FromIncomingContext(ctx) - if ok { - ver, vs := "unknown", md.Get(rpctypes.MetadataClientAPIVersionKey) - if len(vs) > 0 { - ver = vs[0] - } - if !utf8.ValidString(ver) { - return nil, rpctypes.ErrGRPCInvalidClientAPIVersion - } - clientRequests.WithLabelValues("unary", ver).Inc() - - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return nil, rpctypes.ErrGRPCNoLeader - } - } - } - - return handler(ctx, req) - } -} - -func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - startTime := time.Now() - resp, err := handler(ctx, req) - lg := s.Logger() - if lg != nil { // acquire stats if debug level is enabled or RequestInfo is expensive - 
defer logUnaryRequestStats(ctx, lg, s.Cfg.WarningUnaryRequestDuration, info, startTime, req, resp) - } - return resp, err - } -} - -func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, warnLatency time.Duration, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) { - duration := time.Since(startTime) - var enabledDebugLevel, expensiveRequest bool - if lg.Core().Enabled(zap.DebugLevel) { - enabledDebugLevel = true - } - if duration > warnLatency { - expensiveRequest = true - } - if !enabledDebugLevel && !expensiveRequest { - return - } - remote := "No remote client info." - peerInfo, ok := peer.FromContext(ctx) - if ok { - remote = peerInfo.Addr.String() - } - responseType := info.FullMethod - var reqCount, respCount int64 - var reqSize, respSize int - var reqContent string - switch _resp := resp.(type) { - case *pb.RangeResponse: - _req, ok := req.(*pb.RangeRequest) - if ok { - reqCount = 0 - reqSize = _req.Size() - reqContent = _req.String() - } - if _resp != nil { - respCount = _resp.GetCount() - respSize = _resp.Size() - } - case *pb.PutResponse: - _req, ok := req.(*pb.PutRequest) - if ok { - reqCount = 1 - reqSize = _req.Size() - reqContent = pb.NewLoggablePutRequest(_req).String() - // redact value field from request content, see PR #9821 - } - if _resp != nil { - respCount = 0 - respSize = _resp.Size() - } - case *pb.DeleteRangeResponse: - _req, ok := req.(*pb.DeleteRangeRequest) - if ok { - reqCount = 0 - reqSize = _req.Size() - reqContent = _req.String() - } - if _resp != nil { - respCount = _resp.GetDeleted() - respSize = _resp.Size() - } - case *pb.TxnResponse: - _req, ok := req.(*pb.TxnRequest) - if ok && _resp != nil { - if _resp.GetSucceeded() { // determine the 'actual' count and size of request based on success or failure - reqCount = int64(len(_req.GetSuccess())) - reqSize = 0 - for _, r := range _req.GetSuccess() { - reqSize += r.Size() - } - } else { - reqCount = int64(len(_req.GetFailure())) - reqSize = 0 
- for _, r := range _req.GetFailure() { - reqSize += r.Size() - } - } - reqContent = pb.NewLoggableTxnRequest(_req).String() - // redact value field from request content, see PR #9821 - } - if _resp != nil { - respCount = 0 - respSize = _resp.Size() - } - default: - reqCount = -1 - reqSize = -1 - respCount = -1 - respSize = -1 - } - - if enabledDebugLevel { - logGenericRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent) - } else if expensiveRequest { - logExpensiveRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent) - } -} - -func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string, - reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) { - lg.Debug("request stats", - zap.Time("start time", startTime), - zap.Duration("time spent", duration), - zap.String("remote", remote), - zap.String("response type", responseType), - zap.Int64("request count", reqCount), - zap.Int("request size", reqSize), - zap.Int64("response count", respCount), - zap.Int("response size", respSize), - zap.String("request content", reqContent), - ) -} - -func logExpensiveRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string, - reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) { - lg.Warn("request stats", - zap.Time("start time", startTime), - zap.Duration("time spent", duration), - zap.String("remote", remote), - zap.String("response type", responseType), - zap.Int64("request count", reqCount), - zap.Int("request size", reqSize), - zap.Int64("response count", respCount), - zap.Int("response size", respSize), - zap.String("request content", reqContent), - ) -} - -func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor { - smap := monitorLeader(s) - - return func(srv interface{}, ss 
grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return rpctypes.ErrGRPCNotCapable - } - - if s.IsMemberExist(s.MemberId()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot - return rpctypes.ErrGRPCNotSupportedForLearner - } - - md, ok := metadata.FromIncomingContext(ss.Context()) - if ok { - ver, vs := "unknown", md.Get(rpctypes.MetadataClientAPIVersionKey) - if len(vs) > 0 { - ver = vs[0] - } - if !utf8.ValidString(ver) { - return rpctypes.ErrGRPCInvalidClientAPIVersion - } - clientRequests.WithLabelValues("stream", ver).Inc() - - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return rpctypes.ErrGRPCNoLeader - } - - ctx := newCancellableContext(ss.Context()) - ss = serverStreamWithCtx{ctx: ctx, ServerStream: ss} - - smap.mu.Lock() - smap.streams[ss] = struct{}{} - smap.mu.Unlock() - - defer func() { - smap.mu.Lock() - delete(smap.streams, ss) - smap.mu.Unlock() - // TODO: investigate whether the reason for cancellation here is useful to know - ctx.Cancel(nil) - }() - } - } - - return handler(srv, ss) - } -} - -// cancellableContext wraps a context with new cancellable context that allows a -// specific cancellation error to be preserved and later retrieved using the -// Context.Err() function. This is so downstream context users can disambiguate -// the reason for the cancellation which could be from the client (for example) -// or from this interceptor code. 
-type cancellableContext struct { - context.Context - - lock sync.RWMutex - cancel context.CancelFunc - cancelReason error -} - -func newCancellableContext(parent context.Context) *cancellableContext { - ctx, cancel := context.WithCancel(parent) - return &cancellableContext{ - Context: ctx, - cancel: cancel, - } -} - -// Cancel stores the cancellation reason and then delegates to context.WithCancel -// against the parent context. -func (c *cancellableContext) Cancel(reason error) { - c.lock.Lock() - c.cancelReason = reason - c.lock.Unlock() - c.cancel() -} - -// Err will return the preserved cancel reason error if present, and will -// otherwise return the underlying error from the parent context. -func (c *cancellableContext) Err() error { - c.lock.RLock() - defer c.lock.RUnlock() - if c.cancelReason != nil { - return c.cancelReason - } - return c.Context.Err() -} - -type serverStreamWithCtx struct { - grpc.ServerStream - - // ctx is used so that we can preserve a reason for cancellation. - ctx *cancellableContext -} - -func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx } - -func monitorLeader(s *etcdserver.EtcdServer) *streamsMap { - smap := &streamsMap{ - streams: make(map[grpc.ServerStream]struct{}), - } - - s.GoAttach(func() { - election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond - noLeaderCnt := 0 - - for { - select { - case <-s.StoppingNotify(): - return - case <-time.After(election): - if s.Leader() == types.ID(raft.None) { - noLeaderCnt++ - } else { - noLeaderCnt = 0 - } - - // We are more conservative on canceling existing streams. Reconnecting streams - // cost much more than just rejecting new requests. So we wait until the member - // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams. 
- if noLeaderCnt >= maxNoLeaderCnt { - smap.mu.Lock() - for ss := range smap.streams { - if ssWithCtx, ok := ss.(serverStreamWithCtx); ok { - ssWithCtx.ctx.Cancel(rpctypes.ErrGRPCNoLeader) - <-ss.Context().Done() - } - } - smap.streams = make(map[grpc.ServerStream]struct{}) - smap.mu.Unlock() - } - } - } - }) - - return smap -} diff --git a/server/etcdserver/api/v3rpc/key.go b/server/etcdserver/api/v3rpc/key.go deleted file mode 100644 index 2c1de2a90de..00000000000 --- a/server/etcdserver/api/v3rpc/key.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3rpc implements etcd v3 RPC system based on gRPC. -package v3rpc - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/pkg/v3/adt" - "go.etcd.io/etcd/server/v3/etcdserver" -) - -type kvServer struct { - hdr header - kv etcdserver.RaftKV - // maxTxnOps is the max operations per txn. - // e.g suppose maxTxnOps = 128. - // Txn.Success can have at most 128 operations, - // and Txn.Failure can have at most 128 operations. 
- maxTxnOps uint -} - -func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps} -} - -func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := checkRangeRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Range(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := checkPutRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Put(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := checkDeleteRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.DeleteRange(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil { - return nil, err - } - // check for forbidden put/del overlaps after checking request to avoid quadratic blowup - if _, _, err := checkIntervals(r.Success); err != nil { - return nil, err - } - if _, _, err := checkIntervals(r.Failure); err != nil { - return nil, err - } - - resp, err := s.kv.Txn(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - resp, err := s.kv.Compact(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - s.hdr.fill(resp.Header) - return resp, nil -} - -func checkRangeRequest(r *pb.RangeRequest) error { - if len(r.Key) == 0 { - return 
rpctypes.ErrGRPCEmptyKey - } - - if _, ok := pb.RangeRequest_SortOrder_name[int32(r.SortOrder)]; !ok { - return rpctypes.ErrGRPCInvalidSortOption - } - - if _, ok := pb.RangeRequest_SortTarget_name[int32(r.SortTarget)]; !ok { - return rpctypes.ErrGRPCInvalidSortOption - } - - return nil -} - -func checkPutRequest(r *pb.PutRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - if r.IgnoreValue && len(r.Value) != 0 { - return rpctypes.ErrGRPCValueProvided - } - if r.IgnoreLease && r.Lease != 0 { - return rpctypes.ErrGRPCLeaseProvided - } - return nil -} - -func checkDeleteRequest(r *pb.DeleteRangeRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error { - opc := len(r.Compare) - if opc < len(r.Success) { - opc = len(r.Success) - } - if opc < len(r.Failure) { - opc = len(r.Failure) - } - if opc > maxTxnOps { - return rpctypes.ErrGRPCTooManyOps - } - - for _, c := range r.Compare { - if len(c.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - } - for _, u := range r.Success { - if err := checkRequestOp(u, maxTxnOps-opc); err != nil { - return err - } - } - for _, u := range r.Failure { - if err := checkRequestOp(u, maxTxnOps-opc); err != nil { - return err - } - } - - return nil -} - -// checkIntervals tests whether puts and deletes overlap for a list of ops. If -// there is an overlap, returns an error. If no overlap, return put and delete -// sets for recursive evaluation. 
-func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) { - dels := adt.NewIntervalTree() - - // collect deletes from this level; build first to check lower level overlapped puts - for _, req := range reqs { - tv, ok := req.Request.(*pb.RequestOp_RequestDeleteRange) - if !ok { - continue - } - dreq := tv.RequestDeleteRange - if dreq == nil { - continue - } - var iv adt.Interval - if len(dreq.RangeEnd) != 0 { - iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd)) - } else { - iv = adt.NewStringAffinePoint(string(dreq.Key)) - } - dels.Insert(iv, struct{}{}) - } - - // collect children puts/deletes - puts := make(map[string]struct{}) - for _, req := range reqs { - tv, ok := req.Request.(*pb.RequestOp_RequestTxn) - if !ok { - continue - } - putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success) - if err != nil { - return nil, dels, err - } - putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure) - if err != nil { - return nil, dels, err - } - for k := range putsThen { - if _, ok := puts[k]; ok { - return nil, dels, rpctypes.ErrGRPCDuplicateKey - } - if dels.Intersects(adt.NewStringAffinePoint(k)) { - return nil, dels, rpctypes.ErrGRPCDuplicateKey - } - puts[k] = struct{}{} - } - for k := range putsElse { - if _, ok := puts[k]; ok { - // if key is from putsThen, overlap is OK since - // either then/else are mutually exclusive - if _, isSafe := putsThen[k]; !isSafe { - return nil, dels, rpctypes.ErrGRPCDuplicateKey - } - } - if dels.Intersects(adt.NewStringAffinePoint(k)) { - return nil, dels, rpctypes.ErrGRPCDuplicateKey - } - puts[k] = struct{}{} - } - dels.Union(delsThen, adt.NewStringAffineInterval("\x00", "")) - dels.Union(delsElse, adt.NewStringAffineInterval("\x00", "")) - } - - // collect and check this level's puts - for _, req := range reqs { - tv, ok := req.Request.(*pb.RequestOp_RequestPut) - if !ok || tv.RequestPut == nil { - continue - } - k := string(tv.RequestPut.Key) - if _, ok := 
puts[k]; ok { - return nil, dels, rpctypes.ErrGRPCDuplicateKey - } - if dels.Intersects(adt.NewStringAffinePoint(k)) { - return nil, dels, rpctypes.ErrGRPCDuplicateKey - } - puts[k] = struct{}{} - } - return puts, dels, nil -} - -func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error { - // TODO: ensure only one of the field is set. - switch uv := u.Request.(type) { - case *pb.RequestOp_RequestRange: - return checkRangeRequest(uv.RequestRange) - case *pb.RequestOp_RequestPut: - return checkPutRequest(uv.RequestPut) - case *pb.RequestOp_RequestDeleteRange: - return checkDeleteRequest(uv.RequestDeleteRange) - case *pb.RequestOp_RequestTxn: - return checkTxnRequest(uv.RequestTxn, maxTxnOps) - default: - // empty op / nil entry - return rpctypes.ErrGRPCKeyNotFound - } -} diff --git a/server/etcdserver/api/v3rpc/key_test.go b/server/etcdserver/api/v3rpc/key_test.go deleted file mode 100644 index a585ee89cc0..00000000000 --- a/server/etcdserver/api/v3rpc/key_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "testing" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" -) - -func TestCheckRangeRequest(t *testing.T) { - rangeReqs := []struct { - sortOrder pb.RangeRequest_SortOrder - sortTarget pb.RangeRequest_SortTarget - expectedError error - }{ - { - sortOrder: pb.RangeRequest_ASCEND, - sortTarget: pb.RangeRequest_CREATE, - expectedError: nil, - }, - { - sortOrder: pb.RangeRequest_ASCEND, - sortTarget: 100, - expectedError: rpctypes.ErrGRPCInvalidSortOption, - }, - { - sortOrder: 200, - sortTarget: pb.RangeRequest_MOD, - expectedError: rpctypes.ErrGRPCInvalidSortOption, - }, - } - - for _, req := range rangeReqs { - rangeReq := pb.RangeRequest{ - Key: []byte{1, 2, 3}, - SortOrder: req.sortOrder, - SortTarget: req.sortTarget, - } - - actualRet := checkRangeRequest(&rangeReq) - if getError(actualRet) != getError(req.expectedError) { - t.Errorf("expected sortOrder (%d) and sortTarget (%d) to be %q, but got %q", - req.sortOrder, req.sortTarget, getError(req.expectedError), getError(actualRet)) - } - } -} - -func getError(err error) string { - if err == nil { - return "" - } - - return err.Error() -} diff --git a/server/etcdserver/api/v3rpc/lease.go b/server/etcdserver/api/v3rpc/lease.go deleted file mode 100644 index e123dd2a37c..00000000000 --- a/server/etcdserver/api/v3rpc/lease.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "context" - "io" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/lease" - - "go.uber.org/zap" -) - -type LeaseServer struct { - lg *zap.Logger - hdr header - le etcdserver.Lessor -} - -func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - srv := &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)} - if srv.lg == nil { - srv.lg = zap.NewNop() - } - return srv -} - -func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - resp, err := ls.le.LeaseGrant(ctx, cr) - - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := ls.le.LeaseRevoke(ctx, rr) - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil && err != lease.ErrLeaseNotFound { - return nil, togRPCError(err) - } - if err == lease.ErrLeaseNotFound { - resp = &pb.LeaseTimeToLiveResponse{ - Header: &pb.ResponseHeader{}, - ID: rr.ID, - TTL: -1, - } - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { - resp, err := ls.le.LeaseLeases(ctx, rr) - if err != nil && err != lease.ErrLeaseNotFound { - return nil, togRPCError(err) - } - if err == lease.ErrLeaseNotFound { - resp = &pb.LeaseLeasesResponse{ - Header: &pb.ResponseHeader{}, - Leases: []*pb.LeaseStatus{}, - } - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseKeepAlive(stream 
pb.Lease_LeaseKeepAliveServer) (err error) { - errc := make(chan error, 1) - go func() { - errc <- ls.leaseKeepAlive(stream) - }() - select { - case err = <-errc: - case <-stream.Context().Done(): - // the only server-side cancellation is noleader for now. - err = stream.Context().Err() - if err == context.Canceled { - err = rpctypes.ErrGRPCNoLeader - } - } - return err -} - -func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - for { - req, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - if isClientCtxErr(stream.Context().Err(), err) { - ls.lg.Debug("failed to receive lease keepalive request from gRPC stream", zap.Error(err)) - } else { - ls.lg.Warn("failed to receive lease keepalive request from gRPC stream", zap.Error(err)) - streamFailures.WithLabelValues("receive", "lease-keepalive").Inc() - } - return err - } - - // Create header before we sent out the renew request. - // This can make sure that the revision is strictly smaller or equal to - // when the keepalive happened at the local server (when the local server is the leader) - // or remote leader. - // Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded - // at rev 4. 
- resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}} - ls.hdr.fill(resp.Header) - - ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID)) - if err == lease.ErrLeaseNotFound { - err = nil - ttl = 0 - } - - if err != nil { - return togRPCError(err) - } - - resp.TTL = ttl - err = stream.Send(resp) - if err != nil { - if isClientCtxErr(stream.Context().Err(), err) { - ls.lg.Debug("failed to send lease keepalive response to gRPC stream", zap.Error(err)) - } else { - ls.lg.Warn("failed to send lease keepalive response to gRPC stream", zap.Error(err)) - streamFailures.WithLabelValues("send", "lease-keepalive").Inc() - } - return err - } - } -} diff --git a/server/etcdserver/api/v3rpc/maintenance.go b/server/etcdserver/api/v3rpc/maintenance.go deleted file mode 100644 index 3fcae8d7d62..00000000000 --- a/server/etcdserver/api/v3rpc/maintenance.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "context" - "crypto/sha256" - "io" - "time" - - "github.com/dustin/go-humanize" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/apply" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/raft/v3" - - "go.uber.org/zap" -) - -type KVGetter interface { - KV() mvcc.WatchableKV -} - -type BackendGetter interface { - Backend() backend.Backend -} - -type Alarmer interface { - // Alarms is implemented in Server interface located in etcdserver/server.go - // It returns a list of alarms present in the AlarmStore - Alarms() []*pb.AlarmMember - Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) -} - -type Downgrader interface { - Downgrade(ctx context.Context, dr *pb.DowngradeRequest) (*pb.DowngradeResponse, error) -} - -type LeaderTransferrer interface { - MoveLeader(ctx context.Context, lead, target uint64) error -} - -type ClusterStatusGetter interface { - IsLearner() bool -} - -type maintenanceServer struct { - lg *zap.Logger - rg apply.RaftStatusGetter - hasher mvcc.HashStorage - bg BackendGetter - a Alarmer - lt LeaderTransferrer - hdr header - cs ClusterStatusGetter - d Downgrader - vs serverversion.Server -} - -func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { - srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, hasher: s.KV().HashStorage(), bg: s, a: s, lt: s, hdr: newHeader(s), cs: s, d: s, vs: etcdserver.NewServerVersionAdapter(s)} - if srv.lg == nil { - srv.lg = zap.NewNop() - } - return &authMaintenanceServer{srv, &AuthAdmin{s}} -} - -func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) 
(*pb.DefragmentResponse, error) { - ms.lg.Info("starting defragment") - err := ms.bg.Backend().Defrag() - if err != nil { - ms.lg.Warn("failed to defragment", zap.Error(err)) - return nil, err - } - ms.lg.Info("finished defragment") - return &pb.DefragmentResponse{}, nil -} - -// big enough size to hold >1 OS pages in the buffer -const snapshotSendBufferSize = 32 * 1024 - -func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx()) - storageVersion := "" - if ver != nil { - storageVersion = ver.String() - } - snap := ms.bg.Backend().Snapshot() - pr, pw := io.Pipe() - - defer pr.Close() - - go func() { - snap.WriteTo(pw) - if err := snap.Close(); err != nil { - ms.lg.Warn("failed to close snapshot", zap.Error(err)) - } - pw.Close() - }() - - // record SHA digest of snapshot data - // used for integrity checks during snapshot restore operation - h := sha256.New() - - sent := int64(0) - total := snap.Size() - size := humanize.Bytes(uint64(total)) - - start := time.Now() - ms.lg.Info("sending database snapshot to client", - zap.Int64("total-bytes", total), - zap.String("size", size), - zap.String("storage-version", storageVersion), - ) - for total-sent > 0 { - // buffer just holds read bytes from stream - // response size is multiple of OS page size, fetched in boltdb - // e.g. 4*1024 - // NOTE: srv.Send does not wait until the message is received by the client. - // Therefore the buffer can not be safely reused between Send operations - buf := make([]byte, snapshotSendBufferSize) - - n, err := io.ReadFull(pr, buf) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return togRPCError(err) - } - sent += int64(n) - - // if total is x * snapshotSendBufferSize. 
it is possible that - // resp.RemainingBytes == 0 - // resp.Blob == zero byte but not nil - // does this make server response sent to client nil in proto - // and client stops receiving from snapshot stream before - // server sends snapshot SHA? - // No, the client will still receive non-nil response - // until server closes the stream with EOF - resp := &pb.SnapshotResponse{ - RemainingBytes: uint64(total - sent), - Blob: buf[:n], - Version: storageVersion, - } - if err = srv.Send(resp); err != nil { - return togRPCError(err) - } - h.Write(buf[:n]) - } - - // send SHA digest for integrity checks - // during snapshot restore operation - sha := h.Sum(nil) - - ms.lg.Info("sending database sha256 checksum to client", - zap.Int64("total-bytes", total), - zap.Int("checksum-size", len(sha)), - ) - hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha, Version: storageVersion} - if err := srv.Send(hresp); err != nil { - return togRPCError(err) - } - - ms.lg.Info("successfully sent database snapshot to client", - zap.Int64("total-bytes", total), - zap.String("size", size), - zap.Duration("took", time.Since(start)), - zap.String("storage-version", storageVersion), - ) - return nil -} - -func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - h, rev, err := ms.hasher.Hash() - if err != nil { - return nil, togRPCError(err) - } - resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h} - ms.hdr.fill(resp.Header) - return resp, nil -} - -func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { - h, rev, err := ms.hasher.HashByRev(r.Revision) - if err != nil { - return nil, togRPCError(err) - } - - resp := &pb.HashKVResponse{ - Header: &pb.ResponseHeader{Revision: rev}, - Hash: h.Hash, - CompactRevision: h.CompactRevision, - HashRevision: h.Revision, - } - ms.hdr.fill(resp.Header) - return resp, nil -} - -func (ms *maintenanceServer) Alarm(ctx 
context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp, err := ms.a.Alarm(ctx, ar) - if err != nil { - return nil, togRPCError(err) - } - if resp.Header == nil { - resp.Header = &pb.ResponseHeader{} - } - ms.hdr.fill(resp.Header) - return resp, nil -} - -func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - hdr := &pb.ResponseHeader{} - ms.hdr.fill(hdr) - resp := &pb.StatusResponse{ - Header: hdr, - Version: version.Version, - Leader: uint64(ms.rg.Leader()), - RaftIndex: ms.rg.CommittedIndex(), - RaftAppliedIndex: ms.rg.AppliedIndex(), - RaftTerm: ms.rg.Term(), - DbSize: ms.bg.Backend().Size(), - DbSizeInUse: ms.bg.Backend().SizeInUse(), - IsLearner: ms.cs.IsLearner(), - } - if storageVersion := ms.vs.GetStorageVersion(); storageVersion != nil { - resp.StorageVersion = storageVersion.String() - } - if resp.Leader == raft.None { - resp.Errors = append(resp.Errors, errors.ErrNoLeader.Error()) - } - for _, a := range ms.a.Alarms() { - resp.Errors = append(resp.Errors, a.String()) - } - return resp, nil -} - -func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { - if ms.rg.MemberId() != ms.rg.Leader() { - return nil, rpctypes.ErrGRPCNotLeader - } - - if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil { - return nil, togRPCError(err) - } - return &pb.MoveLeaderResponse{}, nil -} - -func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { - resp, err := ms.d.Downgrade(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - resp.Header = &pb.ResponseHeader{} - ms.hdr.fill(resp.Header) - return resp, nil -} - -type authMaintenanceServer struct { - *maintenanceServer - *AuthAdmin -} - -func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - if err := 
ams.isPermitted(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Defragment(ctx, sr) -} - -func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - if err := ams.isPermitted(srv.Context()); err != nil { - return err - } - - return ams.maintenanceServer.Snapshot(sr, srv) -} - -func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - if err := ams.isPermitted(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Hash(ctx, r) -} - -func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { - if err := ams.isPermitted(ctx); err != nil { - return nil, err - } - return ams.maintenanceServer.HashKV(ctx, r) -} - -func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - if err := ams.isPermitted(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Status(ctx, ar) -} - -func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { - if err := ams.isPermitted(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.MoveLeader(ctx, tr) -} - -func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { - if err := ams.isPermitted(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Downgrade(ctx, r) -} diff --git a/server/etcdserver/api/v3rpc/member.go b/server/etcdserver/api/v3rpc/member.go deleted file mode 100644 index 001eba9d4aa..00000000000 --- a/server/etcdserver/api/v3rpc/member.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "context" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" -) - -type ClusterServer struct { - cluster api.Cluster - server *etcdserver.EtcdServer -} - -func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer { - return &ClusterServer{ - cluster: s.Cluster(), - server: s, - } -} - -func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - urls, err := types.NewURLs(r.PeerURLs) - if err != nil { - return nil, rpctypes.ErrGRPCMemberBadURLs - } - - now := time.Now() - var m *membership.Member - if r.IsLearner { - m = membership.NewMemberAsLearner("", urls, "", &now) - } else { - m = membership.NewMember("", urls, "", &now) - } - membs, merr := cs.server.AddMember(ctx, *m) - if merr != nil { - return nil, togRPCError(merr) - } - - return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ - ID: uint64(m.ID), - PeerURLs: m.PeerURLs, - IsLearner: m.IsLearner, - }, - Members: membersToProtoMembers(membs), - }, nil -} - -func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - membs, err := cs.server.RemoveMember(ctx, r.ID) - if err != nil { - return nil, togRPCError(err) - } - return &pb.MemberRemoveResponse{Header: cs.header(), Members: 
membersToProtoMembers(membs)}, nil -} - -func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - m := membership.Member{ - ID: types.ID(r.ID), - RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, - } - membs, err := cs.server.UpdateMember(ctx, m) - if err != nil { - return nil, togRPCError(err) - } - return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil -} - -func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - if r.Linearizable { - if err := cs.server.LinearizableReadNotify(ctx); err != nil { - return nil, togRPCError(err) - } - } - membs := membersToProtoMembers(cs.cluster.Members()) - return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil -} - -func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) { - membs, err := cs.server.PromoteMember(ctx, r.ID) - if err != nil { - return nil, togRPCError(err) - } - return &pb.MemberPromoteResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil -} - -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.MemberId()), RaftTerm: cs.server.Term()} -} - -func membersToProtoMembers(membs []*membership.Member) []*pb.Member { - protoMembs := make([]*pb.Member, len(membs)) - for i := range membs { - protoMembs[i] = &pb.Member{ - Name: membs[i].Name, - ID: uint64(membs[i].ID), - PeerURLs: membs[i].PeerURLs, - ClientURLs: membs[i].ClientURLs, - IsLearner: membs[i].IsLearner, - } - } - return protoMembs -} diff --git a/server/etcdserver/api/v3rpc/metrics.go b/server/etcdserver/api/v3rpc/metrics.go deleted file mode 100644 index a4ee723c52f..00000000000 --- a/server/etcdserver/api/v3rpc/metrics.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2016 The etcd 
Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import "github.com/prometheus/client_golang/prometheus" - -var ( - sentBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_sent_bytes_total", - Help: "The total number of bytes sent to grpc clients.", - }) - - receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_received_bytes_total", - Help: "The total number of bytes received from grpc clients.", - }) - - streamFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "server_stream_failures_total", - Help: "The total number of stream failures from the local server.", - }, - []string{"Type", "API"}, - ) - - clientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "client_requests_total", - Help: "The total number of client requests per client version.", - }, - []string{"type", "client_api_version"}, - ) -) - -func init() { - prometheus.MustRegister(sentBytes) - prometheus.MustRegister(receivedBytes) - prometheus.MustRegister(streamFailures) - prometheus.MustRegister(clientRequests) -} diff --git a/server/etcdserver/api/v3rpc/quota.go b/server/etcdserver/api/v3rpc/quota.go deleted file mode 100644 index 21085188650..00000000000 --- a/server/etcdserver/api/v3rpc/quota.go +++ 
/dev/null @@ -1,95 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/storage" -) - -type quotaKVServer struct { - pb.KVServer - qa quotaAlarmer -} - -type quotaAlarmer struct { - q storage.Quota - a Alarmer - id types.ID -} - -// check whether request satisfies the quota. If there is not enough space, -// ignore request and raise the free space alarm. 
-func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { - if qa.q.Available(r) { - return nil - } - req := &pb.AlarmRequest{ - MemberID: uint64(qa.id), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - qa.a.Alarm(ctx, req) - return rpctypes.ErrGRPCNoSpace -} - -func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return "aKVServer{ - NewKVServer(s), - quotaAlarmer{newBackendQuota(s, "kv"), s, s.MemberId()}, - } -} - -func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Put(ctx, r) -} - -func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Txn(ctx, r) -} - -type quotaLeaseServer struct { - pb.LeaseServer - qa quotaAlarmer -} - -func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - if err := s.qa.check(ctx, cr); err != nil { - return nil, err - } - return s.LeaseServer.LeaseGrant(ctx, cr) -} - -func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return "aLeaseServer{ - NewLeaseServer(s), - quotaAlarmer{newBackendQuota(s, "lease"), s, s.MemberId()}, - } -} - -func newBackendQuota(s *etcdserver.EtcdServer, name string) storage.Quota { - return storage.NewBackendQuota(s.Logger(), s.Cfg.QuotaBackendBytes, s.Backend(), name) -} diff --git a/server/etcdserver/api/v3rpc/util.go b/server/etcdserver/api/v3rpc/util.go deleted file mode 100644 index 0fd607d6d61..00000000000 --- a/server/etcdserver/api/v3rpc/util.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "context" - "strings" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/server/v3/etcdserver/version" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/mvcc" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var toGRPCErrorMap = map[error]error{ - membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound, - membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, - membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, - membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, - membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, - membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, - errors.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, - errors.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, - - mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, - mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, - errors.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, - errors.ErrNoSpace: rpctypes.ErrGRPCNoSpace, - errors.ErrTooManyRequests: rpctypes.ErrTooManyRequests, - - errors.ErrNoLeader: rpctypes.ErrGRPCNoLeader, - errors.ErrNotLeader: rpctypes.ErrGRPCNotLeader, - errors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, - errors.ErrStopped: rpctypes.ErrGRPCStopped, - errors.ErrTimeout: rpctypes.ErrGRPCTimeout, - errors.ErrTimeoutDueToLeaderFail: 
rpctypes.ErrGRPCTimeoutDueToLeaderFail, - errors.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, - errors.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex, - errors.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, - errors.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, - errors.ErrCorrupt: rpctypes.ErrGRPCCorrupt, - errors.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, - - errors.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, - errors.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, - version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, - version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, - version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, - - lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, - lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, - lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge, - - auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist, - auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist, - auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist, - auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty, - auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound, - auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist, - auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound, - auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty, - auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed, - auth.ErrPermissionNotGiven: rpctypes.ErrGRPCPermissionNotGiven, - auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied, - auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted, - auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted, - auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled, - auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken, - auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt, - auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision, - 
- // In sync with status.FromContextError - context.Canceled: rpctypes.ErrGRPCCanceled, - context.DeadlineExceeded: rpctypes.ErrGRPCDeadlineExceeded, -} - -func togRPCError(err error) error { - // let gRPC server convert to codes.Canceled, codes.DeadlineExceeded - if err == context.Canceled || err == context.DeadlineExceeded { - return err - } - grpcErr, ok := toGRPCErrorMap[err] - if !ok { - return status.Error(codes.Unknown, err.Error()) - } - return grpcErr -} - -func isClientCtxErr(ctxErr error, err error) bool { - if ctxErr != nil { - return true - } - - ev, ok := status.FromError(err) - if !ok { - return false - } - - switch ev.Code() { - case codes.Canceled, codes.DeadlineExceeded: - // client-side context cancel or deadline exceeded - // "rpc error: code = Canceled desc = context canceled" - // "rpc error: code = DeadlineExceeded desc = context deadline exceeded" - return true - case codes.Unavailable: - msg := ev.Message() - // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected") - // "rpc error: code = Unavailable desc = client disconnected" - if msg == "client disconnected" { - return true - } - // "grpc/transport.ClientTransport.CloseStream" on canceled streams - // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL") - if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") { - return true - } - } - return false -} - -// in v3.4, learner is allowed to serve serializable read and endpoint status -func isRPCSupportedForLearner(req interface{}) bool { - switch r := req.(type) { - case *pb.StatusRequest: - return true - case *pb.RangeRequest: - return r.Serializable - default: - return false - } -} diff --git a/server/etcdserver/api/v3rpc/util_test.go b/server/etcdserver/api/v3rpc/util_test.go deleted file mode 100644 index 3ffbbbb8cdb..00000000000 --- a/server/etcdserver/api/v3rpc/util_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// 
Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "context" - "errors" - "testing" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/server/v3/storage/mvcc" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func TestGRPCError(t *testing.T) { - tt := []struct { - err error - exp error - }{ - {err: mvcc.ErrCompacted, exp: rpctypes.ErrGRPCCompacted}, - {err: mvcc.ErrFutureRev, exp: rpctypes.ErrGRPCFutureRev}, - {err: context.Canceled, exp: context.Canceled}, - {err: context.DeadlineExceeded, exp: context.DeadlineExceeded}, - {err: errors.New("foo"), exp: status.Error(codes.Unknown, "foo")}, - } - for i := range tt { - if err := togRPCError(tt[i].err); err != tt[i].exp { - if _, ok := status.FromError(err); ok { - if err.Error() == tt[i].exp.Error() { - continue - } - } - t.Errorf("#%d: got %v, expected %v", i, err, tt[i].exp) - } - } -} diff --git a/server/etcdserver/api/v3rpc/validationfuzz_test.go b/server/etcdserver/api/v3rpc/validationfuzz_test.go deleted file mode 100644 index d921c9602b7..00000000000 --- a/server/etcdserver/api/v3rpc/validationfuzz_test.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "context" - "testing" - - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - txn "go.etcd.io/etcd/server/v3/etcdserver/txn" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/mvcc" -) - -func FuzzTxnRangeRequest(f *testing.F) { - testcases := []pb.RangeRequest{ - { - Key: []byte{2}, - RangeEnd: []byte{2}, - Limit: 3, - Revision: 3, - SortOrder: 2, - SortTarget: 2, - }, - } - - for _, tc := range testcases { - soValue := pb.RangeRequest_SortOrder_value[tc.SortOrder.String()] - soTarget := pb.RangeRequest_SortTarget_value[tc.SortTarget.String()] - f.Add(tc.Key, tc.RangeEnd, tc.Limit, tc.Revision, soValue, soTarget) - } - - f.Fuzz(func(t *testing.T, - key []byte, - rangeEnd []byte, - limit int64, - revision int64, - sortOrder int32, - sortTarget int32, - ) { - fuzzRequest := &pb.RangeRequest{ - Key: key, - RangeEnd: rangeEnd, - Limit: limit, - SortOrder: pb.RangeRequest_SortOrder(sortOrder), - SortTarget: pb.RangeRequest_SortTarget(sortTarget), - } - - verifyCheck(t, func() error { - return checkRangeRequest(fuzzRequest) - }) - - execTransaction(t, &pb.RequestOp{ - Request: &pb.RequestOp_RequestRange{ - RequestRange: fuzzRequest, - }, - }) - }) -} - -func FuzzTxnPutRequest(f *testing.F) { - testcases := []pb.PutRequest{ - { - Key: []byte{2}, - Value: []byte{2}, - Lease: 2, - PrevKv: false, - IgnoreValue: false, - IgnoreLease: false, - }, - } - - for _, tc := range testcases { - f.Add(tc.Key, tc.Value, tc.Lease, 
tc.PrevKv, tc.IgnoreValue, tc.IgnoreLease) - } - - f.Fuzz(func(t *testing.T, - key []byte, - value []byte, - leaseValue int64, - prevKv bool, - ignoreValue bool, - IgnoreLease bool, - ) { - fuzzRequest := &pb.PutRequest{ - Key: key, - Value: value, - Lease: leaseValue, - PrevKv: prevKv, - IgnoreValue: ignoreValue, - IgnoreLease: IgnoreLease, - } - - verifyCheck(t, func() error { - return checkPutRequest(fuzzRequest) - }) - - execTransaction(t, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{ - RequestPut: fuzzRequest, - }, - }) - }) -} - -func FuzzTxnDeleteRangeRequest(f *testing.F) { - testcases := []pb.DeleteRangeRequest{ - { - Key: []byte{2}, - RangeEnd: []byte{2}, - PrevKv: false, - }, - } - - for _, tc := range testcases { - f.Add(tc.Key, tc.RangeEnd, tc.PrevKv) - } - - f.Fuzz(func(t *testing.T, - key []byte, - rangeEnd []byte, - prevKv bool, - ) { - fuzzRequest := &pb.DeleteRangeRequest{ - Key: key, - RangeEnd: rangeEnd, - PrevKv: prevKv, - } - - verifyCheck(t, func() error { - return checkDeleteRequest(fuzzRequest) - }) - - execTransaction(t, &pb.RequestOp{ - Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: fuzzRequest, - }, - }) - }) -} - -func verifyCheck(t *testing.T, check func() error) { - errCheck := check() - if errCheck != nil { - t.Skip("Validation not passing. Skipping the apply.") - } -} - -func execTransaction(t *testing.T, req *pb.RequestOp) { - b, _ := betesting.NewDefaultTmpBackend(t) - defer betesting.Close(t, b) - s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{}) - defer s.Close() - - // setup cancelled context - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - - request := &pb.TxnRequest{ - Success: []*pb.RequestOp{req}, - } - - _, _, err := txn.Txn(ctx, zaptest.NewLogger(t), request, false, s, &lease.FakeLessor{}) - if err != nil { - t.Skipf("Application erroring. 
%s", err.Error()) - } -} diff --git a/server/etcdserver/api/v3rpc/watch.go b/server/etcdserver/api/v3rpc/watch.go deleted file mode 100644 index 5153007258d..00000000000 --- a/server/etcdserver/api/v3rpc/watch.go +++ /dev/null @@ -1,609 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "context" - "io" - "math/rand" - "sync" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/verify" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/apply" - "go.etcd.io/etcd/server/v3/storage/mvcc" - - "go.uber.org/zap" -) - -const minWatchProgressInterval = 100 * time.Millisecond - -type watchServer struct { - lg *zap.Logger - - clusterID int64 - memberID int64 - - maxRequestBytes int - - sg apply.RaftStatusGetter - watchable mvcc.WatchableKV - ag AuthGetter -} - -// NewWatchServer returns a new watch server. 
-func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { - srv := &watchServer{ - lg: s.Cfg.Logger, - - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.MemberId()), - - maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes), - - sg: s, - watchable: s.Watchable(), - ag: s, - } - if srv.lg == nil { - srv.lg = zap.NewNop() - } - if s.Cfg.WatchProgressNotifyInterval > 0 { - if s.Cfg.WatchProgressNotifyInterval < minWatchProgressInterval { - srv.lg.Warn( - "adjusting watch progress notify interval to minimum period", - zap.Duration("min-watch-progress-notify-interval", minWatchProgressInterval), - ) - s.Cfg.WatchProgressNotifyInterval = minWatchProgressInterval - } - SetProgressReportInterval(s.Cfg.WatchProgressNotifyInterval) - } - return srv -} - -var ( - // External test can read this with GetProgressReportInterval() - // and change this to a small value to finish fast with - // SetProgressReportInterval(). - progressReportInterval = 10 * time.Minute - progressReportIntervalMu sync.RWMutex -) - -// GetProgressReportInterval returns the current progress report interval (for testing). -func GetProgressReportInterval() time.Duration { - progressReportIntervalMu.RLock() - interval := progressReportInterval - progressReportIntervalMu.RUnlock() - - // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not - // send progress notifications to watchers around the same time even when watchers - // are created around the same time (which is common when a client restarts itself). - jitter := time.Duration(rand.Int63n(int64(interval) / 10)) - - return interval + jitter -} - -// SetProgressReportInterval updates the current progress report interval (for testing). -func SetProgressReportInterval(newTimeout time.Duration) { - progressReportIntervalMu.Lock() - progressReportInterval = newTimeout - progressReportIntervalMu.Unlock() -} - -// We send ctrl response inside the read loop. 
We do not want -// send to block read, but we still want ctrl response we sent to -// be serialized. Thus we use a buffered chan to solve the problem. -// A small buffer should be OK for most cases, since we expect the -// ctrl requests are infrequent. -const ctrlStreamBufLen = 16 - -// serverWatchStream is an etcd server side stream. It receives requests -// from client side gRPC stream. It receives watch events from mvcc.WatchStream, -// and creates responses that forwarded to gRPC stream. -// It also forwards control message like watch created and canceled. -type serverWatchStream struct { - lg *zap.Logger - - clusterID int64 - memberID int64 - - maxRequestBytes int - - sg apply.RaftStatusGetter - watchable mvcc.WatchableKV - ag AuthGetter - - gRPCStream pb.Watch_WatchServer - watchStream mvcc.WatchStream - ctrlStream chan *pb.WatchResponse - - // mu protects progress, prevKV, fragment - mu sync.RWMutex - // tracks the watchID that stream might need to send progress to - // TODO: combine progress and prevKV into a single struct? - progress map[mvcc.WatchID]bool - // record watch IDs that need return previous key-value pair - prevKV map[mvcc.WatchID]bool - // records fragmented watch IDs - fragment map[mvcc.WatchID]bool - - // closec indicates the stream is closed. - closec chan struct{} - - // wg waits for the send loop to complete - wg sync.WaitGroup -} - -func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { - sws := serverWatchStream{ - lg: ws.lg, - - clusterID: ws.clusterID, - memberID: ws.memberID, - - maxRequestBytes: ws.maxRequestBytes, - - sg: ws.sg, - watchable: ws.watchable, - ag: ws.ag, - - gRPCStream: stream, - watchStream: ws.watchable.NewWatchStream(), - // chan for sending control response like watcher created and canceled. 
- ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), - - progress: make(map[mvcc.WatchID]bool), - prevKV: make(map[mvcc.WatchID]bool), - fragment: make(map[mvcc.WatchID]bool), - - closec: make(chan struct{}), - } - - sws.wg.Add(1) - go func() { - sws.sendLoop() - sws.wg.Done() - }() - - errc := make(chan error, 1) - // Ideally recvLoop would also use sws.wg to signal its completion - // but when stream.Context().Done() is closed, the stream's recv - // may continue to block since it uses a different context, leading to - // deadlock when calling sws.close(). - go func() { - if rerr := sws.recvLoop(); rerr != nil { - if isClientCtxErr(stream.Context().Err(), rerr) { - sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr)) - } else { - sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr)) - streamFailures.WithLabelValues("receive", "watch").Inc() - } - errc <- rerr - } - }() - - // TODO: There's a race here. When a stream is closed (e.g. due to a cancellation), - // the underlying error (e.g. a gRPC stream error) may be returned and handled - // through errc if the recv goroutine finishes before the send goroutine. - // When the recv goroutine wins, the stream error is retained. When recv loses - // the race, the underlying error is lost (unless the root error is propagated - // through Context.Err() which is not always the case (as callers have to decide - // to implement a custom context to do so). The stdlib context package builtins - // may be insufficient to carry semantically useful errors around and should be - // revisited. 
- select { - case err = <-errc: - if err == context.Canceled { - err = rpctypes.ErrGRPCWatchCanceled - } - close(sws.ctrlStream) - case <-stream.Context().Done(): - err = stream.Context().Err() - if err == context.Canceled { - err = rpctypes.ErrGRPCWatchCanceled - } - } - - sws.close() - return err -} - -func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) error { - authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) - if err != nil { - return err - } - if authInfo == nil { - // if auth is enabled, IsRangePermitted() can cause an error - authInfo = &auth.AuthInfo{} - } - return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) -} - -func (sws *serverWatchStream) recvLoop() error { - for { - req, err := sws.gRPCStream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - switch uv := req.RequestUnion.(type) { - case *pb.WatchRequest_CreateRequest: - if uv.CreateRequest == nil { - break - } - - creq := uv.CreateRequest - if len(creq.Key) == 0 { - // \x00 is the smallest key - creq.Key = []byte{0} - } - if len(creq.RangeEnd) == 0 { - // force nil since watchstream.Watch distinguishes - // between nil and []byte{} for single key / >= - creq.RangeEnd = nil - } - if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { - // support >= key queries - creq.RangeEnd = []byte{} - } - - err := sws.isWatchPermitted(creq) - if err != nil { - var cancelReason string - switch err { - case auth.ErrInvalidAuthToken: - cancelReason = rpctypes.ErrGRPCInvalidAuthToken.Error() - case auth.ErrAuthOldRevision: - cancelReason = rpctypes.ErrGRPCAuthOldRevision.Error() - case auth.ErrUserEmpty: - cancelReason = rpctypes.ErrGRPCUserEmpty.Error() - default: - if err != auth.ErrPermissionDenied { - sws.lg.Error("unexpected error code", zap.Error(err)) - } - cancelReason = rpctypes.ErrGRPCPermissionDenied.Error() - } - - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: 
clientv3.InvalidWatchID, - Canceled: true, - Created: true, - CancelReason: cancelReason, - } - - select { - case sws.ctrlStream <- wr: - continue - case <-sws.closec: - return nil - } - } - - filters := FiltersFromRequest(creq) - - wsrev := sws.watchStream.Rev() - rev := creq.StartRevision - if rev == 0 { - rev = wsrev + 1 - } - id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, rev, filters...) - if err == nil { - sws.mu.Lock() - if creq.ProgressNotify { - sws.progress[id] = true - } - if creq.PrevKv { - sws.prevKV[id] = true - } - if creq.Fragment { - sws.fragment[id] = true - } - sws.mu.Unlock() - } else { - id = clientv3.InvalidWatchID - } - - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wsrev), - WatchId: int64(id), - Created: true, - Canceled: err != nil, - } - if err != nil { - wr.CancelReason = err.Error() - } - select { - case sws.ctrlStream <- wr: - case <-sws.closec: - return nil - } - - case *pb.WatchRequest_CancelRequest: - if uv.CancelRequest != nil { - id := uv.CancelRequest.WatchId - err := sws.watchStream.Cancel(mvcc.WatchID(id)) - if err == nil { - sws.ctrlStream <- &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: id, - Canceled: true, - } - sws.mu.Lock() - delete(sws.progress, mvcc.WatchID(id)) - delete(sws.prevKV, mvcc.WatchID(id)) - delete(sws.fragment, mvcc.WatchID(id)) - sws.mu.Unlock() - } - } - case *pb.WatchRequest_ProgressRequest: - if uv.ProgressRequest != nil { - sws.ctrlStream <- &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: clientv3.InvalidWatchID, // response is not associated with any WatchId and will be broadcast to all watch channels - } - } - default: - // we probably should not shutdown the entire stream when - // receive an invalid command. - // so just do nothing instead. 
- sws.lg.Sugar().Infof("invalid watch request type %T received in gRPC stream", uv) - continue - } - } -} - -func (sws *serverWatchStream) sendLoop() { - // watch ids that are currently active - ids := make(map[mvcc.WatchID]struct{}) - // watch responses pending on a watch id creation message - pending := make(map[mvcc.WatchID][]*pb.WatchResponse) - - interval := GetProgressReportInterval() - progressTicker := time.NewTicker(interval) - - defer func() { - progressTicker.Stop() - // drain the chan to clean up pending events - for ws := range sws.watchStream.Chan() { - mvcc.ReportEventReceived(len(ws.Events)) - } - for _, wrs := range pending { - for _, ws := range wrs { - mvcc.ReportEventReceived(len(ws.Events)) - } - } - }() - - for { - select { - case wresp, ok := <-sws.watchStream.Chan(): - if !ok { - return - } - - // TODO: evs is []mvccpb.Event type - // either return []*mvccpb.Event from the mvcc package - // or define protocol buffer with []mvccpb.Event. - evs := wresp.Events - events := make([]*mvccpb.Event, len(evs)) - sws.mu.RLock() - needPrevKV := sws.prevKV[wresp.WatchID] - sws.mu.RUnlock() - for i := range evs { - events[i] = &evs[i] - if needPrevKV && !IsCreateEvent(evs[i]) { - opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} - r, err := sws.watchable.Range(context.TODO(), evs[i].Kv.Key, nil, opt) - if err == nil && len(r.KVs) != 0 { - events[i].PrevKv = &(r.KVs[0]) - } - } - } - - canceled := wresp.CompactRevision != 0 - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wresp.Revision), - WatchId: int64(wresp.WatchID), - Events: events, - CompactRevision: wresp.CompactRevision, - Canceled: canceled, - } - - if _, okID := ids[wresp.WatchID]; !okID { - // buffer if id not yet announced - wrs := append(pending[wresp.WatchID], wr) - pending[wresp.WatchID] = wrs - continue - } - - mvcc.ReportEventReceived(len(evs)) - - sws.mu.RLock() - fragmented, ok := sws.fragment[wresp.WatchID] - sws.mu.RUnlock() - - var serr error - if !fragmented && !ok 
{ - serr = sws.gRPCStream.Send(wr) - } else { - serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send) - } - - if serr != nil { - if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) { - sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr)) - } else { - sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr)) - streamFailures.WithLabelValues("send", "watch").Inc() - } - return - } - - sws.mu.Lock() - if len(evs) > 0 && sws.progress[wresp.WatchID] { - // elide next progress update if sent a key update - sws.progress[wresp.WatchID] = false - } - sws.mu.Unlock() - - case c, ok := <-sws.ctrlStream: - if !ok { - return - } - - if err := sws.gRPCStream.Send(c); err != nil { - if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err)) - } else { - sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err)) - streamFailures.WithLabelValues("send", "watch").Inc() - } - return - } - - // track id creation - wid := mvcc.WatchID(c.WatchId) - - verify.Assert(!(c.Canceled && c.Created) || wid == clientv3.InvalidWatchID, "unexpected watchId: %d, wanted: %d, since both 'Canceled' and 'Created' are true", wid, clientv3.InvalidWatchID) - - if c.Canceled && wid != clientv3.InvalidWatchID { - delete(ids, wid) - continue - } - if c.Created { - // flush buffered events - ids[wid] = struct{}{} - for _, v := range pending[wid] { - mvcc.ReportEventReceived(len(v.Events)) - if err := sws.gRPCStream.Send(v); err != nil { - if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err)) - } else { - sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err)) - streamFailures.WithLabelValues("send", "watch").Inc() - } - return - } - } - delete(pending, wid) - } - - case <-progressTicker.C: - sws.mu.Lock() - for id, ok := range 
sws.progress { - if ok { - sws.watchStream.RequestProgress(id) - } - sws.progress[id] = true - } - sws.mu.Unlock() - - case <-sws.closec: - return - } - } -} - -func IsCreateEvent(e mvccpb.Event) bool { - return e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision -} - -func sendFragments( - wr *pb.WatchResponse, - maxRequestBytes int, - sendFunc func(*pb.WatchResponse) error) error { - // no need to fragment if total request size is smaller - // than max request limit or response contains only one event - if wr.Size() < maxRequestBytes || len(wr.Events) < 2 { - return sendFunc(wr) - } - - ow := *wr - ow.Events = make([]*mvccpb.Event, 0) - ow.Fragment = true - - var idx int - for { - cur := ow - for _, ev := range wr.Events[idx:] { - cur.Events = append(cur.Events, ev) - if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes { - cur.Events = cur.Events[:len(cur.Events)-1] - break - } - idx++ - } - if idx == len(wr.Events) { - // last response has no more fragment - cur.Fragment = false - } - if err := sendFunc(&cur); err != nil { - return err - } - if !cur.Fragment { - break - } - } - return nil -} - -func (sws *serverWatchStream) close() { - sws.watchStream.Close() - close(sws.closec) - sws.wg.Wait() -} - -func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(sws.clusterID), - MemberId: uint64(sws.memberID), - Revision: rev, - RaftTerm: sws.sg.Term(), - } -} - -func filterNoDelete(e mvccpb.Event) bool { - return e.Type == mvccpb.DELETE -} - -func filterNoPut(e mvccpb.Event) bool { - return e.Type == mvccpb.PUT -} - -// FiltersFromRequest returns "mvcc.FilterFunc" from a given watch create request. 
-func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc { - filters := make([]mvcc.FilterFunc, 0, len(creq.Filters)) - for _, ft := range creq.Filters { - switch ft { - case pb.WatchCreateRequest_NOPUT: - filters = append(filters, filterNoPut) - case pb.WatchCreateRequest_NODELETE: - filters = append(filters, filterNoDelete) - default: - } - } - return filters -} diff --git a/server/etcdserver/api/v3rpc/watch_test.go b/server/etcdserver/api/v3rpc/watch_test.go deleted file mode 100644 index bd3f4943b2e..00000000000 --- a/server/etcdserver/api/v3rpc/watch_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "bytes" - "math" - "testing" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" -) - -func TestSendFragment(t *testing.T) { - tt := []struct { - wr *pb.WatchResponse - maxRequestBytes int - fragments int - werr error - }{ - { // large limit should not fragment - wr: createResponse(100, 1), - maxRequestBytes: math.MaxInt32, - fragments: 1, - }, - { // large limit for two messages, expect no fragment - wr: createResponse(10, 2), - maxRequestBytes: 50, - fragments: 1, - }, - { // limit is small but only one message, expect no fragment - wr: createResponse(1024, 1), - maxRequestBytes: 1, - fragments: 1, - }, - { // exceed limit only when combined, expect fragments - wr: createResponse(11, 5), - maxRequestBytes: 20, - fragments: 5, - }, - { // 5 events with each event exceeding limits, expect fragments - wr: createResponse(15, 5), - maxRequestBytes: 10, - fragments: 5, - }, - { // 4 events with some combined events exceeding limits - wr: createResponse(10, 4), - maxRequestBytes: 35, - fragments: 2, - }, - } - - for i := range tt { - fragmentedResp := make([]*pb.WatchResponse, 0) - testSend := func(wr *pb.WatchResponse) error { - fragmentedResp = append(fragmentedResp, wr) - return nil - } - err := sendFragments(tt[i].wr, tt[i].maxRequestBytes, testSend) - if err != tt[i].werr { - t.Errorf("#%d: expected error %v, got %v", i, tt[i].werr, err) - } - got := len(fragmentedResp) - if got != tt[i].fragments { - t.Errorf("#%d: expected response number %d, got %d", i, tt[i].fragments, got) - } - if got > 0 && fragmentedResp[got-1].Fragment { - t.Errorf("#%d: expected fragment=false in last response, got %+v", i, fragmentedResp[got-1]) - } - } -} - -func createResponse(dataSize, events int) (resp *pb.WatchResponse) { - resp = &pb.WatchResponse{Events: make([]*mvccpb.Event, events)} - for i := range resp.Events { - resp.Events[i] = &mvccpb.Event{ - Kv: &mvccpb.KeyValue{ - Key: bytes.Repeat([]byte("a"), dataSize), - }, - } 
- } - return resp -} diff --git a/server/etcdserver/apply/apply.go b/server/etcdserver/apply/apply.go deleted file mode 100644 index 058870b1dc2..00000000000 --- a/server/etcdserver/apply/apply.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package apply - -import ( - "context" - - "go.uber.org/zap" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - mvcctxn "go.etcd.io/etcd/server/v3/etcdserver/txn" - "go.etcd.io/etcd/server/v3/etcdserver/version" - "go.etcd.io/etcd/server/v3/lease" - serverstorage "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/mvcc" - - "github.com/coreos/go-semver/semver" - "github.com/gogo/protobuf/proto" -) - -const ( - v3Version = "v3" -) - -// RaftStatusGetter represents etcd server and Raft progress. 
-type RaftStatusGetter interface { - MemberId() types.ID - Leader() types.ID - CommittedIndex() uint64 - AppliedIndex() uint64 - Term() uint64 -} - -type Result struct { - Resp proto.Message - Err error - // Physc signals the physical effect of the request has completed in addition - // to being logically reflected by the node. Currently, only used for - // Compaction requests. - Physc <-chan struct{} - Trace *traceutil.Trace -} - -type applyFunc func(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result - -// applierV3 is the interface for processing V3 raft messages -type applierV3 interface { - // Apply executes the generic portion of application logic for the current applier, but - // delegates the actual execution to the applyFunc method. - Apply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc applyFunc) *Result - - Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) - Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) - Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) - - LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) - - Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) - - Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) - - AuthEnable() (*pb.AuthEnableResponse, error) - AuthDisable() (*pb.AuthDisableResponse, error) - AuthStatus() (*pb.AuthStatusResponse, error) - - UserAdd(ua *pb.AuthUserAddRequest) 
(*pb.AuthUserAddResponse, error) - UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - - // processing internal V3 raft request - - ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) - ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) - DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) -} - -type SnapshotServer interface { - ForceSnapshot() -} - -type applierV3backend struct { - lg *zap.Logger - kv mvcc.KV - alarmStore *v3alarm.AlarmStore - authStore auth.AuthStore - lessor lease.Lessor - cluster *membership.RaftCluster - raftStatus RaftStatusGetter - snapshotServer SnapshotServer - consistentIndex cindex.ConsistentIndexer - - txnModeWriteWithSharedBuffer bool -} - -func newApplierV3Backend( - lg *zap.Logger, - kv mvcc.KV, - alarmStore *v3alarm.AlarmStore, - authStore auth.AuthStore, - lessor lease.Lessor, - cluster *membership.RaftCluster, - raftStatus RaftStatusGetter, - 
snapshotServer SnapshotServer, - consistentIndex cindex.ConsistentIndexer, - txnModeWriteWithSharedBuffer bool) applierV3 { - return &applierV3backend{ - lg: lg, - kv: kv, - alarmStore: alarmStore, - authStore: authStore, - lessor: lessor, - cluster: cluster, - raftStatus: raftStatus, - snapshotServer: snapshotServer, - consistentIndex: consistentIndex, - txnModeWriteWithSharedBuffer: txnModeWriteWithSharedBuffer} -} - -func (a *applierV3backend) Apply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc applyFunc) *Result { - return applyFunc(ctx, r, shouldApplyV3) -} - -func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { - return mvcctxn.Put(ctx, a.lg, a.lessor, a.kv, txn, p) -} - -func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return mvcctxn.DeleteRange(a.kv, txn, dr) -} - -func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - return mvcctxn.Range(ctx, a.lg, a.kv, txn, r) -} - -func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return mvcctxn.Txn(ctx, a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor) -} - -func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { - resp := &pb.CompactionResponse{} - resp.Header = &pb.ResponseHeader{} - trace := traceutil.New("compact", - a.lg, - traceutil.Field{Key: "revision", Value: compaction.Revision}, - ) - - ch, err := a.kv.Compact(trace, compaction.Revision) - if err != nil { - return nil, ch, nil, err - } - // get the current revision. which key to get is not important. 
- rr, _ := a.kv.Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{}) - resp.Header.Revision = rr.Rev - return resp, ch, trace, err -} - -func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - l, err := a.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) - resp := &pb.LeaseGrantResponse{} - if err == nil { - resp.ID = int64(l.ID) - resp.TTL = l.TTL() - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - err := a.lessor.Revoke(lease.LeaseID(lc.ID)) - return &pb.LeaseRevokeResponse{Header: a.newHeader()}, err -} - -func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) { - for _, c := range lc.Checkpoints { - err := a.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL) - if err != nil { - return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, err - } - } - return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, nil -} - -func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp := &pb.AlarmResponse{} - - switch ar.Action { - case pb.AlarmRequest_GET: - resp.Alarms = a.alarmStore.Get(ar.Alarm) - case pb.AlarmRequest_ACTIVATE: - if ar.Alarm == pb.AlarmType_NONE { - break - } - m := a.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Inc() - case pb.AlarmRequest_DEACTIVATE: - m := a.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Dec() - default: - return nil, nil - } - return resp, nil -} - -type applierV3Capped struct { - applierV3 - q serverstorage.BackendQuota -} - -// newApplierV3Capped creates an applyV3 that will 
reject Puts and transactions -// with Puts so that the number of keys in the store is capped. -func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } - -func (a *applierV3Capped) Put(_ context.Context, _ mvcc.TxnWrite, _ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, errors.ErrNoSpace -} - -func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - if a.q.Cost(r) > 0 { - return nil, nil, errors.ErrNoSpace - } - return a.applierV3.Txn(ctx, r) -} - -func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, errors.ErrNoSpace -} - -func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { - err := a.authStore.AuthEnable() - if err != nil { - return nil, err - } - return &pb.AuthEnableResponse{Header: a.newHeader()}, nil -} - -func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { - a.authStore.AuthDisable() - return &pb.AuthDisableResponse{Header: a.newHeader()}, nil -} - -func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) { - enabled := a.authStore.IsAuthEnabled() - authRevision := a.authStore.Revision() - return &pb.AuthStatusResponse{Header: a.newHeader(), Enabled: enabled, AuthRevision: authRevision}, nil -} - -func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(context.Background(), auth.AuthenticateParamIndex{}, a.consistentIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken) - resp, err := a.authStore.Authenticate(ctx, r.Name, r.Password) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := a.authStore.UserAdd(r) - if resp != nil { - resp.Header = a.newHeader() 
- } - return resp, err -} - -func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := a.authStore.UserDelete(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := a.authStore.UserChangePassword(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := a.authStore.UserGrantRole(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := a.authStore.UserGet(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := a.authStore.UserRevokeRole(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := a.authStore.RoleAdd(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := a.authStore.RoleGrantPermission(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := a.authStore.RoleGet(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) 
(*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := a.authStore.RoleRevokePermission(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := a.authStore.RoleDelete(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := a.authStore.UserList(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := a.authStore.RoleList(r) - if resp != nil { - resp.Header = a.newHeader() - } - return resp, err -} - -func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - prevVersion := a.cluster.Version() - newVersion := semver.Must(semver.NewVersion(r.Ver)) - a.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3) - // Force snapshot after cluster version downgrade. 
- if prevVersion != nil && newVersion.LessThan(*prevVersion) { - lg := a.lg - if lg != nil { - lg.Info("Cluster version downgrade detected, forcing snapshot", - zap.String("prev-cluster-version", prevVersion.String()), - zap.String("new-cluster-version", newVersion.String()), - ) - } - a.snapshotServer.ForceSnapshot() - } -} - -func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - a.cluster.UpdateAttributes( - types.ID(r.Member_ID), - membership.Attributes{ - Name: r.MemberAttributes.Name, - ClientURLs: r.MemberAttributes.ClientUrls, - }, - shouldApplyV3, - ) -} - -func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - d := version.DowngradeInfo{Enabled: false} - if r.Enabled { - d = version.DowngradeInfo{Enabled: true, TargetVersion: r.Ver} - } - a.cluster.SetDowngradeInfo(&d, shouldApplyV3) -} - -type quotaApplierV3 struct { - applierV3 - q serverstorage.Quota -} - -func newQuotaApplierV3(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, app applierV3) applierV3 { - return "aApplierV3{app, serverstorage.NewBackendQuota(lg, quotaBackendBytesCfg, be, "v3-applier")} -} - -func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - ok := a.q.Available(p) - resp, trace, err := a.applierV3.Put(ctx, txn, p) - if err == nil && !ok { - err = errors.ErrNoSpace - } - return resp, trace, err -} - -func (a *quotaApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - ok := a.q.Available(rt) - resp, trace, err := a.applierV3.Txn(ctx, rt) - if err == nil && !ok { - err = errors.ErrNoSpace - } - return resp, trace, err -} - -func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - ok := a.q.Available(lc) - resp, err := a.applierV3.LeaseGrant(lc) - if 
err == nil && !ok { - err = errors.ErrNoSpace - } - return resp, err -} - -func (a *applierV3backend) newHeader() *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(a.cluster.ID()), - MemberId: uint64(a.raftStatus.MemberId()), - Revision: a.kv.Rev(), - RaftTerm: a.raftStatus.Term(), - } -} diff --git a/server/etcdserver/apply/apply_auth.go b/server/etcdserver/apply/apply_auth.go deleted file mode 100644 index 61f9f8892d2..00000000000 --- a/server/etcdserver/apply/apply_auth.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package apply - -import ( - "context" - "sync" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/txn" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/mvcc" -) - -type authApplierV3 struct { - applierV3 - as auth.AuthStore - lessor lease.Lessor - - // mu serializes Apply so that user isn't corrupted and so that - // serialized requests don't leak data from TOCTOU errors - mu sync.Mutex - - authInfo auth.AuthInfo -} - -func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 { - return &authApplierV3{applierV3: base, as: as, lessor: lessor} -} - -func (aa *authApplierV3) Apply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc applyFunc) *Result { - aa.mu.Lock() - defer aa.mu.Unlock() - if r.Header != nil { - // backward-compatible with pre-3.0 releases when internalRaftRequest - // does not have header field - aa.authInfo.Username = r.Header.Username - aa.authInfo.Revision = r.Header.AuthRevision - } - if needAdminPermission(r) { - if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return &Result{Err: err} - } - } - ret := aa.applierV3.Apply(ctx, r, shouldApplyV3, applyFunc) - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return ret -} - -func (aa *authApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { - return nil, nil, err - } - - if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil { - // The specified lease is already attached with a key that cannot - // be written by this user. 
It means the user cannot revoke the - // lease so attaching the lease to the newly written key should - // be forbidden. - return nil, nil, err - } - - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) - if err != nil { - return nil, nil, err - } - } - return aa.applierV3.Put(ctx, txn, r) -} - -func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - return aa.applierV3.Range(ctx, txn, r) -} - -func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd) - if err != nil { - return nil, err - } - } - - return aa.applierV3.DeleteRange(txn, r) -} - -func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - if err := txn.CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil { - return nil, nil, err - } - return aa.applierV3.Txn(ctx, rt) -} - -func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil { - return nil, err - } - return aa.applierV3.LeaseRevoke(lc) -} - -func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error { - l := aa.lessor.Lookup(leaseID) - if l != nil { - for _, key := range l.Keys() { - if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil { - return err - } - } - } - - return nil -} - -func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - err := aa.as.IsAdminPermitted(&aa.authInfo) - if err != nil && r.Name != aa.authInfo.Username { - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return 
&pb.AuthUserGetResponse{}, err - } - - return aa.applierV3.UserGet(r) -} - -func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - err := aa.as.IsAdminPermitted(&aa.authInfo) - if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) { - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return &pb.AuthRoleGetResponse{}, err - } - - return aa.applierV3.RoleGet(r) -} - -func needAdminPermission(r *pb.InternalRaftRequest) bool { - switch { - case r.AuthEnable != nil: - return true - case r.AuthDisable != nil: - return true - case r.AuthStatus != nil: - return true - case r.AuthUserAdd != nil: - return true - case r.AuthUserDelete != nil: - return true - case r.AuthUserChangePassword != nil: - return true - case r.AuthUserGrantRole != nil: - return true - case r.AuthUserRevokeRole != nil: - return true - case r.AuthRoleAdd != nil: - return true - case r.AuthRoleGrantPermission != nil: - return true - case r.AuthRoleRevokePermission != nil: - return true - case r.AuthRoleDelete != nil: - return true - case r.AuthUserList != nil: - return true - case r.AuthRoleList != nil: - return true - default: - return false - } -} diff --git a/server/etcdserver/apply/corrupt.go b/server/etcdserver/apply/corrupt.go deleted file mode 100644 index 040f294aeba..00000000000 --- a/server/etcdserver/apply/corrupt.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package apply - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/server/v3/storage/mvcc" -) - -type applierV3Corrupt struct { - applierV3 -} - -func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } - -func (a *applierV3Corrupt) Put(_ context.Context, _ mvcc.TxnWrite, _ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, errors.ErrCorrupt -} - -func (a *applierV3Corrupt) Range(_ context.Context, _ mvcc.TxnRead, _ *pb.RangeRequest) (*pb.RangeResponse, error) { - return nil, errors.ErrCorrupt -} - -func (a *applierV3Corrupt) DeleteRange(_ mvcc.TxnWrite, _ *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return nil, errors.ErrCorrupt -} - -func (a *applierV3Corrupt) Txn(_ context.Context, _ *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return nil, nil, errors.ErrCorrupt -} - -func (a *applierV3Corrupt) Compaction(_ *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { - return nil, nil, nil, errors.ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, errors.ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseRevoke(_ *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - return nil, errors.ErrCorrupt -} diff --git a/server/etcdserver/apply/metrics.go b/server/etcdserver/apply/metrics.go deleted file mode 100644 index 34578578b4a..00000000000 --- a/server/etcdserver/apply/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package apply - -import "github.com/prometheus/client_golang/prometheus" - -var ( - alarms = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "server", - Name: "alarms", - Help: "Alarms for every member in cluster. 1 for 'server_id' label with current ID. 2 for 'alarm_type' label with type of this alarm", - }, - []string{"server_id", "alarm_type"}) -) - -func init() { - prometheus.MustRegister(alarms) -} diff --git a/server/etcdserver/apply/uber_applier.go b/server/etcdserver/apply/uber_applier.go deleted file mode 100644 index 201defa385b..00000000000 --- a/server/etcdserver/apply/uber_applier.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package apply - -import ( - "context" - "time" - - "go.uber.org/zap" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/etcdserver/txn" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/mvcc" -) - -type UberApplier interface { - Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result -} - -type uberApplier struct { - lg *zap.Logger - - alarmStore *v3alarm.AlarmStore - warningApplyDuration time.Duration - - // This is the applier that is taking in consideration current alarms - applyV3 applierV3 - - // This is the applier used for wrapping when alarms change - applyV3base applierV3 -} - -func NewUberApplier( - lg *zap.Logger, - be backend.Backend, - kv mvcc.KV, - alarmStore *v3alarm.AlarmStore, - authStore auth.AuthStore, - lessor lease.Lessor, - cluster *membership.RaftCluster, - raftStatus RaftStatusGetter, - snapshotServer SnapshotServer, - consistentIndex cindex.ConsistentIndexer, - warningApplyDuration time.Duration, - txnModeWriteWithSharedBuffer bool, - quotaBackendBytesCfg int64) UberApplier { - applyV3base_ := newApplierV3(lg, be, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer, quotaBackendBytesCfg) - - ua := &uberApplier{ - lg: lg, - alarmStore: alarmStore, - warningApplyDuration: warningApplyDuration, - applyV3: applyV3base_, - applyV3base: applyV3base_, - } - ua.restoreAlarms() - return ua -} - -func newApplierV3( - lg *zap.Logger, - be backend.Backend, - kv mvcc.KV, - alarmStore *v3alarm.AlarmStore, - authStore auth.AuthStore, - lessor lease.Lessor, - cluster *membership.RaftCluster, - raftStatus RaftStatusGetter, - snapshotServer SnapshotServer, - consistentIndex 
cindex.ConsistentIndexer, - txnModeWriteWithSharedBuffer bool, - quotaBackendBytesCfg int64) applierV3 { - applierBackend := newApplierV3Backend(lg, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer) - return newAuthApplierV3( - authStore, - newQuotaApplierV3(lg, quotaBackendBytesCfg, be, applierBackend), - lessor, - ) -} - -func (a *uberApplier) restoreAlarms() { - noSpaceAlarms := len(a.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 - corruptAlarms := len(a.alarmStore.Get(pb.AlarmType_CORRUPT)) > 0 - a.applyV3 = a.applyV3base - if noSpaceAlarms { - a.applyV3 = newApplierV3Capped(a.applyV3) - } - if corruptAlarms { - a.applyV3 = newApplierV3Corrupt(a.applyV3) - } -} - -func (a *uberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result { - // We first execute chain of Apply() calls down the hierarchy: - // (i.e. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend), - // then dispatch() unpacks the request to a specific method (like Put), - // that gets executed down the hierarchy again: - // i.e. CorruptApplier.Put(CappedApplier.Put(...(BackendApplier.Put(...)))). - return a.applyV3.Apply(context.TODO(), r, shouldApplyV3, a.dispatch) -} - -// dispatch translates the request (r) into appropriate call (like Put) on -// the underlying applyV3 object. 
-func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result { - op := "unknown" - ar := &Result{} - defer func(start time.Time) { - success := ar.Err == nil || ar.Err == mvcc.ErrCompacted - txn.ApplySecObserve(v3Version, op, success, time.Since(start)) - txn.WarnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) - if !success { - txn.WarnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) - } - }(time.Now()) - - switch { - case r.ClusterVersionSet != nil: // Implemented in 3.5.x - op = "ClusterVersionSet" - a.applyV3.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3) - return ar - case r.ClusterMemberAttrSet != nil: - op = "ClusterMemberAttrSet" // Implemented in 3.5.x - a.applyV3.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3) - return ar - case r.DowngradeInfoSet != nil: - op = "DowngradeInfoSet" // Implemented in 3.5.x - a.applyV3.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3) - return ar - } - - if !shouldApplyV3 { - return nil - } - - switch { - case r.Range != nil: - op = "Range" - ar.Resp, ar.Err = a.applyV3.Range(ctx, nil, r.Range) - case r.Put != nil: - op = "Put" - ar.Resp, ar.Trace, ar.Err = a.applyV3.Put(ctx, nil, r.Put) - case r.DeleteRange != nil: - op = "DeleteRange" - ar.Resp, ar.Err = a.applyV3.DeleteRange(nil, r.DeleteRange) - case r.Txn != nil: - op = "Txn" - ar.Resp, ar.Trace, ar.Err = a.applyV3.Txn(ctx, r.Txn) - case r.Compaction != nil: - op = "Compaction" - ar.Resp, ar.Physc, ar.Trace, ar.Err = a.applyV3.Compaction(r.Compaction) - case r.LeaseGrant != nil: - op = "LeaseGrant" - ar.Resp, ar.Err = a.applyV3.LeaseGrant(r.LeaseGrant) - case r.LeaseRevoke != nil: - op = "LeaseRevoke" - ar.Resp, ar.Err = a.applyV3.LeaseRevoke(r.LeaseRevoke) - case r.LeaseCheckpoint != nil: - op = "LeaseCheckpoint" - ar.Resp, ar.Err = a.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) - case 
r.Alarm != nil: - op = "Alarm" - ar.Resp, ar.Err = a.Alarm(r.Alarm) - case r.Authenticate != nil: - op = "Authenticate" - ar.Resp, ar.Err = a.applyV3.Authenticate(r.Authenticate) - case r.AuthEnable != nil: - op = "AuthEnable" - ar.Resp, ar.Err = a.applyV3.AuthEnable() - case r.AuthDisable != nil: - op = "AuthDisable" - ar.Resp, ar.Err = a.applyV3.AuthDisable() - case r.AuthStatus != nil: - ar.Resp, ar.Err = a.applyV3.AuthStatus() - case r.AuthUserAdd != nil: - op = "AuthUserAdd" - ar.Resp, ar.Err = a.applyV3.UserAdd(r.AuthUserAdd) - case r.AuthUserDelete != nil: - op = "AuthUserDelete" - ar.Resp, ar.Err = a.applyV3.UserDelete(r.AuthUserDelete) - case r.AuthUserChangePassword != nil: - op = "AuthUserChangePassword" - ar.Resp, ar.Err = a.applyV3.UserChangePassword(r.AuthUserChangePassword) - case r.AuthUserGrantRole != nil: - op = "AuthUserGrantRole" - ar.Resp, ar.Err = a.applyV3.UserGrantRole(r.AuthUserGrantRole) - case r.AuthUserGet != nil: - op = "AuthUserGet" - ar.Resp, ar.Err = a.applyV3.UserGet(r.AuthUserGet) - case r.AuthUserRevokeRole != nil: - op = "AuthUserRevokeRole" - ar.Resp, ar.Err = a.applyV3.UserRevokeRole(r.AuthUserRevokeRole) - case r.AuthRoleAdd != nil: - op = "AuthRoleAdd" - ar.Resp, ar.Err = a.applyV3.RoleAdd(r.AuthRoleAdd) - case r.AuthRoleGrantPermission != nil: - op = "AuthRoleGrantPermission" - ar.Resp, ar.Err = a.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) - case r.AuthRoleGet != nil: - op = "AuthRoleGet" - ar.Resp, ar.Err = a.applyV3.RoleGet(r.AuthRoleGet) - case r.AuthRoleRevokePermission != nil: - op = "AuthRoleRevokePermission" - ar.Resp, ar.Err = a.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) - case r.AuthRoleDelete != nil: - op = "AuthRoleDelete" - ar.Resp, ar.Err = a.applyV3.RoleDelete(r.AuthRoleDelete) - case r.AuthUserList != nil: - op = "AuthUserList" - ar.Resp, ar.Err = a.applyV3.UserList(r.AuthUserList) - case r.AuthRoleList != nil: - op = "AuthRoleList" - ar.Resp, ar.Err = 
a.applyV3.RoleList(r.AuthRoleList) - default: - a.lg.Panic("not implemented apply", zap.Stringer("raft-request", r)) - } - return ar -} - -func (a *uberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp, err := a.applyV3.Alarm(ar) - - if ar.Action == pb.AlarmRequest_ACTIVATE || - ar.Action == pb.AlarmRequest_DEACTIVATE { - a.restoreAlarms() - } - return resp, err -} diff --git a/server/etcdserver/bootstrap.go b/server/etcdserver/bootstrap.go deleted file mode 100644 index e416fd079c2..00000000000 --- a/server/etcdserver/bootstrap.go +++ /dev/null @@ -1,713 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "strings" - "time" - - "github.com/coreos/go-semver/semver" - "github.com/dustin/go-humanize" - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - servererrors "go.etcd.io/etcd/server/v3/etcdserver/errors" - serverstorage "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) { - if cfg.MaxRequestBytes > recommendedMaxRequestBytes { - cfg.Logger.Warn( - "exceeded recommended request limit", - zap.Uint("max-request-bytes", cfg.MaxRequestBytes), - zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))), - zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), - zap.String("recommended-request-size", recommendedMaxRequestBytesString), - ) - } - - if terr := fileutil.TouchDirAll(cfg.Logger, cfg.DataDir); terr != nil { - return nil, fmt.Errorf("cannot access data directory: %v", terr) - } - - if terr := fileutil.TouchDirAll(cfg.Logger, cfg.MemberDir()); terr != nil { - return nil, fmt.Errorf("cannot access member directory: %v", terr) - } - ss := bootstrapSnapshot(cfg) - prt, 
err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout()) - if err != nil { - return nil, err - } - - haveWAL := wal.Exist(cfg.WALDir()) - st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) - backend, err := bootstrapBackend(cfg, haveWAL, st, ss) - if err != nil { - return nil, err - } - var bwal *bootstrappedWAL - - if haveWAL { - if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { - return nil, fmt.Errorf("cannot write to WAL directory: %v", err) - } - bwal = bootstrapWALFromSnapshot(cfg, backend.snapshot) - } - - cluster, err := bootstrapCluster(cfg, bwal, prt) - if err != nil { - backend.Close() - return nil, err - } - - s, err := bootstrapStorage(cfg, st, backend, bwal, cluster) - if err != nil { - backend.Close() - return nil, err - } - - if err = cluster.Finalize(cfg, s); err != nil { - backend.Close() - return nil, err - } - raft := bootstrapRaft(cfg, cluster, s.wal) - return &bootstrappedServer{ - prt: prt, - ss: ss, - storage: s, - cluster: cluster, - raft: raft, - }, nil -} - -type bootstrappedServer struct { - storage *bootstrappedStorage - cluster *bootstrapedCluster - raft *bootstrappedRaft - prt http.RoundTripper - ss *snap.Snapshotter -} - -func (s *bootstrappedServer) Close() { - s.storage.Close() -} - -type bootstrappedStorage struct { - backend *bootstrappedBackend - wal *bootstrappedWAL - st v2store.Store -} - -func (s *bootstrappedStorage) Close() { - s.backend.Close() -} - -type bootstrappedBackend struct { - beHooks *serverstorage.BackendHooks - be backend.Backend - ci cindex.ConsistentIndexer - beExist bool - snapshot *raftpb.Snapshot -} - -func (s *bootstrappedBackend) Close() { - s.be.Close() -} - -type bootstrapedCluster struct { - remotes []*membership.Member - cl *membership.RaftCluster - nodeID types.ID -} - -type bootstrappedRaft struct { - lg *zap.Logger - heartbeat time.Duration - - peers []raft.Peer - config *raft.Config - storage *raft.MemoryStorage -} - -func bootstrapStorage(cfg config.ServerConfig, st 
v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrapedCluster) (b *bootstrappedStorage, err error) { - if wal == nil { - wal = bootstrapNewWAL(cfg, cl) - } - - return &bootstrappedStorage{ - backend: be, - st: st, - wal: wal, - }, nil -} - -func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter { - if err := fileutil.TouchDirAll(cfg.Logger, cfg.SnapDir()); err != nil { - cfg.Logger.Fatal( - "failed to create snapshot directory", - zap.String("path", cfg.SnapDir()), - zap.Error(err), - ) - } - - if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool { - return strings.HasPrefix(fileName, "tmp") - }); err != nil { - cfg.Logger.Error( - "failed to remove temp file(s) in snapshot directory", - zap.String("path", cfg.SnapDir()), - zap.Error(err), - ) - } - return snap.New(cfg.Logger, cfg.SnapDir()) -} - -func bootstrapBackend(cfg config.ServerConfig, haveWAL bool, st v2store.Store, ss *snap.Snapshotter) (backend *bootstrappedBackend, err error) { - beExist := fileutil.Exist(cfg.BackendPath()) - ci := cindex.NewConsistentIndex(nil) - beHooks := serverstorage.NewBackendHooks(cfg.Logger, ci) - be := serverstorage.OpenBackend(cfg, beHooks) - defer func() { - if err != nil && be != nil { - be.Close() - } - }() - ci.SetBackend(be) - schema.CreateMetaBucket(be.BatchTx()) - if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 { - err = maybeDefragBackend(cfg, be) - if err != nil { - return nil, err - } - } - cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex())) - - // TODO(serathius): Implement schema setup in fresh storage - var snapshot *raftpb.Snapshot - if haveWAL { - snapshot, be, err = recoverSnapshot(cfg, st, be, beExist, beHooks, ci, ss) - if err != nil { - return nil, err - } - } - if beExist { - if err = schema.Validate(cfg.Logger, be.ReadTx()); err != nil { - cfg.Logger.Error("Failed to validate schema", zap.Error(err)) - return nil, err - } - } - - return 
&bootstrappedBackend{ - beHooks: beHooks, - be: be, - ci: ci, - beExist: beExist, - snapshot: snapshot, - }, nil -} - -func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error { - size := be.Size() - sizeInUse := be.SizeInUse() - freeableMemory := uint(size - sizeInUse) - thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024 - if freeableMemory < thresholdBytes { - cfg.Logger.Info("Skipping defragmentation", - zap.Int64("current-db-size-bytes", size), - zap.String("current-db-size", humanize.Bytes(uint64(size))), - zap.Int64("current-db-size-in-use-bytes", sizeInUse), - zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))), - zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes), - zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))), - ) - return nil - } - return be.Defrag() -} - -func bootstrapCluster(cfg config.ServerConfig, bwal *bootstrappedWAL, prt http.RoundTripper) (c *bootstrapedCluster, err error) { - switch { - case bwal == nil && !cfg.NewCluster: - c, err = bootstrapExistingClusterNoWAL(cfg, prt) - case bwal == nil && cfg.NewCluster: - c, err = bootstrapNewClusterNoWAL(cfg, prt) - case bwal != nil && bwal.haveWAL: - c, err = bootstrapClusterWithWAL(cfg, bwal.meta) - default: - return nil, fmt.Errorf("unsupported bootstrap config") - } - if err != nil { - return nil, err - } - return c, nil -} - -func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrapedCluster, error) { - if err := cfg.VerifyJoinExisting(); err != nil { - return nil, err - } - cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) - if err != nil { - return nil, err - } - existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt) - if gerr != nil { - return nil, fmt.Errorf("cannot 
fetch cluster info from peer urls: %v", gerr) - } - if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil { - return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) - } - if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt, cfg.ReqTimeout()) { - return nil, fmt.Errorf("incompatible with current running cluster") - } - scaleUpLearners := false - if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, existingCluster.Members(), scaleUpLearners); err != nil { - return nil, err - } - remotes := existingCluster.Members() - cl.SetID(types.ID(0), existingCluster.ID()) - member := cl.MemberByName(cfg.Name) - return &bootstrapedCluster{ - remotes: remotes, - cl: cl, - nodeID: member.ID, - }, nil -} - -func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrapedCluster, error) { - if err := cfg.VerifyBootstrap(); err != nil { - return nil, err - } - cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) - if err != nil { - return nil, err - } - m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) { - return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) - } - if cfg.ShouldDiscover() { - var str string - if cfg.DiscoveryURL != "" { - cfg.Logger.Warn("V2 discovery is deprecated!") - str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) - } else { - cfg.Logger.Info("Bootstrapping cluster using v3 discovery.") - str, err = v3discovery.JoinCluster(cfg.Logger, &cfg.DiscoveryCfg, m.ID, cfg.InitialPeerURLsMap.String()) - } - if err != nil { - return nil, &servererrors.DiscoveryError{Op: "join", Err: err} - } - var urlsmap types.URLsMap - urlsmap, err = types.NewURLsMap(str) - if err != 
nil { - return nil, err - } - if config.CheckDuplicateURL(urlsmap) { - return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) - } - if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)); err != nil { - return nil, err - } - } - return &bootstrapedCluster{ - remotes: nil, - cl: cl, - nodeID: m.ID, - }, nil -} - -func bootstrapClusterWithWAL(cfg config.ServerConfig, meta *snapshotMetadata) (*bootstrapedCluster, error) { - if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { - return nil, fmt.Errorf("cannot write to member directory: %v", err) - } - - if cfg.ShouldDiscover() { - cfg.Logger.Warn( - "discovery token is ignored since cluster already initialized; valid logs are found", - zap.String("wal-dir", cfg.WALDir()), - ) - } - cl := membership.NewCluster(cfg.Logger, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) - - scaleUpLearners := false - if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, cl.Members(), scaleUpLearners); err != nil { - return nil, err - } - - cl.SetID(meta.nodeID, meta.clusterID) - return &bootstrapedCluster{ - cl: cl, - nodeID: meta.nodeID, - }, nil -} - -func recoverSnapshot(cfg config.ServerConfig, st v2store.Store, be backend.Backend, beExist bool, beHooks *serverstorage.BackendHooks, ci cindex.ConsistentIndexer, ss *snap.Snapshotter) (*raftpb.Snapshot, backend.Backend, error) { - // Find a snapshot to start/restart a raft node - walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir()) - if err != nil { - return nil, be, err - } - // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding - // bwal log entries - snapshot, err := ss.LoadNewestAvailable(walSnaps) - if err != nil && !errors.Is(err, snap.ErrNoSnapshot) { - return nil, be, err - } - - if snapshot != nil { - if err = st.Recovery(snapshot.Data); err != nil { - 
cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err)) - } - - if err = serverstorage.AssertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil { - cfg.Logger.Error("illegal v2store content", zap.Error(err)) - return nil, be, err - } - - cfg.Logger.Info( - "recovered v2 store from snapshot", - zap.Uint64("snapshot-index", snapshot.Metadata.Index), - zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))), - ) - - if be, err = serverstorage.RecoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil { - cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) - } - // A snapshot db may have already been recovered, and the old db should have - // already been closed in this case, so we should set the backend again. - ci.SetBackend(be) - - s1, s2 := be.Size(), be.SizeInUse() - cfg.Logger.Info( - "recovered v3 backend from snapshot", - zap.Int64("backend-size-bytes", s1), - zap.String("backend-size", humanize.Bytes(uint64(s1))), - zap.Int64("backend-size-in-use-bytes", s2), - zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), - ) - if beExist { - // TODO: remove kvindex != 0 checking when we do not expect users to upgrade - // etcd from pre-3.0 release. - kvindex := ci.ConsistentIndex() - if kvindex < snapshot.Metadata.Index { - if kvindex != 0 { - return nil, be, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index) - } - cfg.Logger.Warn( - "consistent index was never saved", - zap.Uint64("snapshot-index", snapshot.Metadata.Index), - ) - } - } - } else { - cfg.Logger.Info("No snapshot found. 
Recovering WAL from scratch!") - } - return snapshot, be, nil -} - -func (c *bootstrapedCluster) Finalize(cfg config.ServerConfig, s *bootstrappedStorage) error { - if !s.wal.haveWAL { - c.cl.SetID(c.nodeID, c.cl.ID()) - } - c.cl.SetStore(s.st) - c.cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, s.backend.be)) - if s.wal.haveWAL { - c.cl.Recover(api.UpdateCapability) - if c.databaseFileMissing(s) { - bepath := cfg.BackendPath() - os.RemoveAll(bepath) - return fmt.Errorf("database file (%v) of the backend is missing", bepath) - } - } - scaleUpLearners := false - return membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, c.cl.Members(), scaleUpLearners) -} - -func (c *bootstrapedCluster) databaseFileMissing(s *bootstrappedStorage) bool { - v3Cluster := c.cl.Version() != nil && !c.cl.Version().LessThan(semver.Version{Major: 3}) - return v3Cluster && !s.backend.beExist -} - -func bootstrapRaft(cfg config.ServerConfig, cluster *bootstrapedCluster, bwal *bootstrappedWAL) *bootstrappedRaft { - switch { - case !bwal.haveWAL && !cfg.NewCluster: - return bootstrapRaftFromCluster(cfg, cluster.cl, nil, bwal) - case !bwal.haveWAL && cfg.NewCluster: - return bootstrapRaftFromCluster(cfg, cluster.cl, cluster.cl.MemberIDs(), bwal) - case bwal.haveWAL: - return bootstrapRaftFromWAL(cfg, bwal) - default: - cfg.Logger.Panic("unsupported bootstrap config") - return nil - } -} - -func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID, bwal *bootstrappedWAL) *bootstrappedRaft { - member := cl.MemberByName(cfg.Name) - peers := make([]raft.Peer, len(ids)) - for i, id := range ids { - var ctx []byte - ctx, err := json.Marshal((*cl).Member(id)) - if err != nil { - cfg.Logger.Panic("failed to marshal member", zap.Error(err)) - } - peers[i] = raft.Peer{ID: uint64(id), Context: ctx} - } - cfg.Logger.Info( - "starting local member", - zap.String("local-member-id", member.ID.String()), - zap.String("cluster-id", cl.ID().String()), - ) 
- s := bwal.MemoryStorage() - return &bootstrappedRaft{ - lg: cfg.Logger, - heartbeat: time.Duration(cfg.TickMs) * time.Millisecond, - config: raftConfig(cfg, uint64(member.ID), s), - peers: peers, - storage: s, - } -} - -func bootstrapRaftFromWAL(cfg config.ServerConfig, bwal *bootstrappedWAL) *bootstrappedRaft { - s := bwal.MemoryStorage() - return &bootstrappedRaft{ - lg: cfg.Logger, - heartbeat: time.Duration(cfg.TickMs) * time.Millisecond, - config: raftConfig(cfg, uint64(bwal.meta.nodeID), s), - storage: s, - } -} - -func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config { - return &raft.Config{ - ID: id, - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - PreVote: cfg.PreVote, - Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), - } -} - -func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter, wal *wal.WAL, cl *membership.RaftCluster) *raftNode { - var n raft.Node - if len(b.peers) == 0 { - n = raft.RestartNode(b.config) - } else { - n = raft.StartNode(b.config, b.peers) - } - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - return newRaftNode( - raftNodeConfig{ - lg: b.lg, - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - heartbeat: b.heartbeat, - raftStorage: b.storage, - storage: serverstorage.NewStorage(b.lg, wal, ss), - }, - ) -} - -func bootstrapWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedWAL { - wal, st, ents, snap, meta := openWALFromSnapshot(cfg, snapshot) - bwal := &bootstrappedWAL{ - lg: cfg.Logger, - w: wal, - st: st, - ents: ents, - snapshot: snap, - meta: meta, - haveWAL: true, - } - - if cfg.ForceNewCluster { - // discard the previously uncommitted entries - bwal.ents = bwal.CommitedEntries() - entries := bwal.NewConfigChangeEntries() - // force commit config change entries - 
bwal.AppendAndCommitEntries(entries) - cfg.Logger.Info( - "forcing restart member", - zap.String("cluster-id", meta.clusterID.String()), - zap.String("local-member-id", meta.nodeID.String()), - zap.Uint64("commit-index", bwal.st.Commit), - ) - } else { - cfg.Logger.Info( - "restarting local member", - zap.String("cluster-id", meta.clusterID.String()), - zap.String("local-member-id", meta.nodeID.String()), - zap.Uint64("commit-index", bwal.st.Commit), - ) - } - return bwal -} - -// openWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear -// after the position of the given snap in the WAL. -// The snap must have been previously saved to the WAL, or this call will panic. -func openWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (*wal.WAL, *raftpb.HardState, []raftpb.Entry, *raftpb.Snapshot, *snapshotMetadata) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - repaired := false - for { - w, err := wal.Open(cfg.Logger, cfg.WALDir(), walsnap) - if err != nil { - cfg.Logger.Fatal("failed to open WAL", zap.Error(err)) - } - if cfg.UnsafeNoFsync { - w.SetUnsafeNoFsync() - } - wmetadata, st, ents, err := w.ReadAll() - if err != nil { - w.Close() - // we can only repair ErrUnexpectedEOF and we never repair twice. 
- if repaired || !errors.Is(err, io.ErrUnexpectedEOF) { - cfg.Logger.Fatal("failed to read WAL, cannot be repaired", zap.Error(err)) - } - if !wal.Repair(cfg.Logger, cfg.WALDir()) { - cfg.Logger.Fatal("failed to repair WAL", zap.Error(err)) - } else { - cfg.Logger.Info("repaired WAL", zap.Error(err)) - repaired = true - } - continue - } - var metadata etcdserverpb.Metadata - pbutil.MustUnmarshal(&metadata, wmetadata) - id := types.ID(metadata.NodeID) - cid := types.ID(metadata.ClusterID) - meta := &snapshotMetadata{clusterID: cid, nodeID: id} - return w, &st, ents, snapshot, meta - } -} - -type snapshotMetadata struct { - nodeID, clusterID types.ID -} - -func bootstrapNewWAL(cfg config.ServerConfig, cl *bootstrapedCluster) *bootstrappedWAL { - metadata := pbutil.MustMarshal( - &etcdserverpb.Metadata{ - NodeID: uint64(cl.nodeID), - ClusterID: uint64(cl.cl.ID()), - }, - ) - w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata) - if err != nil { - cfg.Logger.Panic("failed to create WAL", zap.Error(err)) - } - if cfg.UnsafeNoFsync { - w.SetUnsafeNoFsync() - } - return &bootstrappedWAL{ - lg: cfg.Logger, - w: w, - } -} - -type bootstrappedWAL struct { - lg *zap.Logger - - haveWAL bool - w *wal.WAL - st *raftpb.HardState - ents []raftpb.Entry - snapshot *raftpb.Snapshot - meta *snapshotMetadata -} - -func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage { - s := raft.NewMemoryStorage() - if wal.snapshot != nil { - s.ApplySnapshot(*wal.snapshot) - } - if wal.st != nil { - s.SetHardState(*wal.st) - } - if len(wal.ents) != 0 { - s.Append(wal.ents) - } - return s -} - -func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry { - for i, ent := range wal.ents { - if ent.Index > wal.st.Commit { - wal.lg.Info( - "discarding uncommitted WAL entries", - zap.Uint64("entry-index", ent.Index), - zap.Uint64("commit-index-from-wal", wal.st.Commit), - zap.Int("number-of-discarded-entries", len(wal.ents)-i), - ) - return wal.ents[:i] - } - } - return wal.ents -} - -func 
(wal *bootstrappedWAL) NewConfigChangeEntries() []raftpb.Entry { - return serverstorage.CreateConfigChangeEnts( - wal.lg, - serverstorage.GetEffectiveNodeIDsFromWalEntries(wal.lg, wal.snapshot, wal.ents), - uint64(wal.meta.nodeID), - wal.st.Term, - wal.st.Commit, - ) -} - -func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) { - wal.ents = append(wal.ents, ents...) - err := wal.w.Save(raftpb.HardState{}, ents) - if err != nil { - wal.lg.Fatal("failed to save hard state and entries", zap.Error(err)) - } - if len(wal.ents) != 0 { - wal.st.Commit = wal.ents[len(wal.ents)-1].Index - } -} diff --git a/server/etcdserver/bootstrap_test.go b/server/etcdserver/bootstrap_test.go deleted file mode 100644 index 55a20684fe8..00000000000 --- a/server/etcdserver/bootstrap_test.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package version implements etcd version parsing and contains latest version -// information. 
- -package etcdserver - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "strings" - "testing" - - "go.uber.org/zap/zaptest" - - bolt "go.etcd.io/bbolt" - "go.etcd.io/etcd/server/v3/storage/datadir" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - serverstorage "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/raft/v3/raftpb" -) - -func TestBootstrapExistingClusterNoWALMaxLearner(t *testing.T) { - tests := []struct { - name string - members []etcdserverpb.Member - maxLearner int - hasError bool - expectedError error - }{ - { - name: "bootstrap success: maxLearner gt learner count", - members: []etcdserverpb.Member{ - {ID: 4512484362714696085, PeerURLs: []string{"http://localhost:2380"}}, - {ID: 5321713336100798248, PeerURLs: []string{"http://localhost:2381"}}, - {ID: 5670219998796287055, PeerURLs: []string{"http://localhost:2382"}}, - }, - maxLearner: 1, - hasError: false, - expectedError: nil, - }, - { - name: "bootstrap success: maxLearner eq learner count", - members: []etcdserverpb.Member{ - {ID: 4512484362714696085, PeerURLs: []string{"http://localhost:2380"}, IsLearner: true}, - {ID: 5321713336100798248, PeerURLs: []string{"http://localhost:2381"}}, - {ID: 5670219998796287055, PeerURLs: []string{"http://localhost:2382"}, IsLearner: true}, - }, - maxLearner: 2, - hasError: false, - expectedError: nil, - }, - { - name: "bootstrap fail: maxLearner lt learner count", - members: []etcdserverpb.Member{ - {ID: 4512484362714696085, PeerURLs: []string{"http://localhost:2380"}}, - {ID: 5321713336100798248, PeerURLs: 
[]string{"http://localhost:2381"}, IsLearner: true}, - {ID: 5670219998796287055, PeerURLs: []string{"http://localhost:2382"}, IsLearner: true}, - }, - maxLearner: 1, - hasError: true, - expectedError: membership.ErrTooManyLearners, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cluster, err := types.NewURLsMap("node0=http://localhost:2380,node1=http://localhost:2381,node2=http://localhost:2382") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - cfg := config.ServerConfig{ - Name: "node0", - InitialPeerURLsMap: cluster, - Logger: zaptest.NewLogger(t), - ExperimentalMaxLearners: tt.maxLearner, - } - _, err = bootstrapExistingClusterNoWAL(cfg, mockBootstrapRoundTrip(tt.members)) - hasError := err != nil - if hasError != tt.hasError { - t.Errorf("expected error: %v got: %v", tt.hasError, err) - } - if hasError && !strings.Contains(err.Error(), tt.expectedError.Error()) { - t.Fatalf("expected error to contain: %q, got: %q", tt.expectedError.Error(), err.Error()) - } - }) - } -} - -type roundTripFunc func(r *http.Request) (*http.Response, error) - -func (s roundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return s(r) -} - -func mockBootstrapRoundTrip(members []etcdserverpb.Member) roundTripFunc { - return func(r *http.Request) (*http.Response, error) { - switch { - case strings.Contains(r.URL.String(), "/members"): - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(strings.NewReader(mockMembersJSON(members))), - Header: http.Header{"X-Etcd-Cluster-Id": []string{"f4588138892a16b0"}}, - }, nil - case strings.Contains(r.URL.String(), "/version"): - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(strings.NewReader(mockVersionJSON())), - }, nil - case strings.Contains(r.URL.String(), DowngradeEnabledPath): - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(strings.NewReader(`true`)), - }, nil - } - return nil, nil - } -} - -func 
mockVersionJSON() string { - v := version.Versions{Server: "3.7.0", Cluster: "3.7.0"} - version, _ := json.Marshal(v) - return string(version) -} - -func mockMembersJSON(m []etcdserverpb.Member) string { - members, _ := json.Marshal(m) - return string(members) -} - -func TestBootstrapBackend(t *testing.T) { - tests := []struct { - name string - prepareData func(config.ServerConfig) error - expectedConsistentIdx uint64 - expectedError error - }{ - { - name: "bootstrap backend success: no data files", - prepareData: nil, - expectedConsistentIdx: 0, - expectedError: nil, - }, - { - name: "bootstrap backend success: have data files and snapshot db file", - prepareData: prepareData, - expectedConsistentIdx: 5, - expectedError: nil, - }, - // TODO(ahrtr): add more test cases - // https://github.com/etcd-io/etcd/issues/13507 - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dataDir, err := createDataDir(t) - if err != nil { - t.Fatalf("Failed to create the data dir, unexpected error: %v", err) - } - - cfg := config.ServerConfig{ - Name: "demoNode", - DataDir: dataDir, - BackendFreelistType: bolt.FreelistArrayType, - Logger: zaptest.NewLogger(t), - } - - if tt.prepareData != nil { - if err := tt.prepareData(cfg); err != nil { - t.Fatalf("failed to prepare data, unexpected error: %v", err) - } - } - - haveWAL := wal.Exist(cfg.WALDir()) - st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) - ss := snap.New(cfg.Logger, cfg.SnapDir()) - backend, err := bootstrapBackend(cfg, haveWAL, st, ss) - - hasError := err != nil - expectedHasError := tt.expectedError != nil - if hasError != expectedHasError { - t.Errorf("expected error: %v got: %v", expectedHasError, err) - } - if hasError && !strings.Contains(err.Error(), tt.expectedError.Error()) { - t.Fatalf("expected error to contain: %q, got: %q", tt.expectedError.Error(), err.Error()) - } - - if backend.ci.ConsistentIndex() != tt.expectedConsistentIdx { - t.Errorf("expected consistent index: %d, got: %d", 
tt.expectedConsistentIdx, backend.ci.ConsistentIndex()) - } - }) - } -} - -func createDataDir(t *testing.T) (dataDir string, err error) { - // create the temporary data dir - dataDir = t.TempDir() - - // create ${dataDir}/member/snap - if err = os.MkdirAll(datadir.ToSnapDir(dataDir), 0700); err != nil { - return - } - - // create ${dataDir}/member/wal - err = os.MkdirAll(datadir.ToWalDir(dataDir), 0700) - - return -} - -// prepare data for the test case -func prepareData(cfg config.ServerConfig) (err error) { - var snapshotTerm, snapshotIndex uint64 = 2, 5 - - if err = createWALFileWithSnapshotRecord(cfg, snapshotTerm, snapshotIndex); err != nil { - return - } - - return createSnapshotAndBackendDB(cfg, snapshotTerm, snapshotIndex) -} - -func createWALFileWithSnapshotRecord(cfg config.ServerConfig, snapshotTerm, snapshotIndex uint64) (err error) { - var w *wal.WAL - if w, err = wal.Create(cfg.Logger, cfg.WALDir(), []byte("somedata")); err != nil { - return - } - - defer func() { - err = w.Close() - }() - - walSnap := walpb.Snapshot{ - Index: snapshotIndex, - Term: snapshotTerm, - ConfState: &raftpb.ConfState{ - Voters: []uint64{0x00ffca74}, - AutoLeave: false, - }, - } - - if err = w.SaveSnapshot(walSnap); err != nil { - return - } - - return w.Save(raftpb.HardState{Term: snapshotTerm, Vote: 3, Commit: snapshotIndex}, nil) -} - -func createSnapshotAndBackendDB(cfg config.ServerConfig, snapshotTerm, snapshotIndex uint64) (err error) { - confState := raftpb.ConfState{ - Voters: []uint64{1, 2, 3}, - } - - // create snapshot file - ss := snap.New(cfg.Logger, cfg.SnapDir()) - if err = ss.SaveSnap(raftpb.Snapshot{ - Data: []byte("{}"), - Metadata: raftpb.SnapshotMetadata{ - ConfState: confState, - Index: snapshotIndex, - Term: snapshotTerm, - }, - }); err != nil { - return - } - - // create snapshot db file: "%016x.snap.db" - be := serverstorage.OpenBackend(cfg, nil) - schema.CreateMetaBucket(be.BatchTx()) - schema.UnsafeUpdateConsistentIndex(be.BatchTx(), snapshotIndex, 
snapshotTerm) - schema.MustUnsafeSaveConfStateToBackend(cfg.Logger, be.BatchTx(), &confState) - if err = be.Close(); err != nil { - return - } - sdb := filepath.Join(cfg.SnapDir(), fmt.Sprintf("%016x.snap.db", snapshotIndex)) - if err = os.Rename(cfg.BackendPath(), sdb); err != nil { - return - } - - // create backend db file - be = serverstorage.OpenBackend(cfg, nil) - schema.CreateMetaBucket(be.BatchTx()) - schema.UnsafeUpdateConsistentIndex(be.BatchTx(), 1, 1) - return be.Close() -} diff --git a/server/etcdserver/cindex/cindex.go b/server/etcdserver/cindex/cindex.go deleted file mode 100644 index 70646e19e8b..00000000000 --- a/server/etcdserver/cindex/cindex.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cindex - -import ( - "sync" - "sync/atomic" - - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -type Backend interface { - ReadTx() backend.ReadTx -} - -// ConsistentIndexer is an interface that wraps the Get/Set/Save method for consistentIndex. -type ConsistentIndexer interface { - - // ConsistentIndex returns the consistent index of current executing entry. - ConsistentIndex() uint64 - - // ConsistentApplyingIndex returns the consistent applying index of current executing entry. - ConsistentApplyingIndex() (uint64, uint64) - - // UnsafeConsistentIndex is similar to ConsistentIndex, but it doesn't lock the transaction. 
- UnsafeConsistentIndex() uint64 - - // SetConsistentIndex set the consistent index of current executing entry. - SetConsistentIndex(v uint64, term uint64) - - // SetConsistentApplyingIndex set the consistent applying index of current executing entry. - SetConsistentApplyingIndex(v uint64, term uint64) - - // UnsafeSave must be called holding the lock on the tx. - // It saves consistentIndex to the underlying stable storage. - UnsafeSave(tx backend.BatchTx) - - // SetBackend set the available backend.BatchTx for ConsistentIndexer. - SetBackend(be Backend) -} - -// consistentIndex implements the ConsistentIndexer interface. -type consistentIndex struct { - // consistentIndex represents the offset of an entry in a consistent replica log. - // It caches the "consistent_index" key's value. - // Accessed through atomics so must be 64-bit aligned. - consistentIndex uint64 - // term represents the RAFT term of committed entry in a consistent replica log. - // Accessed through atomics so must be 64-bit aligned. - // The value is being persisted in the backend since v3.5. - term uint64 - - // applyingIndex and applyingTerm are just temporary cache of the raftpb.Entry.Index - // and raftpb.Entry.Term, and they are not ready to be persisted yet. They will be - // saved to consistentIndex and term above in the txPostLockInsideApplyHook. - // - // TODO(ahrtr): try to remove the OnPreCommitUnsafe, and compare the - // performance difference. Afterwards we can make a decision on whether - // or not we should remove OnPreCommitUnsafe. If it is true, then we - // can remove applyingIndex and applyingTerm, and save the e.Index and - // e.Term to consistentIndex and term directly in applyEntries, and - // persist them into db in the txPostLockInsideApplyHook. - applyingIndex uint64 - applyingTerm uint64 - - // be is used for initial read consistentIndex - be Backend - // mutex is protecting be. - mutex sync.Mutex -} - -// NewConsistentIndex creates a new consistent index. 
-// If `be` is nil, it must be set (SetBackend) before first access using `ConsistentIndex()`. -func NewConsistentIndex(be Backend) ConsistentIndexer { - return &consistentIndex{be: be} -} - -func (ci *consistentIndex) ConsistentIndex() uint64 { - if index := atomic.LoadUint64(&ci.consistentIndex); index > 0 { - return index - } - ci.mutex.Lock() - defer ci.mutex.Unlock() - - v, term := schema.ReadConsistentIndex(ci.be.ReadTx()) - ci.SetConsistentIndex(v, term) - return v -} - -func (ci *consistentIndex) UnsafeConsistentIndex() uint64 { - if index := atomic.LoadUint64(&ci.consistentIndex); index > 0 { - return index - } - - v, term := schema.UnsafeReadConsistentIndex(ci.be.ReadTx()) - ci.SetConsistentIndex(v, term) - return v -} - -func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) { - atomic.StoreUint64(&ci.consistentIndex, v) - atomic.StoreUint64(&ci.term, term) -} - -func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) { - index := atomic.LoadUint64(&ci.consistentIndex) - term := atomic.LoadUint64(&ci.term) - schema.UnsafeUpdateConsistentIndex(tx, index, term) -} - -func (ci *consistentIndex) SetBackend(be Backend) { - ci.mutex.Lock() - defer ci.mutex.Unlock() - ci.be = be - // After the backend is changed, the first access should re-read it. 
- ci.SetConsistentIndex(0, 0) -} - -func (ci *consistentIndex) ConsistentApplyingIndex() (uint64, uint64) { - return atomic.LoadUint64(&ci.applyingIndex), atomic.LoadUint64(&ci.applyingTerm) -} - -func (ci *consistentIndex) SetConsistentApplyingIndex(v uint64, term uint64) { - atomic.StoreUint64(&ci.applyingIndex, v) - atomic.StoreUint64(&ci.applyingTerm, term) -} - -func NewFakeConsistentIndex(index uint64) ConsistentIndexer { - return &fakeConsistentIndex{index: index} -} - -type fakeConsistentIndex struct { - index uint64 - term uint64 -} - -func (f *fakeConsistentIndex) ConsistentIndex() uint64 { - return atomic.LoadUint64(&f.index) -} -func (f *fakeConsistentIndex) ConsistentApplyingIndex() (uint64, uint64) { - return atomic.LoadUint64(&f.index), atomic.LoadUint64(&f.term) -} -func (f *fakeConsistentIndex) UnsafeConsistentIndex() uint64 { - return atomic.LoadUint64(&f.index) -} - -func (f *fakeConsistentIndex) SetConsistentIndex(index uint64, term uint64) { - atomic.StoreUint64(&f.index, index) - atomic.StoreUint64(&f.term, term) -} -func (f *fakeConsistentIndex) SetConsistentApplyingIndex(index uint64, term uint64) { - atomic.StoreUint64(&f.index, index) - atomic.StoreUint64(&f.term, term) -} - -func (f *fakeConsistentIndex) UnsafeSave(_ backend.BatchTx) {} -func (f *fakeConsistentIndex) SetBackend(_ Backend) {} - -func UpdateConsistentIndexForce(tx backend.BatchTx, index uint64, term uint64) { - tx.LockOutsideApply() - defer tx.Unlock() - schema.UnsafeUpdateConsistentIndexForce(tx, index, term) -} diff --git a/server/etcdserver/cindex/cindex_test.go b/server/etcdserver/cindex/cindex_test.go deleted file mode 100644 index a056ac3d759..00000000000 --- a/server/etcdserver/cindex/cindex_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cindex - -import ( - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -// TestConsistentIndex ensures that LoadConsistentIndex/Save/ConsistentIndex and backend.BatchTx can work well together. -func TestConsistentIndex(t *testing.T) { - - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - ci := NewConsistentIndex(be) - - tx := be.BatchTx() - if tx == nil { - t.Fatal("batch tx is nil") - } - tx.Lock() - - schema.UnsafeCreateMetaBucket(tx) - tx.Unlock() - be.ForceCommit() - r := uint64(7890123) - term := uint64(234) - ci.SetConsistentIndex(r, term) - index := ci.ConsistentIndex() - if index != r { - t.Errorf("expected %d,got %d", r, index) - } - tx.Lock() - ci.UnsafeSave(tx) - tx.Unlock() - be.ForceCommit() - be.Close() - - b := backend.NewDefaultBackend(zaptest.NewLogger(t), tmpPath) - defer b.Close() - ci.SetBackend(b) - index = ci.ConsistentIndex() - assert.Equal(t, r, index) - - ci = NewConsistentIndex(b) - index = ci.ConsistentIndex() - assert.Equal(t, r, index) -} - -func TestConsistentIndexDecrease(t *testing.T) { - testutil.BeforeTest(t) - initIndex := uint64(100) - initTerm := uint64(10) - - tcs := []struct { - name string - index uint64 - term uint64 - panicExpected bool - }{ - { - name: "Decrease term", - index: initIndex + 1, - term: initTerm - 1, - 
panicExpected: false, // TODO: Change in v3.7 - }, - { - name: "Decrease CI", - index: initIndex - 1, - term: initTerm + 1, - panicExpected: true, - }, - { - name: "Decrease CI and term", - index: initIndex - 1, - term: initTerm - 1, - panicExpected: true, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - tx := be.BatchTx() - tx.Lock() - schema.UnsafeCreateMetaBucket(tx) - schema.UnsafeUpdateConsistentIndex(tx, initIndex, initTerm) - tx.Unlock() - be.ForceCommit() - be.Close() - - be = backend.NewDefaultBackend(zaptest.NewLogger(t), tmpPath) - defer be.Close() - ci := NewConsistentIndex(be) - ci.SetConsistentIndex(tc.index, tc.term) - tx = be.BatchTx() - func() { - tx.Lock() - defer tx.Unlock() - if tc.panicExpected { - assert.Panics(t, func() { ci.UnsafeSave(tx) }, "Should refuse to decrease cindex") - return - } - ci.UnsafeSave(tx) - }() - if !tc.panicExpected { - assert.Equal(t, tc.index, ci.ConsistentIndex()) - - ci = NewConsistentIndex(be) - assert.Equal(t, tc.index, ci.ConsistentIndex()) - } - }) - } -} - -func TestFakeConsistentIndex(t *testing.T) { - - r := rand.Uint64() - ci := NewFakeConsistentIndex(r) - index := ci.ConsistentIndex() - if index != r { - t.Errorf("expected %d,got %d", r, index) - } - r = rand.Uint64() - ci.SetConsistentIndex(r, 5) - index = ci.ConsistentIndex() - if index != r { - t.Errorf("expected %d,got %d", r, index) - } - -} diff --git a/server/etcdserver/cindex/doc.go b/server/etcdserver/cindex/doc.go deleted file mode 100644 index 7d3e4b774e5..00000000000 --- a/server/etcdserver/cindex/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cindex provides an interface and implementation for getting/saving consistentIndex. -package cindex diff --git a/server/etcdserver/cluster_util.go b/server/etcdserver/cluster_util.go deleted file mode 100644 index dd797baf0e0..00000000000 --- a/server/etcdserver/cluster_util.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "sort" - "strconv" - "strings" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" -) - -// isMemberBootstrapped tries to check if the given member has been bootstrapped -// in the given cluster. 
-func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { - rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt) - if err != nil { - return false - } - id := cl.MemberByName(member).ID - m := rcl.Member(id) - if m == nil { - return false - } - if len(m.ClientURLs) > 0 { - return true - } - return false -} - -// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and -// attempts to construct a Cluster by accessing the members endpoint on one of -// these URLs. The first URL to provide a response is used. If no URLs provide -// a response, or a Cluster cannot be successfully created from a received -// response, an error is returned. -// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, -// 10 second is enough for building connection and finishing request. -func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { - return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt) -} - -// If logerr is true, it prints out more error messages. 
-func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { - if lg == nil { - lg = zap.NewNop() - } - cc := &http.Client{ - Transport: rt, - Timeout: timeout, - } - for _, u := range urls { - addr := u + "/members" - resp, err := cc.Get(addr) - if err != nil { - if logerr { - lg.Warn("failed to get cluster response", zap.String("address", addr), zap.Error(err)) - } - continue - } - b, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - if logerr { - lg.Warn("failed to read body of cluster response", zap.String("address", addr), zap.Error(err)) - } - continue - } - var membs []*membership.Member - if err = json.Unmarshal(b, &membs); err != nil { - if logerr { - lg.Warn("failed to unmarshal cluster response", zap.String("address", addr), zap.Error(err)) - } - continue - } - id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) - if err != nil { - if logerr { - lg.Warn( - "failed to parse cluster ID", - zap.String("address", addr), - zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")), - zap.Error(err), - ) - } - continue - } - - // check the length of membership members - // if the membership members are present then prepare and return raft cluster - // if membership members are not present then the raft cluster formed will be - // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error - if len(membs) > 0 { - return membership.NewClusterFromMembers(lg, id, membs), nil - } - return nil, fmt.Errorf("failed to get raft cluster member(s) from the given URLs") - } - return nil, fmt.Errorf("could not retrieve cluster information from the given URLs") -} - -// getRemotePeerURLs returns peer urls of remote members in the cluster. The -// returned list is sorted in ascending lexicographical order. 
-func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { - us := make([]string, 0) - for _, m := range cl.Members() { - if m.Name == local { - continue - } - us = append(us, m.PeerURLs...) - } - sort.Strings(us) - return us -} - -// getMembersVersions returns the versions of the members in the given cluster. -// The key of the returned map is the member's ID. The value of the returned map -// is the semver versions string, including server and cluster. -// If it fails to get the version of a member, the key will be nil. -func getMembersVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) map[string]*version.Versions { - members := cl.Members() - vers := make(map[string]*version.Versions) - for _, m := range members { - if m.ID == local { - cv := "not_decided" - if cl.Version() != nil { - cv = cl.Version().String() - } - vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} - continue - } - ver, err := getVersion(lg, m, rt, timeout) - if err != nil { - lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err)) - vers[m.ID.String()] = nil - } else { - vers[m.ID.String()] = ver - } - } - return vers -} - -// allowedVersionRange decides the available version range of the cluster that local server can join in; -// if the downgrade enabled status is true, the version window is [oneMinorHigher, oneMinorHigher] -// if the downgrade is not enabled, the version window is [MinClusterVersion, localVersion] -func allowedVersionRange(downgradeEnabled bool) (minV *semver.Version, maxV *semver.Version) { - minV = semver.Must(semver.NewVersion(version.MinClusterVersion)) - maxV = semver.Must(semver.NewVersion(version.Version)) - maxV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor} - - if downgradeEnabled { - // Todo: handle the case that downgrading from higher major version(e.g. 
downgrade from v4.0 to v3.x) - maxV.Minor = maxV.Minor + 1 - minV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor} - } - return minV, maxV -} - -// isCompatibleWithCluster return true if the local member has a compatible version with -// the current running cluster. -// The version is considered as compatible when at least one of the other members in the cluster has a -// cluster version in the range of [MinV, MaxV] and no known members has a cluster version -// out of the range. -// We set this rule since when the local member joins, another member might be offline. -func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool { - vers := getMembersVersions(lg, cl, local, rt, timeout) - minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt, timeout)) - return isCompatibleWithVers(lg, vers, local, minV, maxV) -} - -func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { - var ok bool - for id, v := range vers { - // ignore comparison with local version - if id == local.String() { - continue - } - if v == nil { - continue - } - clusterv, err := semver.NewVersion(v.Cluster) - if err != nil { - lg.Warn( - "failed to parse cluster version of remote member", - zap.String("remote-member-id", id), - zap.String("remote-member-cluster-version", v.Cluster), - zap.Error(err), - ) - continue - } - if clusterv.LessThan(*minV) { - lg.Warn( - "cluster version of remote member is not compatible; too low", - zap.String("remote-member-id", id), - zap.String("remote-member-cluster-version", clusterv.String()), - zap.String("minimum-cluster-version-supported", minV.String()), - ) - return false - } - if maxV.LessThan(*clusterv) { - lg.Warn( - "cluster version of remote member is not compatible; too high", - zap.String("remote-member-id", id), - zap.String("remote-member-cluster-version", 
clusterv.String()), - zap.String("maximum-cluster-version-supported", maxV.String()), - ) - return false - } - ok = true - } - return ok -} - -// getVersion returns the Versions of the given member via its -// peerURLs. Returns the last error if it fails to get the version. -func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (*version.Versions, error) { - cc := &http.Client{ - Transport: rt, - Timeout: timeout, - } - var ( - err error - resp *http.Response - ) - - for _, u := range m.PeerURLs { - addr := u + "/version" - resp, err = cc.Get(addr) - if err != nil { - lg.Warn( - "failed to reach the peer URL", - zap.String("address", addr), - zap.String("remote-member-id", m.ID.String()), - zap.Error(err), - ) - continue - } - var b []byte - b, err = io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - lg.Warn( - "failed to read body of response", - zap.String("address", addr), - zap.String("remote-member-id", m.ID.String()), - zap.Error(err), - ) - continue - } - var vers version.Versions - if err = json.Unmarshal(b, &vers); err != nil { - lg.Warn( - "failed to unmarshal response", - zap.String("address", addr), - zap.String("remote-member-id", m.ID.String()), - zap.Error(err), - ) - continue - } - return &vers, nil - } - return nil, err -} - -func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.RoundTripper) ([]*membership.Member, error) { - cc := &http.Client{Transport: peerRt} - // TODO: refactor member http handler code - // cannot import etcdhttp, so manually construct url - requestUrl := url + "/members/promote/" + fmt.Sprintf("%d", id) - req, err := http.NewRequest("POST", requestUrl, nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - resp, err := cc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode == http.StatusRequestTimeout { - 
return nil, errors.ErrTimeout - } - if resp.StatusCode == http.StatusPreconditionFailed { - // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code - if strings.Contains(string(b), errors.ErrLearnerNotReady.Error()) { - return nil, errors.ErrLearnerNotReady - } - if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) { - return nil, membership.ErrMemberNotLearner - } - return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) - } - if resp.StatusCode == http.StatusNotFound { - return nil, membership.ErrIDNotFound - } - - if resp.StatusCode != http.StatusOK { // all other types of errors - return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) - } - - var membs []*membership.Member - if err := json.Unmarshal(b, &membs); err != nil { - return nil, err - } - return membs, nil -} - -// getDowngradeEnabledFromRemotePeers will get the downgrade enabled status of the cluster. -func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool { - members := cl.Members() - - for _, m := range members { - if m.ID == local { - continue - } - enable, err := getDowngradeEnabled(lg, m, rt, timeout) - if err != nil { - lg.Warn("failed to get downgrade enabled status", zap.String("remote-member-id", m.ID.String()), zap.Error(err)) - } else { - // Since the "/downgrade/enabled" serves linearized data, - // this function can return once it gets a non-error response from the endpoint. - return enable - } - } - return false -} - -// getDowngradeEnabled returns the downgrade enabled status of the given member -// via its peerURLs. Returns the last error if it fails to get it. 
-func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (bool, error) { - cc := &http.Client{ - Transport: rt, - Timeout: timeout, - } - var ( - err error - resp *http.Response - ) - - for _, u := range m.PeerURLs { - addr := u + DowngradeEnabledPath - resp, err = cc.Get(addr) - if err != nil { - lg.Warn( - "failed to reach the peer URL", - zap.String("address", addr), - zap.String("remote-member-id", m.ID.String()), - zap.Error(err), - ) - continue - } - var b []byte - b, err = io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - lg.Warn( - "failed to read body of response", - zap.String("address", addr), - zap.String("remote-member-id", m.ID.String()), - zap.Error(err), - ) - continue - } - var enable bool - if enable, err = strconv.ParseBool(string(b)); err != nil { - lg.Warn( - "failed to convert response", - zap.String("address", addr), - zap.String("remote-member-id", m.ID.String()), - zap.Error(err), - ) - continue - } - return enable, nil - } - return false, err -} - -func convertToClusterVersion(v string) (*semver.Version, error) { - ver, err := semver.NewVersion(v) - if err != nil { - // allow input version format Major.Minor - ver, err = semver.NewVersion(v + ".0") - if err != nil { - return nil, errors.ErrWrongDowngradeVersionFormat - } - } - // cluster version only keeps major.minor, remove patch version - ver = &semver.Version{Major: ver.Major, Minor: ver.Minor} - return ver, nil -} diff --git a/server/etcdserver/cluster_util_test.go b/server/etcdserver/cluster_util_test.go deleted file mode 100644 index d3f65364465..00000000000 --- a/server/etcdserver/cluster_util_test.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "testing" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/types" -) - -func TestIsCompatibleWithVers(t *testing.T) { - tests := []struct { - vers map[string]*version.Versions - local types.ID - minV, maxV *semver.Version - wok bool - }{ - // too low - { - map[string]*version.Versions{ - "a": {Server: "2.0.0", Cluster: "not_decided"}, - "b": {Server: "2.1.0", Cluster: "2.1.0"}, - "c": {Server: "2.1.0", Cluster: "2.1.0"}, - }, - 0xa, - semver.Must(semver.NewVersion("2.0.0")), semver.Must(semver.NewVersion("2.0.0")), - false, - }, - { - map[string]*version.Versions{ - "a": {Server: "2.1.0", Cluster: "not_decided"}, - "b": {Server: "2.1.0", Cluster: "2.1.0"}, - "c": {Server: "2.1.0", Cluster: "2.1.0"}, - }, - 0xa, - semver.Must(semver.NewVersion("2.0.0")), semver.Must(semver.NewVersion("2.1.0")), - true, - }, - // too high - { - map[string]*version.Versions{ - "a": {Server: "2.2.0", Cluster: "not_decided"}, - "b": {Server: "2.0.0", Cluster: "2.0.0"}, - "c": {Server: "2.0.0", Cluster: "2.0.0"}, - }, - 0xa, - semver.Must(semver.NewVersion("2.1.0")), semver.Must(semver.NewVersion("2.2.0")), - false, - }, - // cannot get b's version, expect ok - { - map[string]*version.Versions{ - "a": {Server: "2.1.0", Cluster: "not_decided"}, - "b": nil, - "c": {Server: "2.1.0", Cluster: "2.1.0"}, - }, - 0xa, - semver.Must(semver.NewVersion("2.0.0")), semver.Must(semver.NewVersion("2.1.0")), - true, - }, - // cannot get b and c's version, expect 
not ok - { - map[string]*version.Versions{ - "a": {Server: "2.1.0", Cluster: "not_decided"}, - "b": nil, - "c": nil, - }, - 0xa, - semver.Must(semver.NewVersion("2.0.0")), semver.Must(semver.NewVersion("2.1.0")), - false, - }, - } - - for i, tt := range tests { - ok := isCompatibleWithVers(zaptest.NewLogger(t), tt.vers, tt.local, tt.minV, tt.maxV) - if ok != tt.wok { - t.Errorf("#%d: ok = %+v, want %+v", i, ok, tt.wok) - } - } -} - -func TestConvertToClusterVersion(t *testing.T) { - tests := []struct { - name string - inputVerStr string - expectedVer string - hasError bool - }{ - { - "Succeeded: Major.Minor.Patch", - "3.4.2", - "3.4.0", - false, - }, - { - "Succeeded: Major.Minor", - "3.4", - "3.4.0", - false, - }, - { - "Failed: wrong version format", - "3*.9", - "", - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ver, err := convertToClusterVersion(tt.inputVerStr) - hasError := err != nil - if hasError != tt.hasError { - t.Errorf("Expected error status is %v; Got %v", tt.hasError, err) - } - if tt.hasError { - return - } - if ver == nil || tt.expectedVer != ver.String() { - t.Errorf("Expected output cluster version is %v; Got %v", tt.expectedVer, ver) - } - }) - } -} - -func TestDecideAllowedVersionRange(t *testing.T) { - minClusterV := semver.Must(semver.NewVersion(version.MinClusterVersion)) - localV := semver.Must(semver.NewVersion(version.Version)) - localV = &semver.Version{Major: localV.Major, Minor: localV.Minor} - - tests := []struct { - name string - downgradeEnabled bool - expectedMinV *semver.Version - expectedMaxV *semver.Version - }{ - { - "When cluster enables downgrade", - true, - &semver.Version{Major: localV.Major, Minor: localV.Minor + 1}, - &semver.Version{Major: localV.Major, Minor: localV.Minor + 1}, - }, - { - "When cluster disables downgrade", - false, - minClusterV, - localV, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - minV, maxV := 
allowedVersionRange(tt.downgradeEnabled) - if !minV.Equal(*tt.expectedMinV) { - t.Errorf("Expected minV is %v; Got %v", tt.expectedMinV.String(), minV.String()) - } - - if !maxV.Equal(*tt.expectedMaxV) { - t.Errorf("Expected maxV is %v; Got %v", tt.expectedMaxV.String(), maxV.String()) - } - }) - } -} diff --git a/server/etcdserver/corrupt.go b/server/etcdserver/corrupt.go deleted file mode 100644 index 20e19fbb706..00000000000 --- a/server/etcdserver/corrupt.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "sort" - "strings" - "sync" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/storage/mvcc" - - "go.uber.org/zap" -) - -type CorruptionChecker interface { - InitialCheck() error - PeriodicCheck() error - CompactHashCheck() -} - -type corruptionChecker struct { - lg *zap.Logger - - hasher Hasher - - mux sync.RWMutex - latestRevisionChecked int64 -} - -type Hasher interface { - mvcc.HashStorage - ReqTimeout() time.Duration - MemberId() types.ID - PeerHashByRev(int64) []*peerHashKVResp - LinearizableReadNotify(context.Context) error - TriggerCorruptAlarm(types.ID) -} - -func newCorruptionChecker(lg *zap.Logger, s *EtcdServer, storage mvcc.HashStorage) *corruptionChecker { - return &corruptionChecker{ - lg: lg, - hasher: hasherAdapter{s, storage}, - } -} - -type hasherAdapter struct { - *EtcdServer - mvcc.HashStorage -} - -func (h hasherAdapter) ReqTimeout() time.Duration { - return h.EtcdServer.Cfg.ReqTimeout() -} - -func (h hasherAdapter) PeerHashByRev(rev int64) []*peerHashKVResp { - return h.EtcdServer.getPeerHashKVs(rev) -} - -func (h hasherAdapter) TriggerCorruptAlarm(memberID types.ID) { - h.EtcdServer.triggerCorruptAlarm(memberID) -} - -// InitialCheck compares initial hash values with its peers -// before serving any peer/client traffic. Only mismatch when hashes -// are different at requested revision, with same compact revision. 
-func (cm *corruptionChecker) InitialCheck() error { - - cm.lg.Info( - "starting initial corruption check", - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Duration("timeout", cm.hasher.ReqTimeout()), - ) - - h, _, err := cm.hasher.HashByRev(0) - if err != nil { - return fmt.Errorf("%s failed to fetch hash (%v)", cm.hasher.MemberId(), err) - } - peers := cm.hasher.PeerHashByRev(h.Revision) - mismatch := 0 - for _, p := range peers { - if p.resp != nil { - peerID := types.ID(p.resp.Header.MemberId) - fields := []zap.Field{ - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Int64("local-member-revision", h.Revision), - zap.Int64("local-member-compact-revision", h.CompactRevision), - zap.Uint32("local-member-hash", h.Hash), - zap.String("remote-peer-id", peerID.String()), - zap.Strings("remote-peer-endpoints", p.eps), - zap.Int64("remote-peer-revision", p.resp.Header.Revision), - zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision), - zap.Uint32("remote-peer-hash", p.resp.Hash), - } - - if h.Hash != p.resp.Hash { - if h.CompactRevision == p.resp.CompactRevision { - cm.lg.Warn("found different hash values from remote peer", fields...) - mismatch++ - } else { - cm.lg.Warn("found different compact revision values from remote peer", fields...) 
- } - } - - continue - } - - if p.err != nil { - switch p.err { - case rpctypes.ErrFutureRev: - cm.lg.Warn( - "cannot fetch hash from slow remote peer", - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Int64("local-member-revision", h.Revision), - zap.Int64("local-member-compact-revision", h.CompactRevision), - zap.Uint32("local-member-hash", h.Hash), - zap.String("remote-peer-id", p.id.String()), - zap.Strings("remote-peer-endpoints", p.eps), - zap.Error(err), - ) - case rpctypes.ErrCompacted: - cm.lg.Warn( - "cannot fetch hash from remote peer; local member is behind", - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Int64("local-member-revision", h.Revision), - zap.Int64("local-member-compact-revision", h.CompactRevision), - zap.Uint32("local-member-hash", h.Hash), - zap.String("remote-peer-id", p.id.String()), - zap.Strings("remote-peer-endpoints", p.eps), - zap.Error(err), - ) - } - } - } - if mismatch > 0 { - return fmt.Errorf("%s found data inconsistency with peers", cm.hasher.MemberId()) - } - - cm.lg.Info( - "initial corruption checking passed; no corruption", - zap.String("local-member-id", cm.hasher.MemberId().String()), - ) - return nil -} - -func (cm *corruptionChecker) PeriodicCheck() error { - h, _, err := cm.hasher.HashByRev(0) - if err != nil { - return err - } - peers := cm.hasher.PeerHashByRev(h.Revision) - - ctx, cancel := context.WithTimeout(context.Background(), cm.hasher.ReqTimeout()) - err = cm.hasher.LinearizableReadNotify(ctx) - cancel() - if err != nil { - return err - } - - h2, rev2, err := cm.hasher.HashByRev(0) - if err != nil { - return err - } - - alarmed := false - mismatch := func(id types.ID) { - if alarmed { - return - } - alarmed = true - cm.hasher.TriggerCorruptAlarm(id) - } - - if h2.Hash != h.Hash && h2.Revision == h.Revision && h.CompactRevision == h2.CompactRevision { - cm.lg.Warn( - "found hash mismatch", - zap.Int64("revision-1", h.Revision), - zap.Int64("compact-revision-1", 
h.CompactRevision), - zap.Uint32("hash-1", h.Hash), - zap.Int64("revision-2", h2.Revision), - zap.Int64("compact-revision-2", h2.CompactRevision), - zap.Uint32("hash-2", h2.Hash), - ) - mismatch(cm.hasher.MemberId()) - } - - checkedCount := 0 - for _, p := range peers { - if p.resp == nil { - continue - } - checkedCount++ - - // leader expects follower's latest revision less than or equal to leader's - if p.resp.Header.Revision > rev2 { - cm.lg.Warn( - "revision from follower must be less than or equal to leader's", - zap.Int64("leader-revision", rev2), - zap.Int64("follower-revision", p.resp.Header.Revision), - zap.String("follower-peer-id", p.id.String()), - ) - mismatch(p.id) - } - - // leader expects follower's latest compact revision less than or equal to leader's - if p.resp.CompactRevision > h2.CompactRevision { - cm.lg.Warn( - "compact revision from follower must be less than or equal to leader's", - zap.Int64("leader-compact-revision", h2.CompactRevision), - zap.Int64("follower-compact-revision", p.resp.CompactRevision), - zap.String("follower-peer-id", p.id.String()), - ) - mismatch(p.id) - } - - // follower's compact revision is leader's old one, then hashes must match - if p.resp.CompactRevision == h.CompactRevision && p.resp.Hash != h.Hash { - cm.lg.Warn( - "same compact revision then hashes must match", - zap.Int64("leader-compact-revision", h2.CompactRevision), - zap.Uint32("leader-hash", h.Hash), - zap.Int64("follower-compact-revision", p.resp.CompactRevision), - zap.Uint32("follower-hash", p.resp.Hash), - zap.String("follower-peer-id", p.id.String()), - ) - mismatch(p.id) - } - } - cm.lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount)) - return nil -} - -// CompactHashCheck is based on the fact that 'compactions' are coordinated -// between raft members and performed at the same revision. For each compacted -// revision there is KV store hash computed and saved for some time. 
-// -// This method communicates with peers to find a recent common revision across -// members, and raises alarm if 2 or more members at the same compact revision -// have different hashes. -// -// We might miss opportunity to perform the check if the compaction is still -// ongoing on one of the members or it was unresponsive. In such situation the -// method still passes without raising alarm. -func (cm *corruptionChecker) CompactHashCheck() { - cm.lg.Info("starting compact hash check", - zap.String("local-member-id", cm.hasher.MemberId().String()), - zap.Duration("timeout", cm.hasher.ReqTimeout()), - ) - hashes := cm.uncheckedRevisions() - // Assume that revisions are ordered from largest to smallest - for _, hash := range hashes { - peers := cm.hasher.PeerHashByRev(hash.Revision) - if len(peers) == 0 { - continue - } - if cm.checkPeerHashes(hash, peers) { - return - } - } - cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", len(hashes))) - return -} - -// check peers hash and raise alarms if detected corruption. -// return a bool indicate whether to check next hash. 
-// -// true: successfully checked hash on whole cluster or raised alarms, so no need to check next hash -// false: skipped some members, so need to check next hash -func (cm *corruptionChecker) checkPeerHashes(leaderHash mvcc.KeyValueHash, peers []*peerHashKVResp) bool { - leaderId := cm.hasher.MemberId() - hash2members := map[uint32]types.IDSlice{leaderHash.Hash: {leaderId}} - - peersChecked := 0 - // group all peers by hash - for _, peer := range peers { - skipped := false - reason := "" - - if peer.resp == nil { - skipped = true - reason = "no response" - } else if peer.resp.CompactRevision != leaderHash.CompactRevision { - skipped = true - reason = fmt.Sprintf("the peer's CompactRevision %d doesn't match leader's CompactRevision %d", - peer.resp.CompactRevision, leaderHash.CompactRevision) - } - if skipped { - cm.lg.Warn("Skipped peer's hash", zap.Int("number-of-peers", len(peers)), - zap.String("leader-id", leaderId.String()), - zap.String("peer-id", peer.id.String()), - zap.String("reason", reason)) - continue - } - - peersChecked++ - if ids, ok := hash2members[peer.resp.Hash]; !ok { - hash2members[peer.resp.Hash] = []types.ID{peer.id} - } else { - ids = append(ids, peer.id) - hash2members[peer.resp.Hash] = ids - } - } - - // All members have the same CompactRevision and Hash. - if len(hash2members) == 1 { - return cm.handleConsistentHash(leaderHash, peersChecked, len(peers)) - } - - // Detected hashes mismatch - // The first step is to figure out the majority with the same hash. - memberCnt := len(peers) + 1 - quorum := memberCnt/2 + 1 - quorumExist := false - for k, v := range hash2members { - if len(v) >= quorum { - quorumExist = true - // remove the majority, and we might raise alarms for the left members. - delete(hash2members, k) - break - } - } - - if !quorumExist { - // If quorum doesn't exist, we don't know which members data are - // corrupted. In such situation, we intentionally set the memberID - // as 0, it means it affects the whole cluster. 
- cm.lg.Error("Detected compaction hash mismatch but cannot identify the corrupted members, so intentionally set the memberID as 0", - zap.String("leader-id", leaderId.String()), - zap.Int64("leader-revision", leaderHash.Revision), - zap.Int64("leader-compact-revision", leaderHash.CompactRevision), - zap.Uint32("leader-hash", leaderHash.Hash), - ) - cm.hasher.TriggerCorruptAlarm(0) - } - - // Raise alarm for the left members if the quorum is present. - // But we should always generate error log for debugging. - for k, v := range hash2members { - if quorumExist { - for _, pid := range v { - cm.hasher.TriggerCorruptAlarm(pid) - } - } - - cm.lg.Error("Detected compaction hash mismatch", - zap.String("leader-id", leaderId.String()), - zap.Int64("leader-revision", leaderHash.Revision), - zap.Int64("leader-compact-revision", leaderHash.CompactRevision), - zap.Uint32("leader-hash", leaderHash.Hash), - zap.Uint32("peer-hash", k), - zap.String("peer-ids", v.String()), - zap.Bool("quorum-exist", quorumExist), - ) - } - - return true -} - -func (cm *corruptionChecker) handleConsistentHash(hash mvcc.KeyValueHash, peersChecked, peerCnt int) bool { - if peersChecked == peerCnt { - cm.lg.Info("successfully checked hash on whole cluster", - zap.Int("number-of-peers-checked", peersChecked), - zap.Int64("revision", hash.Revision), - zap.Int64("compactRevision", hash.CompactRevision), - ) - cm.mux.Lock() - if hash.Revision > cm.latestRevisionChecked { - cm.latestRevisionChecked = hash.Revision - } - cm.mux.Unlock() - return true - } - cm.lg.Warn("skipped revision in compaction hash check; was not able to check all peers", - zap.Int("number-of-peers-checked", peersChecked), - zap.Int("number-of-peers", peerCnt), - zap.Int64("revision", hash.Revision), - zap.Int64("compactRevision", hash.CompactRevision), - ) - // The only case which needs to check next hash - return false -} - -func (cm *corruptionChecker) uncheckedRevisions() []mvcc.KeyValueHash { - cm.mux.RLock() - 
lastRevisionChecked := cm.latestRevisionChecked - cm.mux.RUnlock() - - hashes := cm.hasher.Hashes() - // Sort in descending order - sort.Slice(hashes, func(i, j int) bool { - return hashes[i].Revision > hashes[j].Revision - }) - for i, hash := range hashes { - if hash.Revision <= lastRevisionChecked { - return hashes[:i] - } - } - return hashes -} - -func (s *EtcdServer) triggerCorruptAlarm(id types.ID) { - a := &pb.AlarmRequest{ - MemberID: uint64(id), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_CORRUPT, - } - s.GoAttach(func() { - s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) - }) -} - -type peerInfo struct { - id types.ID - eps []string -} - -type peerHashKVResp struct { - peerInfo - resp *pb.HashKVResponse - err error -} - -func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { - // TODO: handle the case when "s.cluster.Members" have not - // been populated (e.g. no snapshot to load from disk) - members := s.cluster.Members() - peers := make([]peerInfo, 0, len(members)) - for _, m := range members { - if m.ID == s.MemberId() { - continue - } - peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs}) - } - - lg := s.Logger() - - cc := &http.Client{Transport: s.peerRt} - var resps []*peerHashKVResp - for _, p := range peers { - if len(p.eps) == 0 { - continue - } - - respsLen := len(resps) - var lastErr error - for _, ep := range p.eps { - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - resp, lastErr := HashByRev(ctx, cc, ep, rev) - cancel() - if lastErr == nil { - resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil}) - break - } - lg.Warn( - "failed hash kv request", - zap.String("local-member-id", s.MemberId().String()), - zap.Int64("requested-revision", rev), - zap.String("remote-peer-endpoint", ep), - zap.Error(lastErr), - ) - } - - // failed to get hashKV from all endpoints of this peer - if respsLen == len(resps) { - resps = append(resps, &peerHashKVResp{peerInfo: p, 
resp: nil, err: lastErr}) - } - } - return resps -} - -const PeerHashKVPath = "/members/hashkv" - -type hashKVHandler struct { - lg *zap.Logger - server *EtcdServer -} - -func (s *EtcdServer) HashKVHandler() http.Handler { - return &hashKVHandler{lg: s.Logger(), server: s} -} - -func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - w.Header().Set("Allow", http.MethodGet) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - if r.URL.Path != PeerHashKVPath { - http.Error(w, "bad path", http.StatusBadRequest) - return - } - - defer r.Body.Close() - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, "error reading body", http.StatusBadRequest) - return - } - - req := &pb.HashKVRequest{} - if err := json.Unmarshal(b, req); err != nil { - h.lg.Warn("failed to unmarshal request", zap.Error(err)) - http.Error(w, "error unmarshalling request", http.StatusBadRequest) - return - } - hash, rev, err := h.server.KV().HashStorage().HashByRev(req.Revision) - if err != nil { - h.lg.Warn( - "failed to get hashKV", - zap.Int64("requested-revision", req.Revision), - zap.Error(err), - ) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - resp := &pb.HashKVResponse{ - Header: &pb.ResponseHeader{Revision: rev}, - Hash: hash.Hash, - CompactRevision: hash.CompactRevision, - HashRevision: hash.Revision, - } - respBytes, err := json.Marshal(resp) - if err != nil { - h.lg.Warn("failed to marshal hashKV response", zap.Error(err)) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.server.Cluster().ID().String()) - w.Header().Set("Content-Type", "application/json") - w.Write(respBytes) -} - -// HashByRev fetch hash of kv store at the given rev via http call to the given url -func HashByRev(ctx context.Context, cc *http.Client, url string, rev int64) (*pb.HashKVResponse, error) { - hashReq := &pb.HashKVRequest{Revision: 
rev} - hashReqBytes, err := json.Marshal(hashReq) - if err != nil { - return nil, err - } - requestUrl := url + PeerHashKVPath - req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes)) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - req.Header.Set("Content-Type", "application/json") - req.Cancel = ctx.Done() - - resp, err := cc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode == http.StatusBadRequest { - if strings.Contains(string(b), mvcc.ErrCompacted.Error()) { - return nil, rpctypes.ErrCompacted - } - if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) { - return nil, rpctypes.ErrFutureRev - } - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("unknown error: %s", string(b)) - } - - hashResp := &pb.HashKVResponse{} - if err := json.Unmarshal(b, hashResp); err != nil { - return nil, err - } - return hashResp, nil -} diff --git a/server/etcdserver/corrupt_test.go b/server/etcdserver/corrupt_test.go deleted file mode 100644 index 3fff8a533f3..00000000000 --- a/server/etcdserver/corrupt_test.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/storage/mvcc" -) - -func TestInitialCheck(t *testing.T) { - tcs := []struct { - name string - hasher fakeHasher - expectError bool - expectCorrupt bool - expectActions []string - }{ - { - name: "No peers", - hasher: fakeHasher{ - hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Revision: 10}}}, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(10)", "MemberId()"}, - }, - { - name: "Error getting hash", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{err: fmt.Errorf("error getting hash")}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "MemberId()"}, - expectError: true, - }, - { - name: "Peer with empty response", - hasher: fakeHasher{peerHashes: []*peerHashKVResp{{}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberId()"}, - }, - { - name: "Peer returned ErrFutureRev", - hasher: fakeHasher{peerHashes: []*peerHashKVResp{{err: rpctypes.ErrFutureRev}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberId()", "MemberId()"}, - }, - { - name: "Peer returned ErrCompacted", - hasher: fakeHasher{peerHashes: []*peerHashKVResp{{err: rpctypes.ErrCompacted}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberId()", "MemberId()"}, - }, - { - name: "Peer returned other error", - hasher: fakeHasher{peerHashes: []*peerHashKVResp{{err: rpctypes.ErrCorrupt}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberId()"}, - }, - { - name: "Peer returned same hash", - hasher: fakeHasher{hashByRevResponses: 
[]hashByRev{{hash: mvcc.KeyValueHash{Hash: 1}}}, peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{}, Hash: 1}}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberId()", "MemberId()"}, - }, - { - name: "Peer returned different hash with same compaction rev", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1}}}, peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{}, Hash: 2, CompactRevision: 1}}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberId()", "MemberId()"}, - expectError: true, - }, - { - name: "Peer returned different hash and compaction rev", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1}}}, peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{}, Hash: 2, CompactRevision: 2}}}}, - expectActions: []string{"MemberId()", "ReqTimeout()", "HashByRev(0)", "PeerHashByRev(0)", "MemberId()", "MemberId()"}, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - monitor := corruptionChecker{ - lg: zaptest.NewLogger(t), - hasher: &tc.hasher, - } - err := monitor.InitialCheck() - if gotError := err != nil; gotError != tc.expectError { - t.Errorf("Unexpected error, got: %v, expected?: %v", err, tc.expectError) - } - if tc.hasher.alarmTriggered != tc.expectCorrupt { - t.Errorf("Unexpected corrupt triggered, got: %v, expected?: %v", tc.hasher.alarmTriggered, tc.expectCorrupt) - } - assert.Equal(t, tc.expectActions, tc.hasher.actions) - }) - } -} - -func TestPeriodicCheck(t *testing.T) { - tcs := []struct { - name string - hasher fakeHasher - expectError bool - expectCorrupt bool - expectActions []string - }{ - { - name: "Same local hash and no peers", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Revision: 10}}, 
{hash: mvcc.KeyValueHash{Revision: 10}}}}, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(10)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"}, - }, - { - name: "Error getting hash first time", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{err: fmt.Errorf("error getting hash")}}}, - expectActions: []string{"HashByRev(0)"}, - expectError: true, - }, - { - name: "Error getting hash second time", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Revision: 11}}, {err: fmt.Errorf("error getting hash")}}}, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(11)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"}, - expectError: true, - }, - { - name: "Error linearizableReadNotify", - hasher: fakeHasher{linearizableReadNotify: fmt.Errorf("error getting linearizableReadNotify")}, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()"}, - expectError: true, - }, - { - name: "Different local hash and revision", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2}, revision: 2}}}, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"}, - }, - { - name: "Different local hash and compaction revision", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1}}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 2}}}}, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"}, - }, - { - name: "Different local hash and same revisions", - hasher: fakeHasher{hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 1, Revision: 1}, revision: 1}}}, - expectActions: 
[]string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "MemberId()", "TriggerCorruptAlarm(1)"}, - expectCorrupt: true, - }, - { - name: "Peer with nil response", - hasher: fakeHasher{ - peerHashes: []*peerHashKVResp{{}}, - }, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"}, - }, - { - name: "Peer with newer revision", - hasher: fakeHasher{ - peerHashes: []*peerHashKVResp{{peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 1}}}}, - }, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(42)"}, - expectCorrupt: true, - }, - { - name: "Peer with newer compact revision", - hasher: fakeHasher{ - peerHashes: []*peerHashKVResp{{peerInfo: peerInfo{id: 88}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 10}, CompactRevision: 2}}}, - }, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(88)"}, - expectCorrupt: true, - }, - { - name: "Peer with same hash and compact revision", - hasher: fakeHasher{ - hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 2, Revision: 2}, revision: 2}}, - peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 1}, CompactRevision: 1, Hash: 1}}}, - }, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)"}, - }, - { - name: "Peer with different hash and same compact revision as first local", - hasher: fakeHasher{ - hashByRevResponses: []hashByRev{{hash: mvcc.KeyValueHash{Hash: 1, CompactRevision: 1, Revision: 1}, revision: 1}, {hash: mvcc.KeyValueHash{Hash: 2, CompactRevision: 2}, 
revision: 2}}, - peerHashes: []*peerHashKVResp{{peerInfo: peerInfo{id: 666}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 1}, CompactRevision: 1, Hash: 2}}}, - }, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(1)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(666)"}, - expectCorrupt: true, - }, - { - name: "Multiple corrupted peers trigger one alarm", - hasher: fakeHasher{ - peerHashes: []*peerHashKVResp{ - {peerInfo: peerInfo{id: 88}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 10}, CompactRevision: 2}}, - {peerInfo: peerInfo{id: 89}, resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: 10}, CompactRevision: 2}}, - }, - }, - expectActions: []string{"HashByRev(0)", "PeerHashByRev(0)", "ReqTimeout()", "LinearizableReadNotify()", "HashByRev(0)", "TriggerCorruptAlarm(88)"}, - expectCorrupt: true, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - monitor := corruptionChecker{ - lg: zaptest.NewLogger(t), - hasher: &tc.hasher, - } - err := monitor.PeriodicCheck() - if gotError := err != nil; gotError != tc.expectError { - t.Errorf("Unexpected error, got: %v, expected?: %v", err, tc.expectError) - } - if tc.hasher.alarmTriggered != tc.expectCorrupt { - t.Errorf("Unexpected corrupt triggered, got: %v, expected?: %v", tc.hasher.alarmTriggered, tc.expectCorrupt) - } - assert.Equal(t, tc.expectActions, tc.hasher.actions) - }) - } -} - -func TestCompactHashCheck(t *testing.T) { - tcs := []struct { - name string - hasher fakeHasher - lastRevisionChecked int64 - - expectError bool - expectCorrupt bool - expectActions []string - expectLastRevisionChecked int64 - }{ - { - name: "No hashes", - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()"}, - }, - { - name: "No peers, check new checked from largest to smallest", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1}, {Revision: 2}, {Revision: 3}, {Revision: 4}}, - }, - 
lastRevisionChecked: 2, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(4)", "PeerHashByRev(3)"}, - expectLastRevisionChecked: 2, - }, - { - name: "Peer error", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1}, {Revision: 2}}, - peerHashes: []*peerHashKVResp{{err: fmt.Errorf("failed getting hash")}}, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "PeerHashByRev(1)", "MemberId()"}, - }, - { - name: "Peer returned different compaction revision is skipped", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1}, {Revision: 2, CompactRevision: 2}}, - peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{CompactRevision: 3}}}, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "PeerHashByRev(1)", "MemberId()"}, - }, - { - name: "Etcd can identify two corrupted members in 5 member cluster", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 7}}, - {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 7}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "TriggerCorruptAlarm(44)", "TriggerCorruptAlarm(45)"}, - expectCorrupt: true, - }, - { - name: "Etcd checks next hash when one member is unresponsive in 3 member cluster", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {err: 
fmt.Errorf("failed getting hash")}, - {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "PeerHashByRev(1)", "MemberId()"}, - expectCorrupt: false, - }, - { - name: "Etcd can identify single corrupted member in 3 member cluster", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "TriggerCorruptAlarm(43)"}, - expectCorrupt: true, - }, - { - name: "Etcd can identify single corrupted member in 5 member cluster", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "TriggerCorruptAlarm(44)"}, - expectCorrupt: true, - }, - { - name: "Etcd triggers corrupted alarm on whole cluster if in 3 member cluster one member is down and one member corrupted", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {err: fmt.Errorf("failed getting 
hash")}, - {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "TriggerCorruptAlarm(0)"}, - expectCorrupt: true, - }, - { - name: "Etcd triggers corrupted alarm on whole cluster if no quorum in 5 member cluster", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - {peerInfo: peerInfo{id: 46}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 4}}, - {peerInfo: peerInfo{id: 47}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "TriggerCorruptAlarm(0)"}, - expectCorrupt: true, - }, - { - name: "Etcd can identify corrupted member in 5 member cluster even if one member is down", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - {err: fmt.Errorf("failed getting hash")}, - {peerInfo: peerInfo{id: 44}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - {peerInfo: peerInfo{id: 45}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 2}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "TriggerCorruptAlarm(44)"}, - expectCorrupt: true, - }, - { - name: "Etcd can identify 
that leader is corrupted", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 2}, {Revision: 2, CompactRevision: 1, Hash: 2}}, - peerHashes: []*peerHashKVResp{ - {peerInfo: peerInfo{id: 42}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - {peerInfo: peerInfo{id: 43}, resp: &pb.HashKVResponse{CompactRevision: 1, Hash: 3}}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()", "TriggerCorruptAlarm(1)"}, - expectCorrupt: true, - }, - { - name: "Peer returned same hash bumps last revision checked", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}, {Revision: 2, CompactRevision: 1, Hash: 1}}, - peerHashes: []*peerHashKVResp{{resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{MemberId: 42}, CompactRevision: 1, Hash: 1}}}, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(2)", "MemberId()"}, - expectLastRevisionChecked: 2, - }, - { - name: "Only one peer succeeded check", - hasher: fakeHasher{ - hashes: []mvcc.KeyValueHash{{Revision: 1, CompactRevision: 1, Hash: 1}}, - peerHashes: []*peerHashKVResp{ - {resp: &pb.HashKVResponse{Header: &pb.ResponseHeader{MemberId: 42}, CompactRevision: 1, Hash: 1}}, - {err: fmt.Errorf("failed getting hash")}, - }, - }, - expectActions: []string{"MemberId()", "ReqTimeout()", "Hashes()", "PeerHashByRev(1)", "MemberId()"}, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - monitor := corruptionChecker{ - latestRevisionChecked: tc.lastRevisionChecked, - lg: zaptest.NewLogger(t), - hasher: &tc.hasher, - } - monitor.CompactHashCheck() - if tc.hasher.alarmTriggered != tc.expectCorrupt { - t.Errorf("Unexpected corrupt triggered, got: %v, expected?: %v", tc.hasher.alarmTriggered, tc.expectCorrupt) - } - if tc.expectLastRevisionChecked != monitor.latestRevisionChecked { - t.Errorf("Unexpected last revision checked, got: %v, expected?: 
%v", monitor.latestRevisionChecked, tc.expectLastRevisionChecked) - } - assert.Equal(t, tc.expectActions, tc.hasher.actions) - }) - } -} - -type fakeHasher struct { - peerHashes []*peerHashKVResp - hashByRevIndex int - hashByRevResponses []hashByRev - linearizableReadNotify error - hashes []mvcc.KeyValueHash - - alarmTriggered bool - actions []string -} - -type hashByRev struct { - hash mvcc.KeyValueHash - revision int64 - err error -} - -func (f *fakeHasher) Hash() (hash uint32, revision int64, err error) { - panic("not implemented") -} - -func (f *fakeHasher) HashByRev(rev int64) (hash mvcc.KeyValueHash, revision int64, err error) { - f.actions = append(f.actions, fmt.Sprintf("HashByRev(%d)", rev)) - if len(f.hashByRevResponses) == 0 { - return mvcc.KeyValueHash{}, 0, nil - } - hashByRev := f.hashByRevResponses[f.hashByRevIndex] - f.hashByRevIndex++ - return hashByRev.hash, hashByRev.revision, hashByRev.err -} - -func (f *fakeHasher) Store(hash mvcc.KeyValueHash) { - f.actions = append(f.actions, fmt.Sprintf("Store(%v)", hash)) - f.hashes = append(f.hashes, hash) -} - -func (f *fakeHasher) Hashes() []mvcc.KeyValueHash { - f.actions = append(f.actions, "Hashes()") - return f.hashes -} - -func (f *fakeHasher) ReqTimeout() time.Duration { - f.actions = append(f.actions, "ReqTimeout()") - return time.Second -} - -func (f *fakeHasher) MemberId() types.ID { - f.actions = append(f.actions, "MemberId()") - return 1 -} - -func (f *fakeHasher) PeerHashByRev(rev int64) []*peerHashKVResp { - f.actions = append(f.actions, fmt.Sprintf("PeerHashByRev(%d)", rev)) - return f.peerHashes -} - -func (f *fakeHasher) LinearizableReadNotify(ctx context.Context) error { - f.actions = append(f.actions, "LinearizableReadNotify()") - return f.linearizableReadNotify -} - -func (f *fakeHasher) TriggerCorruptAlarm(memberId types.ID) { - f.actions = append(f.actions, fmt.Sprintf("TriggerCorruptAlarm(%d)", memberId)) - f.alarmTriggered = true -} diff --git a/server/etcdserver/doc.go 
b/server/etcdserver/doc.go deleted file mode 100644 index b195d2d167a..00000000000 --- a/server/etcdserver/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package etcdserver defines how etcd servers interact and store their states. -package etcdserver diff --git a/server/etcdserver/errors/errors.go b/server/etcdserver/errors/errors.go deleted file mode 100644 index 8de698a1df3..00000000000 --- a/server/etcdserver/errors/errors.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package errors - -import ( - "errors" - "fmt" -) - -var ( - ErrUnknownMethod = errors.New("etcdserver: unknown method") - ErrStopped = errors.New("etcdserver: server stopped") - ErrCanceled = errors.New("etcdserver: request cancelled") - ErrTimeout = errors.New("etcdserver: request timed out") - ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") - ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") - ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") - ErrTimeoutWaitAppliedIndex = errors.New("etcdserver: request timed out, waiting for the applied index took too long") - ErrLeaderChanged = errors.New("etcdserver: leader changed") - ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") - ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader") - ErrNoLeader = errors.New("etcdserver: no leader") - ErrNotLeader = errors.New("etcdserver: not leader") - ErrRequestTooLarge = errors.New("etcdserver: request is too large") - ErrNoSpace = errors.New("etcdserver: no space") - ErrTooManyRequests = errors.New("etcdserver: too many requests") - ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") - ErrCorrupt = errors.New("etcdserver: corrupt cluster") - ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") - ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") - ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") - ErrKeyNotFound = errors.New("etcdserver: key not found") -) - -type DiscoveryError struct { - Op string - Err error -} - -func (e DiscoveryError) Error() string { - return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) -} diff --git 
a/server/etcdserver/metrics.go b/server/etcdserver/metrics.go deleted file mode 100644 index 954dfafca4e..00000000000 --- a/server/etcdserver/metrics.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - goruntime "runtime" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/pkg/v3/runtime" - - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -var ( - hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "has_leader", - Help: "Whether or not a leader exists. 1 is existence, 0 is not.", - }) - isLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "is_leader", - Help: "Whether or not this member is a leader. 1 if is, 0 otherwise.", - }) - leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "leader_changes_seen_total", - Help: "The number of leader changes seen.", - }) - isLearner = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "is_learner", - Help: "Whether or not this member is a learner. 
1 if is, 0 otherwise.", - }) - learnerPromoteFailed = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "learner_promote_failures", - Help: "The total number of failed learner promotions (likely learner not ready) while this member is leader.", - }, - []string{"Reason"}, - ) - learnerPromoteSucceed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "learner_promote_successes", - Help: "The total number of successful learner promotions while this member is leader.", - }) - heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "heartbeat_send_failures_total", - Help: "The total number of leader heartbeat send failures (likely overloaded from slow disk).", - }) - applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "snapshot_apply_in_progress_total", - Help: "1 if the server is applying the incoming snapshot. 
0 if none.", - }) - proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_committed_total", - Help: "The total number of consensus proposals committed.", - }) - proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_applied_total", - Help: "The total number of consensus proposals applied.", - }) - proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_pending", - Help: "The current number of pending proposals to commit.", - }) - proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_failed_total", - Help: "The total number of failed proposals seen.", - }) - slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "slow_read_indexes_total", - Help: "The total number of pending read indexes not in sync with leader's or timed out read index requests.", - }) - readIndexFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "read_indexes_failed_total", - Help: "The total number of failed read indexes seen.", - }) - leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "server", - Name: "lease_expired_total", - Help: "The total number of expired leases.", - }) - - currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "version", - Help: "Which version is running. 1 for 'server_version' label with current version.", - }, - []string{"server_version"}) - currentGoVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "go_version", - Help: "Which Go version server is running with. 
1 for 'server_go_version' label with current version.", - }, - []string{"server_go_version"}) - serverID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "id", - Help: "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.", - }, - []string{"server_id"}) - - fdUsed = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "os", - Subsystem: "fd", - Name: "used", - Help: "The number of used file descriptors.", - }) - fdLimit = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "os", - Subsystem: "fd", - Name: "limit", - Help: "The file descriptor limit.", - }) -) - -func init() { - prometheus.MustRegister(hasLeader) - prometheus.MustRegister(isLeader) - prometheus.MustRegister(leaderChanges) - prometheus.MustRegister(heartbeatSendFailures) - prometheus.MustRegister(applySnapshotInProgress) - prometheus.MustRegister(proposalsCommitted) - prometheus.MustRegister(proposalsApplied) - prometheus.MustRegister(proposalsPending) - prometheus.MustRegister(proposalsFailed) - prometheus.MustRegister(slowReadIndex) - prometheus.MustRegister(readIndexFailed) - prometheus.MustRegister(leaseExpired) - prometheus.MustRegister(currentVersion) - prometheus.MustRegister(currentGoVersion) - prometheus.MustRegister(serverID) - prometheus.MustRegister(isLearner) - prometheus.MustRegister(learnerPromoteSucceed) - prometheus.MustRegister(learnerPromoteFailed) - prometheus.MustRegister(fdUsed) - prometheus.MustRegister(fdLimit) - - currentVersion.With(prometheus.Labels{ - "server_version": version.Version, - }).Set(1) - currentGoVersion.With(prometheus.Labels{ - "server_go_version": goruntime.Version(), - }).Set(1) -} - -func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) { - // This ticker will check File Descriptor Requirements ,and count all fds in used. - // And recorded some logs when in used >= limit/5*4. Just recorded message. 
- // If fds was more than 10K,It's low performance due to FDUsage() works. - // So need to increase it. - // See https://github.com/etcd-io/etcd/issues/11969 for more detail. - ticker := time.NewTicker(10 * time.Minute) - defer ticker.Stop() - for { - used, err := runtime.FDUsage() - if err != nil { - lg.Warn("failed to get file descriptor usage", zap.Error(err)) - return - } - fdUsed.Set(float64(used)) - limit, err := runtime.FDLimit() - if err != nil { - lg.Warn("failed to get file descriptor limit", zap.Error(err)) - return - } - fdLimit.Set(float64(limit)) - if used >= limit/5*4 { - lg.Warn("80% of file descriptors are used", zap.Uint64("used", used), zap.Uint64("limit", limit)) - } - select { - case <-ticker.C: - case <-done: - return - } - } -} diff --git a/server/etcdserver/raft.go b/server/etcdserver/raft.go deleted file mode 100644 index 6e50b417f6a..00000000000 --- a/server/etcdserver/raft.go +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "expvar" - "fmt" - "log" - "sync" - "time" - - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/pkg/v3/contention" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - serverstorage "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -const ( - // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value). 
- // Assuming the RTT is around 10ms, 1MB max size is large enough. - maxSizePerMsg = 1 * 1024 * 1024 - // Never overflow the rafthttp buffer, which is 4096. - // TODO: a better const? - maxInflightMsgs = 4096 / 8 -) - -var ( - // protects raftStatus - raftStatusMu sync.Mutex - // indirection for expvar func interface - // expvar panics when publishing duplicate name - // expvar does not support remove a registered name - // so only register a func that calls raftStatus - // and change raftStatus as we need. - raftStatus func() raft.Status -) - -func init() { - expvar.Publish("raft.status", expvar.Func(func() interface{} { - raftStatusMu.Lock() - defer raftStatusMu.Unlock() - if raftStatus == nil { - return nil - } - return raftStatus() - })) -} - -// toApply contains entries, snapshot to be applied. Once -// an toApply is consumed, the entries will be persisted to -// to raft storage concurrently; the application must read -// notifyc before assuming the raft messages are stable. -type toApply struct { - entries []raftpb.Entry - snapshot raftpb.Snapshot - // notifyc synchronizes etcd server applies with the raft node - notifyc chan struct{} -} - -type raftNode struct { - lg *zap.Logger - - tickMu *sync.Mutex - raftNodeConfig - - // a chan to send/receive snapshot - msgSnapC chan raftpb.Message - - // a chan to send out apply - applyc chan toApply - - // a chan to send out readState - readStateC chan raft.ReadState - - // utility - ticker *time.Ticker - // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - - stopped chan struct{} - done chan struct{} -} - -type raftNodeConfig struct { - lg *zap.Logger - - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - raft.Node - raftStorage *raft.MemoryStorage - storage serverstorage.Storage - heartbeat time.Duration // for logging - // transport specifies the transport to send and receive msgs to members. - // Sending messages MUST NOT block. 
It is okay to drop messages, since - // clients should timeout and reissue their messages. - // If transport is nil, server will panic. - transport rafthttp.Transporter -} - -func newRaftNode(cfg raftNodeConfig) *raftNode { - var lg raft.Logger - if cfg.lg != nil { - lg = NewRaftLoggerZap(cfg.lg) - } else { - lcfg := logutil.DefaultZapLoggerConfig - var err error - lg, err = NewRaftLogger(&lcfg) - if err != nil { - log.Fatalf("cannot create raft logger %v", err) - } - } - raft.SetLogger(lg) - r := &raftNode{ - lg: cfg.lg, - tickMu: new(sync.Mutex), - raftNodeConfig: cfg, - // set up contention detectors for raft heartbeat message. - // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * cfg.heartbeat), - readStateC: make(chan raft.ReadState, 1), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - applyc: make(chan toApply), - stopped: make(chan struct{}), - done: make(chan struct{}), - } - if r.heartbeat == 0 { - r.ticker = &time.Ticker{} - } else { - r.ticker = time.NewTicker(r.heartbeat) - } - return r -} - -// raft.Node does not have locks in Raft package -func (r *raftNode) tick() { - r.tickMu.Lock() - r.Tick() - r.tickMu.Unlock() -} - -// start prepares and starts raftNode in a new goroutine. It is no longer safe -// to modify the fields after it has been started. 
-func (r *raftNode) start(rh *raftReadyHandler) { - internalTimeout := time.Second - - go func() { - defer r.onStop() - islead := false - - for { - select { - case <-r.ticker.C: - r.tick() - case rd := <-r.Ready(): - if rd.SoftState != nil { - newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead - if newLeader { - leaderChanges.Inc() - } - - if rd.SoftState.Lead == raft.None { - hasLeader.Set(0) - } else { - hasLeader.Set(1) - } - - rh.updateLead(rd.SoftState.Lead) - islead = rd.RaftState == raft.StateLeader - if islead { - isLeader.Set(1) - } else { - isLeader.Set(0) - } - rh.updateLeadership(newLeader) - r.td.Reset() - } - - if len(rd.ReadStates) != 0 { - select { - case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: - case <-time.After(internalTimeout): - r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout)) - case <-r.stopped: - return - } - } - - notifyc := make(chan struct{}, 1) - ap := toApply{ - entries: rd.CommittedEntries, - snapshot: rd.Snapshot, - notifyc: notifyc, - } - - updateCommittedIndex(&ap, rh) - - select { - case r.applyc <- ap: - case <-r.stopped: - return - } - - // the leader can write to its disk in parallel with replicating to the followers and them - // writing to their disks. - // For more details, check raft thesis 10.2.1 - if islead { - // gofail: var raftBeforeLeaderSend struct{} - r.transport.Send(r.processMessages(rd.Messages)) - } - - // Must save the snapshot file and WAL snapshot entry before saving any other entries or hardstate to - // ensure that recovery after a snapshot restore is possible. 
- if !raft.IsEmptySnap(rd.Snapshot) { - // gofail: var raftBeforeSaveSnap struct{} - if err := r.storage.SaveSnap(rd.Snapshot); err != nil { - r.lg.Fatal("failed to save Raft snapshot", zap.Error(err)) - } - // gofail: var raftAfterSaveSnap struct{} - } - - // gofail: var raftBeforeSave struct{} - if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err)) - } - if !raft.IsEmptyHardState(rd.HardState) { - proposalsCommitted.Set(float64(rd.HardState.Commit)) - } - // gofail: var raftAfterSave struct{} - - if !raft.IsEmptySnap(rd.Snapshot) { - // Force WAL to fsync its hard state before Release() releases - // old data from the WAL. Otherwise could get an error like: - // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost? - // See https://github.com/etcd-io/etcd/issues/10219 for more details. - if err := r.storage.Sync(); err != nil { - r.lg.Fatal("failed to sync Raft snapshot", zap.Error(err)) - } - - // etcdserver now claim the snapshot has been persisted onto the disk - notifyc <- struct{}{} - - // gofail: var raftBeforeApplySnap struct{} - r.raftStorage.ApplySnapshot(rd.Snapshot) - r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index)) - // gofail: var raftAfterApplySnap struct{} - - if err := r.storage.Release(rd.Snapshot); err != nil { - r.lg.Fatal("failed to release Raft wal", zap.Error(err)) - } - // gofail: var raftAfterWALRelease struct{} - } - - r.raftStorage.Append(rd.Entries) - - if !islead { - // finish processing incoming messages before we signal notifyc chan - msgs := r.processMessages(rd.Messages) - - // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots - notifyc <- struct{}{} - - // Candidate or follower needs to wait for all pending configuration - // changes to be applied before sending messages. 
- // Otherwise we might incorrectly count votes (e.g. votes from removed members). - // Also slow machine's follower raft-layer could proceed to become the leader - // on its own single-node cluster, before toApply-layer applies the config change. - // We simply wait for ALL pending entries to be applied for now. - // We might improve this later on if it causes unnecessary long blocking issues. - waitApply := false - for _, ent := range rd.CommittedEntries { - if ent.Type == raftpb.EntryConfChange { - waitApply = true - break - } - } - if waitApply { - // blocks until 'applyAll' calls 'applyWait.Trigger' - // to be in sync with scheduled config-change job - // (assume notifyc has cap of 1) - select { - case notifyc <- struct{}{}: - case <-r.stopped: - return - } - } - - // gofail: var raftBeforeFollowerSend struct{} - r.transport.Send(msgs) - } else { - // leader already processed 'MsgSnap' and signaled - notifyc <- struct{}{} - } - - r.Advance() - case <-r.stopped: - return - } - } - }() -} - -func updateCommittedIndex(ap *toApply, rh *raftReadyHandler) { - var ci uint64 - if len(ap.entries) != 0 { - ci = ap.entries[len(ap.entries)-1].Index - } - if ap.snapshot.Metadata.Index > ci { - ci = ap.snapshot.Metadata.Index - } - if ci != 0 { - rh.updateCommittedIndex(ci) - } -} - -func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { - sentAppResp := false - for i := len(ms) - 1; i >= 0; i-- { - if r.isIDRemoved(ms[i].To) { - ms[i].To = 0 - } - - if ms[i].Type == raftpb.MsgAppResp { - if sentAppResp { - ms[i].To = 0 - } else { - sentAppResp = true - } - } - - if ms[i].Type == raftpb.MsgSnap { - // There are two separate data store: the store for v2, and the KV for v3. - // The msgSnap only contains the most recent snapshot of store without KV. - // So we need to redirect the msgSnap to etcd server main loop for merging in the - // current store snapshot and KV snapshot. 
- select { - case r.msgSnapC <- ms[i]: - default: - // drop msgSnap if the inflight chan if full. - } - ms[i].To = 0 - } - if ms[i].Type == raftpb.MsgHeartbeat { - ok, exceed := r.td.Observe(ms[i].To) - if !ok { - // TODO: limit request rate. - r.lg.Warn( - "leader failed to send out heartbeat on time; took too long, leader is overloaded likely from slow disk", - zap.String("to", fmt.Sprintf("%x", ms[i].To)), - zap.Duration("heartbeat-interval", r.heartbeat), - zap.Duration("expected-duration", 2*r.heartbeat), - zap.Duration("exceeded-duration", exceed), - ) - heartbeatSendFailures.Inc() - } - } - } - return ms -} - -func (r *raftNode) apply() chan toApply { - return r.applyc -} - -func (r *raftNode) stop() { - select { - case r.stopped <- struct{}{}: - // Not already stopped, so trigger it - case <-r.done: - // Has already been stopped - no need to do anything - return - } - // Block until the stop has been acknowledged by start() - <-r.done -} - -func (r *raftNode) onStop() { - r.Stop() - r.ticker.Stop() - r.transport.Stop() - if err := r.storage.Close(); err != nil { - r.lg.Panic("failed to close Raft storage", zap.Error(err)) - } - close(r.done) -} - -// for testing -func (r *raftNode) pauseSending() { - p := r.transport.(rafthttp.Pausable) - p.Pause() -} - -func (r *raftNode) resumeSending() { - p := r.transport.(rafthttp.Pausable) - p.Resume() -} - -// advanceTicks advances ticks of Raft node. -// This can be used for fast-forwarding election -// ticks in multi data-center deployments, thus -// speeding up election process. 
-func (r *raftNode) advanceTicks(ticks int) { - for i := 0; i < ticks; i++ { - r.tick() - } -} diff --git a/server/etcdserver/raft_test.go b/server/etcdserver/raft_test.go deleted file mode 100644 index e795732cb51..00000000000 --- a/server/etcdserver/raft_test.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "expvar" - "reflect" - "sync" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/mock/mockstorage" - serverstorage "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -func TestGetIDs(t *testing.T) { - lg := zaptest.NewLogger(t) - addcc := &raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 2} - addEntry := raftpb.Entry{Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(addcc)} - removecc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2} - removeEntry := raftpb.Entry{Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc)} - normalEntry := raftpb.Entry{Type: raftpb.EntryNormal} - updatecc := &raftpb.ConfChange{Type: raftpb.ConfChangeUpdateNode, NodeID: 2} - updateEntry := raftpb.Entry{Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(updatecc)} - - tests := []struct { - confState 
*raftpb.ConfState - ents []raftpb.Entry - - widSet []uint64 - }{ - {nil, []raftpb.Entry{}, []uint64{}}, - {&raftpb.ConfState{Voters: []uint64{1}}, - []raftpb.Entry{}, []uint64{1}}, - {&raftpb.ConfState{Voters: []uint64{1}}, - []raftpb.Entry{addEntry}, []uint64{1, 2}}, - {&raftpb.ConfState{Voters: []uint64{1}}, - []raftpb.Entry{addEntry, removeEntry}, []uint64{1}}, - {&raftpb.ConfState{Voters: []uint64{1}}, - []raftpb.Entry{addEntry, normalEntry}, []uint64{1, 2}}, - {&raftpb.ConfState{Voters: []uint64{1}}, - []raftpb.Entry{addEntry, normalEntry, updateEntry}, []uint64{1, 2}}, - {&raftpb.ConfState{Voters: []uint64{1}}, - []raftpb.Entry{addEntry, removeEntry, normalEntry}, []uint64{1}}, - } - - for i, tt := range tests { - var snap raftpb.Snapshot - if tt.confState != nil { - snap.Metadata.ConfState = *tt.confState - } - idSet := serverstorage.GetEffectiveNodeIDsFromWalEntries(lg, &snap, tt.ents) - if !reflect.DeepEqual(idSet, tt.widSet) { - t.Errorf("#%d: idset = %#v, want %#v", i, idSet, tt.widSet) - } - } -} - -func TestCreateConfigChangeEnts(t *testing.T) { - lg := zaptest.NewLogger(t) - m := membership.Member{ - ID: types.ID(1), - RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, - } - ctx, err := json.Marshal(m) - if err != nil { - t.Fatal(err) - } - addcc1 := &raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1, Context: ctx} - removecc2 := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2} - removecc3 := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 3} - tests := []struct { - ids []uint64 - self uint64 - term, index uint64 - - wents []raftpb.Entry - }{ - { - []uint64{1}, - 1, - 1, 1, - - nil, - }, - { - []uint64{1, 2}, - 1, - 1, 1, - - []raftpb.Entry{{Term: 1, Index: 2, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc2)}}, - }, - { - []uint64{1, 2}, - 1, - 2, 2, - - []raftpb.Entry{{Term: 2, Index: 3, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc2)}}, - 
}, - { - []uint64{1, 2, 3}, - 1, - 2, 2, - - []raftpb.Entry{ - {Term: 2, Index: 3, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc2)}, - {Term: 2, Index: 4, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc3)}, - }, - }, - { - []uint64{2, 3}, - 2, - 2, 2, - - []raftpb.Entry{ - {Term: 2, Index: 3, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc3)}, - }, - }, - { - []uint64{2, 3}, - 1, - 2, 2, - - []raftpb.Entry{ - {Term: 2, Index: 3, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(addcc1)}, - {Term: 2, Index: 4, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc2)}, - {Term: 2, Index: 5, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(removecc3)}, - }, - }, - } - - for i, tt := range tests { - gents := serverstorage.CreateConfigChangeEnts(lg, tt.ids, tt.self, tt.term, tt.index) - if !reflect.DeepEqual(gents, tt.wents) { - t.Errorf("#%d: ents = %v, want %v", i, gents, tt.wents) - } - } -} - -func TestStopRaftWhenWaitingForApplyDone(t *testing.T) { - n := newNopReadyNode() - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: n, - storage: mockstorage.NewStorageRecorder(""), - raftStorage: raft.NewMemoryStorage(), - transport: newNopTransporter(), - }) - srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), r: *r} - srv.r.start(nil) - n.readyc <- raft.Ready{} - - stop := func() { - srv.r.stopped <- struct{}{} - select { - case <-srv.r.done: - case <-time.After(time.Second): - t.Fatalf("failed to stop raft loop") - } - } - - select { - case <-srv.r.applyc: - case <-time.After(time.Second): - stop() - t.Fatalf("failed to receive toApply struct") - } - - stop() -} - -// TestConfigChangeBlocksApply ensures toApply blocks if committed entries contain config-change. 
-func TestConfigChangeBlocksApply(t *testing.T) { - n := newNopReadyNode() - - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: n, - storage: mockstorage.NewStorageRecorder(""), - raftStorage: raft.NewMemoryStorage(), - transport: newNopTransporter(), - }) - srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), r: *r} - - srv.r.start(&raftReadyHandler{ - getLead: func() uint64 { return 0 }, - updateLead: func(uint64) {}, - updateLeadership: func(bool) {}, - }) - defer srv.r.Stop() - - n.readyc <- raft.Ready{ - SoftState: &raft.SoftState{RaftState: raft.StateFollower}, - CommittedEntries: []raftpb.Entry{{Type: raftpb.EntryConfChange}}, - } - ap := <-srv.r.applyc - - continueC := make(chan struct{}) - go func() { - n.readyc <- raft.Ready{} - <-srv.r.applyc - close(continueC) - }() - - select { - case <-continueC: - t.Fatalf("unexpected execution: raft routine should block waiting for toApply") - case <-time.After(time.Second): - } - - // finish toApply, unblock raft routine - <-ap.notifyc - - select { - case <-continueC: - case <-time.After(time.Second): - t.Fatalf("unexpected blocking on execution") - } -} - -func TestProcessDuplicatedAppRespMessage(t *testing.T) { - n := newNopReadyNode() - cl := membership.NewCluster(zaptest.NewLogger(t)) - - rs := raft.NewMemoryStorage() - p := mockstorage.NewStorageRecorder("") - tr, sendc := newSendMsgAppRespTransporter() - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - transport: tr, - storage: p, - raftStorage: rs, - }) - - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - r: *r, - cluster: cl, - SyncTicker: &time.Ticker{}, - } - - s.start() - defer s.Stop() - - lead := uint64(1) - - n.readyc <- raft.Ready{Messages: []raftpb.Message{ - {Type: raftpb.MsgAppResp, From: 2, To: lead, Term: 1, Index: 1}, - {Type: raftpb.MsgAppResp, From: 2, To: lead, Term: 1, Index: 
2}, - {Type: raftpb.MsgAppResp, From: 2, To: lead, Term: 1, Index: 3}, - }} - - got, want := <-sendc, 1 - if got != want { - t.Errorf("count = %d, want %d", got, want) - } -} - -// TestExpvarWithNoRaftStatus to test that none of the expvars that get added during init panic. -// This matters if another package imports etcdserver, doesn't use it, but does use expvars. -func TestExpvarWithNoRaftStatus(t *testing.T) { - defer func() { - if err := recover(); err != nil { - t.Fatal(err) - } - }() - expvar.Do(func(kv expvar.KeyValue) { - _ = kv.Value.String() - }) -} - -func TestStopRaftNodeMoreThanOnce(t *testing.T) { - n := newNopReadyNode() - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: n, - storage: mockstorage.NewStorageRecorder(""), - raftStorage: raft.NewMemoryStorage(), - transport: newNopTransporter(), - }) - r.start(&raftReadyHandler{}) - - for i := 0; i < 2; i++ { - stopped := make(chan struct{}) - go func() { - r.stop() - close(stopped) - }() - - select { - case <-stopped: - case <-time.After(time.Second): - t.Errorf("*raftNode.stop() is blocked !") - } - } -} diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go deleted file mode 100644 index eb07dab5548..00000000000 --- a/server/etcdserver/server.go +++ /dev/null @@ -1,2455 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "context" - "encoding/json" - "expvar" - "fmt" - "math" - "math/rand" - "net/http" - "path" - "regexp" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/coreos/go-semver/semver" - humanize "github.com/dustin/go-humanize" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - - "go.etcd.io/etcd/pkg/v3/notify" - "go.etcd.io/etcd/pkg/v3/runtime" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver/apply" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/client/pkg/v3/verify" - "go.etcd.io/etcd/pkg/v3/idutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/pkg/v3/schedule" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/pkg/v3/wait" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api" - httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/lease/leasehttp" - serverstorage "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -const ( - DefaultSnapshotCount = 100000 - - // 
DefaultSnapshotCatchUpEntries is the number of entries for a slow follower - // to catch-up after compacting the raft storage entries. - // We expect the follower has a millisecond level latency with the leader. - // The max throughput is around 10K. Keep a 5K entries is enough for helping - // follower to catch up. - DefaultSnapshotCatchUpEntries uint64 = 5000 - - StoreClusterPrefix = "/0" - StoreKeysPrefix = "/1" - - // HealthInterval is the minimum time the cluster should be healthy - // before accepting add member requests. - HealthInterval = 5 * time.Second - - purgeFileInterval = 30 * time.Second - - // max number of in-flight snapshot messages etcdserver allows to have - // This number is more than enough for most clusters with 5 machines. - maxInFlightMsgSnap = 16 - - releaseDelayAfterSnapshot = 30 * time.Second - - // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 - - recommendedMaxRequestBytes = 10 * 1024 * 1024 - - readyPercent = 0.9 - - DowngradeEnabledPath = "/downgrade/enabled" -) - -var ( - // monitorVersionInterval should be smaller than the timeout - // on the connection. Or we will not be able to reuse the connection - // (since it will timeout). - monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - - recommendedMaxRequestBytesString = humanize.Bytes(uint64(recommendedMaxRequestBytes)) - storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) -) - -func init() { - rand.Seed(time.Now().UnixNano()) - - expvar.Publish( - "file_descriptor_limit", - expvar.Func( - func() interface{} { - n, _ := runtime.FDLimit() - return n - }, - ), - ) -} - -type Response struct { - Term uint64 - Index uint64 - Event *v2store.Event - Watcher v2store.Watcher - Err error -} - -type ServerV2 interface { - Server - Leader() types.ID - - // Do takes a V2 request and attempts to fulfill it, returning a Response. 
- Do(ctx context.Context, r pb.Request) (Response, error) - ClientCertAuthEnabled() bool -} - -type ServerV3 interface { - Server - apply.RaftStatusGetter -} - -func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled } - -type Server interface { - // AddMember attempts to add a member into the cluster. It will return - // ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDExists if member ID exists in the cluster. - AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) - // RemoveMember attempts to remove a member from the cluster. It will - // return ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDNotFound if member ID is not in the cluster. - RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) - // UpdateMember attempts to update an existing member in the cluster. It will - // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) - // PromoteMember attempts to promote a non-voting node to a voting node. It will - // return ErrIDNotFound if the member ID does not exist. - // return ErrLearnerNotReady if the member are not ready. - // return ErrMemberNotLearner if the member is not a learner. - PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) - - // ClusterVersion is the cluster-wide minimum major.minor version. - // Cluster version is set to the min version that an etcd member is - // compatible with when first bootstrap. - // - // ClusterVersion is nil until the cluster is bootstrapped (has a quorum). - // - // During a rolling upgrades, the ClusterVersion will be updated - // automatically after a sync. (5 second by default) - // - // The API/raft component can utilize ClusterVersion to determine if - // it can accept a client request or a raft RPC. 
- // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and - // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since - // this feature is introduced post 2.0. - ClusterVersion() *semver.Version - // StorageVersion is the storage schema version. It's supported starting - // from 3.6. - StorageVersion() *semver.Version - Cluster() api.Cluster - Alarms() []*pb.AlarmMember - - // LeaderChangedNotify returns a channel for application level code to be notified - // when etcd leader changes, this function is intend to be used only in application - // which embed etcd. - // Caution: - // 1. the returned channel is being closed when the leadership changes. - // 2. so the new channel needs to be obtained for each raft term. - // 3. user can loose some consecutive channel changes using this API. - LeaderChangedNotify() <-chan struct{} -} - -// EtcdServer is the production implementation of the Server interface -type EtcdServer struct { - // inflightSnapshots holds count the number of snapshots currently inflight. - inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. - appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - term uint64 // must use atomic operations to access; keep 64-bit aligned. - lead uint64 // must use atomic operations to access; keep 64-bit aligned. - - consistIndex cindex.ConsistentIndexer // consistIndex is used to get/set/save consistentIndex - r raftNode // uses 64-bit atomics; keep 64-bit aligned. 
- - readych chan struct{} - Cfg config.ServerConfig - - lgMu *sync.RWMutex - lg *zap.Logger - - w wait.Wait - - readMu sync.RWMutex - // read routine notifies etcd server that it waits for reading by sending an empty struct to - // readwaitC - readwaitc chan struct{} - // readNotifier is used to notify the read routine that it can process the request - // when there is no error - readNotifier *notifier - - // stop signals the run goroutine should shutdown. - stop chan struct{} - // stopping is closed by run goroutine on shutdown. - stopping chan struct{} - // done is closed when all goroutines from start() complete. - done chan struct{} - // leaderChanged is used to notify the linearizable read loop to drop the old read requests. - leaderChanged *notify.Notifier - - errorc chan error - memberId types.ID - attributes membership.Attributes - - cluster *membership.RaftCluster - - v2store v2store.Store - snapshotter *snap.Snapshotter - - applyV2 ApplierV2 - - uberApply apply.UberApplier - - applyWait wait.WaitTime - - kv mvcc.WatchableKV - lessor lease.Lessor - bemu sync.RWMutex - be backend.Backend - beHooks *serverstorage.BackendHooks - authStore auth.AuthStore - alarmStore *v3alarm.AlarmStore - - stats *stats.ServerStats - lstats *stats.LeaderStats - - SyncTicker *time.Ticker - // compactor is used to auto-compact the KV. - compactor v3compactor.Compactor - - // peerRt used to send requests (version, lease) to peers. - peerRt http.RoundTripper - reqIDGen *idutil.Generator - - // wgMu blocks concurrent waitgroup mutation while server stopping - wgMu sync.RWMutex - // wg is used to wait for the goroutines that depends on the server state - // to exit when stopping the server. - wg sync.WaitGroup - - // ctx is used for etcd-initiated requests that may need to be canceled - // on etcd server shutdown. 
- ctx context.Context - cancel context.CancelFunc - - leadTimeMu sync.RWMutex - leadElectedTime time.Time - - firstCommitInTerm *notify.Notifier - clusterVersionChanged *notify.Notifier - - *AccessController - // forceSnapshot can force snapshot be triggered after apply, independent of the snapshotCount. - // Should only be set within apply code path. Used to force snapshot after cluster version downgrade. - forceSnapshot bool - corruptionChecker CorruptionChecker -} - -// NewServer creates a new EtcdServer from the supplied configuration. The -// configuration is considered static for the lifetime of the EtcdServer. -func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { - b, err := bootstrap(cfg) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - b.Close() - } - }() - - sstats := stats.NewServerStats(cfg.Name, b.cluster.cl.String()) - lstats := stats.NewLeaderStats(cfg.Logger, b.cluster.nodeID.String()) - - heartbeat := time.Duration(cfg.TickMs) * time.Millisecond - srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - lgMu: new(sync.RWMutex), - lg: cfg.Logger, - errorc: make(chan error, 1), - v2store: b.storage.st, - snapshotter: b.ss, - r: *b.raft.newRaftNode(b.ss, b.storage.wal.w, b.cluster.cl), - memberId: b.cluster.nodeID, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: b.cluster.cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), - peerRt: b.prt, - reqIDGen: idutil.NewGenerator(uint16(b.cluster.nodeID), time.Now()), - AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, - consistIndex: b.storage.backend.ci, - firstCommitInTerm: notify.NewNotifier(), - clusterVersionChanged: notify.NewNotifier(), - } - serverID.With(prometheus.Labels{"server_id": b.cluster.nodeID.String()}).Set(1) - srv.cluster.SetVersionChangedNotifier(srv.clusterVersionChanged) - srv.applyV2 = 
NewApplierV2(cfg.Logger, srv.v2store, srv.cluster) - - srv.be = b.storage.backend.be - srv.beHooks = b.storage.backend.beHooks - minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - srv.lessor = lease.NewLessor(srv.Logger(), srv.be, srv.cluster, lease.LessorConfig{ - MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())), - CheckpointInterval: cfg.LeaseCheckpointInterval, - CheckpointPersist: cfg.LeaseCheckpointPersist, - ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(), - }) - - tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken, - func(index uint64) <-chan struct{} { - return srv.applyWait.Wait(index) - }, - time.Duration(cfg.TokenTTL)*time.Second, - ) - if err != nil { - cfg.Logger.Warn("failed to create token provider", zap.Error(err)) - return nil, err - } - - mvccStoreConfig := mvcc.StoreConfig{ - CompactionBatchLimit: cfg.CompactionBatchLimit, - CompactionSleepInterval: cfg.CompactionSleepInterval, - } - srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, mvccStoreConfig) - srv.corruptionChecker = newCorruptionChecker(cfg.Logger, srv, srv.kv.HashStorage()) - - srv.authStore = auth.NewAuthStore(srv.Logger(), schema.NewAuthBackend(srv.Logger(), srv.be), tp, int(cfg.BcryptCost)) - - newSrv := srv // since srv == nil in defer if srv is returned as nil - defer func() { - // closing backend without first closing kv can cause - // resumed compactions to fail with closed tx errors - if err != nil { - newSrv.kv.Close() - } - }() - if num := cfg.AutoCompactionRetention; num != 0 { - srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv) - if err != nil { - return nil, err - } - srv.compactor.Run() - } - - if err = srv.restoreAlarms(); err != nil { - return nil, err - } - srv.uberApply = srv.NewUberApplier() - - if 
srv.Cfg.EnableLeaseCheckpoint { - // setting checkpointer enables lease checkpoint feature. - srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) { - srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp}) - }) - } - - // Set the hook after EtcdServer finishes the initialization to avoid - // the hook being called during the initialization process. - srv.be.SetTxPostLockInsideApplyHook(srv.getTxPostLockInsideApplyHook()) - - // TODO: move transport initialization near the definition of remote - tr := &rafthttp.Transport{ - Logger: cfg.Logger, - TLSInfo: cfg.PeerTLSInfo, - DialTimeout: cfg.PeerDialTimeout(), - ID: b.cluster.nodeID, - URLs: cfg.PeerURLs, - ClusterID: b.cluster.cl.ID(), - Raft: srv, - Snapshotter: b.ss, - ServerStats: sstats, - LeaderStats: lstats, - ErrorC: srv.errorc, - } - if err = tr.Start(); err != nil { - return nil, err - } - // add all remotes into transport - for _, m := range b.cluster.remotes { - if m.ID != b.cluster.nodeID { - tr.AddRemote(m.ID, m.PeerURLs) - } - } - for _, m := range b.cluster.cl.Members() { - if m.ID != b.cluster.nodeID { - tr.AddPeer(m.ID, m.PeerURLs) - } - } - srv.r.transport = tr - - return srv, nil -} - -func (s *EtcdServer) Logger() *zap.Logger { - s.lgMu.RLock() - l := s.lg - s.lgMu.RUnlock() - return l -} - -func (s *EtcdServer) Config() config.ServerConfig { - return s.Cfg -} - -func tickToDur(ticks int, tickMs uint) string { - return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond) -} - -func (s *EtcdServer) adjustTicks() { - lg := s.Logger() - clusterN := len(s.cluster.Members()) - - // single-node fresh start, or single-node recovers from snapshot - if clusterN == 1 { - ticks := s.Cfg.ElectionTicks - 1 - lg.Info( - "started as single-node; fast-forwarding election ticks", - zap.String("local-member-id", s.MemberId().String()), - zap.Int("forward-ticks", ticks), - zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), - 
zap.Int("election-ticks", s.Cfg.ElectionTicks), - zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)), - ) - s.r.advanceTicks(ticks) - return - } - - if !s.Cfg.InitialElectionTickAdvance { - lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) - return - } - lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) - - // retry up to "rafthttp.ConnReadTimeout", which is 5-sec - // until peer connection reports; otherwise: - // 1. all connections failed, or - // 2. no active peers, or - // 3. restarted single-node with no snapshot - // then, do nothing, because advancing ticks would have no effect - waitTime := rafthttp.ConnReadTimeout - itv := 50 * time.Millisecond - for i := int64(0); i < int64(waitTime/itv); i++ { - select { - case <-time.After(itv): - case <-s.stopping: - return - } - - peerN := s.r.transport.ActivePeers() - if peerN > 1 { - // multi-node received peer connection reports - // adjust ticks, in case slow leader message receive - ticks := s.Cfg.ElectionTicks - 2 - - lg.Info( - "initialized peer connections; fast-forwarding election ticks", - zap.String("local-member-id", s.MemberId().String()), - zap.Int("forward-ticks", ticks), - zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), - zap.Int("election-ticks", s.Cfg.ElectionTicks), - zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)), - zap.Int("active-remote-members", peerN), - ) - - s.r.advanceTicks(ticks) - return - } - } -} - -// Start performs any initialization of the Server necessary for it to -// begin serving requests. It must be called before Do or Process. -// Start must be non-blocking; any long-running server functionality -// should be implemented in goroutines. 
-func (s *EtcdServer) Start() { - s.start() - s.GoAttach(func() { s.adjustTicks() }) - s.GoAttach(func() { s.publishV3(s.Cfg.ReqTimeout()) }) - s.GoAttach(s.purgeFile) - s.GoAttach(func() { monitorFileDescriptor(s.Logger(), s.stopping) }) - s.GoAttach(s.monitorClusterVersions) - s.GoAttach(s.monitorStorageVersion) - s.GoAttach(s.linearizableReadLoop) - s.GoAttach(s.monitorKVHash) - s.GoAttach(s.monitorCompactHash) - s.GoAttach(s.monitorDowngrade) -} - -// start prepares and starts server in a new goroutine. It is no longer safe to -// modify a server's fields after it has been sent to Start. -// This function is just used for testing. -func (s *EtcdServer) start() { - lg := s.Logger() - - if s.Cfg.SnapshotCount == 0 { - lg.Info( - "updating snapshot-count to default", - zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount), - zap.Uint64("updated-snapshot-count", DefaultSnapshotCount), - ) - s.Cfg.SnapshotCount = DefaultSnapshotCount - } - if s.Cfg.SnapshotCatchUpEntries == 0 { - lg.Info( - "updating snapshot catch-up entries to default", - zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries), - zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries), - ) - s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries - } - - s.w = wait.New() - s.applyWait = wait.NewTimeList() - s.done = make(chan struct{}) - s.stop = make(chan struct{}) - s.stopping = make(chan struct{}, 1) - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.readwaitc = make(chan struct{}, 1) - s.readNotifier = newNotifier() - s.leaderChanged = notify.NewNotifier() - if s.ClusterVersion() != nil { - lg.Info( - "starting etcd server", - zap.String("local-member-id", s.MemberId().String()), - zap.String("local-server-version", version.Version), - zap.String("cluster-id", s.Cluster().ID().String()), - zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())), - ) - 
membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(s.ClusterVersion().String())}).Set(1) - } else { - lg.Info( - "starting etcd server", - zap.String("local-member-id", s.MemberId().String()), - zap.String("local-server-version", version.Version), - zap.String("cluster-version", "to_be_decided"), - ) - } - - // TODO: if this is an empty log, writes all peer infos - // into the first entry - go s.run() -} - -func (s *EtcdServer) purgeFile() { - lg := s.Logger() - var dberrc, serrc, werrc <-chan error - var dbdonec, sdonec, wdonec <-chan struct{} - if s.Cfg.MaxSnapFiles > 0 { - dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping) - sdonec, serrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping) - } - if s.Cfg.MaxWALFiles > 0 { - wdonec, werrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping) - } - - select { - case e := <-dberrc: - lg.Fatal("failed to purge snap db file", zap.Error(e)) - case e := <-serrc: - lg.Fatal("failed to purge snap file", zap.Error(e)) - case e := <-werrc: - lg.Fatal("failed to purge wal file", zap.Error(e)) - case <-s.stopping: - if dbdonec != nil { - <-dbdonec - } - if sdonec != nil { - <-sdonec - } - if wdonec != nil { - <-wdonec - } - return - } -} - -func (s *EtcdServer) Cluster() api.Cluster { return s.cluster } - -func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } - -type ServerPeer interface { - ServerV2 - RaftHandler() http.Handler - LeaseHandler() http.Handler -} - -func (s *EtcdServer) LeaseHandler() http.Handler { - if s.lessor == nil { - return nil - } - return leasehttp.NewHandler(s.lessor, s.ApplyWait) -} - -func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() } - -type ServerPeerV2 interface { - ServerPeer - 
HashKVHandler() http.Handler - DowngradeEnabledHandler() http.Handler -} - -func (s *EtcdServer) DowngradeInfo() *serverversion.DowngradeInfo { return s.cluster.DowngradeInfo() } - -type downgradeEnabledHandler struct { - lg *zap.Logger - cluster api.Cluster - server *EtcdServer -} - -func (s *EtcdServer) DowngradeEnabledHandler() http.Handler { - return &downgradeEnabledHandler{ - lg: s.Logger(), - cluster: s.cluster, - server: s, - } -} - -func (h *downgradeEnabledHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - w.Header().Set("Allow", http.MethodGet) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - if r.URL.Path != DowngradeEnabledPath { - http.Error(w, "bad path", http.StatusBadRequest) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), h.server.Cfg.ReqTimeout()) - defer cancel() - - // serve with linearized downgrade info - if err := h.server.linearizableReadNotify(ctx); err != nil { - http.Error(w, fmt.Sprintf("failed linearized read: %v", err), - http.StatusInternalServerError) - return - } - enabled := h.server.DowngradeInfo().Enabled - w.Header().Set("Content-Type", "text/plain") - w.Write([]byte(strconv.FormatBool(enabled))) -} - -// Process takes a raft message and applies it to the server's raft state -// machine, respecting any timeout of the given context. 
-func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { - lg := s.Logger() - if s.cluster.IsIDRemoved(types.ID(m.From)) { - lg.Warn( - "rejected Raft message from removed member", - zap.String("local-member-id", s.MemberId().String()), - zap.String("removed-member-id", types.ID(m.From).String()), - ) - return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") - } - if m.Type == raftpb.MsgApp { - s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) - } - return s.r.Step(ctx, m) -} - -func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) } - -func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) } - -// ReportSnapshot reports snapshot sent status to the raft state machine, -// and clears the used snapshot from the snapshot store. -func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) { - s.r.ReportSnapshot(id, status) -} - -type etcdProgress struct { - confState raftpb.ConfState - snapi uint64 - appliedt uint64 - appliedi uint64 -} - -// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, -// and helps decouple state machine logic from Raft algorithms. 
-// TODO: add a state machine interface to toApply the commit entries and do snapshot/recover -type raftReadyHandler struct { - getLead func() (lead uint64) - updateLead func(lead uint64) - updateLeadership func(newLeader bool) - updateCommittedIndex func(uint64) -} - -func (s *EtcdServer) run() { - lg := s.Logger() - - sn, err := s.r.raftStorage.Snapshot() - if err != nil { - lg.Panic("failed to get snapshot from Raft storage", zap.Error(err)) - } - - // asynchronously accept toApply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler(lg) - - var ( - smu sync.RWMutex - syncC <-chan time.Time - ) - setSyncC := func(ch <-chan time.Time) { - smu.Lock() - syncC = ch - smu.Unlock() - } - getSyncC := func() (ch <-chan time.Time) { - smu.RLock() - ch = syncC - smu.RUnlock() - return - } - rh := &raftReadyHandler{ - getLead: func() (lead uint64) { return s.getLead() }, - updateLead: func(lead uint64) { s.setLead(lead) }, - updateLeadership: func(newLeader bool) { - if !s.isLeader() { - if s.lessor != nil { - s.lessor.Demote() - } - if s.compactor != nil { - s.compactor.Pause() - } - setSyncC(nil) - } else { - if newLeader { - t := time.Now() - s.leadTimeMu.Lock() - s.leadElectedTime = t - s.leadTimeMu.Unlock() - } - setSyncC(s.SyncTicker.C) - if s.compactor != nil { - s.compactor.Resume() - } - } - if newLeader { - s.leaderChanged.Notify() - } - // TODO: remove the nil checking - // current test utility does not provide the stats - if s.stats != nil { - s.stats.BecomeLeader() - } - }, - updateCommittedIndex: func(ci uint64) { - cci := s.getCommittedIndex() - if ci > cci { - s.setCommittedIndex(ci) - } - }, - } - s.r.start(rh) - - ep := etcdProgress{ - confState: sn.Metadata.ConfState, - snapi: sn.Metadata.Index, - appliedt: sn.Metadata.Term, - appliedi: sn.Metadata.Index, - } - - defer func() { - s.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping - close(s.stopping) - s.wgMu.Unlock() - s.cancel() - sched.Stop() - - // wait for 
goroutines before closing raft so wal stays open - s.wg.Wait() - - s.SyncTicker.Stop() - - // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines - // by adding a peer after raft stops the transport - s.r.stop() - - s.Cleanup() - - close(s.done) - }() - - var expiredLeaseC <-chan []*lease.Lease - if s.lessor != nil { - expiredLeaseC = s.lessor.ExpiredLeasesC() - } - - for { - select { - case ap := <-s.r.apply(): - f := schedule.NewJob("server_applyAll", func(context.Context) { s.applyAll(&ep, &ap) }) - sched.Schedule(f) - case leases := <-expiredLeaseC: - s.revokeExpiredLeases(leases) - case err := <-s.errorc: - lg.Warn("server error", zap.Error(err)) - lg.Warn("data-dir used by this member must be removed") - return - case <-getSyncC(): - if s.v2store.HasTTLKeys() { - s.sync(s.Cfg.ReqTimeout()) - } - case <-s.stop: - return - } - } -} - -func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) { - s.GoAttach(func() { - lg := s.Logger() - // Increases throughput of expired leases deletion process through parallelization - c := make(chan struct{}, maxPendingRevokes) - for _, curLease := range leases { - select { - case c <- struct{}{}: - case <-s.stopping: - return - } - - f := func(lid int64) { - s.GoAttach(func() { - ctx := s.authStore.WithRoot(s.ctx) - _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lid}) - if lerr == nil { - leaseExpired.Inc() - } else { - lg.Warn( - "failed to revoke lease", - zap.String("lease-id", fmt.Sprintf("%016x", lid)), - zap.Error(lerr), - ) - } - - <-c - }) - } - - f(int64(curLease.ID)) - } - }) -} - -// Cleanup removes allocated objects by EtcdServer.NewServer in -// situation that EtcdServer::Start was not called (that takes care of cleanup). -func (s *EtcdServer) Cleanup() { - // kv, lessor and backend can be nil if running without v3 enabled - // or running unit tests. 
- if s.lessor != nil { - s.lessor.Stop() - } - if s.kv != nil { - s.kv.Close() - } - if s.authStore != nil { - s.authStore.Close() - } - if s.be != nil { - s.be.Close() - } - if s.compactor != nil { - s.compactor.Stop() - } -} - -func (s *EtcdServer) applyAll(ep *etcdProgress, apply *toApply) { - s.applySnapshot(ep, apply) - s.applyEntries(ep, apply) - - proposalsApplied.Set(float64(ep.appliedi)) - s.applyWait.Trigger(ep.appliedi) - - // wait for the raft routine to finish the disk writes before triggering a - // snapshot. or applied index might be greater than the last index in raft - // storage, since the raft routine might be slower than toApply routine. - <-apply.notifyc - - s.triggerSnapshot(ep) - select { - // snapshot requested via send() - case m := <-s.r.msgSnapC: - merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState) - s.sendMergedSnap(merged) - default: - } -} - -func (s *EtcdServer) applySnapshot(ep *etcdProgress, toApply *toApply) { - if raft.IsEmptySnap(toApply.snapshot) { - return - } - applySnapshotInProgress.Inc() - - lg := s.Logger() - lg.Info( - "applying snapshot", - zap.Uint64("current-snapshot-index", ep.snapi), - zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), - ) - defer func() { - lg.Info( - "applied snapshot", - zap.Uint64("current-snapshot-index", ep.snapi), - zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), - ) - applySnapshotInProgress.Dec() - }() - - if toApply.snapshot.Metadata.Index <= ep.appliedi { - lg.Panic( - "unexpected leader snapshot from outdated index", - zap.Uint64("current-snapshot-index", ep.snapi), - zap.Uint64("current-applied-index", ep.appliedi), - 
zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), - ) - } - - // wait for raftNode to persist snapshot onto the disk - <-toApply.notifyc - - newbe, err := serverstorage.OpenSnapshotBackend(s.Cfg, s.snapshotter, toApply.snapshot, s.beHooks) - if err != nil { - lg.Panic("failed to open snapshot backend", zap.Error(err)) - } - - // We need to set the backend to consistIndex before recovering the lessor, - // because lessor.Recover will commit the boltDB transaction, accordingly it - // will get the old consistent_index persisted into the db in OnPreCommitUnsafe. - // Eventually the new consistent_index value coming from snapshot is overwritten - // by the old value. - s.consistIndex.SetBackend(newbe) - verifySnapshotIndex(toApply.snapshot, s.consistIndex.ConsistentIndex()) - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - if s.lessor != nil { - lg.Info("restoring lease store") - - s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write(traceutil.TODO()) }) - - lg.Info("restored lease store") - } - - lg.Info("restoring mvcc store") - - if err := s.kv.Restore(newbe); err != nil { - lg.Panic("failed to restore mvcc store", zap.Error(err)) - } - - newbe.SetTxPostLockInsideApplyHook(s.getTxPostLockInsideApplyHook()) - - lg.Info("restored mvcc store", zap.Uint64("consistent-index", s.consistIndex.ConsistentIndex())) - - // Closing old backend might block until all the txns - // on the backend are finished. - // We do not want to wait on closing the old backend. 
- s.bemu.Lock() - oldbe := s.be - go func() { - lg.Info("closing old backend file") - defer func() { - lg.Info("closed old backend file") - }() - if err := oldbe.Close(); err != nil { - lg.Panic("failed to close old backend", zap.Error(err)) - } - }() - - s.be = newbe - s.bemu.Unlock() - - lg.Info("restoring alarm store") - - if err := s.restoreAlarms(); err != nil { - lg.Panic("failed to restore alarm store", zap.Error(err)) - } - - lg.Info("restored alarm store") - - if s.authStore != nil { - lg.Info("restoring auth store") - - s.authStore.Recover(schema.NewAuthBackend(lg, newbe)) - - lg.Info("restored auth store") - } - - lg.Info("restoring v2 store") - if err := s.v2store.Recovery(toApply.snapshot.Data); err != nil { - lg.Panic("failed to restore v2 store", zap.Error(err)) - } - - if err := serverstorage.AssertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil { - lg.Panic("illegal v2store content", zap.Error(err)) - } - - lg.Info("restored v2 store") - - s.cluster.SetBackend(schema.NewMembershipBackend(lg, newbe)) - - lg.Info("restoring cluster configuration") - - s.cluster.Recover(api.UpdateCapability) - - lg.Info("restored cluster configuration") - lg.Info("removing old peers from network") - - // recover raft transport - s.r.transport.RemoveAllPeers() - - lg.Info("removed old peers from network") - lg.Info("adding peers from new cluster configuration") - - for _, m := range s.cluster.Members() { - if m.ID == s.MemberId() { - continue - } - s.r.transport.AddPeer(m.ID, m.PeerURLs) - } - - lg.Info("added peers from new cluster configuration") - - ep.appliedt = toApply.snapshot.Metadata.Term - ep.appliedi = toApply.snapshot.Metadata.Index - ep.snapi = ep.appliedi - ep.confState = toApply.snapshot.Metadata.ConfState - - // As backends and implementations like alarmsStore changed, we need - // to re-bootstrap Appliers. 
- s.uberApply = s.NewUberApplier() -} - -func (s *EtcdServer) NewUberApplier() apply.UberApplier { - return apply.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, - s.Cfg.WarningApplyDuration, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.Cfg.QuotaBackendBytes) -} - -func verifySnapshotIndex(snapshot raftpb.Snapshot, cindex uint64) { - verify.Verify(func() { - if cindex != snapshot.Metadata.Index { - panic(fmt.Sprintf("consistent_index(%d) isn't equal to snapshot index (%d)", cindex, snapshot.Metadata.Index)) - } - }) -} - -func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *toApply) { - if len(apply.entries) == 0 { - return - } - firsti := apply.entries[0].Index - if firsti > ep.appliedi+1 { - lg := s.Logger() - lg.Panic( - "unexpected committed entry index", - zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("first-committed-entry-index", firsti), - ) - } - var ents []raftpb.Entry - if ep.appliedi+1-firsti < uint64(len(apply.entries)) { - ents = apply.entries[ep.appliedi+1-firsti:] - } - if len(ents) == 0 { - return - } - var shouldstop bool - if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { - go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster")) - } -} - -func (s *EtcdServer) ForceSnapshot() { - s.forceSnapshot = true -} - -func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if !s.shouldSnapshot(ep) { - return - } - lg := s.Logger() - lg.Info( - "triggering snapshot", - zap.String("local-member-id", s.MemberId().String()), - zap.Uint64("local-member-applied-index", ep.appliedi), - zap.Uint64("local-member-snapshot-index", ep.snapi), - zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount), - zap.Bool("snapshot-forced", s.forceSnapshot), - ) - s.forceSnapshot = false - - s.snapshot(ep.appliedi, ep.confState) - ep.snapi = ep.appliedi -} - -func (s *EtcdServer) 
shouldSnapshot(ep *etcdProgress) bool { - return (s.forceSnapshot && ep.appliedi != ep.snapi) || (ep.appliedi-ep.snapi > s.Cfg.SnapshotCount) -} - -func (s *EtcdServer) hasMultipleVotingMembers() bool { - return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1 -} - -func (s *EtcdServer) isLeader() bool { - return uint64(s.MemberId()) == s.Lead() -} - -// MoveLeader transfers the leader to the given transferee. -func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error { - if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner { - return errors.ErrBadLeaderTransferee - } - - now := time.Now() - interval := time.Duration(s.Cfg.TickMs) * time.Millisecond - - lg := s.Logger() - lg.Info( - "leadership transfer starting", - zap.String("local-member-id", s.MemberId().String()), - zap.String("current-leader-member-id", types.ID(lead).String()), - zap.String("transferee-member-id", types.ID(transferee).String()), - ) - - s.r.TransferLeadership(ctx, lead, transferee) - for s.Lead() != transferee { - select { - case <-ctx.Done(): // time out - return errors.ErrTimeoutLeaderTransfer - case <-time.After(interval): - } - } - - // TODO: drain all requests, or drop all messages to the old leader - lg.Info( - "leadership transfer finished", - zap.String("local-member-id", s.MemberId().String()), - zap.String("old-leader-member-id", types.ID(lead).String()), - zap.String("new-leader-member-id", types.ID(transferee).String()), - zap.Duration("took", time.Since(now)), - ) - return nil -} - -// TransferLeadership transfers the leader to the chosen transferee. 
-func (s *EtcdServer) TransferLeadership() error { - lg := s.Logger() - if !s.isLeader() { - lg.Info( - "skipped leadership transfer; local server is not leader", - zap.String("local-member-id", s.MemberId().String()), - zap.String("current-leader-member-id", types.ID(s.Lead()).String()), - ) - return nil - } - - if !s.hasMultipleVotingMembers() { - lg.Info( - "skipped leadership transfer for single voting member cluster", - zap.String("local-member-id", s.MemberId().String()), - zap.String("current-leader-member-id", types.ID(s.Lead()).String()), - ) - return nil - } - - transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs()) - if !ok { - return errors.ErrUnhealthy - } - - tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(s.ctx, tm) - err := s.MoveLeader(ctx, s.Lead(), uint64(transferee)) - cancel() - return err -} - -// HardStop stops the server without coordination with other members in the cluster. -func (s *EtcdServer) HardStop() { - select { - case s.stop <- struct{}{}: - case <-s.done: - return - } - <-s.done -} - -// Stop stops the server gracefully, and shuts down the running goroutine. -// Stop should be called after a Start(s), otherwise it will block forever. -// When stopping leader, Stop transfers its leadership to one of its peers -// before stopping the server. -// Stop terminates the Server and performs any necessary finalization. -// Do and Process cannot be called after Stop has been invoked. 
-func (s *EtcdServer) Stop() { - lg := s.Logger() - if err := s.TransferLeadership(); err != nil { - lg.Warn("leadership transfer failed", zap.String("local-member-id", s.MemberId().String()), zap.Error(err)) - } - s.HardStop() -} - -// ReadyNotify returns a channel that will be closed when the server -// is ready to serve client requests -func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych } - -func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { - select { - case <-time.After(d): - case <-s.done: - } - select { - case s.errorc <- err: - default: - } -} - -// StopNotify returns a channel that receives an empty struct -// when the server is stopped. -func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } - -// StoppingNotify returns a channel that receives an empty struct -// when the server is being stopped. -func (s *EtcdServer) StoppingNotify() <-chan struct{} { return s.stopping } - -func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { - if s.authStore == nil { - // In the context of ordinary etcd process, s.authStore will never be nil. - // This branch is for handling cases in server_test.go - return nil - } - - // Note that this permission check is done in the API layer, - // so TOCTOU problem can be caused potentially in a schedule like this: - // update membership with user A -> revoke root role of A -> toApply membership change - // in the state machine layer - // However, both of membership change and role management requires the root privilege. - // So careful operation by admins can prevent the problem. 
- authInfo, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return s.AuthStore().IsAdminPermitted(authInfo) -} - -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - - // TODO: move Member to protobuf type - b, err := json.Marshal(memb) - if err != nil { - return nil, err - } - - // by default StrictReconfigCheck is enabled; reject new members if unhealthy. - if err := s.mayAddMember(memb); err != nil { - return nil, err - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: uint64(memb.ID), - Context: b, - } - - if memb.IsLearner { - cc.Type = raftpb.ConfChangeAddLearnerNode - } - - return s.configure(ctx, cc) -} - -func (s *EtcdServer) mayAddMember(memb membership.Member) error { - lg := s.Logger() - if !s.Cfg.StrictReconfigCheck { - return nil - } - - // protect quorum when adding voting member - if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() { - lg.Warn( - "rejecting member add request; not enough healthy members", - zap.String("local-member-id", s.MemberId().String()), - zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(errors.ErrNotEnoughStartedMembers), - ) - return errors.ErrNotEnoughStartedMembers - } - - if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberId(), s.cluster.VotingMembers()) { - lg.Warn( - "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum", - zap.String("local-member-id", s.MemberId().String()), - zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(errors.ErrUnhealthy), - ) - return errors.ErrUnhealthy - } - - return nil -} - -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, 
err - } - - // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss - if err := s.mayRemoveMember(types.ID(id)); err != nil { - return nil, err - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - return s.configure(ctx, cc) -} - -// PromoteMember promotes a learner node to a voting node. -func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - // only raft leader has information on whether the to-be-promoted learner node is ready. If promoteMember call - // fails with ErrNotLeader, forward the request to leader node via HTTP. If promoteMember call fails with error - // other than ErrNotLeader, return the error. - resp, err := s.promoteMember(ctx, id) - if err == nil { - learnerPromoteSucceed.Inc() - return resp, nil - } - if err != errors.ErrNotLeader { - learnerPromoteFailed.WithLabelValues(err.Error()).Inc() - return resp, err - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - // forward to leader - for cctx.Err() == nil { - leader, err := s.waitLeader(cctx) - if err != nil { - return nil, err - } - for _, url := range leader.PeerURLs { - resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt) - if err == nil { - return resp, nil - } - // If member promotion failed, return early. Otherwise keep retry. - if err == errors.ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { - return nil, err - } - } - } - - if cctx.Err() == context.DeadlineExceeded { - return nil, errors.ErrTimeout - } - return nil, errors.ErrCanceled -} - -// promoteMember checks whether the to-be-promoted learner node is ready before sending the promote -// request to raft. 
-// The function returns ErrNotLeader if the local node is not raft leader (therefore does not have -// enough information to determine if the learner node is ready), returns ErrLearnerNotReady if the -// local node is leader (therefore has enough information) but decided the learner node is not ready -// to be promoted. -func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - - // check if we can promote this learner. - if err := s.mayPromoteMember(types.ID(id)); err != nil { - return nil, err - } - - // build the context for the promote confChange. mark IsLearner to false and IsPromote to true. - promoteChangeContext := membership.ConfigChangeContext{ - Member: membership.Member{ - ID: types.ID(id), - }, - IsPromote: true, - } - - b, err := json.Marshal(promoteChangeContext) - if err != nil { - return nil, err - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: id, - Context: b, - } - - return s.configure(ctx, cc) -} - -func (s *EtcdServer) mayPromoteMember(id types.ID) error { - lg := s.Logger() - err := s.isLearnerReady(uint64(id)) - if err != nil { - return err - } - - if !s.Cfg.StrictReconfigCheck { - return nil - } - if !s.cluster.IsReadyToPromoteMember(uint64(id)) { - lg.Warn( - "rejecting member promote request; not enough healthy members", - zap.String("local-member-id", s.MemberId().String()), - zap.String("requested-member-remove-id", id.String()), - zap.Error(errors.ErrNotEnoughStartedMembers), - ) - return errors.ErrNotEnoughStartedMembers - } - - return nil -} - -// check whether the learner catches up with leader or not. -// Note: it will return nil if member is not found in cluster or if member is not learner. -// These two conditions will be checked before toApply phase later. 
-func (s *EtcdServer) isLearnerReady(id uint64) error { - if err := s.waitAppliedIndex(); err != nil { - return err - } - - rs := s.raftStatus() - - // leader's raftStatus.Progress is not nil - if rs.Progress == nil { - return errors.ErrNotLeader - } - - var learnerMatch uint64 - isFound := false - leaderID := rs.ID - for memberID, progress := range rs.Progress { - if id == memberID { - // check its status - learnerMatch = progress.Match - isFound = true - break - } - } - - // We should return an error in API directly, to avoid the request - // being unnecessarily delivered to raft. - if !isFound { - return membership.ErrIDNotFound - } - - leaderMatch := rs.Progress[leaderID].Match - // the learner's Match not caught up with leader yet - if float64(learnerMatch) < float64(leaderMatch)*readyPercent { - return errors.ErrLearnerNotReady - } - - return nil -} - -func (s *EtcdServer) mayRemoveMember(id types.ID) error { - if !s.Cfg.StrictReconfigCheck { - return nil - } - - lg := s.Logger() - isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner - // no need to check quorum when removing non-voting member - if isLearner { - return nil - } - - if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) { - lg.Warn( - "rejecting member remove request; not enough healthy members", - zap.String("local-member-id", s.MemberId().String()), - zap.String("requested-member-remove-id", id.String()), - zap.Error(errors.ErrNotEnoughStartedMembers), - ) - return errors.ErrNotEnoughStartedMembers - } - - // downed member is safe to remove since it's not part of the active quorum - if t := s.r.transport.ActiveSince(id); id != s.MemberId() && t.IsZero() { - return nil - } - - // protect quorum if some members are down - m := s.cluster.VotingMembers() - active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberId(), m) - if (active - 1) < 1+((len(m)-1)/2) { - lg.Warn( - "rejecting member remove request; local member has not been connected to all 
peers, reconfigure breaks active quorum", - zap.String("local-member-id", s.MemberId().String()), - zap.String("requested-member-remove", id.String()), - zap.Int("active-peers", active), - zap.Error(errors.ErrUnhealthy), - ) - return errors.ErrUnhealthy - } - - return nil -} - -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - b, merr := json.Marshal(memb) - if merr != nil { - return nil, merr - } - - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return nil, err - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: uint64(memb.ID), - Context: b, - } - return s.configure(ctx, cc) -} - -func (s *EtcdServer) setCommittedIndex(v uint64) { - atomic.StoreUint64(&s.committedIndex, v) -} - -func (s *EtcdServer) getCommittedIndex() uint64 { - return atomic.LoadUint64(&s.committedIndex) -} - -func (s *EtcdServer) setAppliedIndex(v uint64) { - atomic.StoreUint64(&s.appliedIndex, v) -} - -func (s *EtcdServer) getAppliedIndex() uint64 { - return atomic.LoadUint64(&s.appliedIndex) -} - -func (s *EtcdServer) setTerm(v uint64) { - atomic.StoreUint64(&s.term, v) -} - -func (s *EtcdServer) getTerm() uint64 { - return atomic.LoadUint64(&s.term) -} - -func (s *EtcdServer) setLead(v uint64) { - atomic.StoreUint64(&s.lead, v) -} - -func (s *EtcdServer) getLead() uint64 { - return atomic.LoadUint64(&s.lead) -} - -func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} { - return s.leaderChanged.Receive() -} - -// FirstCommitInTermNotify returns channel that will be unlocked on first -// entry committed in new term, which is necessary for new leader to answer -// read-only requests (leader is not able to respond any read-only requests -// as long as linearizable semantic is required) -func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} { - return s.firstCommitInTerm.Receive() -} - -func (s *EtcdServer) MemberId() types.ID { return s.memberId } - -func (s *EtcdServer) 
Leader() types.ID { return types.ID(s.getLead()) } - -func (s *EtcdServer) Lead() uint64 { return s.getLead() } - -func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() } - -func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() } - -func (s *EtcdServer) Term() uint64 { return s.getTerm() } - -type confChangeResponse struct { - membs []*membership.Member - err error -} - -// configure sends a configuration change through consensus and -// then waits for it to be applied to the server. It -// will block until the change is performed or there is an error. -func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { - lg := s.Logger() - cc.ID = s.reqIDGen.Next() - ch := s.w.Register(cc.ID) - - start := time.Now() - if err := s.r.ProposeConfChange(ctx, cc); err != nil { - s.w.Trigger(cc.ID, nil) - return nil, err - } - - select { - case x := <-ch: - if x == nil { - lg.Panic("failed to configure") - } - resp := x.(*confChangeResponse) - lg.Info( - "applied a configuration change through raft", - zap.String("local-member-id", s.MemberId().String()), - zap.String("raft-conf-change", cc.Type.String()), - zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()), - ) - return resp.membs, resp.err - - case <-ctx.Done(): - s.w.Trigger(cc.ID, nil) // GC wait - return nil, s.parseProposeCtxErr(ctx.Err(), start) - - case <-s.stopping: - return nil, errors.ErrStopped - } -} - -// sync proposes a SYNC request and is non-blocking. -// This makes no guarantee that the request will be proposed or performed. -// The request will be canceled after the given timeout. -func (s *EtcdServer) sync(timeout time.Duration) { - req := pb.Request{ - Method: "SYNC", - ID: s.reqIDGen.Next(), - Time: time.Now().UnixNano(), - } - data := pbutil.MustMarshal(&req) - // There is no promise that node has leader when do SYNC request, - // so it uses goroutine to propose. 
- ctx, cancel := context.WithTimeout(s.ctx, timeout) - s.GoAttach(func() { - s.r.Propose(ctx, data) - cancel() - }) -} - -// publishV3 registers server information into the cluster using v3 request. The -// information is the JSON representation of this server's member struct, updated -// with the static clientURLs of the server. -// The function keeps attempting to register until it succeeds, -// or its server is stopped. -func (s *EtcdServer) publishV3(timeout time.Duration) { - req := &membershippb.ClusterMemberAttrSetRequest{ - Member_ID: uint64(s.MemberId()), - MemberAttributes: &membershippb.Attributes{ - Name: s.attributes.Name, - ClientUrls: s.attributes.ClientURLs, - }, - } - lg := s.Logger() - for { - select { - case <-s.stopping: - lg.Warn( - "stopped publish because server is stopping", - zap.String("local-member-id", s.MemberId().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.Duration("publish-timeout", timeout), - ) - return - - default: - } - - ctx, cancel := context.WithTimeout(s.ctx, timeout) - _, err := s.raftRequest(ctx, pb.InternalRaftRequest{ClusterMemberAttrSet: req}) - cancel() - switch err { - case nil: - close(s.readych) - lg.Info( - "published local member to cluster through raft", - zap.String("local-member-id", s.MemberId().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.String("cluster-id", s.cluster.ID().String()), - zap.Duration("publish-timeout", timeout), - ) - return - - default: - lg.Warn( - "failed to publish local member to cluster through raft", - zap.String("local-member-id", s.MemberId().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.Duration("publish-timeout", timeout), - zap.Error(err), - ) - } - } -} - -func (s *EtcdServer) sendMergedSnap(merged snap.Message) { - atomic.AddInt64(&s.inflightSnapshots, 1) - - lg := s.Logger() - fields := []zap.Field{ - zap.String("from", 
s.MemberId().String()), - zap.String("to", types.ID(merged.To).String()), - zap.Int64("bytes", merged.TotalSize), - zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), - } - - now := time.Now() - s.r.transport.SendSnapshot(merged) - lg.Info("sending merged snapshot", fields...) - - s.GoAttach(func() { - select { - case ok := <-merged.CloseNotify(): - // delay releasing inflight snapshot for another 30 seconds to - // block log compaction. - // If the follower still fails to catch up, it is probably just too slow - // to catch up. We cannot avoid the snapshot cycle anyway. - if ok { - select { - case <-time.After(releaseDelayAfterSnapshot): - case <-s.stopping: - } - } - - atomic.AddInt64(&s.inflightSnapshots, -1) - - lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...) - - case <-s.stopping: - lg.Warn("canceled sending merged snapshot; server stopping", fields...) - return - } - }) -} - -// toApply takes entries received from Raft (after it has been committed) and -// applies them to the current state of the EtcdServer. -// The given entries should not be empty. -func (s *EtcdServer) apply( - es []raftpb.Entry, - confState *raftpb.ConfState, -) (appliedt uint64, appliedi uint64, shouldStop bool) { - s.lg.Debug("Applying entries", zap.Int("num-entries", len(es))) - for i := range es { - e := es[i] - s.lg.Debug("Applying entry", - zap.Uint64("index", e.Index), - zap.Uint64("term", e.Term), - zap.Stringer("type", e.Type)) - switch e.Type { - case raftpb.EntryNormal: - s.applyEntryNormal(&e) - s.setAppliedIndex(e.Index) - s.setTerm(e.Term) - - case raftpb.EntryConfChange: - // We need to toApply all WAL entries on top of v2store - // and only 'unapplied' (e.Index>backend.ConsistentIndex) on the backend. 
- shouldApplyV3 := membership.ApplyV2storeOnly - - // set the consistent index of current executing entry - if e.Index > s.consistIndex.ConsistentIndex() { - s.consistIndex.SetConsistentApplyingIndex(e.Index, e.Term) - shouldApplyV3 = membership.ApplyBoth - } - - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - removedSelf, err := s.applyConfChange(cc, confState, shouldApplyV3) - s.setAppliedIndex(e.Index) - s.setTerm(e.Term) - shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) - - default: - lg := s.Logger() - lg.Panic( - "unknown entry type; must be either EntryNormal or EntryConfChange", - zap.String("type", e.Type.String()), - ) - } - appliedi, appliedt = e.Index, e.Term - } - return appliedt, appliedi, shouldStop -} - -// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer -func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { - shouldApplyV3 := membership.ApplyV2storeOnly - var ar *apply.Result - index := s.consistIndex.ConsistentIndex() - if e.Index > index { - // set the consistent index of current executing entry - s.consistIndex.SetConsistentApplyingIndex(e.Index, e.Term) - shouldApplyV3 = membership.ApplyBoth - defer func() { - // The txPostLockInsideApplyHook will not get called in some cases, - // in which we should move the consistent index forward directly. - newIndex := s.consistIndex.ConsistentIndex() - if newIndex < e.Index { - s.consistIndex.SetConsistentIndex(e.Index, e.Term) - } - }() - } - s.lg.Debug("toApply entry normal", - zap.Uint64("consistent-index", index), - zap.Uint64("entry-index", e.Index), - zap.Bool("should-applyV3", bool(shouldApplyV3))) - - // raft state machine may generate noop entry when leader confirmation. 
- // skip it in advance to avoid some potential bug in the future - if len(e.Data) == 0 { - s.firstCommitInTerm.Notify() - - // promote lessor when the local member is leader and finished - // applying all entries from the last term. - if s.isLeader() { - s.lessor.Promote(s.Cfg.ElectionTimeout()) - } - return - } - - var raftReq pb.InternalRaftRequest - if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible - var r pb.Request - rp := &r - pbutil.MustUnmarshal(rp, e.Data) - s.lg.Debug("applyEntryNormal", zap.Stringer("V2request", rp)) - s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp), shouldApplyV3)) - return - } - s.lg.Debug("applyEntryNormal", zap.Stringer("raftReq", &raftReq)) - - if raftReq.V2 != nil { - req := (*RequestV2)(raftReq.V2) - s.w.Trigger(req.ID, s.applyV2Request(req, shouldApplyV3)) - return - } - - id := raftReq.ID - if id == 0 { - if raftReq.Header == nil { - s.lg.Panic("applyEntryNormal, could not find a header") - } - id = raftReq.Header.ID - } - - needResult := s.w.IsRegistered(id) - if needResult || !noSideEffect(&raftReq) { - if !needResult && raftReq.Txn != nil { - removeNeedlessRangeReqs(raftReq.Txn) - } - ar = s.uberApply.Apply(&raftReq, shouldApplyV3) - } - - // do not re-toApply applied entries. 
- if !shouldApplyV3 { - return - } - - if ar == nil { - return - } - - if ar.Err != errors.ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { - s.w.Trigger(id, ar) - return - } - - lg := s.Logger() - lg.Warn( - "message exceeded backend quota; raising alarm", - zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), - zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), - zap.Error(ar.Err), - ) - - s.GoAttach(func() { - a := &pb.AlarmRequest{ - MemberID: uint64(s.MemberId()), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) - s.w.Trigger(id, ar) - }) -} - -func noSideEffect(r *pb.InternalRaftRequest) bool { - return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil -} - -func removeNeedlessRangeReqs(txn *pb.TxnRequest) { - f := func(ops []*pb.RequestOp) []*pb.RequestOp { - j := 0 - for i := 0; i < len(ops); i++ { - if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { - continue - } - ops[j] = ops[i] - j++ - } - - return ops[:j] - } - - txn.Success = f(txn.Success) - txn.Failure = f(txn.Failure) -} - -// applyConfChange applies a ConfChange to the server. It is only -// invoked with a ConfChange that has already passed through Raft -func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) { - if err := s.cluster.ValidateConfigurationChange(cc); err != nil { - cc.NodeID = raft.None - s.r.ApplyConfChange(cc) - - // The txPostLock callback will not get called in this case, - // so we should set the consistent index directly. 
- if s.consistIndex != nil && membership.ApplyBoth == shouldApplyV3 { - applyingIndex, applyingTerm := s.consistIndex.ConsistentApplyingIndex() - s.consistIndex.SetConsistentIndex(applyingIndex, applyingTerm) - } - return false, err - } - - lg := s.Logger() - *confState = *s.r.ApplyConfChange(cc) - s.beHooks.SetConfState(confState) - switch cc.Type { - case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode: - confChangeContext := new(membership.ConfigChangeContext) - if err := json.Unmarshal(cc.Context, confChangeContext); err != nil { - lg.Panic("failed to unmarshal member", zap.Error(err)) - } - if cc.NodeID != uint64(confChangeContext.Member.ID) { - lg.Panic( - "got different member ID", - zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), - zap.String("member-id-from-message", confChangeContext.Member.ID.String()), - ) - } - if confChangeContext.IsPromote { - s.cluster.PromoteMember(confChangeContext.Member.ID, shouldApplyV3) - } else { - s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3) - - if confChangeContext.Member.ID != s.MemberId() { - s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs) - } - } - - // update the isLearner metric when this server id is equal to the id in raft member confChange - if confChangeContext.Member.ID == s.MemberId() { - if cc.Type == raftpb.ConfChangeAddLearnerNode { - isLearner.Set(1) - } else { - isLearner.Set(0) - } - } - - case raftpb.ConfChangeRemoveNode: - id := types.ID(cc.NodeID) - s.cluster.RemoveMember(id, shouldApplyV3) - if id == s.MemberId() { - return true, nil - } - s.r.transport.RemovePeer(id) - - case raftpb.ConfChangeUpdateNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - lg.Panic("failed to unmarshal member", zap.Error(err)) - } - if cc.NodeID != uint64(m.ID) { - lg.Panic( - "got different member ID", - zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), - 
zap.String("member-id-from-message", m.ID.String()), - ) - } - s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3) - if m.ID != s.MemberId() { - s.r.transport.UpdatePeer(m.ID, m.PeerURLs) - } - } - return false, nil -} - -// TODO: non-blocking snapshot -func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { - clone := s.v2store.Clone() - // commit kv to write metadata (for example: consistent index) to disk. - // - // This guarantees that Backend's consistent_index is >= index of last snapshot. - // - // KV().commit() updates the consistent index in backend. - // All operations that update consistent index must be called sequentially - // from applyAll function. - // So KV().Commit() cannot run in parallel with toApply. It has to be called outside - // the go routine created below. - s.KV().Commit() - - s.GoAttach(func() { - lg := s.Logger() - - d, err := clone.SaveNoCopy() - // TODO: current store will never fail to do a snapshot - // what should we do if the store might fail? - if err != nil { - lg.Panic("failed to save v2 store", zap.Error(err)) - } - snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) - if err != nil { - // the snapshot was done asynchronously with the progress of raft. - // raft might have already got a newer snapshot. - if err == raft.ErrSnapOutOfDate { - return - } - lg.Panic("failed to create snapshot", zap.Error(err)) - } - // SaveSnap saves the snapshot to file and appends the corresponding WAL entry. - if err = s.r.storage.SaveSnap(snap); err != nil { - lg.Panic("failed to save snapshot", zap.Error(err)) - } - if err = s.r.storage.Release(snap); err != nil { - lg.Panic("failed to release wal", zap.Error(err)) - } - - lg.Info( - "saved snapshot", - zap.Uint64("snapshot-index", snap.Metadata.Index), - ) - - // When sending a snapshot, etcd will pause compaction. - // After receives a snapshot, the slow follower needs to get all the entries right after - // the snapshot sent to catch up. 
If we do not pause compaction, the log entries right after - // the snapshot sent might already be compacted. It happens when the snapshot takes long time - // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. - if atomic.LoadInt64(&s.inflightSnapshots) != 0 { - lg.Info("skip compaction since there is an inflight snapshot") - return - } - - // keep some in memory log entries for slow followers. - compacti := uint64(1) - if snapi > s.Cfg.SnapshotCatchUpEntries { - compacti = snapi - s.Cfg.SnapshotCatchUpEntries - } - - err = s.r.raftStorage.Compact(compacti) - if err != nil { - // the compaction was done asynchronously with the progress of raft. - // raft log might already been compact. - if err == raft.ErrCompacted { - return - } - lg.Panic("failed to compact", zap.Error(err)) - } - lg.Info( - "compacted Raft logs", - zap.Uint64("compact-index", compacti), - ) - }) -} - -// CutPeer drops messages to the specified peer. -func (s *EtcdServer) CutPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.CutPeer(id) - } -} - -// MendPeer recovers the message dropping behavior of the given peer. -func (s *EtcdServer) MendPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.MendPeer(id) - } -} - -func (s *EtcdServer) PauseSending() { s.r.pauseSending() } - -func (s *EtcdServer) ResumeSending() { s.r.resumeSending() } - -func (s *EtcdServer) ClusterVersion() *semver.Version { - if s.cluster == nil { - return nil - } - return s.cluster.Version() -} - -func (s *EtcdServer) StorageVersion() *semver.Version { - // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock. 
- s.bemu.RLock() - defer s.bemu.RUnlock() - - v, err := schema.DetectSchemaVersion(s.lg, s.be.ReadTx()) - if err != nil { - s.lg.Warn("Failed to detect schema version", zap.Error(err)) - return nil - } - return &v -} - -// monitorClusterVersions every monitorVersionInterval checks if it's the leader and updates cluster version if needed. -func (s *EtcdServer) monitorClusterVersions() { - monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s)) - for { - select { - case <-s.firstCommitInTerm.Receive(): - case <-time.After(monitorVersionInterval): - case <-s.stopping: - return - } - - if s.Leader() != s.MemberId() { - continue - } - err := monitor.UpdateClusterVersionIfNeeded() - if err != nil { - s.lg.Error("Failed to monitor cluster version", zap.Error(err)) - } - } -} - -// monitorStorageVersion every monitorVersionInterval updates storage version if needed. -func (s *EtcdServer) monitorStorageVersion() { - monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s)) - for { - select { - case <-time.After(monitorVersionInterval): - case <-s.clusterVersionChanged.Receive(): - case <-s.stopping: - return - } - monitor.UpdateStorageVersionIfNeeded() - } -} - -func (s *EtcdServer) monitorKVHash() { - t := s.Cfg.CorruptCheckTime - if t == 0 { - return - } - checkTicker := time.NewTicker(t) - defer checkTicker.Stop() - - lg := s.Logger() - lg.Info( - "enabled corruption checking", - zap.String("local-member-id", s.MemberId().String()), - zap.Duration("interval", t), - ) - for { - select { - case <-s.stopping: - return - case <-checkTicker.C: - } - if !s.isLeader() { - continue - } - if err := s.corruptionChecker.PeriodicCheck(); err != nil { - lg.Warn("failed to check hash KV", zap.Error(err)) - } - } -} - -func (s *EtcdServer) monitorCompactHash() { - if !s.Cfg.CompactHashCheckEnabled { - return - } - t := s.Cfg.CompactHashCheckTime - for { - select { - case <-time.After(t): - case <-s.stopping: - return - } - if !s.isLeader() { - 
continue - } - s.corruptionChecker.CompactHashCheck() - } -} - -func (s *EtcdServer) updateClusterVersionV2(ver string) { - lg := s.Logger() - - if s.cluster.Version() == nil { - lg.Info( - "setting up initial cluster version using v2 API", - zap.String("cluster-version", version.Cluster(ver)), - ) - } else { - lg.Info( - "updating cluster version using v2 API", - zap.String("from", version.Cluster(s.cluster.Version().String())), - zap.String("to", version.Cluster(ver)), - ) - } - - req := pb.Request{ - Method: "PUT", - Path: membership.StoreClusterVersionKey(), - Val: ver, - } - - ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) - _, err := s.Do(ctx, req) - cancel() - - switch err { - case nil: - lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) - return - - case errors.ErrStopped: - lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) - return - - default: - lg.Warn("failed to update cluster version", zap.Error(err)) - } -} - -func (s *EtcdServer) updateClusterVersionV3(ver string) { - lg := s.Logger() - - if s.cluster.Version() == nil { - lg.Info( - "setting up initial cluster version using v3 API", - zap.String("cluster-version", version.Cluster(ver)), - ) - } else { - lg.Info( - "updating cluster version using v3 API", - zap.String("from", version.Cluster(s.cluster.Version().String())), - zap.String("to", version.Cluster(ver)), - ) - } - - req := membershippb.ClusterVersionSetRequest{Ver: ver} - - ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) - _, err := s.raftRequest(ctx, pb.InternalRaftRequest{ClusterVersionSet: &req}) - cancel() - - switch err { - case nil: - lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) - return - - case errors.ErrStopped: - lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) - return - - default: - lg.Warn("failed to update cluster version", zap.Error(err)) - } -} - -// 
monitorDowngrade every DowngradeCheckTime checks if it's the leader and cancels downgrade if needed. -func (s *EtcdServer) monitorDowngrade() { - monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s)) - t := s.Cfg.DowngradeCheckTime - if t == 0 { - return - } - for { - select { - case <-time.After(t): - case <-s.stopping: - return - } - - if !s.isLeader() { - continue - } - monitor.CancelDowngradeIfNeeded() - } -} - -func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { - switch err { - case context.Canceled: - return errors.ErrCanceled - - case context.DeadlineExceeded: - s.leadTimeMu.RLock() - curLeadElected := s.leadElectedTime - s.leadTimeMu.RUnlock() - prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) - if start.After(prevLeadLost) && start.Before(curLeadElected) { - return errors.ErrTimeoutDueToLeaderFail - } - lead := types.ID(s.getLead()) - switch lead { - case types.ID(raft.None): - // TODO: return error to specify it happens because the cluster does not have leader now - case s.MemberId(): - if !isConnectedToQuorumSince(s.r.transport, start, s.MemberId(), s.cluster.Members()) { - return errors.ErrTimeoutDueToConnectionLost - } - default: - if !isConnectedSince(s.r.transport, start, lead) { - return errors.ErrTimeoutDueToConnectionLost - } - } - return errors.ErrTimeout - - default: - return err - } -} - -func (s *EtcdServer) KV() mvcc.WatchableKV { return s.kv } -func (s *EtcdServer) Backend() backend.Backend { - s.bemu.RLock() - defer s.bemu.RUnlock() - return s.be -} - -func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } - -func (s *EtcdServer) restoreAlarms() error { - as, err := v3alarm.NewAlarmStore(s.lg, schema.NewAlarmBackend(s.lg, s.be)) - if err != nil { - return err - } - s.alarmStore = as - return nil -} - -// GoAttach creates a goroutine on a given function and tracks it using -// the etcdserver waitgroup. 
-// The passed function should interrupt on s.StoppingNotify(). -func (s *EtcdServer) GoAttach(f func()) { - s.wgMu.RLock() // this blocks with ongoing close(s.stopping) - defer s.wgMu.RUnlock() - select { - case <-s.stopping: - lg := s.Logger() - lg.Warn("server has stopped; skipping GoAttach") - return - default: - } - - // now safe to add since waitgroup wait has not started yet - s.wg.Add(1) - go func() { - defer s.wg.Done() - f() - }() -} - -func (s *EtcdServer) Alarms() []*pb.AlarmMember { - return s.alarmStore.Get(pb.AlarmType_NONE) -} - -// IsLearner returns if the local member is raft learner -func (s *EtcdServer) IsLearner() bool { - return s.cluster.IsLocalMemberLearner() -} - -// IsMemberExist returns if the member with the given id exists in cluster. -func (s *EtcdServer) IsMemberExist(id types.ID) bool { - return s.cluster.IsMemberExist(id) -} - -// raftStatus returns the raft status of this etcd node. -func (s *EtcdServer) raftStatus() raft.Status { - return s.r.Node.Status() -} - -func (s *EtcdServer) Version() *serverversion.Manager { - return serverversion.NewManager(s.Logger(), NewServerVersionAdapter(s)) -} - -func (s *EtcdServer) getTxPostLockInsideApplyHook() func() { - return func() { - applyingIdx, applyingTerm := s.consistIndex.ConsistentApplyingIndex() - if applyingIdx > s.consistIndex.UnsafeConsistentIndex() { - s.consistIndex.SetConsistentIndex(applyingIdx, applyingTerm) - } - } -} - -func (s *EtcdServer) CorruptionChecker() CorruptionChecker { - return s.corruptionChecker -} diff --git a/server/etcdserver/server_access_control.go b/server/etcdserver/server_access_control.go deleted file mode 100644 index 09e2255ccca..00000000000 --- a/server/etcdserver/server_access_control.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import "sync" - -// AccessController controls etcd server HTTP request access. -type AccessController struct { - corsMu sync.RWMutex - CORS map[string]struct{} - hostWhitelistMu sync.RWMutex - HostWhitelist map[string]struct{} -} - -// NewAccessController returns a new "AccessController" with default "*" values. -func NewAccessController() *AccessController { - return &AccessController{ - CORS: map[string]struct{}{"*": {}}, - HostWhitelist: map[string]struct{}{"*": {}}, - } -} - -// OriginAllowed determines whether the server will allow a given CORS origin. -// If CORS is empty, allow all. -func (ac *AccessController) OriginAllowed(origin string) bool { - ac.corsMu.RLock() - defer ac.corsMu.RUnlock() - if len(ac.CORS) == 0 { // allow all - return true - } - _, ok := ac.CORS["*"] - if ok { - return true - } - _, ok = ac.CORS[origin] - return ok -} - -// IsHostWhitelisted returns true if the host is whitelisted. -// If whitelist is empty, allow all. 
-func (ac *AccessController) IsHostWhitelisted(host string) bool { - ac.hostWhitelistMu.RLock() - defer ac.hostWhitelistMu.RUnlock() - if len(ac.HostWhitelist) == 0 { // allow all - return true - } - _, ok := ac.HostWhitelist["*"] - if ok { - return true - } - _, ok = ac.HostWhitelist[host] - return ok -} diff --git a/server/etcdserver/server_access_control_test.go b/server/etcdserver/server_access_control_test.go deleted file mode 100644 index 4f64c78e228..00000000000 --- a/server/etcdserver/server_access_control_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOriginAllowed(t *testing.T) { - tests := []struct { - accessController *AccessController - origin string - allowed bool - }{ - { - &AccessController{ - CORS: map[string]struct{}{}, - }, - "https://example.com", - true, - }, - { - &AccessController{ - CORS: map[string]struct{}{"*": {}}, - }, - "https://example.com", - true, - }, - { - &AccessController{ - CORS: map[string]struct{}{"https://example.com": {}, "http://example.org": {}}, - }, - "https://example.com", - true, - }, - { - &AccessController{ - CORS: map[string]struct{}{"http://example.org": {}}, - }, - "https://example.com", - false, - }, - { - &AccessController{ - CORS: map[string]struct{}{"*": {}, "http://example.org/": {}}, - }, - "https://example.com", - true, - }, - } - - for _, tt := range tests { - allowed := tt.accessController.OriginAllowed(tt.origin) - assert.Equal(t, allowed, tt.allowed) - } -} - -func TestIsHostWhitelisted(t *testing.T) { - tests := []struct { - accessController *AccessController - host string - whitelisted bool - }{ - { - &AccessController{ - HostWhitelist: map[string]struct{}{}, - }, - "example.com", - true, - }, - { - &AccessController{ - HostWhitelist: map[string]struct{}{"*": {}}, - }, - "example.com", - true, - }, - { - &AccessController{ - HostWhitelist: map[string]struct{}{"example.com": {}, "example.org": {}}, - }, - "example.com", - true, - }, - { - &AccessController{ - HostWhitelist: map[string]struct{}{"example.org": {}}, - }, - "example.com", - false, - }, - { - &AccessController{ - HostWhitelist: map[string]struct{}{"*": {}, "example.org/": {}}, - }, - "example.com", - true, - }, - } - - for _, tt := range tests { - whitelisted := tt.accessController.IsHostWhitelisted(tt.host) - assert.Equal(t, whitelisted, tt.whitelisted) - } -} diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go deleted file mode 100644 index 2bf113505f4..00000000000 
--- a/server/etcdserver/server_test.go +++ /dev/null @@ -1,1978 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "context" - "encoding/json" - "fmt" - "math" - "net/http" - "os" - "path" - "path/filepath" - "reflect" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/client/pkg/v3/verify" - "go.etcd.io/etcd/pkg/v3/idutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/pkg/v3/wait" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mock/mockstorage" - "go.etcd.io/etcd/server/v3/mock/mockstore" - "go.etcd.io/etcd/server/v3/mock/mockwait" - serverstorage "go.etcd.io/etcd/server/v3/storage" - betesting 
"go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -// TestDoLocalAction tests requests which do not need to go through raft to be applied, -// and are served through local data. -func TestDoLocalAction(t *testing.T) { - tests := []struct { - req pb.Request - - wresp Response - werr error - wactions []testutil.Action - }{ - { - pb.Request{Method: "GET", ID: 1, Wait: true}, - Response{Watcher: v2store.NewNopWatcher()}, nil, []testutil.Action{{Name: "Watch"}}, - }, - { - pb.Request{Method: "GET", ID: 1}, - Response{Event: &v2store.Event{}}, nil, - []testutil.Action{ - { - Name: "Get", - Params: []interface{}{"", false, false}, - }, - }, - }, - { - pb.Request{Method: "HEAD", ID: 1}, - Response{Event: &v2store.Event{}}, nil, - []testutil.Action{ - { - Name: "Get", - Params: []interface{}{"", false, false}, - }, - }, - }, - { - pb.Request{Method: "BADMETHOD", ID: 1}, - Response{}, errors.ErrUnknownMethod, []testutil.Action{}, - }, - } - for i, tt := range tests { - st := mockstore.NewRecorder() - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - v2store: st, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - } - resp, err := srv.Do(context.Background(), tt.req) - - if err != tt.werr { - t.Fatalf("#%d: err = %+v, want %+v", i, err, tt.werr) - } - if !reflect.DeepEqual(resp, tt.wresp) { - t.Errorf("#%d: resp = %+v, want %+v", i, resp, tt.wresp) - } - gaction := st.Action() - if !reflect.DeepEqual(gaction, tt.wactions) { - t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions) - } - } -} - -// TestDoBadLocalAction tests server requests which do not need to go through consensus, -// and return errors when they fetch from local data. 
-func TestDoBadLocalAction(t *testing.T) { - storeErr := fmt.Errorf("bah") - tests := []struct { - req pb.Request - - wactions []testutil.Action - }{ - { - pb.Request{Method: "GET", ID: 1, Wait: true}, - []testutil.Action{{Name: "Watch"}}, - }, - { - pb.Request{Method: "GET", ID: 1}, - []testutil.Action{ - { - Name: "Get", - Params: []interface{}{"", false, false}, - }, - }, - }, - { - pb.Request{Method: "HEAD", ID: 1}, - []testutil.Action{ - { - Name: "Get", - Params: []interface{}{"", false, false}, - }, - }, - }, - } - for i, tt := range tests { - st := mockstore.NewErrRecorder(storeErr) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - v2store: st, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - } - resp, err := srv.Do(context.Background(), tt.req) - - if err != storeErr { - t.Fatalf("#%d: err = %+v, want %+v", i, err, storeErr) - } - if !reflect.DeepEqual(resp, Response{}) { - t.Errorf("#%d: resp = %+v, want %+v", i, resp, Response{}) - } - gaction := st.Action() - if !reflect.DeepEqual(gaction, tt.wactions) { - t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions) - } - } -} - -// TestApplyRepeat tests that server handles repeat raft messages gracefully -func TestApplyRepeat(t *testing.T) { - n := newNodeConfChangeCommitterStream() - n.readyc <- raft.Ready{ - SoftState: &raft.SoftState{RaftState: raft.StateLeader}, - } - cl := newTestCluster(t, nil) - st := v2store.New() - cl.SetStore(v2store.New()) - cl.AddMember(&membership.Member{ID: 1234}, true) - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: newNopTransporter(), - }) - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - r: *r, - v2store: st, - cluster: cl, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - consistIndex: cindex.NewFakeConsistentIndex(0), - } - s.applyV2 = 
&applierV2store{store: s.v2store, cluster: s.cluster} - s.start() - req := &pb.Request{Method: "QGET", ID: uint64(1)} - ents := []raftpb.Entry{{Index: 1, Data: pbutil.MustMarshal(req)}} - n.readyc <- raft.Ready{CommittedEntries: ents} - // dup msg - n.readyc <- raft.Ready{CommittedEntries: ents} - - // use a conf change to block until dup msgs are all processed - cc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2} - ents = []raftpb.Entry{{ - Index: 2, - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - }} - n.readyc <- raft.Ready{CommittedEntries: ents} - // wait for conf change message - act, err := n.Wait(1) - // wait for stop message (async to avoid deadlock) - stopc := make(chan error, 1) - go func() { - _, werr := n.Wait(1) - stopc <- werr - }() - s.Stop() - - // only want to confirm etcdserver won't panic; no data to check - - if err != nil { - t.Fatal(err) - } - if len(act) == 0 { - t.Fatalf("expected len(act)=0, got %d", len(act)) - } - - if err = <-stopc; err != nil { - t.Fatalf("error on stop (%v)", err) - } -} - -func TestApplyRequest(t *testing.T) { - tests := []struct { - req pb.Request - - wresp Response - wactions []testutil.Action - }{ - // POST ==> Create - { - pb.Request{Method: "POST", ID: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Create", - Params: []interface{}{"", false, "", true, v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // POST ==> Create, with expiration - { - pb.Request{Method: "POST", ID: 1, Expiration: 1337}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Create", - Params: []interface{}{"", false, "", true, v2store.TTLOptionSet{ExpireTime: time.Unix(0, 1337)}}, - }, - }, - }, - // POST ==> Create, with dir - { - pb.Request{Method: "POST", ID: 1, Dir: true}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Create", - Params: []interface{}{"", true, "", true, v2store.TTLOptionSet{ExpireTime: time.Time{}}}, 
- }, - }, - }, - // PUT ==> Set - { - pb.Request{Method: "PUT", ID: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Set", - Params: []interface{}{"", false, "", v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT ==> Set, with dir - { - pb.Request{Method: "PUT", ID: 1, Dir: true}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Set", - Params: []interface{}{"", true, "", v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT with PrevExist=true ==> Update - { - pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(true)}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Update", - Params: []interface{}{"", "", v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT with PrevExist=false ==> Create - { - pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(false)}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Create", - Params: []interface{}{"", false, "", false, v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT with PrevExist=true *and* PrevIndex set ==> CompareAndSwap - { - pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(true), PrevIndex: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "CompareAndSwap", - Params: []interface{}{"", "", uint64(1), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT with PrevExist=false *and* PrevIndex set ==> Create - { - pb.Request{Method: "PUT", ID: 1, PrevExist: pbutil.Boolp(false), PrevIndex: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Create", - Params: []interface{}{"", false, "", false, v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT with PrevIndex set ==> CompareAndSwap - { - pb.Request{Method: "PUT", ID: 1, PrevIndex: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "CompareAndSwap", - Params: []interface{}{"", 
"", uint64(1), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT with PrevValue set ==> CompareAndSwap - { - pb.Request{Method: "PUT", ID: 1, PrevValue: "bar"}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "CompareAndSwap", - Params: []interface{}{"", "bar", uint64(0), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // PUT with PrevIndex and PrevValue set ==> CompareAndSwap - { - pb.Request{Method: "PUT", ID: 1, PrevIndex: 1, PrevValue: "bar"}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "CompareAndSwap", - Params: []interface{}{"", "bar", uint64(1), "", v2store.TTLOptionSet{ExpireTime: time.Time{}}}, - }, - }, - }, - // DELETE ==> Delete - { - pb.Request{Method: "DELETE", ID: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Delete", - Params: []interface{}{"", false, false}, - }, - }, - }, - // DELETE with PrevIndex set ==> CompareAndDelete - { - pb.Request{Method: "DELETE", ID: 1, PrevIndex: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "CompareAndDelete", - Params: []interface{}{"", "", uint64(1)}, - }, - }, - }, - // DELETE with PrevValue set ==> CompareAndDelete - { - pb.Request{Method: "DELETE", ID: 1, PrevValue: "bar"}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "CompareAndDelete", - Params: []interface{}{"", "bar", uint64(0)}, - }, - }, - }, - // DELETE with PrevIndex *and* PrevValue set ==> CompareAndDelete - { - pb.Request{Method: "DELETE", ID: 1, PrevIndex: 5, PrevValue: "bar"}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "CompareAndDelete", - Params: []interface{}{"", "bar", uint64(5)}, - }, - }, - }, - // QGET ==> Get - { - pb.Request{Method: "QGET", ID: 1}, - Response{Event: &v2store.Event{}}, - []testutil.Action{ - { - Name: "Get", - Params: []interface{}{"", false, false}, - }, - }, - }, - // SYNC ==> DeleteExpiredKeys - { - pb.Request{Method: 
"SYNC", ID: 1}, - Response{}, - []testutil.Action{ - { - Name: "DeleteExpiredKeys", - Params: []interface{}{time.Unix(0, 0)}, - }, - }, - }, - { - pb.Request{Method: "SYNC", ID: 1, Time: 12345}, - Response{}, - []testutil.Action{ - { - Name: "DeleteExpiredKeys", - Params: []interface{}{time.Unix(0, 12345)}, - }, - }, - }, - // Unknown method - error - { - pb.Request{Method: "BADMETHOD", ID: 1}, - Response{Err: errors.ErrUnknownMethod}, - []testutil.Action{}, - }, - } - - for i, tt := range tests { - st := mockstore.NewRecorder() - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - v2store: st, - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - resp := srv.applyV2Request((*RequestV2)(&tt.req), membership.ApplyBoth) - - if !reflect.DeepEqual(resp, tt.wresp) { - t.Errorf("#%d: resp = %+v, want %+v", i, resp, tt.wresp) - } - gaction := st.Action() - if !reflect.DeepEqual(gaction, tt.wactions) { - t.Errorf("#%d: action = %#v, want %#v", i, gaction, tt.wactions) - } - } -} - -func TestApplyRequestOnAdminMemberAttributes(t *testing.T) { - cl := newTestCluster(t, []*membership.Member{{ID: 1}}) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - v2store: mockstore.NewRecorder(), - cluster: cl, - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - - req := pb.Request{ - Method: "PUT", - ID: 1, - Path: membership.MemberAttributesStorePath(1), - Val: `{"Name":"abc","ClientURLs":["http://127.0.0.1:2379"]}`, - } - srv.applyV2Request((*RequestV2)(&req), membership.ApplyBoth) - w := membership.Attributes{Name: "abc", ClientURLs: []string{"http://127.0.0.1:2379"}} - if g := cl.Member(1).Attributes; !reflect.DeepEqual(g, w) { - t.Errorf("attributes = %v, want %v", g, w) - } -} - -func TestApplyConfChangeError(t *testing.T) { - cl := membership.NewCluster(zaptest.NewLogger(t)) - cl.SetStore(v2store.New()) - for i := 1; i <= 4; i++ { - cl.AddMember(&membership.Member{ID: 
types.ID(i)}, true) - } - cl.RemoveMember(4, true) - - attr := membership.RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 1)}} - ctx, err := json.Marshal(&membership.Member{ID: types.ID(1), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - attr = membership.RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 4)}} - ctx4, err := json.Marshal(&membership.Member{ID: types.ID(1), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - attr = membership.RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", 5)}} - ctx5, err := json.Marshal(&membership.Member{ID: types.ID(1), RaftAttributes: attr}) - if err != nil { - t.Fatal(err) - } - - tests := []struct { - cc raftpb.ConfChange - werr error - }{ - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 4, - Context: ctx4, - }, - membership.ErrIDRemoved, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: 4, - Context: ctx4, - }, - membership.ErrIDRemoved, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: 1, - Context: ctx, - }, - membership.ErrIDExists, - }, - { - raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: 5, - Context: ctx5, - }, - membership.ErrIDNotFound, - }, - } - for i, tt := range tests { - n := newNodeRecorder() - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}), - cluster: cl, - } - _, err := srv.applyConfChange(tt.cc, nil, true) - if err != tt.werr { - t.Errorf("#%d: applyConfChange error = %v, want %v", i, err, tt.werr) - } - cc := raftpb.ConfChange{Type: tt.cc.Type, NodeID: raft.None, Context: tt.cc.Context} - w := []testutil.Action{ - { - Name: "ApplyConfChange", - Params: []interface{}{cc}, - }, - } - if g, _ := n.Wait(1); !reflect.DeepEqual(g, w) { - t.Errorf("#%d: action = %+v, want %+v", i, g, w) - } - } -} - -func TestApplyConfChangeShouldStop(t 
*testing.T) { - cl := membership.NewCluster(zaptest.NewLogger(t)) - cl.SetStore(v2store.New()) - for i := 1; i <= 3; i++ { - cl.AddMember(&membership.Member{ID: types.ID(i)}, true) - } - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: newNodeNop(), - transport: newNopTransporter(), - }) - lg := zaptest.NewLogger(t) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - memberId: 1, - r: *r, - cluster: cl, - beHooks: serverstorage.NewBackendHooks(lg, nil), - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: 2, - } - // remove non-local member - shouldStop, err := srv.applyConfChange(cc, &raftpb.ConfState{}, true) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - if shouldStop { - t.Errorf("shouldStop = %t, want %t", shouldStop, false) - } - - // remove local member - cc.NodeID = 1 - shouldStop, err = srv.applyConfChange(cc, &raftpb.ConfState{}, true) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - if !shouldStop { - t.Errorf("shouldStop = %t, want %t", shouldStop, true) - } -} - -// TestApplyConfigChangeUpdatesConsistIndex ensures a config change also updates the consistIndex -// where consistIndex equals to applied index. 
-func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) { - lg := zaptest.NewLogger(t) - - cl := membership.NewCluster(zaptest.NewLogger(t)) - cl.SetStore(v2store.New()) - cl.AddMember(&membership.Member{ID: types.ID(1)}, true) - - be, _ := betesting.NewDefaultTmpBackend(t) - defer betesting.Close(t, be) - schema.CreateMetaBucket(be.BatchTx()) - - ci := cindex.NewConsistentIndex(be) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - memberId: 1, - r: *realisticRaftNode(lg), - cluster: cl, - w: wait.New(), - consistIndex: ci, - beHooks: serverstorage.NewBackendHooks(lg, ci), - } - - // create EntryConfChange entry - now := time.Now() - urls, err := types.NewURLs([]string{"http://whatever:123"}) - if err != nil { - t.Fatal(err) - } - m := membership.NewMember("", urls, "", &now) - m.ID = types.ID(2) - b, err := json.Marshal(m) - if err != nil { - t.Fatal(err) - } - cc := &raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 2, Context: b} - ents := []raftpb.Entry{{ - Index: 2, - Term: 4, - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - }} - - _, appliedi, _ := srv.apply(ents, &raftpb.ConfState{}) - consistIndex := srv.consistIndex.ConsistentIndex() - assert.Equal(t, uint64(2), appliedi) - - t.Run("verify-backend", func(t *testing.T) { - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - srv.beHooks.OnPreCommitUnsafe(tx) - assert.Equal(t, raftpb.ConfState{Voters: []uint64{2}}, *schema.UnsafeConfStateFromBackend(lg, tx)) - }) - rindex, _ := schema.ReadConsistentIndex(be.ReadTx()) - assert.Equal(t, consistIndex, rindex) -} - -func realisticRaftNode(lg *zap.Logger) *raftNode { - storage := raft.NewMemoryStorage() - storage.SetHardState(raftpb.HardState{Commit: 0, Term: 0}) - c := &raft.Config{ - ID: 1, - ElectionTick: 10, - HeartbeatTick: 1, - Storage: storage, - MaxSizePerMsg: math.MaxUint64, - MaxInflightMsgs: 256, - } - n := raft.RestartNode(c) - r := newRaftNode(raftNodeConfig{ - lg: lg, - Node: n, - transport: 
newNopTransporter(), - }) - return r -} - -// TestApplyMultiConfChangeShouldStop ensures that toApply will return shouldStop -// if the local member is removed along with other conf updates. -func TestApplyMultiConfChangeShouldStop(t *testing.T) { - lg := zaptest.NewLogger(t) - cl := membership.NewCluster(lg) - cl.SetStore(v2store.New()) - for i := 1; i <= 5; i++ { - cl.AddMember(&membership.Member{ID: types.ID(i)}, true) - } - r := newRaftNode(raftNodeConfig{ - lg: lg, - Node: newNodeNop(), - transport: newNopTransporter(), - }) - ci := cindex.NewFakeConsistentIndex(0) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - memberId: 2, - r: *r, - cluster: cl, - w: wait.New(), - consistIndex: ci, - beHooks: serverstorage.NewBackendHooks(lg, ci), - } - var ents []raftpb.Entry - for i := 1; i <= 4; i++ { - ent := raftpb.Entry{ - Term: 1, - Index: uint64(i), - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal( - &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: uint64(i)}), - } - ents = append(ents, ent) - } - - _, _, shouldStop := srv.apply(ents, &raftpb.ConfState{}) - if !shouldStop { - t.Errorf("shouldStop = %t, want %t", shouldStop, true) - } -} - -func TestDoProposal(t *testing.T) { - tests := []pb.Request{ - {Method: "POST", ID: 1}, - {Method: "PUT", ID: 1}, - {Method: "DELETE", ID: 1}, - {Method: "GET", ID: 1, Quorum: true}, - } - for i, tt := range tests { - st := mockstore.NewRecorder() - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: newNodeCommitter(), - storage: mockstorage.NewStorageRecorder(""), - raftStorage: raft.NewMemoryStorage(), - transport: newNopTransporter(), - }) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *r, - v2store: st, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - consistIndex: 
cindex.NewFakeConsistentIndex(0), - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - srv.start() - resp, err := srv.Do(context.Background(), tt) - srv.Stop() - - action := st.Action() - if len(action) != 1 { - t.Errorf("#%d: len(action) = %d, want 1", i, len(action)) - } - if err != nil { - t.Fatalf("#%d: err = %v, want nil", i, err) - } - // resp.Index is set in Do() based on the raft state; may either be 0 or 1 - wresp := Response{Event: &v2store.Event{}, Index: resp.Index} - if !reflect.DeepEqual(resp, wresp) { - t.Errorf("#%d: resp = %v, want %v", i, resp, wresp) - } - } -} - -func TestDoProposalCancelled(t *testing.T) { - wt := mockwait.NewRecorder() - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}), - w: wt, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - _, err := srv.Do(ctx, pb.Request{Method: "PUT"}) - - if err != errors.ErrCanceled { - t.Fatalf("err = %v, want %v", err, errors.ErrCanceled) - } - w := []testutil.Action{{Name: "Register"}, {Name: "Trigger"}} - if !reflect.DeepEqual(wt.Action(), w) { - t.Errorf("wt.action = %+v, want %+v", wt.Action(), w) - } -} - -func TestDoProposalTimeout(t *testing.T) { - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}), - w: mockwait.NewNop(), - reqIDGen: idutil.NewGenerator(0, time.Time{}), - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - - ctx, cancel := context.WithTimeout(context.Background(), 0) - _, err 
:= srv.Do(ctx, pb.Request{Method: "PUT"}) - cancel() - if err != errors.ErrTimeout { - t.Fatalf("err = %v, want %v", err, errors.ErrTimeout) - } -} - -func TestDoProposalStopped(t *testing.T) { - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: newNodeNop()}), - w: mockwait.NewNop(), - reqIDGen: idutil.NewGenerator(0, time.Time{}), - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - - srv.stopping = make(chan struct{}) - close(srv.stopping) - _, err := srv.Do(context.Background(), pb.Request{Method: "PUT", ID: 1}) - if err != errors.ErrStopped { - t.Errorf("err = %v, want %v", err, errors.ErrStopped) - } -} - -// TestSync tests sync 1. is nonblocking 2. proposes SYNC request. -func TestSync(t *testing.T) { - n := newNodeRecorder() - ctx, cancel := context.WithCancel(context.Background()) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}), - reqIDGen: idutil.NewGenerator(0, time.Time{}), - ctx: ctx, - cancel: cancel, - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - - // check that sync is non-blocking - done := make(chan struct{}, 1) - go func() { - srv.sync(10 * time.Second) - done <- struct{}{} - }() - - select { - case <-done: - case <-time.After(time.Second): - t.Fatal("sync should be non-blocking but did not return after 1s!") - } - - action, _ := n.Wait(1) - if len(action) != 1 { - t.Fatalf("len(action) = %d, want 1", len(action)) - } - if action[0].Name != "Propose" { - t.Fatalf("action = %s, want Propose", action[0].Name) - } - data := action[0].Params[0].([]byte) - var r pb.Request - if err := r.Unmarshal(data); err != nil { - t.Fatalf("unmarshal request error: %v", err) - } - if r.Method != 
"SYNC" { - t.Errorf("method = %s, want SYNC", r.Method) - } -} - -// TestSyncTimeout tests the case that sync 1. is non-blocking 2. cancel request -// after timeout -func TestSyncTimeout(t *testing.T) { - n := newProposalBlockerRecorder() - ctx, cancel := context.WithCancel(context.Background()) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}), - reqIDGen: idutil.NewGenerator(0, time.Time{}), - ctx: ctx, - cancel: cancel, - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - - // check that sync is non-blocking - done := make(chan struct{}, 1) - go func() { - srv.sync(0) - done <- struct{}{} - }() - - select { - case <-done: - case <-time.After(time.Second): - t.Fatal("sync should be non-blocking but did not return after 1s!") - } - - w := []testutil.Action{{Name: "Propose blocked"}} - if g, _ := n.Wait(1); !reflect.DeepEqual(g, w) { - t.Errorf("action = %v, want %v", g, w) - } -} - -// TODO: TestNoSyncWhenNoLeader - -// TestSyncTrigger tests that the server proposes a SYNC request when its sync timer ticks -func TestSyncTrigger(t *testing.T) { - n := newReadyNode() - st := make(chan time.Time, 1) - tk := &time.Ticker{C: st} - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: n, - raftStorage: raft.NewMemoryStorage(), - transport: newNopTransporter(), - storage: mockstorage.NewStorageRecorder(""), - }) - - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *r, - v2store: mockstore.NewNop(), - SyncTicker: tk, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - } - - // trigger the server to become a leader and accept sync requests - go func() { - srv.start() - n.readyc <- raft.Ready{ - SoftState: &raft.SoftState{ - RaftState: raft.StateLeader, - }, - } - // trigger a sync 
request - st <- time.Time{} - }() - - action, _ := n.Wait(1) - go srv.Stop() - - if len(action) != 1 { - t.Fatalf("len(action) = %d, want 1", len(action)) - } - if action[0].Name != "Propose" { - t.Fatalf("action = %s, want Propose", action[0].Name) - } - data := action[0].Params[0].([]byte) - var req pb.Request - if err := req.Unmarshal(data); err != nil { - t.Fatalf("error unmarshalling data: %v", err) - } - if req.Method != "SYNC" { - t.Fatalf("unexpected proposed request: %#v", req.Method) - } - - // wait on stop message - <-n.Chan() -} - -// TestSnapshot should snapshot the store and cut the persistent -func TestSnapshot(t *testing.T) { - be, _ := betesting.NewDefaultTmpBackend(t) - - s := raft.NewMemoryStorage() - s.Append([]raftpb.Entry{{Index: 1}}) - st := mockstore.NewRecorderStream() - p := mockstorage.NewStorageRecorderStream("") - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: newNodeNop(), - raftStorage: s, - storage: p, - }) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - r: *r, - v2store: st, - consistIndex: cindex.NewConsistentIndex(be), - } - srv.kv = mvcc.New(zaptest.NewLogger(t), be, &lease.FakeLessor{}, mvcc.StoreConfig{}) - srv.be = be - - ch := make(chan struct{}, 2) - - go func() { - gaction, _ := p.Wait(2) - defer func() { ch <- struct{}{} }() - - if len(gaction) != 2 { - t.Errorf("len(action) = %d, want 2", len(gaction)) - return - } - if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) { - t.Errorf("action = %s, want SaveSnap", gaction[0]) - } - - if !reflect.DeepEqual(gaction[1], testutil.Action{Name: "Release"}) { - t.Errorf("action = %s, want Release", gaction[1]) - } - }() - - go func() { - gaction, _ := st.Wait(2) - defer func() { ch <- struct{}{} }() - - if len(gaction) != 2 { - t.Errorf("len(action) = %d, want 2", len(gaction)) - } - if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "Clone"}) { - t.Errorf("action = %s, want Clone", gaction[0]) - } - if 
!reflect.DeepEqual(gaction[1], testutil.Action{Name: "SaveNoCopy"}) { - t.Errorf("action = %s, want SaveNoCopy", gaction[1]) - } - }() - - srv.snapshot(1, raftpb.ConfState{Voters: []uint64{1}}) - <-ch - <-ch -} - -// TestSnapshotOrdering ensures raft persists snapshot onto disk before -// snapshot db is applied. -func TestSnapshotOrdering(t *testing.T) { - // Ignore the snapshot index verification in unit test, because - // it doesn't follow the e2e applying logic. - revertFunc := verify.DisableVerifications() - defer revertFunc() - - lg := zaptest.NewLogger(t) - n := newNopReadyNode() - st := v2store.New() - cl := membership.NewCluster(lg) - cl.SetStore(st) - - testdir := t.TempDir() - - snapdir := filepath.Join(testdir, "member", "snap") - if err := os.MkdirAll(snapdir, 0755); err != nil { - t.Fatalf("couldn't make snap dir (%v)", err) - } - - rs := raft.NewMemoryStorage() - p := mockstorage.NewStorageRecorderStream(testdir) - tr, snapDoneC := newSnapTransporter(lg, snapdir) - r := newRaftNode(raftNodeConfig{ - lg: lg, - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - transport: tr, - storage: p, - raftStorage: rs, - }) - be, _ := betesting.NewDefaultTmpBackend(t) - ci := cindex.NewConsistentIndex(be) - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - Cfg: config.ServerConfig{Logger: lg, DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *r, - v2store: st, - snapshotter: snap.New(lg, snapdir), - cluster: cl, - SyncTicker: &time.Ticker{}, - consistIndex: ci, - beHooks: serverstorage.NewBackendHooks(lg, ci), - } - s.applyV2 = &applierV2store{store: s.v2store, cluster: s.cluster} - - s.kv = mvcc.New(lg, be, &lease.FakeLessor{}, mvcc.StoreConfig{}) - s.be = be - - s.start() - defer s.Stop() - - n.readyc <- raft.Ready{Messages: []raftpb.Message{{Type: raftpb.MsgSnap}}} - go func() { - // get the snapshot sent by the transport - snapMsg := <-snapDoneC - // Snapshot first triggers raftnode to 
persists the snapshot onto disk - // before renaming db snapshot file to db - snapMsg.Snapshot.Metadata.Index = 1 - n.readyc <- raft.Ready{Snapshot: *snapMsg.Snapshot} - }() - - ac := <-p.Chan() - if ac.Name != "Save" { - t.Fatalf("expected Save, got %+v", ac) - } - - if ac := <-p.Chan(); ac.Name != "SaveSnap" { - t.Fatalf("expected SaveSnap, got %+v", ac) - } - - if ac := <-p.Chan(); ac.Name != "Save" { - t.Fatalf("expected Save, got %+v", ac) - } - - // confirm snapshot file still present before calling SaveSnap - snapPath := filepath.Join(snapdir, fmt.Sprintf("%016x.snap.db", 1)) - if !fileutil.Exist(snapPath) { - t.Fatalf("expected file %q, got missing", snapPath) - } - - // unblock SaveSnapshot, etcdserver now permitted to move snapshot file - if ac := <-p.Chan(); ac.Name != "Sync" { - t.Fatalf("expected Sync, got %+v", ac) - } - - if ac := <-p.Chan(); ac.Name != "Release" { - t.Fatalf("expected Release, got %+v", ac) - } -} - -// TestTriggerSnap for Applied > SnapshotCount should trigger a SaveSnap event -func TestTriggerSnap(t *testing.T) { - be, tmpPath := betesting.NewDefaultTmpBackend(t) - defer func() { - os.RemoveAll(tmpPath) - }() - - snapc := 10 - st := mockstore.NewRecorder() - p := mockstorage.NewStorageRecorderStream("") - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: newNodeCommitter(), - raftStorage: raft.NewMemoryStorage(), - storage: p, - transport: newNopTransporter(), - }) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCount: uint64(snapc), SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *r, - v2store: st, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - consistIndex: cindex.NewConsistentIndex(be), - } - srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} - - srv.kv = mvcc.New(zaptest.NewLogger(t), be, &lease.FakeLessor{}, mvcc.StoreConfig{}) - srv.be 
= be - - srv.start() - - donec := make(chan struct{}) - go func() { - defer close(donec) - wcnt := 3 + snapc - gaction, _ := p.Wait(wcnt) - - // each operation is recorded as a Save - // (SnapshotCount+1) * Puts + SaveSnap = (SnapshotCount+1) * Save + SaveSnap + Release - if len(gaction) != wcnt { - t.Logf("gaction: %v", gaction) - t.Errorf("len(action) = %d, want %d", len(gaction), wcnt) - return - } - if !reflect.DeepEqual(gaction[wcnt-2], testutil.Action{Name: "SaveSnap"}) { - t.Errorf("action = %s, want SaveSnap", gaction[wcnt-2]) - } - - if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "Release"}) { - t.Errorf("action = %s, want Release", gaction[wcnt-1]) - } - }() - - for i := 0; i < snapc+1; i++ { - srv.Do(context.Background(), pb.Request{Method: "PUT"}) - } - - <-donec - srv.Stop() -} - -// TestConcurrentApplyAndSnapshotV3 will send out snapshots concurrently with -// proposals. -func TestConcurrentApplyAndSnapshotV3(t *testing.T) { - // Ignore the snapshot index verification in unit test, because - // it doesn't follow the e2e applying logic. 
- revertFunc := verify.DisableVerifications() - defer revertFunc() - - lg := zaptest.NewLogger(t) - n := newNopReadyNode() - st := v2store.New() - cl := membership.NewCluster(lg) - cl.SetStore(st) - - testdir := t.TempDir() - if err := os.MkdirAll(testdir+"/member/snap", 0755); err != nil { - t.Fatalf("Couldn't make snap dir (%v)", err) - } - - rs := raft.NewMemoryStorage() - tr, snapDoneC := newSnapTransporter(lg, testdir) - r := newRaftNode(raftNodeConfig{ - lg: lg, - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - transport: tr, - storage: mockstorage.NewStorageRecorder(testdir), - raftStorage: rs, - }) - be, _ := betesting.NewDefaultTmpBackend(t) - ci := cindex.NewConsistentIndex(be) - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - Cfg: config.ServerConfig{Logger: lg, DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *r, - v2store: st, - snapshotter: snap.New(lg, testdir), - cluster: cl, - SyncTicker: &time.Ticker{}, - consistIndex: ci, - beHooks: serverstorage.NewBackendHooks(lg, ci), - } - s.applyV2 = &applierV2store{store: s.v2store, cluster: s.cluster} - - s.kv = mvcc.New(lg, be, &lease.FakeLessor{}, mvcc.StoreConfig{}) - s.be = be - - s.start() - defer s.Stop() - - // submit applied entries and snap entries - idx := uint64(0) - outdated := 0 - accepted := 0 - for k := 1; k <= 101; k++ { - idx++ - ch := s.w.Register(idx) - req := &pb.Request{Method: "QGET", ID: idx} - ent := raftpb.Entry{Index: idx, Data: pbutil.MustMarshal(req)} - ready := raft.Ready{Entries: []raftpb.Entry{ent}} - n.readyc <- ready - - ready = raft.Ready{CommittedEntries: []raftpb.Entry{ent}} - n.readyc <- ready - - // "idx" applied - <-ch - - // one snapshot for every two messages - if k%2 != 0 { - continue - } - - n.readyc <- raft.Ready{Messages: []raftpb.Message{{Type: raftpb.MsgSnap}}} - // get the snapshot sent by the transport - snapMsg := <-snapDoneC - // If the snapshot trails applied records, recovery 
will panic - // since there's no allocated snapshot at the place of the - // snapshot record. This only happens when the applier and the - // snapshot sender get out of sync. - if snapMsg.Snapshot.Metadata.Index == idx { - idx++ - snapMsg.Snapshot.Metadata.Index = idx - ready = raft.Ready{Snapshot: *snapMsg.Snapshot} - n.readyc <- ready - accepted++ - } else { - outdated++ - } - // don't wait for the snapshot to complete, move to next message - } - if accepted != 50 { - t.Errorf("accepted=%v, want 50", accepted) - } - if outdated != 0 { - t.Errorf("outdated=%v, want 0", outdated) - } -} - -// TestAddMember tests AddMember can propose and perform node addition. -func TestAddMember(t *testing.T) { - lg := zaptest.NewLogger(t) - n := newNodeConfChangeCommitterRecorder() - n.readyc <- raft.Ready{ - SoftState: &raft.SoftState{RaftState: raft.StateLeader}, - } - cl := newTestCluster(t, nil) - st := v2store.New() - cl.SetStore(st) - r := newRaftNode(raftNodeConfig{ - lg: lg, - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: newNopTransporter(), - }) - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - r: *r, - v2store: st, - cluster: cl, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - consistIndex: cindex.NewFakeConsistentIndex(0), - beHooks: serverstorage.NewBackendHooks(lg, nil), - } - s.start() - m := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"foo"}}} - _, err := s.AddMember(context.Background(), m) - gaction := n.Action() - s.Stop() - - if err != nil { - t.Fatalf("AddMember error: %v", err) - } - wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeAddNode"}, {Name: "ApplyConfChange:ConfChangeAddNode"}} - if !reflect.DeepEqual(gaction, wactions) { - t.Errorf("action = %v, want %v", gaction, wactions) - } - if cl.Member(1234) == nil { - t.Errorf("member with id 1234 is not added") - } -} - -// TestRemoveMember 
tests RemoveMember can propose and perform node removal. -func TestRemoveMember(t *testing.T) { - lg := zaptest.NewLogger(t) - n := newNodeConfChangeCommitterRecorder() - n.readyc <- raft.Ready{ - SoftState: &raft.SoftState{RaftState: raft.StateLeader}, - } - cl := newTestCluster(t, nil) - st := v2store.New() - cl.SetStore(v2store.New()) - cl.AddMember(&membership.Member{ID: 1234}, true) - r := newRaftNode(raftNodeConfig{ - lg: lg, - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: newNopTransporter(), - }) - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - r: *r, - v2store: st, - cluster: cl, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - consistIndex: cindex.NewFakeConsistentIndex(0), - beHooks: serverstorage.NewBackendHooks(lg, nil), - } - s.start() - _, err := s.RemoveMember(context.Background(), 1234) - gaction := n.Action() - s.Stop() - - if err != nil { - t.Fatalf("RemoveMember error: %v", err) - } - wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeRemoveNode"}, {Name: "ApplyConfChange:ConfChangeRemoveNode"}} - if !reflect.DeepEqual(gaction, wactions) { - t.Errorf("action = %v, want %v", gaction, wactions) - } - if cl.Member(1234) != nil { - t.Errorf("member with id 1234 is not removed") - } -} - -// TestUpdateMember tests RemoveMember can propose and perform node update. 
-func TestUpdateMember(t *testing.T) { - lg := zaptest.NewLogger(t) - n := newNodeConfChangeCommitterRecorder() - n.readyc <- raft.Ready{ - SoftState: &raft.SoftState{RaftState: raft.StateLeader}, - } - cl := newTestCluster(t, nil) - st := v2store.New() - cl.SetStore(st) - cl.AddMember(&membership.Member{ID: 1234}, true) - r := newRaftNode(raftNodeConfig{ - lg: lg, - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: newNopTransporter(), - }) - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - r: *r, - v2store: st, - cluster: cl, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - consistIndex: cindex.NewFakeConsistentIndex(0), - beHooks: serverstorage.NewBackendHooks(lg, nil), - } - s.start() - wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}} - _, err := s.UpdateMember(context.Background(), wm) - gaction := n.Action() - s.Stop() - - if err != nil { - t.Fatalf("UpdateMember error: %v", err) - } - wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeUpdateNode"}, {Name: "ApplyConfChange:ConfChangeUpdateNode"}} - if !reflect.DeepEqual(gaction, wactions) { - t.Errorf("action = %v, want %v", gaction, wactions) - } - if !reflect.DeepEqual(cl.Member(1234), &wm) { - t.Errorf("member = %v, want %v", cl.Member(1234), &wm) - } -} - -// TODO: test server could stop itself when being removed - -func TestPublishV3(t *testing.T) { - n := newNodeRecorder() - ch := make(chan interface{}, 1) - // simulate that request has gone through consensus - ch <- &apply2.Result{} - w := wait.NewWithResponse(ch) - ctx, cancel := context.WithCancel(context.Background()) - lg := zaptest.NewLogger(t) - be, _ := betesting.NewDefaultTmpBackend(t) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - readych: make(chan struct{}), - Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: 
DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000}, - memberId: 1, - r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}), - attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}, - cluster: &membership.RaftCluster{}, - w: w, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - authStore: auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), nil, 0), - be: be, - ctx: ctx, - cancel: cancel, - } - srv.publishV3(time.Hour) - - action := n.Action() - if len(action) != 1 { - t.Fatalf("len(action) = %d, want 1", len(action)) - } - if action[0].Name != "Propose" { - t.Fatalf("action = %s, want Propose", action[0].Name) - } - data := action[0].Params[0].([]byte) - var r pb.InternalRaftRequest - if err := r.Unmarshal(data); err != nil { - t.Fatalf("unmarshal request error: %v", err) - } - assert.Equal(t, &membershippb.ClusterMemberAttrSetRequest{Member_ID: 0x1, MemberAttributes: &membershippb.Attributes{ - Name: "node1", ClientUrls: []string{"http://a", "http://b"}}}, r.ClusterMemberAttrSet) -} - -// TestPublishV3Stopped tests that publish will be stopped if server is stopped. -func TestPublishV3Stopped(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - r := newRaftNode(raftNodeConfig{ - lg: zaptest.NewLogger(t), - Node: newNodeNop(), - transport: newNopTransporter(), - }) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *r, - cluster: &membership.RaftCluster{}, - w: mockwait.NewNop(), - done: make(chan struct{}), - stopping: make(chan struct{}), - stop: make(chan struct{}), - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - - ctx: ctx, - cancel: cancel, - } - close(srv.stopping) - srv.publishV3(time.Hour) -} - -// TestPublishV3Retry tests that publish will keep retry until success. 
-func TestPublishV3Retry(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - n := newNodeRecorderStream() - - lg := zaptest.NewLogger(t) - be, _ := betesting.NewDefaultTmpBackend(t) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - readych: make(chan struct{}), - Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000}, - memberId: 1, - r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}), - w: mockwait.NewNop(), - stopping: make(chan struct{}), - attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}, - cluster: &membership.RaftCluster{}, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - authStore: auth.NewAuthStore(lg, schema.NewAuthBackend(lg, be), nil, 0), - be: be, - ctx: ctx, - cancel: cancel, - } - - // expect multiple proposals from retrying - ch := make(chan struct{}) - go func() { - defer close(ch) - if action, err := n.Wait(2); err != nil { - t.Errorf("len(action) = %d, want >= 2 (%v)", len(action), err) - } - close(srv.stopping) - // drain remaining actions, if any, so publish can terminate - for { - select { - case <-ch: - return - default: - n.Action() - } - } - }() - srv.publishV3(10 * time.Nanosecond) - ch <- struct{}{} - <-ch -} - -func TestUpdateVersion(t *testing.T) { - n := newNodeRecorder() - ch := make(chan interface{}, 1) - // simulate that request has gone through consensus - ch <- Response{} - w := wait.NewWithResponse(ch) - ctx, cancel := context.WithCancel(context.TODO()) - srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - memberId: 1, - Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, - r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}), - attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}}, - cluster: 
&membership.RaftCluster{}, - w: w, - reqIDGen: idutil.NewGenerator(0, time.Time{}), - SyncTicker: &time.Ticker{}, - - ctx: ctx, - cancel: cancel, - } - srv.updateClusterVersionV2("2.0.0") - - action := n.Action() - if len(action) != 1 { - t.Fatalf("len(action) = %d, want 1", len(action)) - } - if action[0].Name != "Propose" { - t.Fatalf("action = %s, want Propose", action[0].Name) - } - data := action[0].Params[0].([]byte) - var r pb.Request - if err := r.Unmarshal(data); err != nil { - t.Fatalf("unmarshal request error: %v", err) - } - if r.Method != "PUT" { - t.Errorf("method = %s, want PUT", r.Method) - } - if wpath := path.Join(StoreClusterPrefix, "version"); r.Path != wpath { - t.Errorf("path = %s, want %s", r.Path, wpath) - } - if r.Val != "2.0.0" { - t.Errorf("val = %s, want %s", r.Val, "2.0.0") - } -} - -func TestStopNotify(t *testing.T) { - s := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: zaptest.NewLogger(t), - stop: make(chan struct{}), - done: make(chan struct{}), - } - go func() { - <-s.stop - close(s.done) - }() - - notifier := s.StopNotify() - select { - case <-notifier: - t.Fatalf("received unexpected stop notification") - default: - } - s.Stop() - select { - case <-notifier: - default: - t.Fatalf("cannot receive stop notification") - } -} - -func TestGetOtherPeerURLs(t *testing.T) { - lg := zaptest.NewLogger(t) - tests := []struct { - membs []*membership.Member - wurls []string - }{ - { - []*membership.Member{ - membership.NewMember("1", types.MustNewURLs([]string{"http://10.0.0.1:1"}), "a", nil), - }, - []string{}, - }, - { - []*membership.Member{ - membership.NewMember("1", types.MustNewURLs([]string{"http://10.0.0.1:1"}), "a", nil), - membership.NewMember("2", types.MustNewURLs([]string{"http://10.0.0.2:2"}), "a", nil), - membership.NewMember("3", types.MustNewURLs([]string{"http://10.0.0.3:3"}), "a", nil), - }, - []string{"http://10.0.0.2:2", "http://10.0.0.3:3"}, - }, - { - []*membership.Member{ - membership.NewMember("1", 
types.MustNewURLs([]string{"http://10.0.0.1:1"}), "a", nil), - membership.NewMember("3", types.MustNewURLs([]string{"http://10.0.0.3:3"}), "a", nil), - membership.NewMember("2", types.MustNewURLs([]string{"http://10.0.0.2:2"}), "a", nil), - }, - []string{"http://10.0.0.2:2", "http://10.0.0.3:3"}, - }, - } - for i, tt := range tests { - cl := membership.NewClusterFromMembers(lg, types.ID(0), tt.membs) - self := "1" - urls := getRemotePeerURLs(cl, self) - if !reflect.DeepEqual(urls, tt.wurls) { - t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls) - } - } -} - -type nodeRecorder struct{ testutil.Recorder } - -func newNodeRecorder() *nodeRecorder { return &nodeRecorder{&testutil.RecorderBuffered{}} } -func newNodeRecorderStream() *nodeRecorder { return &nodeRecorder{testutil.NewRecorderStream()} } -func newNodeNop() raft.Node { return newNodeRecorder() } - -func (n *nodeRecorder) Tick() { n.Record(testutil.Action{Name: "Tick"}) } -func (n *nodeRecorder) Campaign(ctx context.Context) error { - n.Record(testutil.Action{Name: "Campaign"}) - return nil -} -func (n *nodeRecorder) Propose(ctx context.Context, data []byte) error { - n.Record(testutil.Action{Name: "Propose", Params: []interface{}{data}}) - return nil -} -func (n *nodeRecorder) ProposeConfChange(ctx context.Context, conf raftpb.ConfChangeI) error { - n.Record(testutil.Action{Name: "ProposeConfChange"}) - return nil -} -func (n *nodeRecorder) Step(ctx context.Context, msg raftpb.Message) error { - n.Record(testutil.Action{Name: "Step"}) - return nil -} -func (n *nodeRecorder) Status() raft.Status { return raft.Status{} } -func (n *nodeRecorder) Ready() <-chan raft.Ready { return nil } -func (n *nodeRecorder) TransferLeadership(ctx context.Context, lead, transferee uint64) {} -func (n *nodeRecorder) ReadIndex(ctx context.Context, rctx []byte) error { return nil } -func (n *nodeRecorder) Advance() {} -func (n *nodeRecorder) ApplyConfChange(conf raftpb.ConfChangeI) *raftpb.ConfState { - 
n.Record(testutil.Action{Name: "ApplyConfChange", Params: []interface{}{conf}}) - return &raftpb.ConfState{} -} - -func (n *nodeRecorder) Stop() { - n.Record(testutil.Action{Name: "Stop"}) -} - -func (n *nodeRecorder) ReportUnreachable(id uint64) {} - -func (n *nodeRecorder) ReportSnapshot(id uint64, status raft.SnapshotStatus) {} - -func (n *nodeRecorder) Compact(index uint64, nodes []uint64, d []byte) { - n.Record(testutil.Action{Name: "Compact"}) -} - -type nodeProposalBlockerRecorder struct { - nodeRecorder -} - -func newProposalBlockerRecorder() *nodeProposalBlockerRecorder { - return &nodeProposalBlockerRecorder{*newNodeRecorderStream()} -} - -func (n *nodeProposalBlockerRecorder) Propose(ctx context.Context, data []byte) error { - <-ctx.Done() - n.Record(testutil.Action{Name: "Propose blocked"}) - return nil -} - -// readyNode is a nodeRecorder with a user-writeable ready channel -type readyNode struct { - nodeRecorder - readyc chan raft.Ready -} - -func newReadyNode() *readyNode { - return &readyNode{ - nodeRecorder{testutil.NewRecorderStream()}, - make(chan raft.Ready, 1)} -} -func newNopReadyNode() *readyNode { - return &readyNode{*newNodeRecorder(), make(chan raft.Ready, 1)} -} - -func (n *readyNode) Ready() <-chan raft.Ready { return n.readyc } - -type nodeConfChangeCommitterRecorder struct { - readyNode - index uint64 -} - -func newNodeConfChangeCommitterRecorder() *nodeConfChangeCommitterRecorder { - return &nodeConfChangeCommitterRecorder{*newNopReadyNode(), 0} -} - -func newNodeConfChangeCommitterStream() *nodeConfChangeCommitterRecorder { - return &nodeConfChangeCommitterRecorder{*newReadyNode(), 0} -} - -func confChangeActionName(conf raftpb.ConfChangeI) string { - var s string - if confV1, ok := conf.AsV1(); ok { - s = confV1.Type.String() - } else { - for i, chg := range conf.AsV2().Changes { - if i > 0 { - s += "/" - } - s += chg.Type.String() - } - } - return s -} - -func (n *nodeConfChangeCommitterRecorder) ProposeConfChange(ctx 
context.Context, conf raftpb.ConfChangeI) error { - typ, data, err := raftpb.MarshalConfChange(conf) - if err != nil { - return err - } - - n.index++ - n.Record(testutil.Action{Name: "ProposeConfChange:" + confChangeActionName(conf)}) - n.readyc <- raft.Ready{CommittedEntries: []raftpb.Entry{{Index: n.index, Type: typ, Data: data}}} - return nil -} -func (n *nodeConfChangeCommitterRecorder) Ready() <-chan raft.Ready { - return n.readyc -} -func (n *nodeConfChangeCommitterRecorder) ApplyConfChange(conf raftpb.ConfChangeI) *raftpb.ConfState { - n.Record(testutil.Action{Name: "ApplyConfChange:" + confChangeActionName(conf)}) - return &raftpb.ConfState{} -} - -// nodeCommitter commits proposed data immediately. -type nodeCommitter struct { - readyNode - index uint64 -} - -func newNodeCommitter() raft.Node { - return &nodeCommitter{*newNopReadyNode(), 0} -} -func (n *nodeCommitter) Propose(ctx context.Context, data []byte) error { - n.index++ - ents := []raftpb.Entry{{Index: n.index, Data: data}} - n.readyc <- raft.Ready{ - Entries: ents, - CommittedEntries: ents, - } - return nil -} - -func newTestCluster(t testing.TB, membs []*membership.Member) *membership.RaftCluster { - c := membership.NewCluster(zaptest.NewLogger(t)) - for _, m := range membs { - c.AddMember(m, true) - } - return c -} - -type nopTransporter struct{} - -func newNopTransporter() rafthttp.Transporter { - return &nopTransporter{} -} - -func (s *nopTransporter) Start() error { return nil } -func (s *nopTransporter) Handler() http.Handler { return nil } -func (s *nopTransporter) Send(m []raftpb.Message) {} -func (s *nopTransporter) SendSnapshot(m snap.Message) {} -func (s *nopTransporter) AddRemote(id types.ID, us []string) {} -func (s *nopTransporter) AddPeer(id types.ID, us []string) {} -func (s *nopTransporter) RemovePeer(id types.ID) {} -func (s *nopTransporter) RemoveAllPeers() {} -func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {} -func (s *nopTransporter) ActiveSince(id types.ID) 
time.Time { return time.Time{} } -func (s *nopTransporter) ActivePeers() int { return 0 } -func (s *nopTransporter) Stop() {} -func (s *nopTransporter) Pause() {} -func (s *nopTransporter) Resume() {} - -type snapTransporter struct { - nopTransporter - snapDoneC chan snap.Message - snapDir string - lg *zap.Logger -} - -func newSnapTransporter(lg *zap.Logger, snapDir string) (rafthttp.Transporter, <-chan snap.Message) { - ch := make(chan snap.Message, 1) - tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir, lg: lg} - return tr, ch -} - -func (s *snapTransporter) SendSnapshot(m snap.Message) { - ss := snap.New(s.lg, s.snapDir) - ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1) - m.CloseWithError(nil) - s.snapDoneC <- m -} - -type sendMsgAppRespTransporter struct { - nopTransporter - sendC chan int -} - -func newSendMsgAppRespTransporter() (rafthttp.Transporter, <-chan int) { - ch := make(chan int, 1) - tr := &sendMsgAppRespTransporter{sendC: ch} - return tr, ch -} - -func (s *sendMsgAppRespTransporter) Send(m []raftpb.Message) { - var send int - for _, msg := range m { - if msg.To != 0 { - send++ - } - } - s.sendC <- send -} - -func TestWaitAppliedIndex(t *testing.T) { - cases := []struct { - name string - appliedIndex uint64 - committedIndex uint64 - action func(s *EtcdServer) - ExpectedError error - }{ - { - name: "The applied Id is already equal to the commitId", - appliedIndex: 10, - committedIndex: 10, - action: func(s *EtcdServer) { - s.applyWait.Trigger(10) - }, - ExpectedError: nil, - }, - { - name: "The etcd server has already stopped", - appliedIndex: 10, - committedIndex: 12, - action: func(s *EtcdServer) { - s.stopping <- struct{}{} - }, - ExpectedError: errors.ErrStopped, - }, - { - name: "Timed out waiting for the applied index", - appliedIndex: 10, - committedIndex: 12, - action: nil, - ExpectedError: errors.ErrTimeoutWaitAppliedIndex, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - s := &EtcdServer{ - 
appliedIndex: tc.appliedIndex, - committedIndex: tc.committedIndex, - stopping: make(chan struct{}, 1), - applyWait: wait.NewTimeList(), - } - - if tc.action != nil { - go tc.action(s) - } - - err := s.waitAppliedIndex() - - if err != tc.ExpectedError { - t.Errorf("Unexpected error, want (%v), got (%v)", tc.ExpectedError, err) - } - }) - } -} diff --git a/server/etcdserver/txn/metrics.go b/server/etcdserver/txn/metrics.go deleted file mode 100644 index 1e7a6f19712..00000000000 --- a/server/etcdserver/txn/metrics.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package txn - -import ( - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - slowApplies = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "slow_apply_total", - Help: "The total number of slow apply requests (likely overloaded from slow disk).", - }) - applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "apply_duration_seconds", - Help: "The latency distributions of v2 apply called by backend.", - - // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 - // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec - Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20), - }, - []string{"version", "op", "success"}) -) - -func ApplySecObserve(version, op string, success bool, latency time.Duration) { - applySec.WithLabelValues(version, op, strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0) -} - -func init() { - prometheus.MustRegister(applySec) - prometheus.MustRegister(slowApplies) -} diff --git a/server/etcdserver/txn/txn.go b/server/etcdserver/txn/txn.go deleted file mode 100644 index ecd554629c3..00000000000 --- a/server/etcdserver/txn/txn.go +++ /dev/null @@ -1,699 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package txn - -import ( - "bytes" - "context" - "fmt" - "sort" - - "go.uber.org/zap" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/mvcc" -) - -func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, txnWrite mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { - resp = &pb.PutResponse{} - resp.Header = &pb.ResponseHeader{} - trace = traceutil.Get(ctx) - // create put tracing if the trace in context is empty - if trace.IsEmpty() { - trace = traceutil.New("put", - lg, - traceutil.Field{Key: "key", Value: string(p.Key)}, - traceutil.Field{Key: "req_size", Value: p.Size()}, - ) - } - val, leaseID := p.Value, lease.LeaseID(p.Lease) - if txnWrite == nil { - if leaseID != lease.NoLease { - if l := lessor.Lookup(leaseID); l == nil { - return nil, nil, lease.ErrLeaseNotFound - } - } - txnWrite = kv.Write(trace) - defer txnWrite.End() - } - - var rr *mvcc.RangeResult - if p.IgnoreValue || p.IgnoreLease || p.PrevKv { - trace.StepWithFunction(func() { - rr, err = txnWrite.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{}) - }, "get previous kv pair") - - if err != nil { - return nil, nil, err - } - } - if p.IgnoreValue || p.IgnoreLease { - if rr == nil || len(rr.KVs) == 0 { - // ignore_{lease,value} flag expects previous key-value pair - return nil, nil, errors.ErrKeyNotFound - } - } - if p.IgnoreValue { - val = rr.KVs[0].Value - } - if p.IgnoreLease { - leaseID = lease.LeaseID(rr.KVs[0].Lease) - } - if p.PrevKv { - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] - } - } - - resp.Header.Revision = txnWrite.Put(p.Key, val, leaseID) - trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) - return resp, trace, nil -} - -func DeleteRange(kv 
mvcc.KV, txnWrite mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp := &pb.DeleteRangeResponse{} - resp.Header = &pb.ResponseHeader{} - end := mkGteRange(dr.RangeEnd) - - if txnWrite == nil { - txnWrite = kv.Write(traceutil.TODO()) - defer txnWrite.End() - } - - if dr.PrevKv { - rr, err := txnWrite.Range(context.TODO(), dr.Key, end, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - if rr != nil { - resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs)) - for i := range rr.KVs { - resp.PrevKvs[i] = &rr.KVs[i] - } - } - } - - resp.Deleted, resp.Header.Revision = txnWrite.DeleteRange(dr.Key, end) - return resp, nil -} - -func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, txnRead mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - trace := traceutil.Get(ctx) - - resp := &pb.RangeResponse{} - resp.Header = &pb.ResponseHeader{} - - if txnRead == nil { - txnRead = kv.Read(mvcc.ConcurrentReadTxMode, trace) - defer txnRead.End() - } - - limit := r.Limit - if r.SortOrder != pb.RangeRequest_NONE || - r.MinModRevision != 0 || r.MaxModRevision != 0 || - r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { - // fetch everything; sort and truncate afterwards - limit = 0 - } - if limit > 0 { - // fetch one extra for 'more' flag - limit = limit + 1 - } - - ro := mvcc.RangeOptions{ - Limit: limit, - Rev: r.Revision, - Count: r.CountOnly, - } - - rr, err := txnRead.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro) - if err != nil { - return nil, err - } - - if r.MaxModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } - pruneKVs(rr, f) - } - if r.MinModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } - pruneKVs(rr, f) - } - if r.MaxCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } - pruneKVs(rr, f) - } - if r.MinCreateRevision != 0 { - f := func(kv 
*mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } - pruneKVs(rr, f) - } - - sortOrder := r.SortOrder - if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // sort ASCEND by default only when target is not 'KEY' - sortOrder = pb.RangeRequest_ASCEND - } else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_ASCEND { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // don't re-sort when target is 'KEY' and order is ASCEND - sortOrder = pb.RangeRequest_NONE - } - if sortOrder != pb.RangeRequest_NONE { - var sorter sort.Interface - switch { - case r.SortTarget == pb.RangeRequest_KEY: - sorter = &kvSortByKey{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VERSION: - sorter = &kvSortByVersion{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_CREATE: - sorter = &kvSortByCreate{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_MOD: - sorter = &kvSortByMod{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VALUE: - sorter = &kvSortByValue{&kvSort{rr.KVs}} - default: - lg.Panic("unexpected sort target", zap.Int32("sort-target", int32(r.SortTarget))) - } - switch { - case sortOrder == pb.RangeRequest_ASCEND: - sort.Sort(sorter) - case sortOrder == pb.RangeRequest_DESCEND: - sort.Sort(sort.Reverse(sorter)) - } - } - - if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { - rr.KVs = rr.KVs[:r.Limit] - resp.More = true - } - trace.Step("filter and sort the key-value pairs") - resp.Header.Revision = rr.Rev - resp.Count = int64(rr.Count) - resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs)) - for i := range rr.KVs { - if r.KeysOnly { - rr.KVs[i].Value = nil - } - resp.Kvs[i] = &rr.KVs[i] - } - trace.Step("assemble the response") - return resp, nil -} - -func Txn(ctx context.Context, lg *zap.Logger, rt 
*pb.TxnRequest, txnModeWriteWithSharedBuffer bool, kv mvcc.KV, lessor lease.Lessor) (*pb.TxnResponse, *traceutil.Trace, error) { - trace := traceutil.Get(ctx) - if trace.IsEmpty() { - trace = traceutil.New("transaction", lg) - ctx = context.WithValue(ctx, traceutil.TraceKey, trace) - } - isWrite := !IsTxnReadonly(rt) - - // When the transaction contains write operations, we use ReadTx instead of - // ConcurrentReadTx to avoid extra overhead of copying buffer. - var txnWrite mvcc.TxnWrite - if isWrite && txnModeWriteWithSharedBuffer /*a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer*/ { - txnWrite = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.SharedBufReadTxMode, trace)) - } else { - txnWrite = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.ConcurrentReadTxMode, trace)) - } - - var txnPath []bool - trace.StepWithFunction( - func() { - txnPath = compareToPath(txnWrite, rt) - }, - "compare", - ) - - if isWrite { - trace.AddField(traceutil.Field{Key: "read_only", Value: false}) - if _, err := checkRequests(txnWrite, rt, txnPath, - func(rv mvcc.ReadView, ro *pb.RequestOp) error { return checkRequestPut(rv, lessor, ro) }); err != nil { - txnWrite.End() - return nil, nil, err - } - } - if _, err := checkRequests(txnWrite, rt, txnPath, checkRequestRange); err != nil { - txnWrite.End() - return nil, nil, err - } - trace.Step("check requests") - txnResp, _ := newTxnResp(rt, txnPath) - - // When executing mutable txnWrite ops, etcd must hold the txnWrite lock so - // readers do not see any intermediate results. Since writes are - // serialized on the raft loop, the revision in the read view will - // be the revision of the write txnWrite. 
- if isWrite { - txnWrite.End() - txnWrite = kv.Write(trace) - } - _, err := applyTxn(ctx, lg, kv, lessor, txnWrite, rt, txnPath, txnResp) - if err != nil { - if isWrite { - // end txn to release locks before panic - txnWrite.End() - // When txn with write operations starts it has to be successful - // We don't have a way to recover state in case of write failure - lg.Panic("unexpected error during txn with writes", zap.Error(err)) - } else { - lg.Error("unexpected error during readonly txn", zap.Error(err)) - } - } - rev := txnWrite.Rev() - if len(txnWrite.Changes()) != 0 { - rev++ - } - txnWrite.End() - - txnResp.Header.Revision = rev - trace.AddField( - traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)}, - traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision}, - ) - return txnResp, trace, err -} - -// newTxnResp allocates a txn response for a txn request given a path. -func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) { - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - resps := make([]*pb.ResponseOp, len(reqs)) - txnResp = &pb.TxnResponse{ - Responses: resps, - Succeeded: txnPath[0], - Header: &pb.ResponseHeader{}, - } - for i, req := range reqs { - switch tv := req.Request.(type) { - case *pb.RequestOp_RequestRange: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}} - case *pb.RequestOp_RequestPut: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}} - case *pb.RequestOp_RequestDeleteRange: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}} - case *pb.RequestOp_RequestTxn: - resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:]) - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}} - txnPath = txnPath[1+txns:] - txnCount += txns + 1 - default: - } - } - return txnResp, txnCount -} - -func applyTxn(ctx context.Context, lg *zap.Logger, kv mvcc.KV, lessor lease.Lessor, txnWrite 
mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int, err error) { - trace := traceutil.Get(ctx) - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - - for i, req := range reqs { - respi := tresp.Responses[i].Response - switch tv := req.Request.(type) { - case *pb.RequestOp_RequestRange: - trace.StartSubTrace( - traceutil.Field{Key: "req_type", Value: "range"}, - traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)}, - traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)}) - resp, err := Range(ctx, lg, kv, txnWrite, tv.RequestRange) - if err != nil { - return 0, fmt.Errorf("applyTxn: failed Range: %w", err) - } - respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp - trace.StopSubTrace() - case *pb.RequestOp_RequestPut: - trace.StartSubTrace( - traceutil.Field{Key: "req_type", Value: "put"}, - traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)}, - traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()}) - resp, _, err := Put(ctx, lg, lessor, kv, txnWrite, tv.RequestPut) - if err != nil { - return 0, fmt.Errorf("applyTxn: failed Put: %w", err) - } - respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp - trace.StopSubTrace() - case *pb.RequestOp_RequestDeleteRange: - resp, err := DeleteRange(kv, txnWrite, tv.RequestDeleteRange) - if err != nil { - return 0, fmt.Errorf("applyTxn: failed DeleteRange: %w", err) - } - respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp - case *pb.RequestOp_RequestTxn: - resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn - applyTxns, err := applyTxn(ctx, lg, kv, lessor, txnWrite, tv.RequestTxn, txnPath[1:], resp) - if err != nil { - // don't wrap the error. 
It's a recursive call and err should be already wrapped - return 0, err - } - txns += applyTxns + 1 - txnPath = txnPath[applyTxns+1:] - default: - // empty union - } - } - return txns, nil -} - -//--------------------------------------------------------- - -type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error - -func checkRequestPut(rv mvcc.ReadView, lessor lease.Lessor, reqOp *pb.RequestOp) error { - tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut) - if !ok || tv.RequestPut == nil { - return nil - } - req := tv.RequestPut - if req.IgnoreValue || req.IgnoreLease { - // expects previous key-value, error if not exist - rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return err - } - if rr == nil || len(rr.KVs) == 0 { - return errors.ErrKeyNotFound - } - } - if lease.LeaseID(req.Lease) != lease.NoLease { - if l := lessor.Lookup(lease.LeaseID(req.Lease)); l == nil { - return lease.ErrLeaseNotFound - } - } - return nil -} - -func checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error { - tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange) - if !ok || tv.RequestRange == nil { - return nil - } - req := tv.RequestRange - switch { - case req.Revision == 0: - return nil - case req.Revision > rv.Rev(): - return mvcc.ErrFutureRev - case req.Revision < rv.FirstRev(): - return mvcc.ErrCompacted - } - return nil -} - -func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) { - txnCount := 0 - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - for _, req := range reqs { - if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil { - txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f) - if err != nil { - return 0, err - } - txnCount += txns + 1 - txnPath = txnPath[txns+1:] - continue - } - if err := f(rv, req); err != nil { - return 0, err - } - } - return txnCount, nil -} - -// mkGteRange determines if the range end is a >= range. 
This works around grpc -// sending empty byte strings as nil; >= is encoded in the range end as '\0'. -// If it is a GTE range, then []byte{} is returned to indicate the empty byte -// string (vs nil being no byte string). -func mkGteRange(rangeEnd []byte) []byte { - if len(rangeEnd) == 1 && rangeEnd[0] == 0 { - return []byte{} - } - return rangeEnd -} - -func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { - j := 0 - for i := range rr.KVs { - rr.KVs[j] = rr.KVs[i] - if !isPrunable(&rr.KVs[i]) { - j++ - } - } - rr.KVs = rr.KVs[:j] -} - -type kvSort struct{ kvs []mvccpb.KeyValue } - -func (s *kvSort) Swap(i, j int) { - t := s.kvs[i] - s.kvs[i] = s.kvs[j] - s.kvs[j] = t -} -func (s *kvSort) Len() int { return len(s.kvs) } - -type kvSortByKey struct{ *kvSort } - -func (s *kvSortByKey) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 -} - -type kvSortByVersion struct{ *kvSort } - -func (s *kvSortByVersion) Less(i, j int) bool { - return (s.kvs[i].Version - s.kvs[j].Version) < 0 -} - -type kvSortByCreate struct{ *kvSort } - -func (s *kvSortByCreate) Less(i, j int) bool { - return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 -} - -type kvSortByMod struct{ *kvSort } - -func (s *kvSortByMod) Less(i, j int) bool { - return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 -} - -type kvSortByValue struct{ *kvSort } - -func (s *kvSortByValue) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 -} - -func compareInt64(a, b int64) int { - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } -} - -func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool { - txnPath := make([]bool, 1) - ops := rt.Success - if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] { - ops = rt.Failure - } - for _, op := range ops { - tv, ok := op.Request.(*pb.RequestOp_RequestTxn) - if !ok || tv.RequestTxn == nil { - continue - } - txnPath = append(txnPath, 
compareToPath(rv, tv.RequestTxn)...) - } - return txnPath -} - -func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool { - for _, c := range cmps { - if !applyCompare(rv, c) { - return false - } - } - return true -} - -// applyCompare applies the compare request. -// If the comparison succeeds, it returns true. Otherwise, returns false. -func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { - // TODO: possible optimizations - // * chunk reads for large ranges to conserve memory - // * rewrite rules for common patterns: - // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0" - // * caching - rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{}) - if err != nil { - return false - } - if len(rr.KVs) == 0 { - if c.Target == pb.Compare_VALUE { - // Always fail if comparing a value on a key/keys that doesn't exist; - // nil == empty string in grpc; no way to represent missing value - return false - } - return compareKV(c, mvccpb.KeyValue{}) - } - for _, kv := range rr.KVs { - if !compareKV(c, kv) { - return false - } - } - return true -} - -func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { - var result int - rev := int64(0) - switch c.Target { - case pb.Compare_VALUE: - var v []byte - if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil { - v = tv.Value - } - result = bytes.Compare(ckv.Value, v) - case pb.Compare_CREATE: - if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil { - rev = tv.CreateRevision - } - result = compareInt64(ckv.CreateRevision, rev) - case pb.Compare_MOD: - if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil { - rev = tv.ModRevision - } - result = compareInt64(ckv.ModRevision, rev) - case pb.Compare_VERSION: - if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil { - rev = tv.Version - } - result = compareInt64(ckv.Version, rev) - case pb.Compare_LEASE: - if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil { - rev = tv.Lease - } - result = 
compareInt64(ckv.Lease, rev) - } - switch c.Result { - case pb.Compare_EQUAL: - return result == 0 - case pb.Compare_NOT_EQUAL: - return result != 0 - case pb.Compare_GREATER: - return result > 0 - case pb.Compare_LESS: - return result < 0 - } - return true -} - -func IsTxnSerializable(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - return true -} - -func IsTxnReadonly(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil { - return false - } - } - return true -} - -func CheckTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { - for _, c := range rt.Compare { - if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil { - return err - } - } - if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { - return err - } - return checkTxnReqsPermission(as, ai, rt.Failure) -} - -func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - switch tv := requ.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange == nil { - continue - } - - if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { - return err - } - - case *pb.RequestOp_RequestPut: - if tv.RequestPut == nil { - continue - } - - if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { - return err - } - - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange == nil { - continue - } - - if tv.RequestDeleteRange.PrevKv { - err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - - err := as.IsDeleteRangePermitted(ai, 
tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/server/etcdserver/txn/txn_test.go b/server/etcdserver/txn/txn_test.go deleted file mode 100644 index 673d363d6af..00000000000 --- a/server/etcdserver/txn/txn_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package txn - -import ( - "context" - "strings" - "testing" - - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/mvcc" - - "github.com/stretchr/testify/assert" -) - -func TestReadonlyTxnError(t *testing.T) { - b, _ := betesting.NewDefaultTmpBackend(t) - defer betesting.Close(t, b) - s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{}) - defer s.Close() - - // setup cancelled context - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - - // put some data to prevent early termination in rangeKeys - // we are expecting failure on cancelled context check - s.Put([]byte("foo"), []byte("bar"), lease.NoLease) - - txn := &pb.TxnRequest{ - Success: []*pb.RequestOp{ - { - Request: &pb.RequestOp_RequestRange{ - RequestRange: &pb.RangeRequest{ - Key: []byte("foo"), - }, - }, - }, - }, - } - - _, _, err := Txn(ctx, zaptest.NewLogger(t), txn, false, s, 
&lease.FakeLessor{}) - if err == nil || !strings.Contains(err.Error(), "applyTxn: failed Range: rangeKeys: context cancelled: context canceled") { - t.Fatalf("Expected context canceled error, got %v", err) - } -} - -func TestWriteTxnPanic(t *testing.T) { - b, _ := betesting.NewDefaultTmpBackend(t) - defer betesting.Close(t, b) - s := mvcc.NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, mvcc.StoreConfig{}) - defer s.Close() - - // setup cancelled context - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - - // write txn that puts some data and then fails in range due to cancelled context - txn := &pb.TxnRequest{ - Success: []*pb.RequestOp{ - { - Request: &pb.RequestOp_RequestPut{ - RequestPut: &pb.PutRequest{ - Key: []byte("foo"), - Value: []byte("bar"), - }, - }, - }, - { - Request: &pb.RequestOp_RequestRange{ - RequestRange: &pb.RangeRequest{ - Key: []byte("foo"), - }, - }, - }, - }, - } - - assert.Panics(t, func() { Txn(ctx, zaptest.NewLogger(t), txn, false, s, &lease.FakeLessor{}) }, "Expected panic in Txn with writes") -} diff --git a/server/etcdserver/txn/util.go b/server/etcdserver/txn/util.go deleted file mode 100644 index a4a3168ffe5..00000000000 --- a/server/etcdserver/txn/util.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package txn - -import ( - "fmt" - "reflect" - "strings" - "time" - - "github.com/golang/protobuf/proto" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - - "go.uber.org/zap" -) - -func WarnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - var resp string - if !isNil(respMsg) { - resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err) -} - -func WarnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { - var resp string - if !isNil(respMsg) { - resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) - } - d := time.Since(now) - lg.Warn( - "failed to apply request", - zap.Duration("took", d), - zap.String("request", reqStringer.String()), - zap.String("response", resp), - zap.Error(err), - ) -} - -func WarnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - reqStringer := pb.NewLoggableTxnRequest(r) - var resp string - if !isNil(txnResponse) { - var resps []string - for _, r := range txnResponse.Responses { - switch r.Response.(type) { - case *pb.ResponseOp_ResponseRange: - if op := r.GetResponseRange(); op != nil { - resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.GetKvs()))) - } else { - resps = append(resps, "range_response:nil") - } - default: - // only range responses should be in a read only txn request - } - } - resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size()) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err) -} - -func WarnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, 
warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - var resp string - if !isNil(rangeResponse) { - resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size()) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err) -} - -// callers need make sure time has passed warningApplyDuration -func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) { - lg.Warn( - "apply request took too long", - zap.Duration("took", time.Since(now)), - zap.Duration("expected-duration", warningApplyDuration), - zap.String("prefix", prefix), - zap.String("request", reqStringer.String()), - zap.String("response", resp), - zap.Error(err), - ) - slowApplies.Inc() -} - -func isNil(msg proto.Message) bool { - return msg == nil || reflect.ValueOf(msg).IsNil() -} diff --git a/server/etcdserver/txn/util_bench_test.go b/server/etcdserver/txn/util_bench_test.go deleted file mode 100644 index 5a84f62f537..00000000000 --- a/server/etcdserver/txn/util_bench_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package txn - -import ( - "errors" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/raft/v3/raftpb" -) - -func BenchmarkWarnOfExpensiveRequestNoLog(b *testing.B) { - m := &raftpb.Message{ - Type: 0, - To: 0, - From: 1, - Term: 2, - LogTerm: 3, - Index: 0, - Entries: []raftpb.Entry{ - { - Term: 0, - Index: 0, - Type: 0, - Data: make([]byte, 1024), - }, - }, - Commit: 0, - Snapshot: nil, - Reject: false, - RejectHint: 0, - Context: nil, - } - err := errors.New("benchmarking warn of expensive request") - lg := zaptest.NewLogger(b) - for n := 0; n < b.N; n++ { - WarnOfExpensiveRequest(lg, time.Second, time.Now(), nil, m, err) - } -} diff --git a/server/etcdserver/txn/util_test.go b/server/etcdserver/txn/util_test.go deleted file mode 100644 index 1fc9eeac9b0..00000000000 --- a/server/etcdserver/txn/util_test.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package txn - -import ( - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - - "go.uber.org/zap/zaptest" -) - -// TestWarnOfExpensiveReadOnlyTxnRequest verifies WarnOfExpensiveReadOnlyTxnRequest -// never panic no matter what data the txnResponse contains. 
-func TestWarnOfExpensiveReadOnlyTxnRequest(t *testing.T) { - kvs := []*mvccpb.KeyValue{ - &mvccpb.KeyValue{Key: []byte("k1"), Value: []byte("v1")}, - &mvccpb.KeyValue{Key: []byte("k2"), Value: []byte("v2")}, - } - - testCases := []struct { - name string - txnResp *pb.TxnResponse - }{ - { - name: "all readonly responses", - txnResp: &pb.TxnResponse{ - Responses: []*pb.ResponseOp{ - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: &pb.RangeResponse{ - Kvs: kvs, - }, - }, - }, - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: &pb.RangeResponse{}, - }, - }, - }, - }, - }, - { - name: "all readonly responses with partial nil responses", - txnResp: &pb.TxnResponse{ - Responses: []*pb.ResponseOp{ - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: &pb.RangeResponse{}, - }, - }, - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: nil, - }, - }, - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: &pb.RangeResponse{ - Kvs: kvs, - }, - }, - }, - }, - }, - }, - { - name: "all readonly responses with all nil responses", - txnResp: &pb.TxnResponse{ - Responses: []*pb.ResponseOp{ - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: nil, - }, - }, - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: nil, - }, - }, - }, - }, - }, - { - name: "partial non readonly responses", - txnResp: &pb.TxnResponse{ - Responses: []*pb.ResponseOp{ - { - Response: &pb.ResponseOp_ResponseRange{ - ResponseRange: nil, - }, - }, - { - Response: &pb.ResponseOp_ResponsePut{}, - }, - { - Response: &pb.ResponseOp_ResponseDeleteRange{}, - }, - }, - }, - }, - { - name: "all non readonly responses", - txnResp: &pb.TxnResponse{ - Responses: []*pb.ResponseOp{ - { - Response: &pb.ResponseOp_ResponsePut{}, - }, - { - Response: &pb.ResponseOp_ResponseDeleteRange{}, - }, - }, - }, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - start := time.Now().Add(-1 * 
time.Second) - // WarnOfExpensiveReadOnlyTxnRequest shouldn't panic. - WarnOfExpensiveReadOnlyTxnRequest(lg, 0, start, &pb.TxnRequest{}, tc.txnResp, nil) - }) - } -} diff --git a/server/etcdserver/util.go b/server/etcdserver/util.go deleted file mode 100644 index fbba5491b07..00000000000 --- a/server/etcdserver/util.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "fmt" - "time" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" -) - -// isConnectedToQuorumSince checks whether the local member is connected to the -// quorum of the cluster since the given time. -func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 -} - -// isConnectedSince checks whether the local member is connected to the -// remote member since the given time. -func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool { - t := transport.ActiveSince(remote) - return !t.IsZero() && t.Before(since) -} - -// isConnectedFullySince checks whether the local member is connected to all -// members in the cluster since the given time. 
-func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) == len(members) -} - -// numConnectedSince counts how many members are connected to the local member -// since the given time. -func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int { - connectedNum := 0 - for _, m := range members { - if m.ID == self || isConnectedSince(transport, since, m.ID) { - connectedNum++ - } - } - return connectedNum -} - -// longestConnected chooses the member with longest active-since-time. -// It returns false, if nothing is active. -func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) { - var longest types.ID - var oldest time.Time - for _, id := range membs { - tm := tp.ActiveSince(id) - if tm.IsZero() { // inactive - continue - } - - if oldest.IsZero() { // first longest candidate - oldest = tm - longest = id - } - - if tm.Before(oldest) { - oldest = tm - longest = id - } - } - if uint64(longest) == 0 { - return longest, false - } - return longest, true -} - -type notifier struct { - c chan struct{} - err error -} - -func newNotifier() *notifier { - return ¬ifier{ - c: make(chan struct{}), - } -} - -func (nc *notifier) notify(err error) { - nc.err = err - close(nc.c) -} - -// panicAlternativeStringer wraps a fmt.Stringer, and if calling String() panics, calls the alternative instead. -// This is needed to ensure logging slow v2 requests does not panic, which occurs when running integration tests -// with the embedded server with github.com/golang/protobuf v1.4.0+. See https://github.com/etcd-io/etcd/issues/12197. 
-type panicAlternativeStringer struct { - stringer fmt.Stringer - alternative func() string -} - -func (n panicAlternativeStringer) String() (s string) { - defer func() { - if err := recover(); err != nil { - s = n.alternative() - } - }() - s = n.stringer.String() - return s -} diff --git a/server/etcdserver/util_test.go b/server/etcdserver/util_test.go deleted file mode 100644 index cad7c3cf452..00000000000 --- a/server/etcdserver/util_test.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "net/http" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/raft/v3/raftpb" -) - -func TestLongestConnected(t *testing.T) { - umap, err := types.NewURLsMap("mem1=http://10.1:2379,mem2=http://10.2:2379,mem3=http://10.3:2379") - if err != nil { - t.Fatal(err) - } - clus, err := membership.NewClusterFromURLsMap(zaptest.NewLogger(t), "test", umap) - if err != nil { - t.Fatal(err) - } - memberIDs := clus.MemberIDs() - - tr := newNopTransporterWithActiveTime(memberIDs) - transferee, ok := longestConnected(tr, memberIDs) - if !ok { - t.Fatalf("unexpected ok %v", ok) - } - if memberIDs[0] != transferee { - t.Fatalf("expected first member %s to be transferee, got %s", memberIDs[0], transferee) - } - - // make all members non-active - amap := make(map[types.ID]time.Time) - for _, id := range memberIDs { - amap[id] = time.Time{} - } - tr.(*nopTransporterWithActiveTime).reset(amap) - - _, ok2 := longestConnected(tr, memberIDs) - if ok2 { - t.Fatalf("unexpected ok %v", ok) - } -} - -type nopTransporterWithActiveTime struct { - activeMap map[types.ID]time.Time -} - -// newNopTransporterWithActiveTime creates nopTransporterWithActiveTime with the first member -// being the most stable (longest active-since time). 
-func newNopTransporterWithActiveTime(memberIDs []types.ID) rafthttp.Transporter { - am := make(map[types.ID]time.Time) - for i, id := range memberIDs { - am[id] = time.Now().Add(time.Duration(i) * time.Second) - } - return &nopTransporterWithActiveTime{activeMap: am} -} - -func (s *nopTransporterWithActiveTime) Start() error { return nil } -func (s *nopTransporterWithActiveTime) Handler() http.Handler { return nil } -func (s *nopTransporterWithActiveTime) Send(m []raftpb.Message) {} -func (s *nopTransporterWithActiveTime) SendSnapshot(m snap.Message) {} -func (s *nopTransporterWithActiveTime) AddRemote(id types.ID, us []string) {} -func (s *nopTransporterWithActiveTime) AddPeer(id types.ID, us []string) {} -func (s *nopTransporterWithActiveTime) RemovePeer(id types.ID) {} -func (s *nopTransporterWithActiveTime) RemoveAllPeers() {} -func (s *nopTransporterWithActiveTime) UpdatePeer(id types.ID, us []string) {} -func (s *nopTransporterWithActiveTime) ActiveSince(id types.ID) time.Time { return s.activeMap[id] } -func (s *nopTransporterWithActiveTime) ActivePeers() int { return 0 } -func (s *nopTransporterWithActiveTime) Stop() {} -func (s *nopTransporterWithActiveTime) Pause() {} -func (s *nopTransporterWithActiveTime) Resume() {} -func (s *nopTransporterWithActiveTime) reset(am map[types.ID]time.Time) { s.activeMap = am } - -func TestPanicAlternativeStringer(t *testing.T) { - p := panicAlternativeStringer{alternative: func() string { return "alternative" }} - - p.stringer = testStringerFunc(func() string { panic("here") }) - if s := p.String(); s != "alternative" { - t.Fatalf("expected 'alternative', got %q", s) - } - - p.stringer = testStringerFunc(func() string { return "test" }) - if s := p.String(); s != "test" { - t.Fatalf("expected 'test', got %q", s) - } -} - -type testStringerFunc func() string - -func (s testStringerFunc) String() string { - return s() -} diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go deleted file mode 100644 
index 4f1cd6b13ee..00000000000 --- a/server/etcdserver/v3_server.go +++ /dev/null @@ -1,974 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/binary" - "strconv" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/server/v3/etcdserver/txn" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/lease/leasehttp" - "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/raft/v3" - - "github.com/gogo/protobuf/proto" - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" -) - -const ( - // In the health case, there might be a small gap (10s of entries) between - // the applied index and committed index. - // However, if the committed entries are very heavy to toApply, the gap might grow. - // We should stop accepting new proposals if the gap growing to a certain point. - maxGapBetweenApplyAndCommitIndex = 5000 - traceThreshold = 100 * time.Millisecond - readIndexRetryTime = 500 * time.Millisecond - - // The timeout for the node to catch up its applied index, and is used in - // lease related operations, such as LeaseRenew and LeaseTimeToLive. 
- applyTimeout = time.Second -) - -type RaftKV interface { - Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) - Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) - DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) - Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) -} - -type Lessor interface { - // LeaseGrant sends LeaseGrant request to raft and toApply it after committed. - LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - // LeaseRevoke sends LeaseRevoke request to raft and toApply it after committed. - LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error - // is returned. - LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) - - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) - - // LeaseLeases lists all leases. 
- LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) -} - -type Authenticator interface { - AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) - AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) - AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) - Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) - UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - trace := traceutil.New("range", - s.Logger(), - 
traceutil.Field{Key: "range_begin", Value: string(r.Key)}, - traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)}, - ) - ctx = context.WithValue(ctx, traceutil.TraceKey, trace) - - var resp *pb.RangeResponse - var err error - defer func(start time.Time) { - txn.WarnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) - if resp != nil { - trace.AddField( - traceutil.Field{Key: "response_count", Value: len(resp.Kvs)}, - traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}, - ) - } - trace.LogIfLong(traceThreshold) - }(time.Now()) - - if !r.Serializable { - err = s.linearizableReadNotify(ctx) - trace.Step("agreement among raft nodes before linearized reading") - if err != nil { - return nil, err - } - } - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - - get := func() { resp, err = txn.Range(ctx, s.Logger(), s.KV(), nil, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - err = serr - return nil, err - } - return resp, err -} - -func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now()) - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) - if err != nil { - return nil, err - } - return resp.(*pb.PutResponse), nil -} - -func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) - if err != nil { - return nil, err - } - return resp.(*pb.DeleteRangeResponse), nil -} - -func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if txn.IsTxnReadonly(r) { - trace := traceutil.New("transaction", - s.Logger(), - traceutil.Field{Key: "read_only", Value: true}, - ) - ctx = context.WithValue(ctx, traceutil.TraceKey, trace) - if !txn.IsTxnSerializable(r) { - err := 
s.linearizableReadNotify(ctx) - trace.Step("agreement among raft nodes before linearized reading") - if err != nil { - return nil, err - } - } - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return txn.CheckTxnAuth(s.authStore, ai, r) - } - - defer func(start time.Time) { - txn.WarnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) - trace.LogIfLong(traceThreshold) - }(time.Now()) - - get := func() { - resp, _, err = txn.Txn(ctx, s.Logger(), r, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.KV(), s.lessor) - } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - - ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now()) - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - return resp.(*pb.TxnResponse), nil -} - -func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - startTime := time.Now() - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) - trace := traceutil.TODO() - if result != nil && result.Trace != nil { - trace = result.Trace - defer func() { - trace.LogIfLong(traceThreshold) - }() - applyStart := result.Trace.GetStartTime() - result.Trace.SetStartTime(startTime) - trace.InsertStep(0, applyStart, "process raft request") - } - if r.Physical && result != nil && result.Physc != nil { - <-result.Physc - // The compaction is done deleting keys; the hash is now settled - // but the data is not necessarily committed. If there's a crash, - // the hash may revert to a hash prior to compaction completing - // if the compaction resumes. Force the finished compaction to - // commit so it won't resume following a crash. - // - // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock. 
- s.bemu.RLock() - s.be.ForceCommit() - s.bemu.RUnlock() - trace.Step("physically toApply compaction") - } - if err != nil { - return nil, err - } - if result.Err != nil { - return nil, result.Err - } - resp := result.Resp.(*pb.CompactionResponse) - if resp == nil { - resp = &pb.CompactionResponse{} - } - if resp.Header == nil { - resp.Header = &pb.ResponseHeader{} - } - resp.Header.Revision = s.kv.Rev() - trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) - return resp, nil -} - -func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - // no id given? choose one - for r.ID == int64(lease.NoLease) { - // only use positive int64 id's - r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) - } - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) - if err != nil { - return nil, err - } - return resp.(*pb.LeaseGrantResponse), nil -} - -func (s *EtcdServer) waitAppliedIndex() error { - select { - case <-s.ApplyWait(): - case <-s.stopping: - return errors.ErrStopped - case <-time.After(applyTimeout): - return errors.ErrTimeoutWaitAppliedIndex - } - - return nil -} - -func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) - if err != nil { - return nil, err - } - return resp.(*pb.LeaseRevokeResponse), nil -} - -func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { - if s.isLeader() { - if err := s.waitAppliedIndex(); err != nil { - return 0, err - } - - ttl, err := s.lessor.Renew(id) - if err == nil { // already requested to primary lessor(leader) - return ttl, nil - } - if err != lease.ErrNotPrimary { - return -1, err - } - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // renewals don't go through raft; forward to leader manually - for cctx.Err() == 
nil { - leader, lerr := s.waitLeader(cctx) - if lerr != nil { - return -1, lerr - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeasePrefix - ttl, err := leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) - if err == nil || err == lease.ErrLeaseNotFound { - return ttl, err - } - } - // Throttle in case of e.g. connection problems. - time.Sleep(50 * time.Millisecond) - } - - if cctx.Err() == context.DeadlineExceeded { - return -1, errors.ErrTimeout - } - return -1, errors.ErrCanceled -} - -func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - if s.isLeader() { - if err := s.waitAppliedIndex(); err != nil { - return nil, err - } - // primary; timetolive directly from leader - le := s.lessor.Lookup(lease.LeaseID(r.ID)) - if le == nil { - return nil, lease.ErrLeaseNotFound - } - // TODO: fill out ResponseHeader - resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()} - if r.Keys { - ks := le.Keys() - kbs := make([][]byte, len(ks)) - for i := range ks { - kbs[i] = []byte(ks[i]) - } - resp.Keys = kbs - } - return resp, nil - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // forward to leader - for cctx.Err() == nil { - leader, err := s.waitLeader(cctx) - if err != nil { - return nil, err - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeaseInternalPrefix - resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt) - if err == nil { - return resp.LeaseTimeToLiveResponse, nil - } - if err == lease.ErrLeaseNotFound { - return nil, err - } - } - } - - if cctx.Err() == context.DeadlineExceeded { - return nil, errors.ErrTimeout - } - return nil, errors.ErrCanceled -} - -func (s *EtcdServer) newHeader() *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(s.cluster.ID()), - MemberId: 
uint64(s.MemberId()), - Revision: s.KV().Rev(), - RaftTerm: s.Term(), - } -} - -// LeaseLeases is really ListLeases !??? -func (s *EtcdServer) LeaseLeases(_ context.Context, _ *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { - ls := s.lessor.Leases() - lss := make([]*pb.LeaseStatus, len(ls)) - for i := range ls { - lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)} - } - return &pb.LeaseLeasesResponse{Header: s.newHeader(), Leases: lss}, nil -} - -func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { - leader := s.cluster.Member(s.Leader()) - for leader == nil { - // wait an election - dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond - select { - case <-time.After(dur): - leader = s.cluster.Member(s.Leader()) - case <-s.stopping: - return nil, errors.ErrStopped - case <-ctx.Done(): - return nil, errors.ErrNoLeader - } - } - if len(leader.PeerURLs) == 0 { - return nil, errors.ErrNoLeader - } - return leader, nil -} - -func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AlarmResponse), nil -} - -func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthEnableResponse), nil -} - -func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthDisableResponse), nil -} - -func (s *EtcdServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) { - resp, err := s.raftRequest(ctx, 
pb.InternalRaftRequest{AuthStatus: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthStatusResponse), nil -} - -func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - if err := s.linearizableReadNotify(ctx); err != nil { - return nil, err - } - - lg := s.Logger() - - var resp proto.Message - for { - checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) - if err != nil { - if err != auth.ErrAuthNotEnabled { - lg.Warn( - "invalid authentication was requested", - zap.String("user", r.Name), - zap.Error(err), - ) - } - return nil, err - } - - st, err := s.AuthStore().GenTokenPrefix() - if err != nil { - return nil, err - } - - // internalReq doesn't need to have Password because the above s.AuthStore().CheckPassword() already did it. - // In addition, it will let a WAL entry not record password as a plain text. - internalReq := &pb.InternalAuthenticateRequest{ - Name: r.Name, - SimpleToken: st, - } - - resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) - if err != nil { - return nil, err - } - if checkedRevision == s.AuthStore().Revision() { - break - } - - lg.Info("revision when password checked became stale; retrying") - } - - return resp.(*pb.AuthenticateResponse), nil -} - -func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - if r.Options == nil || !r.Options.NoPassword { - hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost()) - if err != nil { - return nil, err - } - r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword) - r.Password = "" - } - - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserAddResponse), nil -} - -func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, 
error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserDeleteResponse), nil -} - -func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - if r.Password != "" { - hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost()) - if err != nil { - return nil, err - } - r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword) - r.Password = "" - } - - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserChangePasswordResponse), nil -} - -func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGrantRoleResponse), nil -} - -func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserGetResponse), nil -} - -func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserListResponse), nil -} - -func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthUserRevokeRoleResponse), nil -} - -func (s *EtcdServer) RoleAdd(ctx context.Context, r 
*pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleAddResponse), nil -} - -func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGrantPermissionResponse), nil -} - -func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleGetResponse), nil -} - -func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleListResponse), nil -} - -func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleRevokePermissionResponse), nil -} - -func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) - if err != nil { - return nil, err - } - return resp.(*pb.AuthRoleDeleteResponse), nil -} - -func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - result, err := s.processInternalRaftRequestOnce(ctx, r) - if err != nil { - return nil, err - } - if result.Err != nil { - return nil, 
result.Err - } - if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.Trace != nil { - applyStart := result.Trace.GetStartTime() - // The trace object is created in toApply. Here reset the start time to trace - // the raft request time by the difference between the request start time - // and toApply start time - result.Trace.SetStartTime(startTime) - result.Trace.InsertStep(0, applyStart, "process raft request") - result.Trace.LogIfLong(traceThreshold) - } - return result.Resp, nil -} - -func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - return s.raftRequestOnce(ctx, r) -} - -// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. -func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { - trace := traceutil.Get(ctx) - ai, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return err - } - if ai == nil { - // chk expects non-nil AuthInfo; use empty credentials - ai = &auth.AuthInfo{} - } - if err = chk(ai); err != nil { - return err - } - trace.Step("get authentication metadata") - // fetch response for serialized request - get() - // check for stale token revision in case the auth store was updated while - // the request has been handled. 
- if ai.Revision != 0 && ai.Revision != s.authStore.Revision() { - return auth.ErrAuthOldRevision - } - return nil -} - -func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*apply2.Result, error) { - ai := s.getAppliedIndex() - ci := s.getCommittedIndex() - if ci > ai+maxGapBetweenApplyAndCommitIndex { - return nil, errors.ErrTooManyRequests - } - - r.Header = &pb.RequestHeader{ - ID: s.reqIDGen.Next(), - } - - // check authinfo if it is not InternalAuthenticateRequest - if r.Authenticate == nil { - authInfo, err := s.AuthInfoFromCtx(ctx) - if err != nil { - return nil, err - } - if authInfo != nil { - r.Header.Username = authInfo.Username - r.Header.AuthRevision = authInfo.Revision - } - } - - data, err := r.Marshal() - if err != nil { - return nil, err - } - - if len(data) > int(s.Cfg.MaxRequestBytes) { - return nil, errors.ErrRequestTooLarge - } - - id := r.ID - if id == 0 { - id = r.Header.ID - } - ch := s.w.Register(id) - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - start := time.Now() - err = s.r.Propose(cctx, data) - if err != nil { - proposalsFailed.Inc() - s.w.Trigger(id, nil) // GC wait - return nil, err - } - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - return x.(*apply2.Result), nil - case <-cctx.Done(): - proposalsFailed.Inc() - s.w.Trigger(id, nil) // GC wait - return nil, s.parseProposeCtxErr(cctx.Err(), start) - case <-s.done: - return nil, errors.ErrStopped - } -} - -// Watchable returns a watchable interface attached to the etcdserver. 
-func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } - -func (s *EtcdServer) linearizableReadLoop() { - for { - requestId := s.reqIDGen.Next() - leaderChangedNotifier := s.leaderChanged.Receive() - select { - case <-leaderChangedNotifier: - continue - case <-s.readwaitc: - case <-s.stopping: - return - } - - // as a single loop is can unlock multiple reads, it is not very useful - // to propagate the trace from Txn or Range. - trace := traceutil.New("linearizableReadLoop", s.Logger()) - - nextnr := newNotifier() - s.readMu.Lock() - nr := s.readNotifier - s.readNotifier = nextnr - s.readMu.Unlock() - - confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestId) - if isStopped(err) { - return - } - if err != nil { - nr.notify(err) - continue - } - - trace.Step("read index received") - - trace.AddField(traceutil.Field{Key: "readStateIndex", Value: confirmedIndex}) - - appliedIndex := s.getAppliedIndex() - trace.AddField(traceutil.Field{Key: "appliedIndex", Value: strconv.FormatUint(appliedIndex, 10)}) - - if appliedIndex < confirmedIndex { - select { - case <-s.applyWait.Wait(confirmedIndex): - case <-s.stopping: - return - } - } - // unblock all l-reads requested at indices before confirmedIndex - nr.notify(nil) - trace.Step("applied index is now lower than readState.Index") - - trace.LogAllStepsIfLong(traceThreshold) - } -} - -func isStopped(err error) bool { - return err == raft.ErrStopped || err == errors.ErrStopped -} - -func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) { - err := s.sendReadIndex(requestId) - if err != nil { - return 0, err - } - - lg := s.Logger() - errorTimer := time.NewTimer(s.Cfg.ReqTimeout()) - defer errorTimer.Stop() - retryTimer := time.NewTimer(readIndexRetryTime) - defer retryTimer.Stop() - - firstCommitInTermNotifier := s.firstCommitInTerm.Receive() - - for { - select { - case rs := <-s.r.readStateC: - requestIdBytes := 
uint64ToBigEndianBytes(requestId) - gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIdBytes) - if !gotOwnResponse { - // a previous request might time out. now we should ignore the response of it and - // continue waiting for the response of the current requests. - responseId := uint64(0) - if len(rs.RequestCtx) == 8 { - responseId = binary.BigEndian.Uint64(rs.RequestCtx) - } - lg.Warn( - "ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader", - zap.Uint64("sent-request-id", requestId), - zap.Uint64("received-request-id", responseId), - ) - slowReadIndex.Inc() - continue - } - return rs.Index, nil - case <-leaderChangedNotifier: - readIndexFailed.Inc() - // return a retryable error. - return 0, errors.ErrLeaderChanged - case <-firstCommitInTermNotifier: - firstCommitInTermNotifier = s.firstCommitInTerm.Receive() - lg.Info("first commit in current term: resending ReadIndex request") - err := s.sendReadIndex(requestId) - if err != nil { - return 0, err - } - retryTimer.Reset(readIndexRetryTime) - continue - case <-retryTimer.C: - lg.Warn( - "waiting for ReadIndex response took too long, retrying", - zap.Uint64("sent-request-id", requestId), - zap.Duration("retry-timeout", readIndexRetryTime), - ) - err := s.sendReadIndex(requestId) - if err != nil { - return 0, err - } - retryTimer.Reset(readIndexRetryTime) - continue - case <-errorTimer.C: - lg.Warn( - "timed out waiting for read index response (local node might have slow network)", - zap.Duration("timeout", s.Cfg.ReqTimeout()), - ) - slowReadIndex.Inc() - return 0, errors.ErrTimeout - case <-s.stopping: - return 0, errors.ErrStopped - } - } -} - -func uint64ToBigEndianBytes(number uint64) []byte { - byteResult := make([]byte, 8) - binary.BigEndian.PutUint64(byteResult, number) - return byteResult -} - -func (s *EtcdServer) sendReadIndex(requestIndex uint64) error { - ctxToSend := uint64ToBigEndianBytes(requestIndex) - - cctx, cancel := 
context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - err := s.r.ReadIndex(cctx, ctxToSend) - cancel() - if err == raft.ErrStopped { - return err - } - if err != nil { - lg := s.Logger() - lg.Warn("failed to get read index from Raft", zap.Error(err)) - readIndexFailed.Inc() - return err - } - return nil -} - -func (s *EtcdServer) LinearizableReadNotify(ctx context.Context) error { - return s.linearizableReadNotify(ctx) -} - -func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { - s.readMu.RLock() - nc := s.readNotifier - s.readMu.RUnlock() - - // signal linearizable loop for current notify if it hasn't been already - select { - case s.readwaitc <- struct{}{}: - default: - } - - // wait for read state notification - select { - case <-nc.c: - return nc.err - case <-ctx.Done(): - return ctx.Err() - case <-s.done: - return errors.ErrStopped - } -} - -func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) - if authInfo != nil || err != nil { - return authInfo, err - } - if !s.Cfg.ClientCertAuthEnabled { - return nil, nil - } - authInfo = s.AuthStore().AuthInfoFromTLS(ctx) - return authInfo, nil -} - -func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { - switch r.Action { - case pb.DowngradeRequest_VALIDATE: - return s.downgradeValidate(ctx, r.Version) - case pb.DowngradeRequest_ENABLE: - return s.downgradeEnable(ctx, r) - case pb.DowngradeRequest_CANCEL: - return s.downgradeCancel(ctx) - default: - return nil, errors.ErrUnknownMethod - } -} - -func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.DowngradeResponse, error) { - resp := &pb.DowngradeResponse{} - - targetVersion, err := convertToClusterVersion(v) - if err != nil { - return nil, err - } - - cv := s.ClusterVersion() - if cv == nil { - return nil, errors.ErrClusterVersionUnavailable - } - resp.Version = 
version.Cluster(cv.String()) - err = s.Version().DowngradeValidate(ctx, targetVersion) - if err != nil { - return nil, err - } - - return resp, nil -} - -func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { - lg := s.Logger() - targetVersion, err := convertToClusterVersion(r.Version) - if err != nil { - lg.Warn("reject downgrade request", zap.Error(err)) - return nil, err - } - err = s.Version().DowngradeEnable(ctx, targetVersion) - if err != nil { - lg.Warn("reject downgrade request", zap.Error(err)) - return nil, err - } - resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())} - return &resp, nil -} - -func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) { - err := s.Version().DowngradeCancel(ctx) - if err != nil { - s.lg.Warn("failed to cancel downgrade", zap.Error(err)) - } - resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())} - return &resp, nil -} diff --git a/server/etcdserver/version/doc.go b/server/etcdserver/version/doc.go deleted file mode 100644 index c34f9051195..00000000000 --- a/server/etcdserver/version/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package version provides functions for getting/saving storage version. 
-package version diff --git a/server/etcdserver/version/downgrade.go b/server/etcdserver/version/downgrade.go deleted file mode 100644 index efad367058b..00000000000 --- a/server/etcdserver/version/downgrade.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/version" -) - -type DowngradeInfo struct { - // TargetVersion is the target downgrade version, if the cluster is not under downgrading, - // the targetVersion will be an empty string - TargetVersion string `json:"target-version"` - // Enabled indicates whether the cluster is enabled to downgrade - Enabled bool `json:"enabled"` -} - -func (d *DowngradeInfo) GetTargetVersion() *semver.Version { - return semver.Must(semver.NewVersion(d.TargetVersion)) -} - -// isValidDowngrade verifies whether the cluster can be downgraded from verFrom to verTo -func isValidDowngrade(verFrom *semver.Version, verTo *semver.Version) bool { - return verTo.Equal(*allowedDowngradeVersion(verFrom)) -} - -// MustDetectDowngrade will detect local server joining cluster that doesn't support it's version. 
-func MustDetectDowngrade(lg *zap.Logger, sv, cv *semver.Version) { - // only keep major.minor version for comparison against cluster version - sv = &semver.Version{Major: sv.Major, Minor: sv.Minor} - - // if the cluster disables downgrade, check local version against determined cluster version. - // the validation passes when local version is not less than cluster version - if cv != nil && sv.LessThan(*cv) { - lg.Panic( - "invalid downgrade; server version is lower than determined cluster version", - zap.String("current-server-version", sv.String()), - zap.String("determined-cluster-version", version.Cluster(cv.String())), - ) - } -} - -func allowedDowngradeVersion(ver *semver.Version) *semver.Version { - // Todo: handle the case that downgrading from higher major version(e.g. downgrade from v4.0 to v3.x) - return &semver.Version{Major: ver.Major, Minor: ver.Minor - 1} -} - -// IsValidVersionChange checks the two scenario when version is valid to change: -// 1. Downgrade: cluster version is 1 minor version higher than local version, -// cluster version should change. -// 2. 
Cluster start: when not all members version are available, cluster version -// is set to MinVersion(3.0), when all members are at higher version, cluster version -// is lower than local version, cluster version should change -func IsValidVersionChange(cv *semver.Version, lv *semver.Version) bool { - cv = &semver.Version{Major: cv.Major, Minor: cv.Minor} - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - - if isValidDowngrade(cv, lv) || (cv.Major == lv.Major && cv.LessThan(*lv)) { - return true - } - return false -} diff --git a/server/etcdserver/version/downgrade_test.go b/server/etcdserver/version/downgrade_test.go deleted file mode 100644 index 95dfcba37c2..00000000000 --- a/server/etcdserver/version/downgrade_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package version - -import ( - "fmt" - "testing" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" -) - -func TestMustDetectDowngrade(t *testing.T) { - lv := semver.Must(semver.NewVersion(version.Version)) - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - oneMinorHigher := &semver.Version{Major: lv.Major, Minor: lv.Minor + 1} - oneMinorLower := &semver.Version{Major: lv.Major, Minor: lv.Minor - 1} - - tests := []struct { - name string - clusterVersion *semver.Version - success bool - message string - }{ - { - "Succeeded when cluster version is nil", - nil, - true, - "", - }, - { - "Succeeded when cluster version is one minor lower", - oneMinorLower, - true, - "", - }, - { - "Succeeded when cluster version is server version", - lv, - true, - "", - }, - { - "Failed when server version is lower than determined cluster version ", - oneMinorHigher, - false, - "invalid downgrade; server version is lower than determined cluster version", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - sv := semver.Must(semver.NewVersion(version.Version)) - err := tryMustDetectDowngrade(lg, sv, tt.clusterVersion) - - if tt.success != (err == nil) { - t.Errorf("Unexpected success, got: %v, wanted: %v", err == nil, tt.success) - // TODO test err - } - if err != nil && tt.message != fmt.Sprintf("%s", err) { - t.Errorf("Unexpected message, got %q, wanted: %v", err, tt.message) - } - }) - } -} - -func tryMustDetectDowngrade(lg *zap.Logger, sv, cv *semver.Version) (err interface{}) { - defer func() { - err = recover() - }() - MustDetectDowngrade(lg, sv, cv) - return err -} - -func TestIsValidDowngrade(t *testing.T) { - tests := []struct { - name string - verFrom string - verTo string - result bool - }{ - { - "Valid downgrade", - "3.5.0", - "3.4.0", - true, - }, - { - "Invalid downgrade", - "3.5.2", - "3.3.0", - false, - }, - } - for _, tt := range 
tests { - t.Run(tt.name, func(t *testing.T) { - res := isValidDowngrade( - semver.Must(semver.NewVersion(tt.verFrom)), semver.Must(semver.NewVersion(tt.verTo))) - if res != tt.result { - t.Errorf("Expected downgrade valid is %v; Got %v", tt.result, res) - } - }) - } -} - -func TestIsVersionChangable(t *testing.T) { - v0 := semver.Must(semver.NewVersion("2.4.0")) - v1 := semver.Must(semver.NewVersion("3.4.0")) - v2 := semver.Must(semver.NewVersion("3.5.0")) - v3 := semver.Must(semver.NewVersion("3.5.1")) - v4 := semver.Must(semver.NewVersion("3.6.0")) - - tests := []struct { - name string - currentVersion *semver.Version - localVersion *semver.Version - expectedResult bool - }{ - { - name: "When local version is one minor lower than cluster version", - currentVersion: v2, - localVersion: v1, - expectedResult: true, - }, - { - name: "When local version is one minor and one patch lower than cluster version", - currentVersion: v3, - localVersion: v1, - expectedResult: true, - }, - { - name: "When local version is one minor higher than cluster version", - currentVersion: v1, - localVersion: v2, - expectedResult: true, - }, - { - name: "When local version is two minor higher than cluster version", - currentVersion: v1, - localVersion: v4, - expectedResult: true, - }, - { - name: "When local version is one major higher than cluster version", - currentVersion: v0, - localVersion: v1, - expectedResult: false, - }, - { - name: "When local version is equal to cluster version", - currentVersion: v1, - localVersion: v1, - expectedResult: false, - }, - { - name: "When local version is one patch higher than cluster version", - currentVersion: v2, - localVersion: v3, - expectedResult: false, - }, - { - name: "When local version is two minor lower than cluster version", - currentVersion: v4, - localVersion: v1, - expectedResult: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if ret := IsValidVersionChange(tt.currentVersion, tt.localVersion); 
ret != tt.expectedResult { - t.Errorf("Expected %v; Got %v", tt.expectedResult, ret) - } - }) - } -} diff --git a/server/etcdserver/version/errors.go b/server/etcdserver/version/errors.go deleted file mode 100644 index 906aa9f413f..00000000000 --- a/server/etcdserver/version/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import "errors" - -var ( - ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version") - ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress") - ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job") -) diff --git a/server/etcdserver/version/monitor.go b/server/etcdserver/version/monitor.go deleted file mode 100644 index 7ecc19709e7..00000000000 --- a/server/etcdserver/version/monitor.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "context" - "errors" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/version" -) - -// Monitor contains logic used by cluster leader to monitor version changes and decide on cluster version or downgrade progress. -type Monitor struct { - lg *zap.Logger - s Server -} - -// Server lists EtcdServer methods needed by Monitor -type Server interface { - GetClusterVersion() *semver.Version - GetDowngradeInfo() *DowngradeInfo - GetMembersVersions() map[string]*version.Versions - UpdateClusterVersion(string) - LinearizableReadNotify(ctx context.Context) error - DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error - DowngradeCancel(ctx context.Context) error - - GetStorageVersion() *semver.Version - UpdateStorageVersion(semver.Version) error -} - -func NewMonitor(lg *zap.Logger, storage Server) *Monitor { - return &Monitor{ - lg: lg, - s: storage, - } -} - -// UpdateClusterVersionIfNeeded updates the cluster version. -func (m *Monitor) UpdateClusterVersionIfNeeded() error { - newClusterVersion, err := m.decideClusterVersion() - if newClusterVersion != nil { - newClusterVersion = &semver.Version{Major: newClusterVersion.Major, Minor: newClusterVersion.Minor} - m.s.UpdateClusterVersion(newClusterVersion.String()) - } - return err -} - -// decideClusterVersion decides whether to change cluster version and its next value. -// New cluster version is based on the members versions server and whether cluster is downgrading. -// Returns nil if cluster version should be left unchanged. 
-func (m *Monitor) decideClusterVersion() (*semver.Version, error) { - clusterVersion := m.s.GetClusterVersion() - minimalServerVersion := m.membersMinimalServerVersion() - if clusterVersion == nil { - if minimalServerVersion != nil { - return minimalServerVersion, nil - } - return semver.New(version.MinClusterVersion), nil - } - if minimalServerVersion == nil { - return nil, nil - } - downgrade := m.s.GetDowngradeInfo() - if downgrade != nil && downgrade.Enabled { - if downgrade.GetTargetVersion().Equal(*clusterVersion) { - return nil, nil - } - if !isValidDowngrade(clusterVersion, downgrade.GetTargetVersion()) { - m.lg.Error("Cannot downgrade from cluster-version to downgrade-target", - zap.String("downgrade-target", downgrade.TargetVersion), - zap.String("cluster-version", clusterVersion.String()), - ) - return nil, errors.New("invalid downgrade target") - } - if !isValidDowngrade(minimalServerVersion, downgrade.GetTargetVersion()) { - m.lg.Error("Cannot downgrade from minimal-server-version to downgrade-target", - zap.String("downgrade-target", downgrade.TargetVersion), - zap.String("minimal-server-version", minimalServerVersion.String()), - ) - return nil, errors.New("invalid downgrade target") - } - return downgrade.GetTargetVersion(), nil - } - if clusterVersion.LessThan(*minimalServerVersion) && IsValidVersionChange(clusterVersion, minimalServerVersion) { - return minimalServerVersion, nil - } - return nil, nil -} - -// UpdateStorageVersionIfNeeded updates the storage version if it differs from cluster version. 
-func (m *Monitor) UpdateStorageVersionIfNeeded() { - cv := m.s.GetClusterVersion() - if cv == nil { - return - } - sv := m.s.GetStorageVersion() - - if sv == nil || sv.Major != cv.Major || sv.Minor != cv.Minor { - if sv != nil { - m.lg.Info("cluster version differs from storage version.", zap.String("cluster-version", cv.String()), zap.String("storage-version", sv.String())) - } - err := m.s.UpdateStorageVersion(semver.Version{Major: cv.Major, Minor: cv.Minor}) - if err != nil { - m.lg.Error("failed to update storage version", zap.String("cluster-version", cv.String()), zap.Error(err)) - return - } - d := m.s.GetDowngradeInfo() - if d != nil && d.Enabled { - m.lg.Info( - "The server is ready to downgrade", - zap.String("target-version", d.TargetVersion), - zap.String("server-version", version.Version), - ) - } - } -} - -func (m *Monitor) CancelDowngradeIfNeeded() { - d := m.s.GetDowngradeInfo() - if d == nil || !d.Enabled { - return - } - - targetVersion := d.TargetVersion - v := semver.Must(semver.NewVersion(targetVersion)) - if m.versionsMatchTarget(v) { - m.lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion)) - err := m.s.DowngradeCancel(context.Background()) - if err != nil { - m.lg.Warn("failed to cancel downgrade", zap.Error(err)) - } - } -} - -// membersMinimalServerVersion returns the min server version in the map, or nil if the min -// version in unknown. -// It prints out log if there is a member with a higher version than the -// local version. 
-func (m *Monitor) membersMinimalServerVersion() *semver.Version { - vers := m.s.GetMembersVersions() - var minV *semver.Version - lv := semver.Must(semver.NewVersion(version.Version)) - - for mid, ver := range vers { - if ver == nil { - return nil - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - m.lg.Warn( - "failed to parse server version of remote member", - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - zap.Error(err), - ) - return nil - } - if lv.LessThan(*v) { - m.lg.Warn( - "leader found higher-versioned member", - zap.String("local-member-version", lv.String()), - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - ) - } - if minV == nil { - minV = v - } else if v.LessThan(*minV) { - minV = v - } - } - return minV -} - -// versionsMatchTarget returns true if all server versions are equal to target version, otherwise return false. -// It can be used to decide the whether the cluster finishes downgrading to target version. 
-func (m *Monitor) versionsMatchTarget(targetVersion *semver.Version) bool { - vers := m.s.GetMembersVersions() - targetVersion = &semver.Version{Major: targetVersion.Major, Minor: targetVersion.Minor} - for mid, ver := range vers { - if ver == nil { - return false - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - m.lg.Warn( - "failed to parse server version of remote member", - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - zap.Error(err), - ) - return false - } - v = &semver.Version{Major: v.Major, Minor: v.Minor} - if !targetVersion.Equal(*v) { - m.lg.Warn("remotes server has mismatching etcd version", - zap.String("remote-member-id", mid), - zap.String("current-server-version", v.String()), - zap.String("target-version", targetVersion.String()), - ) - return false - } - } - return true -} diff --git a/server/etcdserver/version/monitor_test.go b/server/etcdserver/version/monitor_test.go deleted file mode 100644 index b4d147f354f..00000000000 --- a/server/etcdserver/version/monitor_test.go +++ /dev/null @@ -1,454 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package version - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" -) - -func TestMemberMinimalVersion(t *testing.T) { - tests := []struct { - memberVersions map[string]*version.Versions - wantVersion *semver.Version - }{ - { - map[string]*version.Versions{"a": {Server: "2.0.0"}}, - semver.Must(semver.NewVersion("2.0.0")), - }, - // unknown - { - map[string]*version.Versions{"a": nil}, - nil, - }, - { - map[string]*version.Versions{"a": {Server: "2.0.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}}, - semver.Must(semver.NewVersion("2.0.0")), - }, - { - map[string]*version.Versions{"a": {Server: "2.1.0"}, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}}, - semver.Must(semver.NewVersion("2.1.0")), - }, - { - map[string]*version.Versions{"a": nil, "b": {Server: "2.1.0"}, "c": {Server: "2.1.0"}}, - nil, - }, - } - - for i, tt := range tests { - monitor := NewMonitor(zaptest.NewLogger(t), &storageMock{ - memberVersions: tt.memberVersions, - }) - minV := monitor.membersMinimalServerVersion() - if !reflect.DeepEqual(minV, tt.wantVersion) { - t.Errorf("#%d: ver = %+v, want %+v", i, minV, tt.wantVersion) - } - } -} - -func TestDecideStorageVersion(t *testing.T) { - tests := []struct { - name string - clusterVersion *semver.Version - storageVersion *semver.Version - expectStorageVersion *semver.Version - }{ - { - name: "No action if cluster version is nil", - }, - { - name: "Should set storage version if cluster version is set", - clusterVersion: &version.V3_5, - expectStorageVersion: &version.V3_5, - }, - { - name: "No action if storage version was already set", - storageVersion: &version.V3_5, - expectStorageVersion: &version.V3_5, - }, - { - name: "No action if storage version equals cluster version", - clusterVersion: &version.V3_5, - storageVersion: &version.V3_5, - expectStorageVersion: &version.V3_5, - }, - { - 
name: "Should set storage version to cluster version", - clusterVersion: &version.V3_6, - storageVersion: &version.V3_5, - expectStorageVersion: &version.V3_6, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &storageMock{ - clusterVersion: tt.clusterVersion, - storageVersion: tt.storageVersion, - } - monitor := NewMonitor(zaptest.NewLogger(t), s) - monitor.UpdateStorageVersionIfNeeded() - if !reflect.DeepEqual(s.storageVersion, tt.expectStorageVersion) { - t.Errorf("Unexpected storage version value, got = %+v, want %+v", s.storageVersion, tt.expectStorageVersion) - } - }) - } -} - -func TestVersionMatchTarget(t *testing.T) { - tests := []struct { - name string - targetVersion *semver.Version - versionMap map[string]*version.Versions - expectedFinished bool - }{ - { - "When downgrade finished", - &semver.Version{Major: 3, Minor: 4}, - map[string]*version.Versions{ - "mem1": {Server: "3.4.1", Cluster: "3.4.0"}, - "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"}, - "mem3": {Server: "3.4.2", Cluster: "3.4.0"}, - }, - true, - }, - { - "When cannot parse peer version", - &semver.Version{Major: 3, Minor: 4}, - map[string]*version.Versions{ - "mem1": {Server: "3.4", Cluster: "3.4.0"}, - "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"}, - "mem3": {Server: "3.4.2", Cluster: "3.4.0"}, - }, - false, - }, - { - "When downgrade not finished", - &semver.Version{Major: 3, Minor: 4}, - map[string]*version.Versions{ - "mem1": {Server: "3.4.1", Cluster: "3.4.0"}, - "mem2": {Server: "3.4.2-pre", Cluster: "3.4.0"}, - "mem3": {Server: "3.5.2", Cluster: "3.5.0"}, - }, - false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - monitor := NewMonitor(zaptest.NewLogger(t), &storageMock{ - memberVersions: tt.versionMap, - }) - actual := monitor.versionsMatchTarget(tt.targetVersion) - if actual != tt.expectedFinished { - t.Errorf("expected downgrade finished is %v; got %v", tt.expectedFinished, actual) - } - }) - } -} - -func 
TestUpdateClusterVersionIfNeeded(t *testing.T) { - tests := []struct { - name string - clusterVersion *semver.Version - memberVersions map[string]*version.Versions - downgrade *DowngradeInfo - expectClusterVersion *semver.Version - expectError error - }{ - { - name: "Default to 3.0 if there are no members", - expectClusterVersion: &version.V3_0, - }, - { - name: "Should pick lowest server version from members", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.6.0"}, - "b": {Server: "3.5.0"}, - }, - expectClusterVersion: &version.V3_5, - }, - { - name: "Should support not full releases", - memberVersions: map[string]*version.Versions{ - "b": {Server: "3.5.0-alpha.0"}, - }, - expectClusterVersion: &version.V3_5, - }, - { - name: "Sets minimal version when member has broken version", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.6.0"}, - "b": {Server: "yyyy"}, - }, - expectClusterVersion: &version.V3_0, - }, - { - name: "Should not downgrade cluster version without explicit downgrade request", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.5.0"}, - "b": {Server: "3.6.0"}, - }, - clusterVersion: &version.V3_6, - expectClusterVersion: &version.V3_6, - }, - { - name: "Should not upgrade cluster version if there is still member old member", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.5.0"}, - "b": {Server: "3.6.0"}, - }, - clusterVersion: &version.V3_5, - expectClusterVersion: &version.V3_5, - }, - { - name: "Should upgrade cluster version if all members have upgraded (have higher server version)", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.6.0"}, - "b": {Server: "3.6.0"}, - }, - clusterVersion: &version.V3_5, - expectClusterVersion: &version.V3_6, - }, - { - name: "Should downgrade cluster version if downgrade is set to allow older members to join", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.6.0"}, - "b": {Server: "3.6.0"}, - }, - 
clusterVersion: &version.V3_6, - downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, - expectClusterVersion: &version.V3_5, - }, - { - name: "Don't downgrade below supported range", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.6.0"}, - "b": {Server: "3.6.0"}, - }, - clusterVersion: &version.V3_5, - downgrade: &DowngradeInfo{TargetVersion: "3.4.0", Enabled: true}, - expectClusterVersion: &version.V3_5, - expectError: fmt.Errorf("invalid downgrade target"), - }, - { - name: "Don't downgrade above cluster version", - memberVersions: map[string]*version.Versions{ - "a": {Server: "3.5.0"}, - "b": {Server: "3.5.0"}, - }, - clusterVersion: &version.V3_5, - downgrade: &DowngradeInfo{TargetVersion: "3.6.0", Enabled: true}, - expectClusterVersion: &version.V3_5, - expectError: fmt.Errorf("invalid downgrade target"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &storageMock{ - clusterVersion: tt.clusterVersion, - memberVersions: tt.memberVersions, - downgradeInfo: tt.downgrade, - } - monitor := NewMonitor(zaptest.NewLogger(t), s) - - err := monitor.UpdateClusterVersionIfNeeded() - assert.Equal(t, tt.expectClusterVersion, s.clusterVersion) - assert.Equal(t, tt.expectError, err) - - // Ensure results are stable - newVersion, err := monitor.decideClusterVersion() - assert.Nil(t, newVersion) - assert.Equal(t, tt.expectError, err) - }) - } -} - -func TestCancelDowngradeIfNeeded(t *testing.T) { - tests := []struct { - name string - memberVersions map[string]*version.Versions - downgrade *DowngradeInfo - expectDowngrade *DowngradeInfo - }{ - { - name: "No action if there no downgrade in progress", - }, - { - name: "Cancel downgrade if there are no members", - downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, - expectDowngrade: nil, - }, - // Next entries go through all states that should happen during downgrade - { - name: "No action if downgrade was not started", - memberVersions: 
map[string]*version.Versions{ - "a": {Cluster: "3.6.0", Server: "3.6.1"}, - "b": {Cluster: "3.6.0", Server: "3.6.2"}, - }, - }, - { - name: "Continue downgrade if just started", - memberVersions: map[string]*version.Versions{ - "a": {Cluster: "3.5.0", Server: "3.6.1"}, - "b": {Cluster: "3.5.0", Server: "3.6.2"}, - }, - downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, - expectDowngrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, - }, - { - name: "Continue downgrade if there is at least one member with not matching", - memberVersions: map[string]*version.Versions{ - "a": {Cluster: "3.5.0", Server: "3.5.1"}, - "b": {Cluster: "3.5.0", Server: "3.6.2"}, - }, - downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, - expectDowngrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, - }, - { - name: "Cancel downgrade if all members have downgraded", - memberVersions: map[string]*version.Versions{ - "a": {Cluster: "3.5.0", Server: "3.5.1"}, - "b": {Cluster: "3.5.0", Server: "3.5.2"}, - }, - downgrade: &DowngradeInfo{TargetVersion: "3.5.0", Enabled: true}, - expectDowngrade: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &storageMock{ - memberVersions: tt.memberVersions, - downgradeInfo: tt.downgrade, - } - monitor := NewMonitor(zaptest.NewLogger(t), s) - - // Run multiple times to ensure that results are stable - for i := 0; i < 3; i++ { - monitor.CancelDowngradeIfNeeded() - assert.Equal(t, tt.expectDowngrade, s.downgradeInfo) - } - }) - } -} - -func TestUpdateStorageVersionIfNeeded(t *testing.T) { - tests := []struct { - name string - clusterVersion *semver.Version - storageVersion *semver.Version - expectStorageVersion *semver.Version - }{ - { - name: "No action if cluster version is nil", - }, - { - name: "Should set storage version if cluster version is set", - clusterVersion: &version.V3_5, - expectStorageVersion: &version.V3_5, - }, - { - name: "No action if storage version was 
already set", - storageVersion: &version.V3_5, - expectStorageVersion: &version.V3_5, - }, - { - name: "No action if storage version equals cluster version", - clusterVersion: &version.V3_5, - storageVersion: &version.V3_5, - expectStorageVersion: &version.V3_5, - }, - { - name: "Should set storage version to cluster version", - clusterVersion: &version.V3_6, - storageVersion: &version.V3_5, - expectStorageVersion: &version.V3_6, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := &storageMock{ - clusterVersion: tt.clusterVersion, - storageVersion: tt.storageVersion, - } - monitor := NewMonitor(zaptest.NewLogger(t), s) - - // Run multiple times to ensure that results are stable - for i := 0; i < 3; i++ { - monitor.UpdateStorageVersionIfNeeded() - assert.Equal(t, tt.expectStorageVersion, s.storageVersion) - } - }) - } -} - -type storageMock struct { - memberVersions map[string]*version.Versions - clusterVersion *semver.Version - storageVersion *semver.Version - downgradeInfo *DowngradeInfo - locked bool -} - -var _ Server = (*storageMock)(nil) - -func (s *storageMock) UpdateClusterVersion(version string) { - s.clusterVersion = semver.New(version) -} - -func (s *storageMock) LinearizableReadNotify(ctx context.Context) error { - return nil -} - -func (s *storageMock) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { - return nil -} - -func (s *storageMock) DowngradeCancel(ctx context.Context) error { - s.downgradeInfo = nil - return nil -} - -func (s *storageMock) GetClusterVersion() *semver.Version { - return s.clusterVersion -} - -func (s *storageMock) GetDowngradeInfo() *DowngradeInfo { - return s.downgradeInfo -} - -func (s *storageMock) GetMembersVersions() map[string]*version.Versions { - return s.memberVersions -} - -func (s *storageMock) GetStorageVersion() *semver.Version { - return s.storageVersion -} - -func (s *storageMock) UpdateStorageVersion(v semver.Version) error { - s.storageVersion = &v - 
return nil -} diff --git a/server/etcdserver/version/version.go b/server/etcdserver/version/version.go deleted file mode 100644 index 0a2f99a1faf..00000000000 --- a/server/etcdserver/version/version.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "context" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" -) - -// Manager contains logic to manage etcd cluster version downgrade process. -type Manager struct { - lg *zap.Logger - s Server -} - -// NewManager returns a new manager instance -func NewManager(lg *zap.Logger, s Server) *Manager { - return &Manager{ - lg: lg, - s: s, - } -} - -// DowngradeValidate validates if cluster is downloadable to provided target version and returns error if not. 
-func (m *Manager) DowngradeValidate(ctx context.Context, targetVersion *semver.Version) error { - // gets leaders commit index and wait for local store to finish applying that index - // to avoid using stale downgrade information - err := m.s.LinearizableReadNotify(ctx) - if err != nil { - return err - } - cv := m.s.GetClusterVersion() - allowedTargetVersion := allowedDowngradeVersion(cv) - if !targetVersion.Equal(*allowedTargetVersion) { - return ErrInvalidDowngradeTargetVersion - } - - downgradeInfo := m.s.GetDowngradeInfo() - if downgradeInfo != nil && downgradeInfo.Enabled { - // Todo: return the downgrade status along with the error msg - return ErrDowngradeInProcess - } - return nil -} - -// DowngradeEnable initiates etcd cluster version downgrade process. -func (m *Manager) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { - // validate downgrade capability before starting downgrade - err := m.DowngradeValidate(ctx, targetVersion) - if err != nil { - return err - } - return m.s.DowngradeEnable(ctx, targetVersion) -} - -// DowngradeCancel cancels ongoing downgrade process. -func (m *Manager) DowngradeCancel(ctx context.Context) error { - err := m.s.LinearizableReadNotify(ctx) - if err != nil { - return err - } - downgradeInfo := m.s.GetDowngradeInfo() - if !downgradeInfo.Enabled { - return ErrNoInflightDowngrade - } - return m.s.DowngradeCancel(ctx) -} diff --git a/server/etcdserver/version/version_test.go b/server/etcdserver/version/version_test.go deleted file mode 100644 index 7b929026034..00000000000 --- a/server/etcdserver/version/version_test.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package version - -import ( - "context" - "fmt" - "math/rand" - "testing" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" -) - -func TestUpgradeSingleNode(t *testing.T) { - lg := zaptest.NewLogger(t) - c := newCluster(lg, 1, version.V3_6) - c.StepMonitors() - assert.Equal(t, newCluster(lg, 1, version.V3_6), c) - - c.ReplaceMemberBinary(0, version.V3_7) - c.StepMonitors() - c.StepMonitors() - - assert.Equal(t, newCluster(lg, 1, version.V3_7), c) -} - -func TestUpgradeThreeNodes(t *testing.T) { - lg := zaptest.NewLogger(t) - c := newCluster(lg, 3, version.V3_6) - c.StepMonitors() - assert.Equal(t, newCluster(lg, 3, version.V3_6), c) - - c.ReplaceMemberBinary(0, version.V3_7) - c.StepMonitors() - c.ReplaceMemberBinary(1, version.V3_7) - c.StepMonitors() - c.ReplaceMemberBinary(2, version.V3_7) - c.StepMonitors() - c.StepMonitors() - - assert.Equal(t, newCluster(lg, 3, version.V3_7), c) -} - -func TestDowngradeSingleNode(t *testing.T) { - lg := zaptest.NewLogger(t) - c := newCluster(lg, 1, version.V3_6) - c.StepMonitors() - assert.Equal(t, newCluster(lg, 1, version.V3_6), c) - - assert.NoError(t, c.Version().DowngradeEnable(context.Background(), &version.V3_5)) - c.StepMonitors() - assert.Equal(t, version.V3_5, c.clusterVersion) - - c.ReplaceMemberBinary(0, version.V3_5) - c.StepMonitors() - - assert.Equal(t, newCluster(lg, 1, version.V3_5), c) -} - -func TestDowngradeThreeNode(t *testing.T) { - lg := zaptest.NewLogger(t) - c := 
newCluster(lg, 3, version.V3_6) - c.StepMonitors() - assert.Equal(t, newCluster(lg, 3, version.V3_6), c) - - assert.NoError(t, c.Version().DowngradeEnable(context.Background(), &version.V3_5)) - c.StepMonitors() - assert.Equal(t, version.V3_5, c.clusterVersion) - - c.ReplaceMemberBinary(0, version.V3_5) - c.StepMonitors() - c.ReplaceMemberBinary(1, version.V3_5) - c.StepMonitors() - c.ReplaceMemberBinary(2, version.V3_5) - c.StepMonitors() - - assert.Equal(t, newCluster(lg, 3, version.V3_5), c) -} - -func TestNewerMemberCanReconnectDuringDowngrade(t *testing.T) { - lg := zaptest.NewLogger(t) - c := newCluster(lg, 3, version.V3_6) - c.StepMonitors() - assert.Equal(t, newCluster(lg, 3, version.V3_6), c) - - assert.NoError(t, c.Version().DowngradeEnable(context.Background(), &version.V3_5)) - c.StepMonitors() - assert.Equal(t, version.V3_5, c.clusterVersion) - - c.ReplaceMemberBinary(0, version.V3_5) - c.StepMonitors() - - c.MemberCrashes(2) - c.StepMonitors() - c.MemberReconnects(2) - c.StepMonitors() - - c.ReplaceMemberBinary(1, version.V3_5) - c.StepMonitors() - c.ReplaceMemberBinary(2, version.V3_5) - c.StepMonitors() - - assert.Equal(t, newCluster(lg, 3, version.V3_5), c) -} - -func newCluster(lg *zap.Logger, memberCount int, ver semver.Version) *clusterMock { - cluster := &clusterMock{ - lg: lg, - clusterVersion: ver, - members: make([]*memberMock, 0, memberCount), - } - majorMinVer := semver.Version{Major: ver.Major, Minor: ver.Minor} - for i := 0; i < memberCount; i++ { - m := &memberMock{ - isRunning: true, - cluster: cluster, - serverVersion: ver, - storageVersion: majorMinVer, - } - m.monitor = NewMonitor(lg.Named(fmt.Sprintf("m%d", i)), m) - cluster.members = append(cluster.members, m) - } - cluster.members[0].isLeader = true - return cluster -} - -func (c *clusterMock) StepMonitors() { - // Execute monitor functions in random order as it is not guaranteed - var fs []func() - for _, m := range c.members { - fs = append(fs, 
m.monitor.UpdateStorageVersionIfNeeded) - if m.isLeader { - fs = append(fs, m.monitor.CancelDowngradeIfNeeded, func() { m.monitor.UpdateClusterVersionIfNeeded() }) - } - } - rand.Shuffle(len(fs), func(i, j int) { - fs[i], fs[j] = fs[j], fs[i] - }) - for _, f := range fs { - f() - } -} - -type clusterMock struct { - lg *zap.Logger - clusterVersion semver.Version - downgradeInfo *DowngradeInfo - members []*memberMock -} - -func (c *clusterMock) Version() *Manager { - return NewManager(c.lg, c.members[0]) -} - -func (c *clusterMock) MembersVersions() map[string]*version.Versions { - result := map[string]*version.Versions{} - for i, m := range c.members { - if m.isRunning { - result[fmt.Sprintf("%d", i)] = &version.Versions{ - Server: m.serverVersion.String(), - Cluster: c.clusterVersion.String(), - } - } - } - return result -} - -func (c *clusterMock) ReplaceMemberBinary(mid int, newServerVersion semver.Version) { - MustDetectDowngrade(c.lg, &c.members[mid].serverVersion, &c.clusterVersion) - c.members[mid].serverVersion = newServerVersion -} - -func (c *clusterMock) MemberCrashes(mid int) { - c.members[mid].isRunning = false -} - -func (c *clusterMock) MemberReconnects(mid int) { - MustDetectDowngrade(c.lg, &c.members[mid].serverVersion, &c.clusterVersion) - c.members[mid].isRunning = true -} - -type memberMock struct { - cluster *clusterMock - - isRunning bool - isLeader bool - serverVersion semver.Version - storageVersion semver.Version - monitor *Monitor -} - -var _ Server = (*memberMock)(nil) - -func (m *memberMock) UpdateClusterVersion(version string) { - m.cluster.clusterVersion = *semver.New(version) -} - -func (m *memberMock) LinearizableReadNotify(ctx context.Context) error { - return nil -} - -func (m *memberMock) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { - m.cluster.downgradeInfo = &DowngradeInfo{ - TargetVersion: targetVersion.String(), - Enabled: true, - } - return nil -} - -func (m *memberMock) 
DowngradeCancel(context.Context) error { - m.cluster.downgradeInfo = nil - return nil -} - -func (m *memberMock) GetClusterVersion() *semver.Version { - return &m.cluster.clusterVersion -} - -func (m *memberMock) GetDowngradeInfo() *DowngradeInfo { - return m.cluster.downgradeInfo -} - -func (m *memberMock) GetMembersVersions() map[string]*version.Versions { - return m.cluster.MembersVersions() -} - -func (m *memberMock) GetStorageVersion() *semver.Version { - return &m.storageVersion -} - -func (m *memberMock) UpdateStorageVersion(v semver.Version) error { - m.storageVersion = v - return nil -} - -func (m *memberMock) TriggerSnapshot() { -} diff --git a/server/etcdserver/zap_raft_test.go b/server/etcdserver/zap_raft_test.go deleted file mode 100644 index 0952ac8c23a..00000000000 --- a/server/etcdserver/zap_raft_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - - "go.etcd.io/etcd/client/pkg/v3/logutil" -) - -func TestNewRaftLogger(t *testing.T) { - logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano())) - defer os.RemoveAll(logPath) - - lcfg := &zap.Config{ - Level: zap.NewAtomicLevelAt(zap.DebugLevel), - Development: false, - Sampling: &zap.SamplingConfig{ - Initial: 100, - Thereafter: 100, - }, - Encoding: "json", - EncoderConfig: logutil.DefaultZapLoggerConfig.EncoderConfig, - OutputPaths: []string{logPath}, - ErrorOutputPaths: []string{logPath}, - } - gl, err := NewRaftLogger(lcfg) - if err != nil { - t.Fatal(err) - } - - gl.Info("etcd-logutil-1") - data, err := os.ReadFile(logPath) - if err != nil { - t.Fatal(err) - } - if !bytes.Contains(data, []byte("etcd-logutil-1")) { - t.Fatalf("can't find data in log %q", string(data)) - } - - gl.Warning("etcd-logutil-2") - data, err = os.ReadFile(logPath) - if err != nil { - t.Fatal(err) - } - if !bytes.Contains(data, []byte("etcd-logutil-2")) { - t.Fatalf("can't find data in log %q", string(data)) - } - if !bytes.Contains(data, []byte("zap_raft_test.go:")) { - t.Fatalf("unexpected caller; %q", string(data)) - } -} - -func TestNewRaftLoggerFromZapCore(t *testing.T) { - buf := bytes.NewBuffer(nil) - syncer := zapcore.AddSync(buf) - cr := zapcore.NewCore( - zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig), - syncer, - zap.NewAtomicLevelAt(zap.InfoLevel), - ) - - lg := NewRaftLoggerFromZapCore(cr, syncer) - lg.Info("TestNewRaftLoggerFromZapCore") - txt := buf.String() - if !strings.Contains(txt, "TestNewRaftLoggerFromZapCore") { - t.Fatalf("unexpected log %q", txt) - } -} diff --git a/server/go.mod b/server/go.mod deleted file mode 100644 index ca5f5b1df48..00000000000 --- a/server/go.mod +++ /dev/null @@ -1,91 +0,0 @@ -module go.etcd.io/etcd/server/v3 - -go 1.19 - 
-require ( - github.com/coreos/go-semver v0.3.1 - github.com/coreos/go-systemd/v22 v22.5.0 - github.com/dustin/go-humanize v1.0.1 - github.com/gogo/protobuf v1.3.2 - github.com/golang-jwt/jwt/v4 v4.4.3 - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da - github.com/golang/protobuf v1.5.2 - github.com/google/btree v1.1.2 - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/jonboulle/clockwork v0.3.0 - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/client_model v0.3.0 - github.com/soheilhy/cmux v0.1.5 - github.com/spf13/cobra v1.6.1 - github.com/stretchr/testify v1.8.1 - github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 - github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 - go.etcd.io/bbolt v1.3.7 - go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 - go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 - go.opentelemetry.io/otel v1.11.2 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 - go.opentelemetry.io/otel/sdk v1.11.2 - go.uber.org/multierr v1.9.0 - go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e - golang.org/x/net v0.5.0 - golang.org/x/time v0.0.0-20220609170525-579cf78fd858 - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 - google.golang.org/grpc v1.51.0 - google.golang.org/protobuf v1.28.1 - gopkg.in/natefinch/lumberjack.v2 v2.0.0 - sigs.k8s.io/yaml v1.3.0 -) - -require ( - github.com/benbjohnson/clock v1.1.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // 
indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/gorilla/websocket v1.4.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.opentelemetry.io/otel/trace v1.11.2 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect -) - -replace ( - go.etcd.io/etcd/api/v3 => ../api - go.etcd.io/etcd/client/pkg/v3 => ../client/pkg - go.etcd.io/etcd/client/v2 => ../client/v2 - go.etcd.io/etcd/client/v3 => ../client/v3 - go.etcd.io/etcd/pkg/v3 => ../pkg -) - -// Bad imports are sometimes causing attempts to pull that code. -// This makes the error more explicit. 
-replace go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY - -replace go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY diff --git a/server/go.sum b/server/go.sum deleted file mode 100644 index 99e3f27ebde..00000000000 --- a/server/go.sum +++ /dev/null @@ -1,651 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery 
v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= 
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 
h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= -github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common 
v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= 
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/xiang90/probing 
v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a h1:Znv2XJyAf/fsJsFNt9toO8uyXwwHQ44wxqsvdSxipj4= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a/go.mod h1:eMshmuwXLWZrjHXN8ZgYrOMQRSbHqi5M84DEZWhG+o4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod 
h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= -go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU= -go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap 
v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= 
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod 
h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc 
v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/server/lease/doc.go b/server/lease/doc.go deleted file mode 100644 index a74eaf76fc5..00000000000 --- a/server/lease/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package lease provides an interface and implementation for time-limited leases over arbitrary resources. -package lease diff --git a/server/lease/lease.go b/server/lease/lease.go deleted file mode 100644 index 308d5fe23ec..00000000000 --- a/server/lease/lease.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "math" - "sync" - "time" - - "go.etcd.io/etcd/server/v3/lease/leasepb" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -type Lease struct { - ID LeaseID - ttl int64 // time to live of the lease in seconds - remainingTTL int64 // remaining time to live in seconds, if zero valued it is considered unset and the full ttl should be used - // expiryMu protects concurrent accesses to expiry - expiryMu sync.RWMutex - // expiry is time when lease should expire. 
no expiration when expiry.IsZero() is true - expiry time.Time - - // mu protects concurrent accesses to itemSet - mu sync.RWMutex - itemSet map[LeaseItem]struct{} - revokec chan struct{} -} - -func (l *Lease) expired() bool { - return l.Remaining() <= 0 -} - -func (l *Lease) persistTo(b backend.Backend) { - lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL} - tx := b.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - schema.MustUnsafePutLease(tx, &lpb) -} - -// TTL returns the TTL of the Lease. -func (l *Lease) TTL() int64 { - return l.ttl -} - -// RemainingTTL returns the last checkpointed remaining TTL of the lease. -func (l *Lease) getRemainingTTL() int64 { - if l.remainingTTL > 0 { - return l.remainingTTL - } - return l.ttl -} - -// refresh refreshes the expiry of the lease. -func (l *Lease) refresh(extend time.Duration) { - newExpiry := time.Now().Add(extend + time.Duration(l.getRemainingTTL())*time.Second) - l.expiryMu.Lock() - defer l.expiryMu.Unlock() - l.expiry = newExpiry -} - -// forever sets the expiry of lease to be forever. -func (l *Lease) forever() { - l.expiryMu.Lock() - defer l.expiryMu.Unlock() - l.expiry = forever -} - -// Keys returns all the keys attached to the lease. -func (l *Lease) Keys() []string { - l.mu.RLock() - keys := make([]string, 0, len(l.itemSet)) - for k := range l.itemSet { - keys = append(keys, k.Key) - } - l.mu.RUnlock() - return keys -} - -// Remaining returns the remaining time of the lease. -func (l *Lease) Remaining() time.Duration { - l.expiryMu.RLock() - defer l.expiryMu.RUnlock() - if l.expiry.IsZero() { - return time.Duration(math.MaxInt64) - } - return time.Until(l.expiry) -} - -type LeaseItem struct { - Key string -} - -// leasesByExpiry implements the sort.Interface. 
-type leasesByExpiry []*Lease - -func (le leasesByExpiry) Len() int { return len(le) } -func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } -func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } diff --git a/server/lease/lease_queue.go b/server/lease/lease_queue.go deleted file mode 100644 index b5890ecbdda..00000000000 --- a/server/lease/lease_queue.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "container/heap" - "time" -) - -// LeaseWithTime contains lease object with a time. -// For the lessor's lease heap, time identifies the lease expiration time. -// For the lessor's lease checkpoint heap, the time identifies the next lease checkpoint time. 
-type LeaseWithTime struct { - id LeaseID - time time.Time - index int -} - -type LeaseQueue []*LeaseWithTime - -func (pq LeaseQueue) Len() int { return len(pq) } - -func (pq LeaseQueue) Less(i, j int) bool { - return pq[i].time.Before(pq[j].time) -} - -func (pq LeaseQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *LeaseQueue) Push(x interface{}) { - n := len(*pq) - item := x.(*LeaseWithTime) - item.index = n - *pq = append(*pq, item) -} - -func (pq *LeaseQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - item.index = -1 // for safety - *pq = old[0 : n-1] - return item -} - -// LeaseExpiredNotifier is a queue used to notify lessor to revoke expired lease. -// Only save one item for a lease, `Register` will update time of the corresponding lease. -type LeaseExpiredNotifier struct { - m map[LeaseID]*LeaseWithTime - queue LeaseQueue -} - -func newLeaseExpiredNotifier() *LeaseExpiredNotifier { - return &LeaseExpiredNotifier{ - m: make(map[LeaseID]*LeaseWithTime), - queue: make(LeaseQueue, 0), - } -} - -func (mq *LeaseExpiredNotifier) Init() { - heap.Init(&mq.queue) - mq.m = make(map[LeaseID]*LeaseWithTime) - for _, item := range mq.queue { - mq.m[item.id] = item - } -} - -func (mq *LeaseExpiredNotifier) RegisterOrUpdate(item *LeaseWithTime) { - if old, ok := mq.m[item.id]; ok { - old.time = item.time - heap.Fix(&mq.queue, old.index) - } else { - heap.Push(&mq.queue, item) - mq.m[item.id] = item - } -} - -func (mq *LeaseExpiredNotifier) Unregister() *LeaseWithTime { - item := heap.Pop(&mq.queue).(*LeaseWithTime) - delete(mq.m, item.id) - return item -} - -func (mq *LeaseExpiredNotifier) Peek() *LeaseWithTime { - if mq.Len() == 0 { - return nil - } - return mq.queue[0] -} - -func (mq *LeaseExpiredNotifier) Len() int { - return len(mq.m) -} diff --git a/server/lease/lease_queue_test.go b/server/lease/lease_queue_test.go deleted file mode 100644 index a25af1a4f8d..00000000000 --- 
a/server/lease/lease_queue_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "testing" - "time" -) - -func TestLeaseQueue(t *testing.T) { - expiredRetryInterval := 100 * time.Millisecond - le := &lessor{ - leaseExpiredNotifier: newLeaseExpiredNotifier(), - leaseMap: make(map[LeaseID]*Lease), - expiredLeaseRetryInterval: expiredRetryInterval, - } - le.leaseExpiredNotifier.Init() - - // insert in reverse order of expiration time - for i := 50; i >= 1; i-- { - now := time.Now() - exp := now.Add(time.Hour) - if i == 1 { - exp = now - } - le.leaseMap[LeaseID(i)] = &Lease{ID: LeaseID(i)} - le.leaseExpiredNotifier.RegisterOrUpdate(&LeaseWithTime{id: LeaseID(i), time: exp}) - } - - // first element is expired. 
- if le.leaseExpiredNotifier.Peek().id != LeaseID(1) { - t.Fatalf("first item expected lease ID %d, got %d", LeaseID(1), le.leaseExpiredNotifier.Peek().id) - } - - existExpiredEvent := func() { - l, more := le.expireExists() - if l == nil { - t.Fatalf("expect expiry lease exists") - } - if l.ID != 1 { - t.Fatalf("first item expected lease ID %d, got %d", 1, l.ID) - } - if more { - t.Fatal("expect no more expiry lease") - } - - if le.leaseExpiredNotifier.Len() != 50 { - t.Fatalf("expected the expired lease to be pushed back to the heap, heap size got %d", le.leaseExpiredNotifier.Len()) - } - - if le.leaseExpiredNotifier.Peek().id != LeaseID(1) { - t.Fatalf("first item expected lease ID %d, got %d", LeaseID(1), le.leaseExpiredNotifier.Peek().id) - } - } - - noExpiredEvent := func() { - // re-acquire the expired item, nothing exists - l, more := le.expireExists() - if l != nil { - t.Fatal("expect no expiry lease exists") - } - if more { - t.Fatal("expect no more expiry lease") - } - } - - existExpiredEvent() // first acquire - noExpiredEvent() // second acquire - time.Sleep(expiredRetryInterval) - existExpiredEvent() // acquire after retry interval -} diff --git a/server/lease/leasehttp/doc.go b/server/lease/leasehttp/doc.go deleted file mode 100644 index 8177a37b663..00000000000 --- a/server/lease/leasehttp/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package leasehttp serves lease renewals made through HTTP requests. -package leasehttp diff --git a/server/lease/leasehttp/http.go b/server/lease/leasehttp/http.go deleted file mode 100644 index 542c3a82a0c..00000000000 --- a/server/lease/leasehttp/http.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package leasehttp - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net/http" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/pkg/v3/httputil" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/lease/leasepb" -) - -var ( - LeasePrefix = "/leases" - LeaseInternalPrefix = "/leases/internal" - applyTimeout = time.Second - ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out") -) - -// NewHandler returns an http Handler for lease renewals -func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler { - return &leaseHandler{l, waitch} -} - -type leaseHandler struct { - l lease.Lessor - waitch func() <-chan struct{} -} - -func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - defer r.Body.Close() - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, "error reading body", http.StatusBadRequest) - return - } - - var v []byte - switch r.URL.Path 
{ - case LeasePrefix: - lreq := pb.LeaseKeepAliveRequest{} - if uerr := lreq.Unmarshal(b); uerr != nil { - http.Error(w, "error unmarshalling request", http.StatusBadRequest) - return - } - select { - case <-h.waitch(): - case <-time.After(applyTimeout): - http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) - return - } - ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID)) - if rerr != nil { - if rerr == lease.ErrLeaseNotFound { - http.Error(w, rerr.Error(), http.StatusNotFound) - return - } - - http.Error(w, rerr.Error(), http.StatusBadRequest) - return - } - // TODO: fill out ResponseHeader - resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl} - v, err = resp.Marshal() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - case LeaseInternalPrefix: - lreq := leasepb.LeaseInternalRequest{} - if lerr := lreq.Unmarshal(b); lerr != nil { - http.Error(w, "error unmarshalling request", http.StatusBadRequest) - return - } - select { - case <-h.waitch(): - case <-time.After(applyTimeout): - http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) - return - } - l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID)) - if l == nil { - http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound) - return - } - // TODO: fill out ResponseHeader - resp := &leasepb.LeaseInternalResponse{ - LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{ - Header: &pb.ResponseHeader{}, - ID: lreq.LeaseTimeToLiveRequest.ID, - TTL: int64(l.Remaining().Seconds()), - GrantedTTL: l.TTL(), - }, - } - if lreq.LeaseTimeToLiveRequest.Keys { - ks := l.Keys() - kbs := make([][]byte, len(ks)) - for i := range ks { - kbs[i] = []byte(ks[i]) - } - resp.LeaseTimeToLiveResponse.Keys = kbs - } - - v, err = resp.Marshal() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - default: - http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest) - return 
- } - - w.Header().Set("Content-Type", "application/protobuf") - w.Write(v) -} - -// RenewHTTP renews a lease at a given primary server. -// TODO: Batch request in future? -func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) { - // will post lreq protobuf to leader - lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal() - if err != nil { - return -1, err - } - - cc := &http.Client{Transport: rt} - req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) - if err != nil { - return -1, err - } - req.Header.Set("Content-Type", "application/protobuf") - req.Cancel = ctx.Done() - - resp, err := cc.Do(req) - if err != nil { - return -1, err - } - b, err := readResponse(resp) - if err != nil { - return -1, err - } - - if resp.StatusCode == http.StatusRequestTimeout { - return -1, ErrLeaseHTTPTimeout - } - - if resp.StatusCode == http.StatusNotFound { - return -1, lease.ErrLeaseNotFound - } - - if resp.StatusCode != http.StatusOK { - return -1, fmt.Errorf("lease: unknown error(%s)", string(b)) - } - - lresp := &pb.LeaseKeepAliveResponse{} - if err := lresp.Unmarshal(b); err != nil { - return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) - } - if lresp.ID != int64(id) { - return -1, fmt.Errorf("lease: renew id mismatch") - } - return lresp.TTL, nil -} - -// TimeToLiveHTTP retrieves lease information of the given lease ID. 
-func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) { - // will post lreq protobuf to leader - lreq, err := (&leasepb.LeaseInternalRequest{ - LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{ - ID: int64(id), - Keys: keys, - }, - }).Marshal() - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/protobuf") - - req = req.WithContext(ctx) - - cc := &http.Client{Transport: rt} - var b []byte - // buffer errc channel so that errc don't block inside the go routinue - resp, err := cc.Do(req) - if err != nil { - return nil, err - } - b, err = readResponse(resp) - if err != nil { - return nil, err - } - if resp.StatusCode == http.StatusRequestTimeout { - return nil, ErrLeaseHTTPTimeout - } - if resp.StatusCode == http.StatusNotFound { - return nil, lease.ErrLeaseNotFound - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("lease: unknown error(%s)", string(b)) - } - - lresp := &leasepb.LeaseInternalResponse{} - if err := lresp.Unmarshal(b); err != nil { - return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) - } - if lresp.LeaseTimeToLiveResponse.ID != int64(id) { - return nil, fmt.Errorf("lease: TTL id mismatch") - } - return lresp, nil -} - -func readResponse(resp *http.Response) (b []byte, err error) { - b, err = io.ReadAll(resp.Body) - httputil.GracefulClose(resp) - return -} diff --git a/server/lease/leasehttp/http_test.go b/server/lease/leasehttp/http_test.go deleted file mode 100644 index 7fb284ff41f..00000000000 --- a/server/lease/leasehttp/http_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package leasehttp - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func TestRenewHTTP(t *testing.T) { - lg := zaptest.NewLogger(t) - be, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, be) - - le := lease.NewLessor(lg, be, nil, lease.LessorConfig{MinLeaseTTL: int64(5)}) - le.Promote(time.Second) - l, err := le.Grant(1, int64(5)) - if err != nil { - t.Fatalf("failed to create lease: %v", err) - } - - ts := httptest.NewServer(NewHandler(le, waitReady)) - defer ts.Close() - - ttl, err := RenewHTTP(context.TODO(), l.ID, ts.URL+LeasePrefix, http.DefaultTransport) - if err != nil { - t.Fatal(err) - } - if ttl != 5 { - t.Fatalf("ttl expected 5, got %d", ttl) - } -} - -func TestTimeToLiveHTTP(t *testing.T) { - lg := zaptest.NewLogger(t) - be, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, be) - - le := lease.NewLessor(lg, be, nil, lease.LessorConfig{MinLeaseTTL: int64(5)}) - le.Promote(time.Second) - l, err := le.Grant(1, int64(5)) - if err != nil { - t.Fatalf("failed to create lease: %v", err) - } - - ts := httptest.NewServer(NewHandler(le, waitReady)) - defer ts.Close() - - resp, err := TimeToLiveHTTP(context.TODO(), l.ID, true, ts.URL+LeaseInternalPrefix, http.DefaultTransport) - if err != nil { - t.Fatal(err) - } - if resp.LeaseTimeToLiveResponse.ID != 1 { - t.Fatalf("lease id expected 1, got %d", 
resp.LeaseTimeToLiveResponse.ID) - } - if resp.LeaseTimeToLiveResponse.GrantedTTL != 5 { - t.Fatalf("granted TTL expected 5, got %d", resp.LeaseTimeToLiveResponse.GrantedTTL) - } -} - -func TestRenewHTTPTimeout(t *testing.T) { - testApplyTimeout(t, func(l *lease.Lease, serverURL string) error { - _, err := RenewHTTP(context.TODO(), l.ID, serverURL+LeasePrefix, http.DefaultTransport) - return err - }) -} - -func TestTimeToLiveHTTPTimeout(t *testing.T) { - testApplyTimeout(t, func(l *lease.Lease, serverURL string) error { - _, err := TimeToLiveHTTP(context.TODO(), l.ID, true, serverURL+LeaseInternalPrefix, http.DefaultTransport) - return err - }) -} - -func testApplyTimeout(t *testing.T, f func(*lease.Lease, string) error) { - lg := zaptest.NewLogger(t) - be, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, be) - - le := lease.NewLessor(lg, be, nil, lease.LessorConfig{MinLeaseTTL: int64(5)}) - le.Promote(time.Second) - l, err := le.Grant(1, int64(5)) - if err != nil { - t.Fatalf("failed to create lease: %v", err) - } - - ts := httptest.NewServer(NewHandler(le, waitNotReady)) - defer ts.Close() - err = f(l, ts.URL) - if err == nil { - t.Fatalf("expected timeout error, got nil") - } - if err.Error() != ErrLeaseHTTPTimeout.Error() { - t.Fatalf("expected (%v), got (%v)", ErrLeaseHTTPTimeout.Error(), err.Error()) - } -} - -func waitReady() <-chan struct{} { - ch := make(chan struct{}) - close(ch) - return ch -} - -func waitNotReady() <-chan struct{} { - return nil -} diff --git a/server/lease/leasepb/lease.pb.go b/server/lease/leasepb/lease.pb.go deleted file mode 100644 index 8a1c54922fc..00000000000 --- a/server/lease/leasepb/lease.pb.go +++ /dev/null @@ -1,733 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: lease.proto - -package leasepb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - etcdserverpb "go.etcd.io/etcd/api/v3/etcdserverpb" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Lease struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` - RemainingTTL int64 `protobuf:"varint,3,opt,name=RemainingTTL,proto3" json:"RemainingTTL,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Lease) Reset() { *m = Lease{} } -func (m *Lease) String() string { return proto.CompactTextString(m) } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_3dd57e402472b33a, []int{0} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) -} - -var xxx_messageInfo_Lease 
proto.InternalMessageInfo - -type LeaseInternalRequest struct { - LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest,proto3" json:"LeaseTimeToLiveRequest,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} } -func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseInternalRequest) ProtoMessage() {} -func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3dd57e402472b33a, []int{1} -} -func (m *LeaseInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseInternalRequest.Merge(m, src) -} -func (m *LeaseInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseInternalRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseInternalRequest proto.InternalMessageInfo - -type LeaseInternalResponse struct { - LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse,proto3" json:"LeaseTimeToLiveResponse,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} } -func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseInternalResponse) ProtoMessage() {} -func 
(*LeaseInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3dd57e402472b33a, []int{2} -} -func (m *LeaseInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseInternalResponse.Merge(m, src) -} -func (m *LeaseInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseInternalResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseInternalResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseInternalResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Lease)(nil), "leasepb.Lease") - proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest") - proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse") -} - -func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) } - -var fileDescriptor_3dd57e402472b33a = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, - 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, - 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x3e, 0xb5, 0x24, 0x39, 0x45, - 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2, 0x2f, - 0x2a, 0x48, 0x86, 0x28, 0x50, 0xf2, 0xe5, 0x62, 0xf5, 0x01, 0x99, 0x20, 0xc4, 0xc7, 0xc5, 0xe4, - 0xe9, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x1c, 0xc4, 0xe4, 0xe9, 0x22, 0x24, 0xc0, 0xc5, 0x1c, - 0x12, 0xe2, 0x23, 0xc1, 0x04, 0x16, 0x00, 0x31, 0x85, 0x94, 0xb8, 0x78, 0x82, 
0x52, 0x73, 0x13, - 0x33, 0xf3, 0x32, 0xf3, 0xd2, 0x41, 0x52, 0xcc, 0x60, 0x29, 0x14, 0x31, 0xa5, 0x12, 0x2e, 0x11, - 0xb0, 0x71, 0x9e, 0x79, 0x25, 0xa9, 0x45, 0x79, 0x89, 0x39, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, - 0x25, 0x42, 0x31, 0x5c, 0x62, 0x60, 0xf1, 0x90, 0xcc, 0xdc, 0xd4, 0x90, 0x7c, 0x9f, 0xcc, 0xb2, - 0x54, 0xa8, 0x0c, 0xd8, 0x46, 0x6e, 0x23, 0x15, 0x3d, 0x64, 0xf7, 0xe9, 0x61, 0x57, 0x1b, 0x84, - 0xc3, 0x0c, 0xa5, 0x0a, 0x2e, 0x51, 0x34, 0x5b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe2, - 0xb9, 0xc4, 0x31, 0xb4, 0x40, 0xa4, 0xa0, 0xf6, 0xaa, 0x12, 0xb0, 0x17, 0xa2, 0x38, 0x08, 0x97, - 0x29, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3, - 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x0e, - 0x5f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x8a, 0x94, 0xb9, 0xae, 0x01, 0x00, 0x00, -} - -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RemainingTTL != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.RemainingTTL)) - i-- - dAtA[i] = 0x18 - } - if m.TTL != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x10 - } - if m.ID != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.LeaseTimeToLiveRequest != nil { - { - size, err := m.LeaseTimeToLiveRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.LeaseTimeToLiveResponse != nil { - { - size, err := m.LeaseTimeToLiveResponse.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintLease(dAtA []byte, offset int, v uint64) int { - offset -= sovLease(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != 0 { - n += 1 + sovLease(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovLease(uint64(m.TTL)) - } - if m.RemainingTTL 
!= 0 { - n += 1 + sovLease(uint64(m.RemainingTTL)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseInternalRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.LeaseTimeToLiveRequest != nil { - l = m.LeaseTimeToLiveRequest.Size() - n += 1 + l + sovLease(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LeaseInternalResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.LeaseTimeToLiveResponse != nil { - l = m.LeaseTimeToLiveResponse.Size() - n += 1 + l + sovLease(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovLease(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLease(x uint64) (n int) { - return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RemainingTTL", wireType) - } - m.RemainingTTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RemainingTTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseTimeToLiveRequest == nil { - m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{} - } - if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseTimeToLiveResponse == nil { - m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{} - } - if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLease(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLease - } - iNdEx += length - case 3: - depth++ - case 
4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLease - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLease - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") -) diff --git a/server/lease/lessor.go b/server/lease/lessor.go deleted file mode 100644 index 860de54f45f..00000000000 --- a/server/lease/lessor.go +++ /dev/null @@ -1,862 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "container/heap" - "context" - "errors" - "math" - "sort" - "sync" - "time" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/server/v3/lease/leasepb" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -// NoLease is a special LeaseID representing the absence of a lease. 
-const NoLease = LeaseID(0) - -// MaxLeaseTTL is the maximum lease TTL value -const MaxLeaseTTL = 9000000000 - -var ( - forever = time.Time{} - - // maximum number of leases to revoke per second; configurable for tests - leaseRevokeRate = 1000 - - // maximum number of lease checkpoints recorded to the consensus log per second; configurable for tests - leaseCheckpointRate = 1000 - - // the default interval of lease checkpoint - defaultLeaseCheckpointInterval = 5 * time.Minute - - // maximum number of lease checkpoints to batch into a single consensus log entry - maxLeaseCheckpointBatchSize = 1000 - - // the default interval to check if the expired lease is revoked - defaultExpiredleaseRetryInterval = 3 * time.Second - - ErrNotPrimary = errors.New("not a primary lessor") - ErrLeaseNotFound = errors.New("lease not found") - ErrLeaseExists = errors.New("lease already exists") - ErrLeaseTTLTooLarge = errors.New("too large lease TTL") -) - -// TxnDelete is a TxnWrite that only permits deletes. Defined here -// to avoid circular dependency with mvcc. -type TxnDelete interface { - DeleteRange(key, end []byte) (n, rev int64) - End() -} - -// RangeDeleter is a TxnDelete constructor. -type RangeDeleter func() TxnDelete - -// Checkpointer permits checkpointing of lease remaining TTLs to the consensus log. Defined here to -// avoid circular dependency with mvcc. -type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest) - -type LeaseID int64 - -// Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. -type Lessor interface { - // SetRangeDeleter lets the lessor create TxnDeletes to the store. - // Lessor deletes the items in the revoked or expired lease by creating - // new TxnDeletes. - SetRangeDeleter(rd RangeDeleter) - - SetCheckpointer(cp Checkpointer) - - // Grant grants a lease that expires at least after TTL seconds. - Grant(id LeaseID, ttl int64) (*Lease, error) - // Revoke revokes a lease with given ID. 
The item attached to the - // given lease will be removed. If the ID does not exist, an error - // will be returned. - Revoke(id LeaseID) error - - // Checkpoint applies the remainingTTL of a lease. The remainingTTL is used in Promote to set - // the expiry of leases to less than the full TTL when possible. - Checkpoint(id LeaseID, remainingTTL int64) error - - // Attach attaches given leaseItem to the lease with given LeaseID. - // If the lease does not exist, an error will be returned. - Attach(id LeaseID, items []LeaseItem) error - - // GetLease returns LeaseID for given item. - // If no lease found, NoLease value will be returned. - GetLease(item LeaseItem) LeaseID - - // Detach detaches given leaseItem from the lease with given LeaseID. - // If the lease does not exist, an error will be returned. - Detach(id LeaseID, items []LeaseItem) error - - // Promote promotes the lessor to be the primary lessor. Primary lessor manages - // the expiration and renew of leases. - // Newly promoted lessor renew the TTL of all lease to extend + previous TTL. - Promote(extend time.Duration) - - // Demote demotes the lessor from being the primary lessor. - Demote() - - // Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist, - // an error will be returned. - Renew(id LeaseID) (int64, error) - - // Lookup gives the lease at a given lease id, if any - Lookup(id LeaseID) *Lease - - // Leases lists all leases. - Leases() []*Lease - - // ExpiredLeasesC returns a chan that is used to receive expired leases. - ExpiredLeasesC() <-chan []*Lease - - // Recover recovers the lessor state from the given backend and RangeDeleter. - Recover(b backend.Backend, rd RangeDeleter) - - // Stop stops the lessor for managing leases. The behavior of calling Stop multiple - // times is undefined. - Stop() -} - -// lessor implements Lessor interface. -// TODO: use clockwork for testability. 
-type lessor struct { - mu sync.RWMutex - - // demotec is set when the lessor is the primary. - // demotec will be closed if the lessor is demoted. - demotec chan struct{} - - leaseMap map[LeaseID]*Lease - leaseExpiredNotifier *LeaseExpiredNotifier - leaseCheckpointHeap LeaseQueue - itemMap map[LeaseItem]LeaseID - - // When a lease expires, the lessor will delete the - // leased range (or key) by the RangeDeleter. - rd RangeDeleter - - // When a lease's deadline should be persisted to preserve the remaining TTL across leader - // elections and restarts, the lessor will checkpoint the lease by the Checkpointer. - cp Checkpointer - - // backend to persist leases. We only persist lease ID and expiry for now. - // The leased items can be recovered by iterating all the keys in kv. - b backend.Backend - - // minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any - // requests for shorter TTLs are extended to the minimum TTL. - minLeaseTTL int64 - - expiredC chan []*Lease - // stopC is a channel whose closure indicates that the lessor should be stopped. - stopC chan struct{} - // doneC is a channel whose closure indicates that the lessor is stopped. - doneC chan struct{} - - lg *zap.Logger - - // Wait duration between lease checkpoints. - checkpointInterval time.Duration - // the interval to check if the expired lease is revoked - expiredLeaseRetryInterval time.Duration - // whether lessor should always persist remaining TTL (always enabled in v3.6). - checkpointPersist bool - // cluster is used to adapt lessor logic based on cluster version - cluster cluster -} - -type cluster interface { - // Version is the cluster-wide minimum major.minor version. 
- Version() *semver.Version -} - -type LessorConfig struct { - MinLeaseTTL int64 - CheckpointInterval time.Duration - ExpiredLeasesRetryInterval time.Duration - CheckpointPersist bool -} - -func NewLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) Lessor { - return newLessor(lg, b, cluster, cfg) -} - -func newLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) *lessor { - checkpointInterval := cfg.CheckpointInterval - expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval - if checkpointInterval == 0 { - checkpointInterval = defaultLeaseCheckpointInterval - } - if expiredLeaseRetryInterval == 0 { - expiredLeaseRetryInterval = defaultExpiredleaseRetryInterval - } - l := &lessor{ - leaseMap: make(map[LeaseID]*Lease), - itemMap: make(map[LeaseItem]LeaseID), - leaseExpiredNotifier: newLeaseExpiredNotifier(), - leaseCheckpointHeap: make(LeaseQueue, 0), - b: b, - minLeaseTTL: cfg.MinLeaseTTL, - checkpointInterval: checkpointInterval, - expiredLeaseRetryInterval: expiredLeaseRetryInterval, - checkpointPersist: cfg.CheckpointPersist, - // expiredC is a small buffered chan to avoid unnecessary blocking. - expiredC: make(chan []*Lease, 16), - stopC: make(chan struct{}), - doneC: make(chan struct{}), - lg: lg, - cluster: cluster, - } - l.initAndRecover() - - go l.runLoop() - - return l -} - -// isPrimary indicates if this lessor is the primary lessor. The primary -// lessor manages lease expiration and renew. -// -// in etcd, raft leader is the primary. Thus there might be two primary -// leaders at the same time (raft allows concurrent leader but with different term) -// for at most a leader election timeout. -// The old primary leader cannot affect the correctness since its proposal has a -// smaller term and will not be committed. -// -// TODO: raft follower do not forward lease management proposals. 
There might be a -// very small window (within second normally which depends on go scheduling) that -// a raft follow is the primary between the raft leader demotion and lessor demotion. -// Usually this should not be a problem. Lease should not be that sensitive to timing. -func (le *lessor) isPrimary() bool { - return le.demotec != nil -} - -func (le *lessor) SetRangeDeleter(rd RangeDeleter) { - le.mu.Lock() - defer le.mu.Unlock() - - le.rd = rd -} - -func (le *lessor) SetCheckpointer(cp Checkpointer) { - le.mu.Lock() - defer le.mu.Unlock() - - le.cp = cp -} - -func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) { - if id == NoLease { - return nil, ErrLeaseNotFound - } - - if ttl > MaxLeaseTTL { - return nil, ErrLeaseTTLTooLarge - } - - // TODO: when lessor is under high load, it should give out lease - // with longer TTL to reduce renew load. - l := &Lease{ - ID: id, - ttl: ttl, - itemSet: make(map[LeaseItem]struct{}), - revokec: make(chan struct{}), - } - - if l.ttl < le.minLeaseTTL { - l.ttl = le.minLeaseTTL - } - - le.mu.Lock() - defer le.mu.Unlock() - - if _, ok := le.leaseMap[id]; ok { - return nil, ErrLeaseExists - } - - if le.isPrimary() { - l.refresh(0) - } else { - l.forever() - } - - le.leaseMap[id] = l - l.persistTo(le.b) - - leaseTotalTTLs.Observe(float64(l.ttl)) - leaseGranted.Inc() - - if le.isPrimary() { - item := &LeaseWithTime{id: l.ID, time: l.expiry} - le.leaseExpiredNotifier.RegisterOrUpdate(item) - le.scheduleCheckpointIfNeeded(l) - } - - return l, nil -} - -func (le *lessor) Revoke(id LeaseID) error { - le.mu.Lock() - - l := le.leaseMap[id] - if l == nil { - le.mu.Unlock() - return ErrLeaseNotFound - } - - // We shouldn't delete the lease inside the transaction lock, otherwise - // it may lead to deadlock with Grant or Checkpoint operations, which - // acquire the le.mu firstly and then the batchTx lock. 
- delete(le.leaseMap, id) - - defer close(l.revokec) - // unlock before doing external work - le.mu.Unlock() - - if le.rd == nil { - return nil - } - - txn := le.rd() - - // sort keys so deletes are in same order among all members, - // otherwise the backend hashes will be different - keys := l.Keys() - sort.StringSlice(keys).Sort() - for _, key := range keys { - txn.DeleteRange([]byte(key), nil) - } - - // lease deletion needs to be in the same backend transaction with the - // kv deletion. Or we might end up with not executing the revoke or not - // deleting the keys if etcdserver fails in between. - schema.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)}) - - txn.End() - - leaseRevoked.Inc() - return nil -} - -func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error { - le.mu.Lock() - defer le.mu.Unlock() - - if l, ok := le.leaseMap[id]; ok { - // when checkpointing, we only update the remainingTTL, Promote is responsible for applying this to lease expiry - l.remainingTTL = remainingTTL - if le.shouldPersistCheckpoints() { - l.persistTo(le.b) - } - if le.isPrimary() { - // schedule the next checkpoint as needed - le.scheduleCheckpointIfNeeded(l) - } - } - return nil -} - -func (le *lessor) shouldPersistCheckpoints() bool { - cv := le.cluster.Version() - return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, version.V3_6)) -} - -func greaterOrEqual(first, second semver.Version) bool { - return !version.LessThan(first, second) -} - -// Renew renews an existing lease. If the given lease does not exist or -// has expired, an error will be returned. -func (le *lessor) Renew(id LeaseID) (int64, error) { - le.mu.RLock() - if !le.isPrimary() { - // forward renew request to primary instead of returning error. 
- le.mu.RUnlock() - return -1, ErrNotPrimary - } - - demotec := le.demotec - - l := le.leaseMap[id] - if l == nil { - le.mu.RUnlock() - return -1, ErrLeaseNotFound - } - // Clear remaining TTL when we renew if it is set - clearRemainingTTL := le.cp != nil && l.remainingTTL > 0 - - le.mu.RUnlock() - if l.expired() { - select { - // A expired lease might be pending for revoking or going through - // quorum to be revoked. To be accurate, renew request must wait for the - // deletion to complete. - case <-l.revokec: - return -1, ErrLeaseNotFound - // The expired lease might fail to be revoked if the primary changes. - // The caller will retry on ErrNotPrimary. - case <-demotec: - return -1, ErrNotPrimary - case <-le.stopC: - return -1, ErrNotPrimary - } - } - - // Clear remaining TTL when we renew if it is set - // By applying a RAFT entry only when the remainingTTL is already set, we limit the number - // of RAFT entries written per lease to a max of 2 per checkpoint interval. - if clearRemainingTTL { - le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), Remaining_TTL: 0}}}) - } - - le.mu.Lock() - l.refresh(0) - item := &LeaseWithTime{id: l.ID, time: l.expiry} - le.leaseExpiredNotifier.RegisterOrUpdate(item) - le.mu.Unlock() - - leaseRenewed.Inc() - return l.ttl, nil -} - -func (le *lessor) Lookup(id LeaseID) *Lease { - le.mu.RLock() - defer le.mu.RUnlock() - return le.leaseMap[id] -} - -func (le *lessor) unsafeLeases() []*Lease { - leases := make([]*Lease, 0, len(le.leaseMap)) - for _, l := range le.leaseMap { - leases = append(leases, l) - } - return leases -} - -func (le *lessor) Leases() []*Lease { - le.mu.RLock() - ls := le.unsafeLeases() - le.mu.RUnlock() - sort.Sort(leasesByExpiry(ls)) - return ls -} - -func (le *lessor) Promote(extend time.Duration) { - le.mu.Lock() - defer le.mu.Unlock() - - le.demotec = make(chan struct{}) - - // refresh the expiries of all leases. 
- for _, l := range le.leaseMap { - l.refresh(extend) - item := &LeaseWithTime{id: l.ID, time: l.expiry} - le.leaseExpiredNotifier.RegisterOrUpdate(item) - le.scheduleCheckpointIfNeeded(l) - } - - if len(le.leaseMap) < leaseRevokeRate { - // no possibility of lease pile-up - return - } - - // adjust expiries in case of overlap - leases := le.unsafeLeases() - sort.Sort(leasesByExpiry(leases)) - - baseWindow := leases[0].Remaining() - nextWindow := baseWindow + time.Second - expires := 0 - // have fewer expires than the total revoke rate so piled up leases - // don't consume the entire revoke limit - targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 - for _, l := range leases { - remaining := l.Remaining() - if remaining > nextWindow { - baseWindow = remaining - nextWindow = baseWindow + time.Second - expires = 1 - continue - } - expires++ - if expires <= targetExpiresPerSecond { - continue - } - rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) - // If leases are extended by n seconds, leases n seconds ahead of the - // base window should be extended by only one second. - rateDelay -= float64(remaining - baseWindow) - delay := time.Duration(rateDelay) - nextWindow = baseWindow + delay - l.refresh(delay + extend) - item := &LeaseWithTime{id: l.ID, time: l.expiry} - le.leaseExpiredNotifier.RegisterOrUpdate(item) - le.scheduleCheckpointIfNeeded(l) - } -} - -func (le *lessor) Demote() { - le.mu.Lock() - defer le.mu.Unlock() - - // set the expiries of all leases to forever - for _, l := range le.leaseMap { - l.forever() - } - - le.clearScheduledLeasesCheckpoints() - le.clearLeaseExpiredNotifier() - - if le.demotec != nil { - close(le.demotec) - le.demotec = nil - } -} - -// Attach attaches items to the lease with given ID. When the lease -// expires, the attached items will be automatically removed. -// If the given lease does not exist, an error will be returned. 
-func (le *lessor) Attach(id LeaseID, items []LeaseItem) error { - le.mu.Lock() - defer le.mu.Unlock() - - l := le.leaseMap[id] - if l == nil { - return ErrLeaseNotFound - } - - l.mu.Lock() - for _, it := range items { - l.itemSet[it] = struct{}{} - le.itemMap[it] = id - } - l.mu.Unlock() - return nil -} - -func (le *lessor) GetLease(item LeaseItem) LeaseID { - le.mu.RLock() - id := le.itemMap[item] - le.mu.RUnlock() - return id -} - -// Detach detaches items from the lease with given ID. -// If the given lease does not exist, an error will be returned. -func (le *lessor) Detach(id LeaseID, items []LeaseItem) error { - le.mu.Lock() - defer le.mu.Unlock() - - l := le.leaseMap[id] - if l == nil { - return ErrLeaseNotFound - } - - l.mu.Lock() - for _, it := range items { - delete(l.itemSet, it) - delete(le.itemMap, it) - } - l.mu.Unlock() - return nil -} - -func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) { - le.mu.Lock() - defer le.mu.Unlock() - - le.b = b - le.rd = rd - le.leaseMap = make(map[LeaseID]*Lease) - le.itemMap = make(map[LeaseItem]LeaseID) - le.initAndRecover() -} - -func (le *lessor) ExpiredLeasesC() <-chan []*Lease { - return le.expiredC -} - -func (le *lessor) Stop() { - close(le.stopC) - <-le.doneC -} - -func (le *lessor) runLoop() { - defer close(le.doneC) - - delayTicker := time.NewTicker(500 * time.Millisecond) - defer delayTicker.Stop() - - for { - le.revokeExpiredLeases() - le.checkpointScheduledLeases() - - select { - case <-delayTicker.C: - case <-le.stopC: - return - } - } -} - -// revokeExpiredLeases finds all leases past their expiry and sends them to expired channel for -// to be revoked. 
-func (le *lessor) revokeExpiredLeases() { - var ls []*Lease - - // rate limit - revokeLimit := leaseRevokeRate / 2 - - le.mu.RLock() - if le.isPrimary() { - ls = le.findExpiredLeases(revokeLimit) - } - le.mu.RUnlock() - - if len(ls) != 0 { - select { - case <-le.stopC: - return - case le.expiredC <- ls: - default: - // the receiver of expiredC is probably busy handling - // other stuff - // let's try this next time after 500ms - } - } -} - -// checkpointScheduledLeases finds all scheduled lease checkpoints that are due and -// submits them to the checkpointer to persist them to the consensus log. -func (le *lessor) checkpointScheduledLeases() { - // rate limit - for i := 0; i < leaseCheckpointRate/2; i++ { - var cps []*pb.LeaseCheckpoint - - le.mu.Lock() - if le.isPrimary() { - cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize) - } - le.mu.Unlock() - - if len(cps) != 0 { - le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps}) - } - if len(cps) < maxLeaseCheckpointBatchSize { - return - } - } -} - -func (le *lessor) clearScheduledLeasesCheckpoints() { - le.leaseCheckpointHeap = make(LeaseQueue, 0) -} - -func (le *lessor) clearLeaseExpiredNotifier() { - le.leaseExpiredNotifier = newLeaseExpiredNotifier() -} - -// expireExists returns "l" which is not nil if expiry items exist. -// It pops only when expiry item exists. -// "next" is true, to indicate that it may exist in next attempt. 
-func (le *lessor) expireExists() (l *Lease, next bool) { - if le.leaseExpiredNotifier.Len() == 0 { - return nil, false - } - - item := le.leaseExpiredNotifier.Peek() - l = le.leaseMap[item.id] - if l == nil { - // lease has expired or been revoked - // no need to revoke (nothing is expiry) - le.leaseExpiredNotifier.Unregister() // O(log N) - return nil, true - } - now := time.Now() - if now.Before(item.time) /* item.time: expiration time */ { - // Candidate expirations are caught up, reinsert this item - // and no need to revoke (nothing is expiry) - return nil, false - } - - // recheck if revoke is complete after retry interval - item.time = now.Add(le.expiredLeaseRetryInterval) - le.leaseExpiredNotifier.RegisterOrUpdate(item) - return l, false -} - -// findExpiredLeases loops leases in the leaseMap until reaching expired limit -// and returns the expired leases that needed to be revoked. -func (le *lessor) findExpiredLeases(limit int) []*Lease { - leases := make([]*Lease, 0, 16) - - for { - l, next := le.expireExists() - if l == nil && !next { - break - } - if next { - continue - } - - if l.expired() { - leases = append(leases, l) - - // reach expired limit - if len(leases) == limit { - break - } - } - } - - return leases -} - -func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) { - if le.cp == nil { - return - } - - if lease.getRemainingTTL() > int64(le.checkpointInterval.Seconds()) { - if le.lg != nil { - le.lg.Debug("Scheduling lease checkpoint", - zap.Int64("leaseID", int64(lease.ID)), - zap.Duration("intervalSeconds", le.checkpointInterval), - ) - } - heap.Push(&le.leaseCheckpointHeap, &LeaseWithTime{ - id: lease.ID, - time: time.Now().Add(le.checkpointInterval), - }) - } -} - -func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCheckpoint { - if le.cp == nil { - return nil - } - - now := time.Now() - var cps []*pb.LeaseCheckpoint - for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit { - lt := 
le.leaseCheckpointHeap[0] - if lt.time.After(now) /* lt.time: next checkpoint time */ { - return cps - } - heap.Pop(&le.leaseCheckpointHeap) - var l *Lease - var ok bool - if l, ok = le.leaseMap[lt.id]; !ok { - continue - } - if !now.Before(l.expiry) { - continue - } - remainingTTL := int64(math.Ceil(l.expiry.Sub(now).Seconds())) - if remainingTTL >= l.ttl { - continue - } - if le.lg != nil { - le.lg.Debug("Checkpointing lease", - zap.Int64("leaseID", int64(lt.id)), - zap.Int64("remainingTTL", remainingTTL), - ) - } - cps = append(cps, &pb.LeaseCheckpoint{ID: int64(lt.id), Remaining_TTL: remainingTTL}) - } - return cps -} - -func (le *lessor) initAndRecover() { - tx := le.b.BatchTx() - - tx.LockOutsideApply() - schema.UnsafeCreateLeaseBucket(tx) - lpbs := schema.MustUnsafeGetAllLeases(tx) - tx.Unlock() - for _, lpb := range lpbs { - ID := LeaseID(lpb.ID) - if lpb.TTL < le.minLeaseTTL { - lpb.TTL = le.minLeaseTTL - } - le.leaseMap[ID] = &Lease{ - ID: ID, - ttl: lpb.TTL, - // itemSet will be filled in when recover key-value pairs - // set expiry to forever, refresh when promoted - itemSet: make(map[LeaseItem]struct{}), - expiry: forever, - revokec: make(chan struct{}), - remainingTTL: lpb.RemainingTTL, - } - } - le.leaseExpiredNotifier.Init() - heap.Init(&le.leaseCheckpointHeap) - - le.b.ForceCommit() -} - -// FakeLessor is a fake implementation of Lessor interface. -// Used for testing only. 
-type FakeLessor struct{} - -func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {} - -func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {} - -func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil } - -func (fl *FakeLessor) Revoke(id LeaseID) error { return nil } - -func (fl *FakeLessor) Checkpoint(id LeaseID, remainingTTL int64) error { return nil } - -func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil } - -func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 } -func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil } - -func (fl *FakeLessor) Promote(extend time.Duration) {} - -func (fl *FakeLessor) Demote() {} - -func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil } - -func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil } - -func (fl *FakeLessor) Leases() []*Lease { return nil } - -func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil } - -func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {} - -func (fl *FakeLessor) Stop() {} - -type FakeTxnDelete struct { - backend.BatchTx -} - -func (ftd *FakeTxnDelete) DeleteRange(key, end []byte) (n, rev int64) { return 0, 0 } -func (ftd *FakeTxnDelete) End() { ftd.Unlock() } diff --git a/server/lease/lessor_bench_test.go b/server/lease/lessor_bench_test.go deleted file mode 100644 index 71e970e5f25..00000000000 --- a/server/lease/lessor_bench_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "math/rand" - "testing" - "time" - - "go.uber.org/zap" - - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func BenchmarkLessorGrant1000(b *testing.B) { benchmarkLessorGrant(1000, b) } -func BenchmarkLessorGrant100000(b *testing.B) { benchmarkLessorGrant(100000, b) } - -func BenchmarkLessorRevoke1000(b *testing.B) { benchmarkLessorRevoke(1000, b) } -func BenchmarkLessorRevoke100000(b *testing.B) { benchmarkLessorRevoke(100000, b) } - -func BenchmarkLessorRenew1000(b *testing.B) { benchmarkLessorRenew(1000, b) } -func BenchmarkLessorRenew100000(b *testing.B) { benchmarkLessorRenew(100000, b) } - -// BenchmarkLessorFindExpired10000 uses findExpired10000 replace findExpired1000, which takes too long. 
-func BenchmarkLessorFindExpired10000(b *testing.B) { benchmarkLessorFindExpired(10000, b) } -func BenchmarkLessorFindExpired100000(b *testing.B) { benchmarkLessorFindExpired(100000, b) } - -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - -const ( - // minTTL keep lease will not auto expire in benchmark - minTTL = 1000 - // maxTTL control repeat probability of ttls - maxTTL = 2000 -) - -func randomTTL(n int, min, max int64) (out []int64) { - for i := 0; i < n; i++ { - out = append(out, rand.Int63n(max-min)+min) - } - return out -} - -// demote lessor from being the primary, but don't change any lease's expiry -func demote(le *lessor) { - le.mu.Lock() - defer le.mu.Unlock() - close(le.demotec) - le.demotec = nil -} - -// return new lessor and tearDown to release resource -func setUp(t testing.TB) (le *lessor, tearDown func()) { - lg := zap.NewNop() - be, _ := betesting.NewDefaultTmpBackend(t) - // MinLeaseTTL is negative, so we can grant expired lease in benchmark. - // ExpiredLeasesRetryInterval should small, so benchmark of findExpired will recheck expired lease. 
- le = newLessor(lg, be, nil, LessorConfig{MinLeaseTTL: -1000, ExpiredLeasesRetryInterval: 10 * time.Microsecond}) - le.SetRangeDeleter(func() TxnDelete { - ftd := &FakeTxnDelete{be.BatchTx()} - ftd.Lock() - return ftd - }) - le.Promote(0) - - return le, func() { - le.Stop() - be.Close() - } -} - -func benchmarkLessorGrant(benchSize int, b *testing.B) { - ttls := randomTTL(benchSize, minTTL, maxTTL) - - var le *lessor - var tearDown func() - - b.ResetTimer() - for i := 0; i < b.N; { - b.StopTimer() - if tearDown != nil { - tearDown() - tearDown = nil - } - le, tearDown = setUp(b) - b.StartTimer() - - for j := 1; j <= benchSize; j++ { - le.Grant(LeaseID(j), ttls[j-1]) - } - i += benchSize - } - b.StopTimer() - - if tearDown != nil { - tearDown() - } -} - -func benchmarkLessorRevoke(benchSize int, b *testing.B) { - ttls := randomTTL(benchSize, minTTL, maxTTL) - - var le *lessor - var tearDown func() - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - if tearDown != nil { - tearDown() - tearDown = nil - } - le, tearDown = setUp(b) - for j := 1; j <= benchSize; j++ { - le.Grant(LeaseID(j), ttls[j-1]) - } - b.StartTimer() - - for j := 1; j <= benchSize; j++ { - le.Revoke(LeaseID(j)) - } - i += benchSize - } - b.StopTimer() - - if tearDown != nil { - tearDown() - } -} - -func benchmarkLessorRenew(benchSize int, b *testing.B) { - ttls := randomTTL(benchSize, minTTL, maxTTL) - - var le *lessor - var tearDown func() - - b.ResetTimer() - for i := 0; i < b.N; { - b.StopTimer() - if tearDown != nil { - tearDown() - tearDown = nil - } - le, tearDown = setUp(b) - for j := 1; j <= benchSize; j++ { - le.Grant(LeaseID(j), ttls[j-1]) - } - b.StartTimer() - - for j := 1; j <= benchSize; j++ { - le.Renew(LeaseID(j)) - } - i += benchSize - } - b.StopTimer() - - if tearDown != nil { - tearDown() - } -} - -func benchmarkLessorFindExpired(benchSize int, b *testing.B) { - // 50% lease are expired. 
- ttls := randomTTL(benchSize, -500, 500) - findExpiredLimit := 50 - - var le *lessor - var tearDown func() - - b.ResetTimer() - for i := 0; i < b.N; { - b.StopTimer() - if tearDown != nil { - tearDown() - tearDown = nil - } - le, tearDown = setUp(b) - for j := 1; j <= benchSize; j++ { - le.Grant(LeaseID(j), ttls[j-1]) - } - // lessor's runLoop should not call findExpired - demote(le) - b.StartTimer() - - // refresh fixture after pop all expired lease - for ; ; i++ { - le.mu.Lock() - ls := le.findExpiredLeases(findExpiredLimit) - if len(ls) == 0 { - le.mu.Unlock() - break - } - le.mu.Unlock() - - // simulation: revoke lease after expired - b.StopTimer() - for _, lease := range ls { - le.Revoke(lease.ID) - } - b.StartTimer() - } - } - b.StopTimer() - - if tearDown != nil { - tearDown() - } -} diff --git a/server/lease/lessor_test.go b/server/lease/lessor_test.go deleted file mode 100644 index ae9ad52e820..00000000000 --- a/server/lease/lessor_test.go +++ /dev/null @@ -1,716 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package lease - -import ( - "context" - "fmt" - "os" - "path/filepath" - "reflect" - "sort" - "sync" - "testing" - "time" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -const ( - minLeaseTTL = int64(5) - minLeaseTTLDuration = time.Duration(minLeaseTTL) * time.Second -) - -// TestLessorGrant ensures Lessor can grant wanted lease. -// The granted lease should have a unique ID with a term -// that is greater than minLeaseTTL. -func TestLessorGrant(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - le.Promote(0) - - l, err := le.Grant(1, 1) - if err != nil { - t.Fatalf("could not grant lease 1 (%v)", err) - } - if l.ttl != minLeaseTTL { - t.Fatalf("ttl = %v, expect minLeaseTTL %v", l.ttl, minLeaseTTL) - } - - gl := le.Lookup(l.ID) - - if !reflect.DeepEqual(gl, l) { - t.Errorf("lease = %v, want %v", gl, l) - } - if l.Remaining() < minLeaseTTLDuration-time.Second { - t.Errorf("term = %v, want at least %v", l.Remaining(), minLeaseTTLDuration-time.Second) - } - - _, err = le.Grant(1, 1) - if err == nil { - t.Errorf("allocated the same lease") - } - - var nl *Lease - nl, err = le.Grant(2, 1) - if err != nil { - t.Errorf("could not grant lease 2 (%v)", err) - } - if nl.ID == l.ID { - t.Errorf("new lease.id = %x, want != %x", nl.ID, l.ID) - } - - lss := []*Lease{gl, nl} - leases := le.Leases() - for i := range lss { - if lss[i].ID != leases[i].ID { - t.Fatalf("lease ID expected %d, got %d", lss[i].ID, leases[i].ID) - } - if lss[i].ttl != leases[i].ttl { - t.Fatalf("ttl expected %d, got %d", lss[i].ttl, leases[i].ttl) - } - } - - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - lpb := 
schema.MustUnsafeGetLease(tx, int64(l.ID)) - if lpb == nil { - t.Errorf("lpb = %d, want not nil", lpb) - } -} - -// TestLeaseConcurrentKeys ensures Lease.Keys method calls are guarded -// from concurrent map writes on 'itemSet'. -func TestLeaseConcurrentKeys(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) }) - - // grant a lease with long term (100 seconds) to - // avoid early termination during the test. - l, err := le.Grant(1, 100) - if err != nil { - t.Fatalf("could not grant lease for 100s ttl (%v)", err) - } - - itemn := 10 - items := make([]LeaseItem, itemn) - for i := 0; i < itemn; i++ { - items[i] = LeaseItem{Key: fmt.Sprintf("foo%d", i)} - } - if err = le.Attach(l.ID, items); err != nil { - t.Fatalf("failed to attach items to the lease: %v", err) - } - - donec := make(chan struct{}) - go func() { - le.Detach(l.ID, items) - close(donec) - }() - - var wg sync.WaitGroup - wg.Add(itemn) - for i := 0; i < itemn; i++ { - go func() { - defer wg.Done() - l.Keys() - }() - } - - <-donec - wg.Wait() -} - -// TestLessorRevoke ensures Lessor can revoke a lease. -// The items in the revoked lease should be removed from -// the backend. -// The revoked lease cannot be got from Lessor again. -func TestLessorRevoke(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - var fd *fakeDeleter - le.SetRangeDeleter(func() TxnDelete { - fd = newFakeDeleter(be) - return fd - }) - - // grant a lease with long term (100 seconds) to - // avoid early termination during the test. 
- l, err := le.Grant(1, 100) - if err != nil { - t.Fatalf("could not grant lease for 100s ttl (%v)", err) - } - - items := []LeaseItem{ - {"foo"}, - {"bar"}, - } - - if err = le.Attach(l.ID, items); err != nil { - t.Fatalf("failed to attach items to the lease: %v", err) - } - - if err = le.Revoke(l.ID); err != nil { - t.Fatal("failed to revoke lease:", err) - } - - if le.Lookup(l.ID) != nil { - t.Errorf("got revoked lease %x", l.ID) - } - - wdeleted := []string{"bar_", "foo_"} - sort.Strings(fd.deleted) - if !reflect.DeepEqual(fd.deleted, wdeleted) { - t.Errorf("deleted= %v, want %v", fd.deleted, wdeleted) - } - - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - lpb := schema.MustUnsafeGetLease(tx, int64(l.ID)) - if lpb != nil { - t.Errorf("lpb = %d, want nil", lpb) - } -} - -func renew(t *testing.T, le *lessor, id LeaseID) int64 { - ch := make(chan int64, 1) - errch := make(chan error, 1) - go func() { - ttl, err := le.Renew(id) - if err != nil { - errch <- err - } else { - ch <- ttl - } - }() - - select { - case ttl := <-ch: - return ttl - case err := <-errch: - t.Fatalf("failed to renew lease (%v)", err) - case <-time.After(10 * time.Second): - t.Fatal("timed out while renewing lease") - } - panic("unreachable") -} - -// TestLessorRenew ensures Lessor can renew an existing lease. 
-func TestLessorRenew(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer be.Close() - defer os.RemoveAll(dir) - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - le.Promote(0) - - l, err := le.Grant(1, minLeaseTTL) - if err != nil { - t.Fatalf("failed to grant lease (%v)", err) - } - - // manually change the ttl field - le.mu.Lock() - l.ttl = 10 - le.mu.Unlock() - ttl := renew(t, le, l.ID) - if ttl != l.ttl { - t.Errorf("ttl = %d, want %d", ttl, l.ttl) - } - - l = le.Lookup(l.ID) - if l.Remaining() < 9*time.Second { - t.Errorf("failed to renew the lease") - } -} - -func TestLessorRenewWithCheckpointer(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer be.Close() - defer os.RemoveAll(dir) - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - fakerCheckerpointer := func(ctx context.Context, cp *pb.LeaseCheckpointRequest) { - for _, cp := range cp.GetCheckpoints() { - le.Checkpoint(LeaseID(cp.GetID()), cp.GetRemaining_TTL()) - } - } - defer le.Stop() - // Set checkpointer - le.SetCheckpointer(fakerCheckerpointer) - le.Promote(0) - - l, err := le.Grant(1, minLeaseTTL) - if err != nil { - t.Fatalf("failed to grant lease (%v)", err) - } - - // manually change the ttl field - le.mu.Lock() - l.ttl = 10 - l.remainingTTL = 10 - le.mu.Unlock() - ttl := renew(t, le, l.ID) - if ttl != l.ttl { - t.Errorf("ttl = %d, want %d", ttl, l.ttl) - } - if l.remainingTTL != 0 { - t.Fatalf("remianingTTL = %d, want %d", l.remainingTTL, 0) - } - - l = le.Lookup(l.ID) - if l.Remaining() < 9*time.Second { - t.Errorf("failed to renew the lease") - } -} - -// TestLessorRenewExtendPileup ensures Lessor extends leases on promotion if too many -// expire at the same time. 
-func TestLessorRenewExtendPileup(t *testing.T) { - oldRevokeRate := leaseRevokeRate - defer func() { leaseRevokeRate = oldRevokeRate }() - lg := zap.NewNop() - leaseRevokeRate = 10 - - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - ttl := int64(10) - for i := 1; i <= leaseRevokeRate*10; i++ { - if _, err := le.Grant(LeaseID(2*i), ttl); err != nil { - t.Fatal(err) - } - // ttls that overlap spillover for ttl=10 - if _, err := le.Grant(LeaseID(2*i+1), ttl+1); err != nil { - t.Fatal(err) - } - } - - // simulate stop and recovery - le.Stop() - be.Close() - bcfg := backend.DefaultBackendConfig(lg) - bcfg.Path = filepath.Join(dir, "be") - be = backend.New(bcfg) - defer be.Close() - le = newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - - // extend after recovery should extend expiration on lease pile-up - le.Promote(0) - - windowCounts := make(map[int64]int) - for _, l := range le.leaseMap { - // round up slightly for baseline ttl - s := int64(l.Remaining().Seconds() + 0.1) - windowCounts[s]++ - } - - for i := ttl; i < ttl+20; i++ { - c := windowCounts[i] - if c > leaseRevokeRate { - t.Errorf("expected at most %d expiring at %ds, got %d", leaseRevokeRate, i, c) - } - if c < leaseRevokeRate/2 { - t.Errorf("expected at least %d expiring at %ds, got %d", leaseRevokeRate/2, i, c) - } - } -} - -func TestLessorDetach(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) }) - - // grant a lease with long term (100 seconds) to - // avoid early termination during the test. 
- l, err := le.Grant(1, 100) - if err != nil { - t.Fatalf("could not grant lease for 100s ttl (%v)", err) - } - - items := []LeaseItem{ - {"foo"}, - {"bar"}, - } - - if err := le.Attach(l.ID, items); err != nil { - t.Fatalf("failed to attach items to the lease: %v", err) - } - - if err := le.Detach(l.ID, items[0:1]); err != nil { - t.Fatalf("failed to de-attach items to the lease: %v", err) - } - - l = le.Lookup(l.ID) - if len(l.itemSet) != 1 { - t.Fatalf("len(l.itemSet) = %d, failed to de-attach items", len(l.itemSet)) - } - if _, ok := l.itemSet[LeaseItem{"bar"}]; !ok { - t.Fatalf("de-attached wrong item, want %q exists", "bar") - } -} - -// TestLessorRecover ensures Lessor recovers leases from -// persist backend. -func TestLessorRecover(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - l1, err1 := le.Grant(1, 10) - l2, err2 := le.Grant(2, 20) - if err1 != nil || err2 != nil { - t.Fatalf("could not grant initial leases (%v, %v)", err1, err2) - } - - // Create a new lessor with the same backend - nle := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer nle.Stop() - nl1 := nle.Lookup(l1.ID) - if nl1 == nil || nl1.ttl != l1.ttl { - t.Errorf("nl1 = %v, want nl1.ttl= %d", nl1.ttl, l1.ttl) - } - - nl2 := nle.Lookup(l2.ID) - if nl2 == nil || nl2.ttl != l2.ttl { - t.Errorf("nl2 = %v, want nl2.ttl= %d", nl2.ttl, l2.ttl) - } -} - -func TestLessorExpire(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - testMinTTL := int64(1) - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: testMinTTL}) - defer le.Stop() - - le.Promote(1 * time.Second) - l, err := le.Grant(1, testMinTTL) - if err != nil { - t.Fatalf("failed to create lease: %v", err) - } - - select { - case el := <-le.ExpiredLeasesC(): - 
if el[0].ID != l.ID { - t.Fatalf("expired id = %x, want %x", el[0].ID, l.ID) - } - case <-time.After(10 * time.Second): - t.Fatalf("failed to receive expired lease") - } - - donec := make(chan struct{}, 1) - go func() { - // expired lease cannot be renewed - if _, err := le.Renew(l.ID); err != ErrLeaseNotFound { - t.Errorf("unexpected renew") - } - donec <- struct{}{} - }() - - select { - case <-donec: - t.Fatalf("renew finished before lease revocation") - case <-time.After(50 * time.Millisecond): - } - - // expired lease can be revoked - if err := le.Revoke(l.ID); err != nil { - t.Fatalf("failed to revoke expired lease: %v", err) - } - - select { - case <-donec: - case <-time.After(10 * time.Second): - t.Fatalf("renew has not returned after lease revocation") - } -} - -func TestLessorExpireAndDemote(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - testMinTTL := int64(1) - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: testMinTTL}) - defer le.Stop() - - le.Promote(1 * time.Second) - l, err := le.Grant(1, testMinTTL) - if err != nil { - t.Fatalf("failed to create lease: %v", err) - } - - select { - case el := <-le.ExpiredLeasesC(): - if el[0].ID != l.ID { - t.Fatalf("expired id = %x, want %x", el[0].ID, l.ID) - } - case <-time.After(10 * time.Second): - t.Fatalf("failed to receive expired lease") - } - - donec := make(chan struct{}, 1) - go func() { - // expired lease cannot be renewed - if _, err := le.Renew(l.ID); err != ErrNotPrimary { - t.Errorf("unexpected renew: %v", err) - } - donec <- struct{}{} - }() - - select { - case <-donec: - t.Fatalf("renew finished before demotion") - case <-time.After(50 * time.Millisecond): - } - - // demote will cause the renew request to fail with ErrNotPrimary - le.Demote() - - select { - case <-donec: - case <-time.After(10 * time.Second): - t.Fatalf("renew has not returned after lessor demotion") - } -} - -func TestLessorMaxTTL(t 
*testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - - _, err := le.Grant(1, MaxLeaseTTL+1) - if err != ErrLeaseTTLTooLarge { - t.Fatalf("grant unexpectedly succeeded") - } -} - -func TestLessorCheckpointScheduling(t *testing.T) { - lg := zap.NewNop() - - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL, CheckpointInterval: 1 * time.Second}) - defer le.Stop() - le.minLeaseTTL = 1 - checkpointedC := make(chan struct{}) - le.SetCheckpointer(func(ctx context.Context, lc *pb.LeaseCheckpointRequest) { - close(checkpointedC) - if len(lc.Checkpoints) != 1 { - t.Errorf("expected 1 checkpoint but got %d", len(lc.Checkpoints)) - } - c := lc.Checkpoints[0] - if c.Remaining_TTL != 1 { - t.Errorf("expected checkpoint to be called with Remaining_TTL=%d but got %d", 1, c.Remaining_TTL) - } - }) - _, err := le.Grant(1, 2) - if err != nil { - t.Fatal(err) - } - le.Promote(0) - - // TODO: Is there any way to avoid doing this wait? Lease TTL granularity is in seconds. 
- select { - case <-checkpointedC: - case <-time.After(2 * time.Second): - t.Fatal("expected checkpointer to be called, but it was not") - } -} - -func TestLessorCheckpointsRestoredOnPromote(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - le := newLessor(lg, be, clusterLatest(), LessorConfig{MinLeaseTTL: minLeaseTTL}) - defer le.Stop() - l, err := le.Grant(1, 10) - if err != nil { - t.Fatal(err) - } - le.Checkpoint(l.ID, 5) - le.Promote(0) - remaining := l.Remaining().Seconds() - if !(remaining > 4 && remaining < 5) { - t.Fatalf("expected expiry to be less than 1s in the future, but got %f seconds", remaining) - } -} - -func TestLessorCheckpointPersistenceAfterRestart(t *testing.T) { - const ttl int64 = 10 - const checkpointTTL int64 = 5 - - tcs := []struct { - name string - cluster cluster - checkpointPersist bool - expectRemainingTTL int64 - }{ - { - name: "Etcd v3.6 and newer persist remainingTTL on checkpoint", - cluster: clusterLatest(), - expectRemainingTTL: checkpointTTL, - }, - { - name: "Etcd v3.5 and older persist remainingTTL if CheckpointPersist is set", - cluster: clusterV3_5(), - checkpointPersist: true, - expectRemainingTTL: checkpointTTL, - }, - { - name: "Etcd with version unknown persists remainingTTL if CheckpointPersist is set", - cluster: clusterNil(), - checkpointPersist: true, - expectRemainingTTL: checkpointTTL, - }, - { - name: "Etcd v3.5 and older reset remainingTTL on checkpoint", - cluster: clusterV3_5(), - expectRemainingTTL: ttl, - }, - { - name: "Etcd with version unknown fallbacks to v3.5 behavior", - cluster: clusterNil(), - expectRemainingTTL: ttl, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zap.NewNop() - dir, be := NewTestBackend(t) - defer os.RemoveAll(dir) - defer be.Close() - - cfg := LessorConfig{MinLeaseTTL: minLeaseTTL} - cfg.CheckpointPersist = tc.checkpointPersist - le := newLessor(lg, be, tc.cluster, cfg) - l, err 
:= le.Grant(2, ttl) - if err != nil { - t.Fatal(err) - } - if l.getRemainingTTL() != ttl { - t.Errorf("getRemainingTTL() = %d, expected: %d", l.getRemainingTTL(), ttl) - } - le.Checkpoint(2, checkpointTTL) - if l.getRemainingTTL() != checkpointTTL { - t.Errorf("getRemainingTTL() = %d, expected: %d", l.getRemainingTTL(), checkpointTTL) - } - le.Stop() - le2 := newLessor(lg, be, clusterLatest(), cfg) - l = le2.Lookup(2) - if l.getRemainingTTL() != tc.expectRemainingTTL { - t.Errorf("getRemainingTTL() = %d, expected: %d", l.getRemainingTTL(), tc.expectRemainingTTL) - } - }) - } -} - -type fakeDeleter struct { - deleted []string - tx backend.BatchTx -} - -func newFakeDeleter(be backend.Backend) *fakeDeleter { - fd := &fakeDeleter{nil, be.BatchTx()} - fd.tx.Lock() - return fd -} - -func (fd *fakeDeleter) End() { fd.tx.Unlock() } - -func (fd *fakeDeleter) DeleteRange(key, end []byte) (int64, int64) { - fd.deleted = append(fd.deleted, string(key)+"_"+string(end)) - return 0, 0 -} - -func NewTestBackend(t *testing.T) (string, backend.Backend) { - lg := zaptest.NewLogger(t) - tmpPath := t.TempDir() - bcfg := backend.DefaultBackendConfig(lg) - bcfg.Path = filepath.Join(tmpPath, "be") - return tmpPath, backend.New(bcfg) -} - -func clusterLatest() cluster { - return fakeCluster{semver.New(version.Cluster(version.Version) + ".0")} -} - -func clusterV3_5() cluster { - return fakeCluster{semver.New("3.5.0")} -} - -func clusterNil() cluster { - return fakeCluster{} -} - -type fakeCluster struct { - version *semver.Version -} - -func (c fakeCluster) Version() *semver.Version { - return c.version -} diff --git a/server/lease/metrics.go b/server/lease/metrics.go deleted file mode 100644 index 06f8b58015f..00000000000 --- a/server/lease/metrics.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -var ( - leaseGranted = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "lease", - Name: "granted_total", - Help: "The total number of granted leases.", - }) - - leaseRevoked = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "lease", - Name: "revoked_total", - Help: "The total number of revoked leases.", - }) - - leaseRenewed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "lease", - Name: "renewed_total", - Help: "The number of renewed leases seen by the leader.", - }) - - leaseTotalTTLs = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "lease", - Name: "ttl_total", - Help: "Bucketed histogram of lease TTLs.", - // 1 second -> 3 months - Buckets: prometheus.ExponentialBuckets(1, 2, 24), - }) -) - -func init() { - prometheus.MustRegister(leaseGranted) - prometheus.MustRegister(leaseRevoked) - prometheus.MustRegister(leaseRenewed) - prometheus.MustRegister(leaseTotalTTLs) -} diff --git a/server/main.go b/server/main.go deleted file mode 100644 index 0468094fff3..00000000000 --- a/server/main.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package main is a simple wrapper of the real etcd entrypoint package -// (located at go.etcd.io/etcd/etcdmain) to ensure that etcd is still -// "go getable"; e.g. `go get go.etcd.io/etcd` works as expected and -// builds a binary in $GOBIN/etcd -// -// This package should NOT be extended or modified in any way; to modify the -// etcd binary, work in the `go.etcd.io/etcd/etcdmain` package. -package main - -import ( - "os" - - "go.etcd.io/etcd/server/v3/etcdmain" -) - -func main() { - etcdmain.Main(os.Args) -} diff --git a/server/main_test.go b/server/main_test.go deleted file mode 100644 index bc96bb33d74..00000000000 --- a/server/main_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "log" - "os" - "os/signal" - "strings" - "syscall" - "testing" - - "go.etcd.io/etcd/server/v3/etcdmain" -) - -func SplitTestArgs(args []string) (testArgs, appArgs []string) { - for i, arg := range args { - switch { - case strings.HasPrefix(arg, "-test."): - testArgs = append(testArgs, arg) - case i == 0: - appArgs = append(appArgs, arg) - testArgs = append(testArgs, arg) - default: - appArgs = append(appArgs, arg) - } - } - return -} - -func TestEmpty(t *testing.T) {} - -/** - * The purpose of this "test" is to run etcd server with code-coverage - * collection turned on. The technique is documented here: - * - * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage - */ -func TestMain(m *testing.M) { - // don't launch etcd server when invoked via go test - if strings.HasSuffix(os.Args[0], ".test") { - log.Print("skip launching etcd server when invoked via go test") - return - } - - testArgs, appArgs := SplitTestArgs(os.Args) - - notifier := make(chan os.Signal, 1) - signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM) - go etcdmain.Main(appArgs) - <-notifier - - // This will generate coverage files: - os.Args = testArgs - m.Run() -} diff --git a/server/mock/mockstorage/doc.go b/server/mock/mockstorage/doc.go deleted file mode 100644 index b298ab48ca8..00000000000 --- a/server/mock/mockstorage/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mockstorage provides mock implementations for etcdserver's storage interface. -package mockstorage diff --git a/server/mock/mockstorage/storage_recorder.go b/server/mock/mockstorage/storage_recorder.go deleted file mode 100644 index 41d2952e8a1..00000000000 --- a/server/mock/mockstorage/storage_recorder.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mockstorage - -import ( - "github.com/coreos/go-semver/semver" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" -) - -type storageRecorder struct { - testutil.Recorder - dbPath string // must have '/' suffix if set -} - -func NewStorageRecorder(db string) *storageRecorder { - return &storageRecorder{&testutil.RecorderBuffered{}, db} -} - -func NewStorageRecorderStream(db string) *storageRecorder { - return &storageRecorder{testutil.NewRecorderStream(), db} -} - -func (p *storageRecorder) Save(st raftpb.HardState, ents []raftpb.Entry) error { - p.Record(testutil.Action{Name: "Save"}) - return nil -} - -func (p *storageRecorder) SaveSnap(st raftpb.Snapshot) error { - if !raft.IsEmptySnap(st) { - p.Record(testutil.Action{Name: "SaveSnap"}) - } - return nil -} - -func (p *storageRecorder) Release(st raftpb.Snapshot) error { - if !raft.IsEmptySnap(st) { - p.Record(testutil.Action{Name: "Release"}) - } - return nil -} - -func (p *storageRecorder) Sync() error { - p.Record(testutil.Action{Name: "Sync"}) - return nil -} - -func (p *storageRecorder) Close() error { return nil } -func (p *storageRecorder) MinimalEtcdVersion() *semver.Version { return nil } diff --git a/server/mock/mockstore/doc.go b/server/mock/mockstore/doc.go deleted file mode 100644 index e74cebea2cd..00000000000 --- a/server/mock/mockstore/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mockstore provides mock structures for the etcd store package. -package mockstore diff --git a/server/mock/mockstore/store_recorder.go b/server/mock/mockstore/store_recorder.go deleted file mode 100644 index 64aa46e9c2a..00000000000 --- a/server/mock/mockstore/store_recorder.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mockstore - -import ( - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" -) - -// StoreRecorder provides a Store interface with a testutil.Recorder -type StoreRecorder struct { - v2store.Store - testutil.Recorder -} - -// storeRecorder records all the methods it receives. -// storeRecorder DOES NOT work as a actual v2store. -// It always returns invalid empty response and no error. 
-type storeRecorder struct { - v2store.Store - testutil.Recorder -} - -func NewNop() v2store.Store { return &storeRecorder{Recorder: &testutil.RecorderBuffered{}} } -func NewRecorder() *StoreRecorder { - sr := &storeRecorder{Recorder: &testutil.RecorderBuffered{}} - return &StoreRecorder{Store: sr, Recorder: sr.Recorder} -} -func NewRecorderStream() *StoreRecorder { - sr := &storeRecorder{Recorder: testutil.NewRecorderStream()} - return &StoreRecorder{Store: sr, Recorder: sr.Recorder} -} - -func (s *storeRecorder) Version() int { return 0 } -func (s *storeRecorder) Index() uint64 { return 0 } -func (s *storeRecorder) Get(path string, recursive, sorted bool) (*v2store.Event, error) { - s.Record(testutil.Action{ - Name: "Get", - Params: []interface{}{path, recursive, sorted}, - }) - return &v2store.Event{}, nil -} -func (s *storeRecorder) Set(path string, dir bool, val string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { - s.Record(testutil.Action{ - Name: "Set", - Params: []interface{}{path, dir, val, expireOpts}, - }) - return &v2store.Event{}, nil -} -func (s *storeRecorder) Update(path, val string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { - s.Record(testutil.Action{ - Name: "Update", - Params: []interface{}{path, val, expireOpts}, - }) - return &v2store.Event{}, nil -} -func (s *storeRecorder) Create(path string, dir bool, val string, uniq bool, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { - s.Record(testutil.Action{ - Name: "Create", - Params: []interface{}{path, dir, val, uniq, expireOpts}, - }) - return &v2store.Event{}, nil -} -func (s *storeRecorder) CompareAndSwap(path, prevVal string, prevIdx uint64, val string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { - s.Record(testutil.Action{ - Name: "CompareAndSwap", - Params: []interface{}{path, prevVal, prevIdx, val, expireOpts}, - }) - return &v2store.Event{}, nil -} -func (s *storeRecorder) Delete(path string, dir, recursive bool) (*v2store.Event, 
error) { - s.Record(testutil.Action{ - Name: "Delete", - Params: []interface{}{path, dir, recursive}, - }) - return &v2store.Event{}, nil -} -func (s *storeRecorder) CompareAndDelete(path, prevVal string, prevIdx uint64) (*v2store.Event, error) { - s.Record(testutil.Action{ - Name: "CompareAndDelete", - Params: []interface{}{path, prevVal, prevIdx}, - }) - return &v2store.Event{}, nil -} -func (s *storeRecorder) Watch(_ string, _, _ bool, _ uint64) (v2store.Watcher, error) { - s.Record(testutil.Action{Name: "Watch"}) - return v2store.NewNopWatcher(), nil -} -func (s *storeRecorder) Save() ([]byte, error) { - s.Record(testutil.Action{Name: "Save"}) - return nil, nil -} -func (s *storeRecorder) Recovery(b []byte) error { - s.Record(testutil.Action{Name: "Recovery"}) - return nil -} - -func (s *storeRecorder) SaveNoCopy() ([]byte, error) { - s.Record(testutil.Action{Name: "SaveNoCopy"}) - return nil, nil -} - -func (s *storeRecorder) Clone() v2store.Store { - s.Record(testutil.Action{Name: "Clone"}) - return s -} - -func (s *storeRecorder) JsonStats() []byte { return nil } -func (s *storeRecorder) DeleteExpiredKeys(cutoff time.Time) { - s.Record(testutil.Action{ - Name: "DeleteExpiredKeys", - Params: []interface{}{cutoff}, - }) -} - -func (s *storeRecorder) HasTTLKeys() bool { - s.Record(testutil.Action{ - Name: "HasTTLKeys", - }) - return true -} - -// errStoreRecorder is a storeRecorder, but returns the given error on -// Get, Watch methods. 
-type errStoreRecorder struct { - storeRecorder - err error -} - -func NewErrRecorder(err error) *StoreRecorder { - sr := &errStoreRecorder{err: err} - sr.Recorder = &testutil.RecorderBuffered{} - return &StoreRecorder{Store: sr, Recorder: sr.Recorder} -} - -func (s *errStoreRecorder) Get(path string, recursive, sorted bool) (*v2store.Event, error) { - s.storeRecorder.Get(path, recursive, sorted) - return nil, s.err -} -func (s *errStoreRecorder) Watch(path string, recursive, sorted bool, index uint64) (v2store.Watcher, error) { - s.storeRecorder.Watch(path, recursive, sorted, index) - return nil, s.err -} diff --git a/server/mock/mockwait/doc.go b/server/mock/mockwait/doc.go deleted file mode 100644 index ac3c5d27314..00000000000 --- a/server/mock/mockwait/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mockwait provides mock implementations for pkg/wait. -package mockwait diff --git a/server/mock/mockwait/wait_recorder.go b/server/mock/mockwait/wait_recorder.go deleted file mode 100644 index df16cc3b0e5..00000000000 --- a/server/mock/mockwait/wait_recorder.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mockwait - -import ( - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/pkg/v3/wait" -) - -type WaitRecorder struct { - wait.Wait - testutil.Recorder -} - -type waitRecorder struct { - testutil.RecorderBuffered -} - -func NewRecorder() *WaitRecorder { - wr := &waitRecorder{} - return &WaitRecorder{Wait: wr, Recorder: wr} -} -func NewNop() wait.Wait { return NewRecorder() } - -func (w *waitRecorder) Register(id uint64) <-chan interface{} { - w.Record(testutil.Action{Name: "Register"}) - return nil -} -func (w *waitRecorder) Trigger(id uint64, x interface{}) { - w.Record(testutil.Action{Name: "Trigger"}) -} - -func (w *waitRecorder) IsRegistered(id uint64) bool { - panic("waitRecorder.IsRegistered() shouldn't be called") -} diff --git a/server/proxy/grpcproxy/adapter/doc.go b/server/proxy/grpcproxy/adapter/doc.go deleted file mode 100644 index 7170be23304..00000000000 --- a/server/proxy/grpcproxy/adapter/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package adapter provides gRPC adapters between client and server -// gRPC interfaces without needing to go through a gRPC connection. -package adapter diff --git a/server/proxy/grpcproxy/auth.go b/server/proxy/grpcproxy/auth.go deleted file mode 100644 index 753dfa47acf..00000000000 --- a/server/proxy/grpcproxy/auth.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" -) - -type AuthProxy struct { - authClient pb.AuthClient -} - -func NewAuthProxy(c *clientv3.Client) pb.AuthServer { - return &AuthProxy{authClient: pb.NewAuthClient(c.ActiveConnection())} -} - -func (ap *AuthProxy) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - return ap.authClient.AuthEnable(ctx, r) -} - -func (ap *AuthProxy) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - return ap.authClient.AuthDisable(ctx, r) -} - -func (ap *AuthProxy) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) { - return ap.authClient.AuthStatus(ctx, r) -} - -func (ap *AuthProxy) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - return ap.authClient.Authenticate(ctx, r) -} - -func (ap *AuthProxy) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - return ap.authClient.RoleAdd(ctx, r) -} - -func (ap *AuthProxy) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - return ap.authClient.RoleDelete(ctx, r) -} - -func (ap *AuthProxy) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - return ap.authClient.RoleGet(ctx, r) -} - -func (ap *AuthProxy) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - return ap.authClient.RoleList(ctx, r) -} - -func (ap *AuthProxy) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - return ap.authClient.RoleRevokePermission(ctx, r) -} - -func (ap *AuthProxy) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - return 
ap.authClient.RoleGrantPermission(ctx, r) -} - -func (ap *AuthProxy) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - return ap.authClient.UserAdd(ctx, r) -} - -func (ap *AuthProxy) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - return ap.authClient.UserDelete(ctx, r) -} - -func (ap *AuthProxy) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - return ap.authClient.UserGet(ctx, r) -} - -func (ap *AuthProxy) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - return ap.authClient.UserList(ctx, r) -} - -func (ap *AuthProxy) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - return ap.authClient.UserGrantRole(ctx, r) -} - -func (ap *AuthProxy) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - return ap.authClient.UserRevokeRole(ctx, r) -} - -func (ap *AuthProxy) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - return ap.authClient.UserChangePassword(ctx, r) -} diff --git a/server/proxy/grpcproxy/cache/store.go b/server/proxy/grpcproxy/cache/store.go deleted file mode 100644 index 69fb38c070f..00000000000 --- a/server/proxy/grpcproxy/cache/store.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cache exports functionality for efficiently caching and mapping -// `RangeRequest`s to corresponding `RangeResponse`s. -package cache - -import ( - "errors" - "sync" - - "github.com/golang/groupcache/lru" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/pkg/v3/adt" -) - -var ( - DefaultMaxEntries = 2048 - ErrCompacted = rpctypes.ErrGRPCCompacted -) - -type Cache interface { - Add(req *pb.RangeRequest, resp *pb.RangeResponse) - Get(req *pb.RangeRequest) (*pb.RangeResponse, error) - Compact(revision int64) - Invalidate(key []byte, endkey []byte) - Size() int - Close() -} - -// keyFunc returns the key of a request, which is used to look up its caching response in the cache. -func keyFunc(req *pb.RangeRequest) string { - // TODO: use marshalTo to reduce allocation - b, err := req.Marshal() - if err != nil { - panic(err) - } - return string(b) -} - -func NewCache(maxCacheEntries int) Cache { - return &cache{ - lru: lru.New(maxCacheEntries), - cachedRanges: adt.NewIntervalTree(), - compactedRev: -1, - } -} - -func (c *cache) Close() {} - -// cache implements Cache -type cache struct { - mu sync.RWMutex - lru *lru.Cache - - // a reverse index for cache invalidation - cachedRanges adt.IntervalTree - - compactedRev int64 -} - -// Add adds the response of a request to the cache if its revision is larger than the compacted revision of the cache. -func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { - key := keyFunc(req) - - c.mu.Lock() - defer c.mu.Unlock() - - if req.Revision > c.compactedRev { - c.lru.Add(key, resp) - } - // we do not need to invalidate a request with a revision specified. - // so we do not need to add it into the reverse index. 
- if req.Revision != 0 { - return - } - - var ( - iv *adt.IntervalValue - ivl adt.Interval - ) - if len(req.RangeEnd) != 0 { - ivl = adt.NewStringAffineInterval(string(req.Key), string(req.RangeEnd)) - } else { - ivl = adt.NewStringAffinePoint(string(req.Key)) - } - - iv = c.cachedRanges.Find(ivl) - - if iv == nil { - val := map[string]struct{}{key: {}} - c.cachedRanges.Insert(ivl, val) - } else { - val := iv.Val.(map[string]struct{}) - val[key] = struct{}{} - iv.Val = val - } -} - -// Get looks up the caching response for a given request. -// Get is also responsible for lazy eviction when accessing compacted entries. -func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) { - key := keyFunc(req) - - c.mu.Lock() - defer c.mu.Unlock() - - if req.Revision > 0 && req.Revision < c.compactedRev { - c.lru.Remove(key) - return nil, ErrCompacted - } - - if resp, ok := c.lru.Get(key); ok { - return resp.(*pb.RangeResponse), nil - } - return nil, errors.New("not exist") -} - -// Invalidate invalidates the cache entries that intersecting with the given range from key to endkey. -func (c *cache) Invalidate(key, endkey []byte) { - c.mu.Lock() - defer c.mu.Unlock() - - var ( - ivs []*adt.IntervalValue - ivl adt.Interval - ) - if len(endkey) == 0 { - ivl = adt.NewStringAffinePoint(string(key)) - } else { - ivl = adt.NewStringAffineInterval(string(key), string(endkey)) - } - - ivs = c.cachedRanges.Stab(ivl) - for _, iv := range ivs { - keys := iv.Val.(map[string]struct{}) - for key := range keys { - c.lru.Remove(key) - } - } - // delete after removing all keys since it is destructive to 'ivs' - c.cachedRanges.Delete(ivl) -} - -// Compact invalidate all caching response before the given rev. -// Replace with the invalidation is lazy. The actual removal happens when the entries is accessed. 
-func (c *cache) Compact(revision int64) { - c.mu.Lock() - defer c.mu.Unlock() - - if revision > c.compactedRev { - c.compactedRev = revision - } -} - -func (c *cache) Size() int { - c.mu.RLock() - defer c.mu.RUnlock() - return c.lru.Len() -} diff --git a/server/proxy/grpcproxy/cluster.go b/server/proxy/grpcproxy/cluster.go deleted file mode 100644 index 2467277129e..00000000000 --- a/server/proxy/grpcproxy/cluster.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - "errors" - "fmt" - "os" - "sync" - - "golang.org/x/time/rate" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/naming/endpoints" - - "go.uber.org/zap" -) - -// allow maximum 1 retry per second -const resolveRetryRate = 1 - -type clusterProxy struct { - lg *zap.Logger - clus pb.ClusterClient - ctx context.Context - - // advertise client URL - advaddr string - prefix string - - em endpoints.Manager - - umu sync.RWMutex - umap map[string]endpoints.Endpoint -} - -// NewClusterProxy takes optional prefix to fetch grpc-proxy member endpoints. -// The returned channel is closed when there is grpc-proxy endpoint registered -// and the client's context is canceled so the 'register' loop returns. 
-// TODO: Expand the API to report creation errors -func NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{}) { - if lg == nil { - lg = zap.NewNop() - } - - var em endpoints.Manager - if advaddr != "" && prefix != "" { - var err error - if em, err = endpoints.NewManager(c, prefix); err != nil { - lg.Error("failed to provision endpointsManager", zap.String("prefix", prefix), zap.Error(err)) - return nil, nil - } - } - - cp := &clusterProxy{ - lg: lg, - clus: pb.NewClusterClient(c.ActiveConnection()), - ctx: c.Ctx(), - - advaddr: advaddr, - prefix: prefix, - umap: make(map[string]endpoints.Endpoint), - em: em, - } - - donec := make(chan struct{}) - if em != nil { - go func() { - defer close(donec) - cp.establishEndpointWatch(prefix) - }() - return cp, donec - } - - close(donec) - return cp, donec -} - -func (cp *clusterProxy) establishEndpointWatch(prefix string) { - rm := rate.NewLimiter(rate.Limit(resolveRetryRate), resolveRetryRate) - for rm.Wait(cp.ctx) == nil { - wc, err := cp.em.NewWatchChannel(cp.ctx) - if err != nil { - cp.lg.Warn("failed to establish endpoint watch", zap.String("prefix", prefix), zap.Error(err)) - continue - } - cp.monitor(wc) - } -} - -func (cp *clusterProxy) monitor(wa endpoints.WatchChannel) { - for { - select { - case <-cp.ctx.Done(): - cp.lg.Info("watching endpoints interrupted", zap.Error(cp.ctx.Err())) - return - case updates := <-wa: - cp.umu.Lock() - for _, up := range updates { - switch up.Op { - case endpoints.Add: - cp.umap[up.Endpoint.Addr] = up.Endpoint - case endpoints.Delete: - delete(cp.umap, up.Endpoint.Addr) - } - } - cp.umu.Unlock() - } - } -} - -func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - return cp.clus.MemberAdd(ctx, r) -} - -func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - return cp.clus.MemberRemove(ctx, r) -} - 
-func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - return cp.clus.MemberUpdate(ctx, r) -} - -func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) { - cp.umu.RLock() - defer cp.umu.RUnlock() - mbs := make([]*pb.Member, 0, len(cp.umap)) - for addr, upt := range cp.umap { - m, err := decodeMeta(fmt.Sprint(upt.Metadata)) - if err != nil { - return nil, err - } - mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}}) - } - return mbs, nil -} - -// MemberList wraps member list API with following rules: -// - If 'advaddr' is not empty and 'prefix' is not empty, return registered member lists via resolver -// - If 'advaddr' is not empty and 'prefix' is not empty and registered grpc-proxy members haven't been fetched, return the 'advaddr' -// - If 'advaddr' is not empty and 'prefix' is empty, return 'advaddr' without forcing it to 'register' -// - If 'advaddr' is empty, forward to member list API -func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - if cp.advaddr != "" { - if cp.prefix != "" { - mbs, err := cp.membersFromUpdates() - if err != nil { - return nil, err - } - if len(mbs) > 0 { - return &pb.MemberListResponse{Members: mbs}, nil - } - } - // prefix is empty or no grpc-proxy members haven't been registered - hostname, _ := os.Hostname() - return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil - } - return cp.clus.MemberList(ctx, r) -} - -func (cp *clusterProxy) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) { - // TODO: implement - return nil, errors.New("not implemented") -} diff --git a/server/proxy/grpcproxy/election.go b/server/proxy/grpcproxy/election.go deleted file mode 100644 index 9ea8d961576..00000000000 --- a/server/proxy/grpcproxy/election.go +++ /dev/null @@ -1,64 +0,0 @@ -// 
Copyright 2017 The etcd Lockors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" -) - -type electionProxy struct { - electionClient v3electionpb.ElectionClient -} - -func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer { - return &electionProxy{electionClient: v3electionpb.NewElectionClient(client.ActiveConnection())} -} - -func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) { - return ep.electionClient.Campaign(ctx, req) -} - -func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) { - return ep.electionClient.Proclaim(ctx, req) -} - -func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) { - return ep.electionClient.Leader(ctx, req) -} - -func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error { - ctx, cancel := context.WithCancel(s.Context()) - defer cancel() - sc, err := ep.electionClient.Observe(ctx, req) - if err != nil { - return err - } - for { - rr, err := sc.Recv() - if err != nil { - return err - } - if err = s.Send(rr); err != nil { - return err - } - } -} - -func (ep *electionProxy) Resign(ctx 
context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) { - return ep.electionClient.Resign(ctx, req) -} diff --git a/server/proxy/grpcproxy/health.go b/server/proxy/grpcproxy/health.go deleted file mode 100644 index ec9781bfe27..00000000000 --- a/server/proxy/grpcproxy/health.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - "fmt" - "net/http" - "time" - - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" -) - -// HandleHealth registers health handler on '/health'. -func HandleHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) { - if lg == nil { - lg = zap.NewNop() - } - mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet, serializable bool) etcdhttp.Health { return checkHealth(c) })) -} - -// HandleProxyHealth registers health handler on '/proxy/health'. 
-func HandleProxyHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) { - if lg == nil { - lg = zap.NewNop() - } - mux.Handle(etcdhttp.PathProxyHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet, serializable bool) etcdhttp.Health { return checkProxyHealth(c) })) -} - -func checkHealth(c *clientv3.Client) etcdhttp.Health { - h := etcdhttp.Health{Health: "false"} - ctx, cancel := context.WithTimeout(c.Ctx(), time.Second) - _, err := c.Get(ctx, "a") - cancel() - if err == nil || err == rpctypes.ErrPermissionDenied { - h.Health = "true" - } else { - h.Reason = fmt.Sprintf("GET ERROR:%s", err) - } - return h -} - -func checkProxyHealth(c *clientv3.Client) etcdhttp.Health { - if c == nil { - return etcdhttp.Health{Health: "false", Reason: "no connection to proxy"} - } - h := checkHealth(c) - if h.Health != "true" { - return h - } - ctx, cancel := context.WithTimeout(c.Ctx(), time.Second*3) - ch := c.Watch(ctx, "a", clientv3.WithCreatedNotify()) - select { - case <-ch: - case <-ctx.Done(): - h.Health = "false" - h.Reason = "WATCH TIMEOUT" - } - cancel() - return h -} diff --git a/server/proxy/grpcproxy/kv.go b/server/proxy/grpcproxy/kv.go deleted file mode 100644 index 36ff0dd615c..00000000000 --- a/server/proxy/grpcproxy/kv.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "context" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy/cache" -) - -type kvProxy struct { - kv clientv3.KV - cache cache.Cache -} - -func NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) { - kv := &kvProxy{ - kv: c.KV, - cache: cache.NewCache(cache.DefaultMaxEntries), - } - donec := make(chan struct{}) - close(donec) - return kv, donec -} - -func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if r.Serializable { - resp, err := p.cache.Get(r) - switch err { - case nil: - cacheHits.Inc() - return resp, nil - case cache.ErrCompacted: - cacheHits.Inc() - return nil, err - } - - cachedMisses.Inc() - } - - resp, err := p.kv.Do(ctx, RangeRequestToOp(r)) - if err != nil { - return nil, err - } - - // cache linearizable as serializable - req := *r - req.Serializable = true - gresp := (*pb.RangeResponse)(resp.Get()) - p.cache.Add(&req, gresp) - cacheKeys.Set(float64(p.cache.Size())) - - return gresp, nil -} - -func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - p.cache.Invalidate(r.Key, nil) - cacheKeys.Set(float64(p.cache.Size())) - - resp, err := p.kv.Do(ctx, PutRequestToOp(r)) - return (*pb.PutResponse)(resp.Put()), err -} - -func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - p.cache.Invalidate(r.Key, r.RangeEnd) - cacheKeys.Set(float64(p.cache.Size())) - - resp, err := p.kv.Do(ctx, DelRequestToOp(r)) - return (*pb.DeleteRangeResponse)(resp.Del()), err -} - -func (p *kvProxy) txnToCache(reqs []*pb.RequestOp, resps []*pb.ResponseOp) { - for i := range resps { - switch tv := resps[i].Response.(type) { - case *pb.ResponseOp_ResponsePut: - p.cache.Invalidate(reqs[i].GetRequestPut().Key, nil) - case *pb.ResponseOp_ResponseDeleteRange: - rdr := reqs[i].GetRequestDeleteRange() - p.cache.Invalidate(rdr.Key, 
rdr.RangeEnd) - case *pb.ResponseOp_ResponseRange: - req := *(reqs[i].GetRequestRange()) - req.Serializable = true - p.cache.Add(&req, tv.ResponseRange) - } - } -} - -func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - op := TxnRequestToOp(r) - opResp, err := p.kv.Do(ctx, op) - if err != nil { - return nil, err - } - resp := opResp.Txn() - - // txn may claim an outdated key is updated; be safe and invalidate - for _, cmp := range r.Compare { - p.cache.Invalidate(cmp.Key, cmp.RangeEnd) - } - // update any fetched keys - if resp.Succeeded { - p.txnToCache(r.Success, resp.Responses) - } else { - p.txnToCache(r.Failure, resp.Responses) - } - - cacheKeys.Set(float64(p.cache.Size())) - - return (*pb.TxnResponse)(resp), nil -} - -func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - var opts []clientv3.CompactOption - if r.Physical { - opts = append(opts, clientv3.WithCompactPhysical()) - } - - resp, err := p.kv.Compact(ctx, r.Revision, opts...) 
- if err == nil { - p.cache.Compact(r.Revision) - } - - cacheKeys.Set(float64(p.cache.Size())) - - return (*pb.CompactionResponse)(resp), err -} - -func requestOpToOp(union *pb.RequestOp) clientv3.Op { - switch tv := union.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange != nil { - return RangeRequestToOp(tv.RequestRange) - } - case *pb.RequestOp_RequestPut: - if tv.RequestPut != nil { - return PutRequestToOp(tv.RequestPut) - } - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange != nil { - return DelRequestToOp(tv.RequestDeleteRange) - } - case *pb.RequestOp_RequestTxn: - if tv.RequestTxn != nil { - return TxnRequestToOp(tv.RequestTxn) - } - } - panic("unknown request") -} - -func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { - var opts []clientv3.OpOption - if len(r.RangeEnd) != 0 { - opts = append(opts, clientv3.WithRange(string(r.RangeEnd))) - } - opts = append(opts, clientv3.WithRev(r.Revision)) - opts = append(opts, clientv3.WithLimit(r.Limit)) - opts = append(opts, clientv3.WithSort( - clientv3.SortTarget(r.SortTarget), - clientv3.SortOrder(r.SortOrder)), - ) - opts = append(opts, clientv3.WithMaxCreateRev(r.MaxCreateRevision)) - opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision)) - opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision)) - opts = append(opts, clientv3.WithMinModRev(r.MinModRevision)) - if r.CountOnly { - opts = append(opts, clientv3.WithCountOnly()) - } - if r.KeysOnly { - opts = append(opts, clientv3.WithKeysOnly()) - } - if r.Serializable { - opts = append(opts, clientv3.WithSerializable()) - } - - return clientv3.OpGet(string(r.Key), opts...) 
-} - -func PutRequestToOp(r *pb.PutRequest) clientv3.Op { - var opts []clientv3.OpOption - opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease))) - if r.IgnoreValue { - opts = append(opts, clientv3.WithIgnoreValue()) - } - if r.IgnoreLease { - opts = append(opts, clientv3.WithIgnoreLease()) - } - if r.PrevKv { - opts = append(opts, clientv3.WithPrevKV()) - } - return clientv3.OpPut(string(r.Key), string(r.Value), opts...) -} - -func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op { - var opts []clientv3.OpOption - if len(r.RangeEnd) != 0 { - opts = append(opts, clientv3.WithRange(string(r.RangeEnd))) - } - if r.PrevKv { - opts = append(opts, clientv3.WithPrevKV()) - } - return clientv3.OpDelete(string(r.Key), opts...) -} - -func TxnRequestToOp(r *pb.TxnRequest) clientv3.Op { - cmps := make([]clientv3.Cmp, len(r.Compare)) - thenops := make([]clientv3.Op, len(r.Success)) - elseops := make([]clientv3.Op, len(r.Failure)) - for i := range r.Compare { - cmps[i] = (clientv3.Cmp)(*r.Compare[i]) - } - for i := range r.Success { - thenops[i] = requestOpToOp(r.Success[i]) - } - for i := range r.Failure { - elseops[i] = requestOpToOp(r.Failure[i]) - } - return clientv3.OpTxn(cmps, thenops, elseops) -} diff --git a/server/proxy/grpcproxy/lease.go b/server/proxy/grpcproxy/lease.go deleted file mode 100644 index d206dcab49b..00000000000 --- a/server/proxy/grpcproxy/lease.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - "io" - "sync" - "sync/atomic" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -type leaseProxy struct { - // leaseClient handles req from LeaseGrant() that requires a lease ID. - leaseClient pb.LeaseClient - - lessor clientv3.Lease - - ctx context.Context - - leader *leader - - // mu protects adding outstanding leaseProxyStream through wg. - mu sync.RWMutex - - // wg waits until all outstanding leaseProxyStream quit. - wg sync.WaitGroup -} - -func NewLeaseProxy(ctx context.Context, c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) { - cctx, cancel := context.WithCancel(ctx) - lp := &leaseProxy{ - leaseClient: pb.NewLeaseClient(c.ActiveConnection()), - lessor: c.Lease, - ctx: cctx, - leader: newLeader(cctx, c.Watcher), - } - ch := make(chan struct{}) - go func() { - defer close(ch) - <-lp.leader.stopNotify() - lp.mu.Lock() - select { - case <-lp.ctx.Done(): - case <-lp.leader.disconnectNotify(): - cancel() - } - <-lp.ctx.Done() - lp.mu.Unlock() - lp.wg.Wait() - }() - return lp, ch -} - -func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - lp.leader.gotLeader() - return rp, nil -} - -func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID)) - if err != nil { - return nil, err - } - lp.leader.gotLeader() - return (*pb.LeaseRevokeResponse)(r), nil -} - -func (lp *leaseProxy) LeaseTimeToLive(ctx 
context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - var ( - r *clientv3.LeaseTimeToLiveResponse - err error - ) - if rr.Keys { - r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys()) - } else { - r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID)) - } - if err != nil { - return nil, err - } - rp := &pb.LeaseTimeToLiveResponse{ - Header: r.ResponseHeader, - ID: int64(r.ID), - TTL: r.TTL, - GrantedTTL: r.GrantedTTL, - Keys: r.Keys, - } - return rp, err -} - -func (lp *leaseProxy) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { - r, err := lp.lessor.Leases(ctx) - if err != nil { - return nil, err - } - leases := make([]*pb.LeaseStatus, len(r.Leases)) - for i := range r.Leases { - leases[i] = &pb.LeaseStatus{ID: int64(r.Leases[i].ID)} - } - rp := &pb.LeaseLeasesResponse{ - Header: r.ResponseHeader, - Leases: leases, - } - return rp, err -} - -func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - lp.mu.Lock() - select { - case <-lp.ctx.Done(): - lp.mu.Unlock() - return lp.ctx.Err() - default: - lp.wg.Add(1) - } - lp.mu.Unlock() - - ctx, cancel := context.WithCancel(stream.Context()) - lps := leaseProxyStream{ - stream: stream, - lessor: lp.lessor, - keepAliveLeases: make(map[int64]*atomicCounter), - respc: make(chan *pb.LeaseKeepAliveResponse), - ctx: ctx, - cancel: cancel, - } - - errc := make(chan error, 2) - - var lostLeaderC <-chan struct{} - if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { - v := md[rpctypes.MetadataRequireLeaderKey] - if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { - lostLeaderC = lp.leader.lostNotify() - // if leader is known to be lost at creation time, avoid - // letting events through at all - select { - case <-lostLeaderC: - lp.wg.Done() - return rpctypes.ErrNoLeader - default: - } - } - } - stopc := make(chan struct{}, 3) - go func() { - defer func() { stopc <- 
struct{}{} }() - if err := lps.recvLoop(); err != nil { - errc <- err - } - }() - - go func() { - defer func() { stopc <- struct{}{} }() - if err := lps.sendLoop(); err != nil { - errc <- err - } - }() - - // tears down LeaseKeepAlive stream if leader goes down or entire leaseProxy is terminated. - go func() { - defer func() { stopc <- struct{}{} }() - select { - case <-lostLeaderC: - case <-ctx.Done(): - case <-lp.ctx.Done(): - } - }() - - var err error - select { - case <-stopc: - stopc <- struct{}{} - case err = <-errc: - } - cancel() - - // recv/send may only shutdown after function exits; - // this goroutine notifies lease proxy that the stream is through - go func() { - <-stopc - <-stopc - <-stopc - lps.close() - close(errc) - lp.wg.Done() - }() - - select { - case <-lostLeaderC: - return rpctypes.ErrNoLeader - case <-lp.leader.disconnectNotify(): - return status.Error(codes.Canceled, "the client connection is closing") - default: - if err != nil { - return err - } - return ctx.Err() - } -} - -type leaseProxyStream struct { - stream pb.Lease_LeaseKeepAliveServer - - lessor clientv3.Lease - // wg tracks keepAliveLoop goroutines - wg sync.WaitGroup - // mu protects keepAliveLeases - mu sync.RWMutex - // keepAliveLeases tracks how many outstanding keepalive requests which need responses are on a lease. 
- keepAliveLeases map[int64]*atomicCounter - // respc receives lease keepalive responses from etcd backend - respc chan *pb.LeaseKeepAliveResponse - - ctx context.Context - cancel context.CancelFunc -} - -func (lps *leaseProxyStream) recvLoop() error { - for { - rr, err := lps.stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - lps.mu.Lock() - neededResps, ok := lps.keepAliveLeases[rr.ID] - if !ok { - neededResps = &atomicCounter{} - lps.keepAliveLeases[rr.ID] = neededResps - lps.wg.Add(1) - go func() { - defer lps.wg.Done() - if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil { - lps.cancel() - } - }() - } - neededResps.add(1) - lps.mu.Unlock() - } -} - -func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error { - cctx, ccancel := context.WithCancel(lps.ctx) - defer ccancel() - respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID)) - if err != nil { - return err - } - // ticker expires when loop hasn't received keepalive within TTL - var ticker <-chan time.Time - for { - select { - case <-ticker: - lps.mu.Lock() - // if there are outstanding keepAlive reqs at the moment of ticker firing, - // don't close keepAliveLoop(), let it continuing to process the KeepAlive reqs. 
- if neededResps.get() > 0 { - lps.mu.Unlock() - ticker = nil - continue - } - delete(lps.keepAliveLeases, leaseID) - lps.mu.Unlock() - return nil - case rp, ok := <-respc: - if !ok { - lps.mu.Lock() - delete(lps.keepAliveLeases, leaseID) - lps.mu.Unlock() - if neededResps.get() == 0 { - return nil - } - ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID)) - if err != nil { - return err - } - r := &pb.LeaseKeepAliveResponse{ - Header: ttlResp.ResponseHeader, - ID: int64(ttlResp.ID), - TTL: ttlResp.TTL, - } - for neededResps.get() > 0 { - select { - case lps.respc <- r: - neededResps.add(-1) - case <-lps.ctx.Done(): - return nil - } - } - return nil - } - if neededResps.get() == 0 { - continue - } - ticker = time.After(time.Duration(rp.TTL) * time.Second) - r := &pb.LeaseKeepAliveResponse{ - Header: rp.ResponseHeader, - ID: int64(rp.ID), - TTL: rp.TTL, - } - lps.replyToClient(r, neededResps) - } - } -} - -func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) { - timer := time.After(500 * time.Millisecond) - for neededResps.get() > 0 { - select { - case lps.respc <- r: - neededResps.add(-1) - case <-timer: - return - case <-lps.ctx.Done(): - return - } - } -} - -func (lps *leaseProxyStream) sendLoop() error { - for { - select { - case lrp, ok := <-lps.respc: - if !ok { - return nil - } - if err := lps.stream.Send(lrp); err != nil { - return err - } - case <-lps.ctx.Done(): - return lps.ctx.Err() - } - } -} - -func (lps *leaseProxyStream) close() { - lps.cancel() - lps.wg.Wait() - // only close respc channel if all the keepAliveLoop() goroutines have finished - // this ensures those goroutines don't send resp to a closed resp channel - close(lps.respc) -} - -type atomicCounter struct { - counter int64 -} - -func (ac *atomicCounter) add(delta int64) { - atomic.AddInt64(&ac.counter, delta) -} - -func (ac *atomicCounter) get() int64 { - return atomic.LoadInt64(&ac.counter) -} diff --git 
a/server/proxy/grpcproxy/lock.go b/server/proxy/grpcproxy/lock.go deleted file mode 100644 index 9458080db7b..00000000000 --- a/server/proxy/grpcproxy/lock.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2017 The etcd Lockors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" -) - -type lockProxy struct { - lockClient v3lockpb.LockClient -} - -func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer { - return &lockProxy{lockClient: v3lockpb.NewLockClient(client.ActiveConnection())} -} - -func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { - return lp.lockClient.Lock(ctx, req) -} - -func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { - return lp.lockClient.Unlock(ctx, req) -} diff --git a/server/proxy/grpcproxy/maintenance.go b/server/proxy/grpcproxy/maintenance.go deleted file mode 100644 index 50ecf67ffa0..00000000000 --- a/server/proxy/grpcproxy/maintenance.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - "io" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" -) - -type maintenanceProxy struct { - maintenanceClient pb.MaintenanceClient -} - -func NewMaintenanceProxy(c *clientv3.Client) pb.MaintenanceServer { - return &maintenanceProxy{ - maintenanceClient: pb.NewMaintenanceClient(c.ActiveConnection()), - } -} - -func (mp *maintenanceProxy) Defragment(ctx context.Context, dr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - return mp.maintenanceClient.Defragment(ctx, dr) -} - -func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenance_SnapshotServer) error { - ctx, cancel := context.WithCancel(stream.Context()) - defer cancel() - - ctx = withClientAuthToken(ctx, stream.Context()) - - sc, err := mp.maintenanceClient.Snapshot(ctx, sr) - if err != nil { - return err - } - - for { - rr, err := sc.Recv() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - err = stream.Send(rr) - if err != nil { - return err - } - } -} - -func (mp *maintenanceProxy) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - return mp.maintenanceClient.Hash(ctx, r) -} - -func (mp *maintenanceProxy) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { - return mp.maintenanceClient.HashKV(ctx, r) -} - -func (mp *maintenanceProxy) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - return mp.maintenanceClient.Alarm(ctx, r) -} - -func (mp *maintenanceProxy) Status(ctx 
context.Context, r *pb.StatusRequest) (*pb.StatusResponse, error) { - return mp.maintenanceClient.Status(ctx, r) -} - -func (mp *maintenanceProxy) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { - return mp.maintenanceClient.MoveLeader(ctx, r) -} - -func (mp *maintenanceProxy) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { - return mp.maintenanceClient.Downgrade(ctx, r) -} diff --git a/server/proxy/grpcproxy/metrics.go b/server/proxy/grpcproxy/metrics.go deleted file mode 100644 index cc94b7f6e9f..00000000000 --- a/server/proxy/grpcproxy/metrics.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "fmt" - "io" - "math/rand" - "net/http" - "strings" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - - "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" -) - -var ( - watchersCoalescing = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "watchers_coalescing_total", - Help: "Total number of current watchers coalescing", - }) - eventsCoalescing = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "events_coalescing_total", - Help: "Total number of events coalescing", - }) - cacheKeys = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "cache_keys_total", - Help: "Total number of keys/ranges cached", - }) - cacheHits = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "cache_hits_total", - Help: "Total number of cache hits", - }) - cachedMisses = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "cache_misses_total", - Help: "Total number of cache misses", - }) -) - -func init() { - prometheus.MustRegister(watchersCoalescing) - prometheus.MustRegister(eventsCoalescing) - prometheus.MustRegister(cacheKeys) - prometheus.MustRegister(cacheHits) - prometheus.MustRegister(cachedMisses) -} - -// HandleMetrics performs a GET request against etcd endpoint and returns '/metrics'. 
-func HandleMetrics(mux *http.ServeMux, c *http.Client, eps []string) { - // random shuffle endpoints - r := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - if len(eps) > 1 { - eps = shuffleEndpoints(r, eps) - } - - pathMetrics := etcdhttp.PathMetrics - mux.HandleFunc(pathMetrics, func(w http.ResponseWriter, r *http.Request) { - target := fmt.Sprintf("%s%s", eps[0], pathMetrics) - if !strings.HasPrefix(target, "http") { - scheme := "http" - if r.TLS != nil { - scheme = "https" - } - target = fmt.Sprintf("%s://%s", scheme, target) - } - - resp, err := c.Get(target) - if err != nil { - http.Error(w, "Internal server error", http.StatusInternalServerError) - return - } - defer resp.Body.Close() - w.Header().Set("Content-Type", "text/plain; version=0.0.4") - body, _ := io.ReadAll(resp.Body) - fmt.Fprintf(w, "%s", body) - }) -} - -// HandleProxyMetrics registers metrics handler on '/proxy/metrics'. -func HandleProxyMetrics(mux *http.ServeMux) { - mux.Handle(etcdhttp.PathProxyMetrics, promhttp.Handler()) -} - -func shuffleEndpoints(r *rand.Rand, eps []string) []string { - // copied from Go 1.9<= rand.Rand.Perm - n := len(eps) - p := make([]int, n) - for i := 0; i < n; i++ { - j := r.Intn(i + 1) - p[i] = p[j] - p[j] = i - } - neps := make([]string, n) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} diff --git a/server/proxy/grpcproxy/util.go b/server/proxy/grpcproxy/util.go deleted file mode 100644 index 856ac5769e1..00000000000 --- a/server/proxy/grpcproxy/util.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -func getAuthTokenFromClient(ctx context.Context) string { - md, ok := metadata.FromIncomingContext(ctx) - if ok { - ts, ok := md[rpctypes.TokenFieldNameGRPC] - if ok { - return ts[0] - } - } - return "" -} - -func withClientAuthToken(ctx, ctxWithToken context.Context) context.Context { - token := getAuthTokenFromClient(ctxWithToken) - if token != "" { - ctx = context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token) - } - return ctx -} - -type proxyTokenCredential struct { - token string -} - -func (cred *proxyTokenCredential) RequireTransportSecurity() bool { - return false -} - -func (cred *proxyTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { - return map[string]string{ - rpctypes.TokenFieldNameGRPC: cred.token, - }, nil -} - -func AuthUnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - token := getAuthTokenFromClient(ctx) - if token != "" { - tokenCred := &proxyTokenCredential{token} - opts = append(opts, grpc.PerRPCCredentials(tokenCred)) - } - return invoker(ctx, method, req, reply, cc, opts...) 
-} - -func AuthStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - tokenif := ctx.Value(rpctypes.TokenFieldNameGRPC) - if tokenif != nil { - tokenCred := &proxyTokenCredential{tokenif.(string)} - opts = append(opts, grpc.PerRPCCredentials(tokenCred)) - } - return streamer(ctx, desc, cc, method, opts...) -} diff --git a/server/proxy/grpcproxy/watch.go b/server/proxy/grpcproxy/watch.go deleted file mode 100644 index 90eb21d4a40..00000000000 --- a/server/proxy/grpcproxy/watch.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "context" - "sync" - - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" -) - -type watchProxy struct { - cw clientv3.Watcher - ctx context.Context - - leader *leader - - ranges *watchRanges - - // mu protects adding outstanding watch servers through wg. - mu sync.Mutex - - // wg waits until all outstanding watch servers quit. 
- wg sync.WaitGroup - - // kv is used for permission checking - kv clientv3.KV - lg *zap.Logger -} - -func NewWatchProxy(ctx context.Context, lg *zap.Logger, c *clientv3.Client) (pb.WatchServer, <-chan struct{}) { - cctx, cancel := context.WithCancel(ctx) - wp := &watchProxy{ - cw: c.Watcher, - ctx: cctx, - leader: newLeader(cctx, c.Watcher), - - kv: c.KV, // for permission checking - lg: lg, - } - wp.ranges = newWatchRanges(wp) - ch := make(chan struct{}) - go func() { - defer close(ch) - <-wp.leader.stopNotify() - wp.mu.Lock() - select { - case <-wp.ctx.Done(): - case <-wp.leader.disconnectNotify(): - cancel() - } - <-wp.ctx.Done() - wp.mu.Unlock() - wp.wg.Wait() - wp.ranges.stop() - }() - return wp, ch -} - -func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { - wp.mu.Lock() - select { - case <-wp.ctx.Done(): - wp.mu.Unlock() - select { - case <-wp.leader.disconnectNotify(): - return status.Error(codes.Canceled, "the client connection is closing") - default: - return wp.ctx.Err() - } - default: - wp.wg.Add(1) - } - wp.mu.Unlock() - - ctx, cancel := context.WithCancel(stream.Context()) - wps := &watchProxyStream{ - ranges: wp.ranges, - watchers: make(map[int64]*watcher), - stream: stream, - watchCh: make(chan *pb.WatchResponse, 1024), - ctx: ctx, - cancel: cancel, - kv: wp.kv, - lg: wp.lg, - } - - var lostLeaderC <-chan struct{} - if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { - v := md[rpctypes.MetadataRequireLeaderKey] - if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { - lostLeaderC = wp.leader.lostNotify() - // if leader is known to be lost at creation time, avoid - // letting events through at all - select { - case <-lostLeaderC: - wp.wg.Done() - return rpctypes.ErrNoLeader - default: - } - } - } - - // post to stopc => terminate server stream; can't use a waitgroup - // since all goroutines will only terminate after Watch() exits. 
- stopc := make(chan struct{}, 3) - go func() { - defer func() { stopc <- struct{}{} }() - wps.recvLoop() - }() - go func() { - defer func() { stopc <- struct{}{} }() - wps.sendLoop() - }() - // tear down watch if leader goes down or entire watch proxy is terminated - go func() { - defer func() { stopc <- struct{}{} }() - select { - case <-lostLeaderC: - case <-ctx.Done(): - case <-wp.ctx.Done(): - } - }() - - <-stopc - cancel() - - // recv/send may only shutdown after function exits; - // goroutine notifies proxy that stream is through - go func() { - <-stopc - <-stopc - wps.close() - wp.wg.Done() - }() - - select { - case <-lostLeaderC: - return rpctypes.ErrNoLeader - case <-wp.leader.disconnectNotify(): - return status.Error(codes.Canceled, "the client connection is closing") - default: - return wps.ctx.Err() - } -} - -// watchProxyStream forwards etcd watch events to a proxied client stream. -type watchProxyStream struct { - ranges *watchRanges - - // mu protects watchers and nextWatcherID - mu sync.Mutex - // watchers receive events from watch broadcast. - watchers map[int64]*watcher - // nextWatcherID is the id to assign the next watcher on this stream. - nextWatcherID int64 - - stream pb.Watch_WatchServer - - // watchCh receives watch responses from the watchers. - watchCh chan *pb.WatchResponse - - ctx context.Context - cancel context.CancelFunc - - // kv is used for permission checking - kv clientv3.KV - lg *zap.Logger -} - -func (wps *watchProxyStream) close() { - var wg sync.WaitGroup - wps.cancel() - wps.mu.Lock() - wg.Add(len(wps.watchers)) - for _, wpsw := range wps.watchers { - go func(w *watcher) { - wps.ranges.delete(w) - wg.Done() - }(wpsw) - } - wps.watchers = nil - wps.mu.Unlock() - - wg.Wait() - - close(wps.watchCh) -} - -func (wps *watchProxyStream) checkPermissionForWatch(key, rangeEnd []byte) error { - if len(key) == 0 { - // If the length of the key is 0, we need to obtain full range. 
- // look at clientv3.WithPrefix() - key = []byte{0} - rangeEnd = []byte{0} - } - req := &pb.RangeRequest{ - Serializable: true, - Key: key, - RangeEnd: rangeEnd, - CountOnly: true, - Limit: 1, - } - _, err := wps.kv.Do(wps.ctx, RangeRequestToOp(req)) - return err -} - -func (wps *watchProxyStream) recvLoop() error { - for { - req, err := wps.stream.Recv() - if err != nil { - return err - } - switch uv := req.RequestUnion.(type) { - case *pb.WatchRequest_CreateRequest: - cr := uv.CreateRequest - - if err := wps.checkPermissionForWatch(cr.Key, cr.RangeEnd); err != nil { - wps.watchCh <- &pb.WatchResponse{ - Header: &pb.ResponseHeader{}, - WatchId: clientv3.InvalidWatchID, - Created: true, - Canceled: true, - CancelReason: err.Error(), - } - continue - } - - wps.mu.Lock() - w := &watcher{ - wr: watchRange{string(cr.Key), string(cr.RangeEnd)}, - id: wps.nextWatcherID, - wps: wps, - - nextrev: cr.StartRevision, - progress: cr.ProgressNotify, - prevKV: cr.PrevKv, - filters: v3rpc.FiltersFromRequest(cr), - } - if !w.wr.valid() { - w.post(&pb.WatchResponse{WatchId: clientv3.InvalidWatchID, Created: true, Canceled: true}) - wps.mu.Unlock() - continue - } - wps.nextWatcherID++ - w.nextrev = cr.StartRevision - wps.watchers[w.id] = w - wps.ranges.add(w) - wps.mu.Unlock() - wps.lg.Debug("create watcher", zap.String("key", w.wr.key), zap.String("end", w.wr.end), zap.Int64("watcherId", wps.nextWatcherID)) - case *pb.WatchRequest_CancelRequest: - wps.delete(uv.CancelRequest.WatchId) - wps.lg.Debug("cancel watcher", zap.Int64("watcherId", uv.CancelRequest.WatchId)) - default: - // Panic or Fatalf would allow to network clients to crash the serve remotely. 
- wps.lg.Error("not supported request type by gRPC proxy", zap.Stringer("request", req)) - } - } -} - -func (wps *watchProxyStream) sendLoop() { - for { - select { - case wresp, ok := <-wps.watchCh: - if !ok { - return - } - if err := wps.stream.Send(wresp); err != nil { - return - } - case <-wps.ctx.Done(): - return - } - } -} - -func (wps *watchProxyStream) delete(id int64) { - wps.mu.Lock() - defer wps.mu.Unlock() - - w, ok := wps.watchers[id] - if !ok { - return - } - wps.ranges.delete(w) - delete(wps.watchers, id) - resp := &pb.WatchResponse{ - Header: &w.lastHeader, - WatchId: id, - Canceled: true, - } - wps.watchCh <- resp -} diff --git a/server/proxy/grpcproxy/watcher.go b/server/proxy/grpcproxy/watcher.go deleted file mode 100644 index 45d3a5352f5..00000000000 --- a/server/proxy/grpcproxy/watcher.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/storage/mvcc" -) - -type watchRange struct { - key, end string -} - -func (wr *watchRange) valid() bool { - return len(wr.end) == 0 || wr.end > wr.key || (wr.end[0] == 0 && len(wr.end) == 1) -} - -type watcher struct { - // user configuration - - wr watchRange - filters []mvcc.FilterFunc - progress bool - prevKV bool - - // id is the id returned to the client on its watch stream. - id int64 - // nextrev is the minimum expected next event revision. - nextrev int64 - // lastHeader has the last header sent over the stream. - lastHeader pb.ResponseHeader - - // wps is the parent. - wps *watchProxyStream -} - -// send filters out repeated events by discarding revisions older -// than the last one sent over the watch channel. -func (w *watcher) send(wr clientv3.WatchResponse) { - if wr.IsProgressNotify() && !w.progress { - return - } - if w.nextrev > wr.Header.Revision && len(wr.Events) > 0 { - return - } - if w.nextrev == 0 { - // current watch; expect updates following this revision - w.nextrev = wr.Header.Revision + 1 - } - - events := make([]*mvccpb.Event, 0, len(wr.Events)) - - var lastRev int64 - for i := range wr.Events { - ev := (*mvccpb.Event)(wr.Events[i]) - if ev.Kv.ModRevision < w.nextrev { - continue - } else { - // We cannot update w.rev here. - // txn can have multiple events with the same rev. - // If w.nextrev updates here, it would skip events in the same txn. - lastRev = ev.Kv.ModRevision - } - - filtered := false - for _, filter := range w.filters { - if filter(*ev) { - filtered = true - break - } - } - if filtered { - continue - } - - if !w.prevKV { - evCopy := *ev - evCopy.PrevKv = nil - ev = &evCopy - } - events = append(events, ev) - } - - if lastRev >= w.nextrev { - w.nextrev = lastRev + 1 - } - - // all events are filtered out? 
- if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 { - return - } - - w.lastHeader = wr.Header - w.post(&pb.WatchResponse{ - Header: &wr.Header, - Created: wr.Created, - CompactRevision: wr.CompactRevision, - Canceled: wr.Canceled, - WatchId: w.id, - Events: events, - }) -} - -// post puts a watch response on the watcher's proxy stream channel -func (w *watcher) post(wr *pb.WatchResponse) bool { - select { - case w.wps.watchCh <- wr: - case <-time.After(50 * time.Millisecond): - w.wps.cancel() - w.wps.lg.Error("failed to put a watch response on the watcher's proxy stream channel,err is timeout") - return false - } - return true -} diff --git a/server/proxy/tcpproxy/userspace_test.go b/server/proxy/tcpproxy/userspace_test.go deleted file mode 100644 index 892e87eb882..00000000000 --- a/server/proxy/tcpproxy/userspace_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tcpproxy - -import ( - "fmt" - "io" - "net" - "net/http" - "net/http/httptest" - "net/url" - "testing" -) - -func TestUserspaceProxy(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - - want := "hello proxy" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, want) - })) - defer ts.Close() - - u, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } - - var port uint16 - fmt.Sscanf(u.Port(), "%d", &port) - p := TCPProxy{ - Listener: l, - Endpoints: []*net.SRV{{Target: u.Hostname(), Port: port}}, - } - go p.Run() - defer p.Stop() - - u.Host = l.Addr().String() - - res, err := http.Get(u.String()) - if err != nil { - t.Fatal(err) - } - got, gerr := io.ReadAll(res.Body) - res.Body.Close() - if gerr != nil { - t.Fatal(gerr) - } - - if string(got) != want { - t.Errorf("got = %s, want %s", got, want) - } -} - -func TestUserspaceProxyPriority(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - defer l.Close() - - backends := []struct { - Payload string - Priority uint16 - }{ - {"hello proxy 1", 1}, - {"hello proxy 2", 2}, - {"hello proxy 3", 3}, - } - - var eps []*net.SRV - var front *url.URL - for _, b := range backends { - backend := b - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, backend.Payload) - })) - defer ts.Close() - - front, err = url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } - - var port uint16 - fmt.Sscanf(front.Port(), "%d", &port) - - ep := &net.SRV{Target: front.Hostname(), Port: port, Priority: backend.Priority} - eps = append(eps, ep) - } - - p := TCPProxy{ - Listener: l, - Endpoints: eps, - } - go p.Run() - defer p.Stop() - - front.Host = l.Addr().String() - - res, err := http.Get(front.String()) - if err != nil { - t.Fatal(err) - } - got, gerr := io.ReadAll(res.Body) - res.Body.Close() - if gerr != 
nil { - t.Fatal(gerr) - } - - want := "hello proxy 1" - if string(got) != want { - t.Errorf("got = %s, want %s", got, want) - } -} - -func TestFormatAddr(t *testing.T) { - addrs := []struct { - host string - port uint16 - expectedAddr string - }{ - { - "192.168.1.10", - 2379, - "192.168.1.10:2379", - }, - { - "::1", - 2379, - "[::1]:2379", - }, - { - "2001:db8::ff00:42:8329", - 80, - "[2001:db8::ff00:42:8329]:80", - }, - } - - for _, addr := range addrs { - actualAddr := formatAddr(addr.host, addr.port) - if actualAddr != addr.expectedAddr { - t.Errorf("actualAddr: %s, expectedAddr: %s", actualAddr, addr.expectedAddr) - } - } -} diff --git a/server/storage/backend.go b/server/storage/backend.go deleted file mode 100644 index a93fd8a3f11..00000000000 --- a/server/storage/backend.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "fmt" - "os" - "time" - - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/raft/v3/raftpb" - - "go.uber.org/zap" -) - -func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { - bcfg := backend.DefaultBackendConfig(cfg.Logger) - bcfg.Path = cfg.BackendPath() - bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync - if cfg.BackendBatchLimit != 0 { - bcfg.BatchLimit = cfg.BackendBatchLimit - if cfg.Logger != nil { - cfg.Logger.Info("setting backend batch limit", zap.Int("batch limit", cfg.BackendBatchLimit)) - } - } - if cfg.BackendBatchInterval != 0 { - bcfg.BatchInterval = cfg.BackendBatchInterval - if cfg.Logger != nil { - cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval)) - } - } - bcfg.BackendFreelistType = cfg.BackendFreelistType - bcfg.Logger = cfg.Logger - if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { - // permit 10% excess over quota for disarm - bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) - } - bcfg.Mlock = cfg.ExperimentalMemoryMlock - bcfg.Hooks = hooks - return backend.New(bcfg) -} - -// OpenSnapshotBackend renames a snapshot db to the current etcd db and opens it. -func OpenSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks *BackendHooks) (backend.Backend, error) { - snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) - if err != nil { - return nil, fmt.Errorf("failed to find database snapshot file (%v)", err) - } - if err := os.Rename(snapPath, cfg.BackendPath()); err != nil { - return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err) - } - return OpenBackend(cfg, hooks), nil -} - -// OpenBackend returns a backend using the current etcd db. 
-func OpenBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { - fn := cfg.BackendPath() - - now, beOpened := time.Now(), make(chan backend.Backend) - go func() { - beOpened <- newBackend(cfg, hooks) - }() - - select { - case be := <-beOpened: - cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now))) - return be - - case <-time.After(10 * time.Second): - cfg.Logger.Info( - "db file is flocked by another process, or taking too long", - zap.String("path", fn), - zap.Duration("took", time.Since(now)), - ) - } - - return <-beOpened -} - -// RecoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes -// before updating the backend db after persisting raft snapshot to disk, -// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this -// case, replace the db with the snapshot db sent by the leader. -func RecoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks *BackendHooks) (backend.Backend, error) { - consistentIndex := uint64(0) - if beExist { - consistentIndex, _ = schema.ReadConsistentIndex(oldbe.ReadTx()) - } - if snapshot.Metadata.Index <= consistentIndex { - return oldbe, nil - } - oldbe.Close() - return OpenSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks) -} diff --git a/server/storage/backend/backend.go b/server/storage/backend/backend.go deleted file mode 100644 index e7b951ee7e6..00000000000 --- a/server/storage/backend/backend.go +++ /dev/null @@ -1,661 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" - - humanize "github.com/dustin/go-humanize" - "go.uber.org/zap" - - bolt "go.etcd.io/bbolt" -) - -var ( - defaultBatchLimit = 10000 - defaultBatchInterval = 100 * time.Millisecond - - defragLimit = 10000 - - // initialMmapSize is the initial size of the mmapped region. Setting this larger than - // the potential max db size can prevent writer from blocking reader. - // This only works for linux. - initialMmapSize = uint64(10 * 1024 * 1024 * 1024) - - // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. - minSnapshotWarningTimeout = 30 * time.Second -) - -type Backend interface { - // ReadTx returns a read transaction. It is replaced by ConcurrentReadTx in the main data path, see #10523. - ReadTx() ReadTx - BatchTx() BatchTx - // ConcurrentReadTx returns a non-blocking read transaction. - ConcurrentReadTx() ReadTx - - Snapshot() Snapshot - Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error) - // Size returns the current size of the backend physically allocated. - // The backend can hold DB space that is not utilized at the moment, - // since it can conduct pre-allocation or spare unused space for recycling. - // Use SizeInUse() instead for the actual DB size. - Size() int64 - // SizeInUse returns the current size of the backend logically in use. 
- // Since the backend can manage free space in a non-byte unit such as - // number of pages, the returned value can be not exactly accurate in bytes. - SizeInUse() int64 - // OpenReadTxN returns the number of currently open read transactions in the backend. - OpenReadTxN() int64 - Defrag() error - ForceCommit() - Close() error - - // SetTxPostLockInsideApplyHook sets a txPostLockInsideApplyHook. - SetTxPostLockInsideApplyHook(func()) -} - -type Snapshot interface { - // Size gets the size of the snapshot. - Size() int64 - // WriteTo writes the snapshot into the given writer. - WriteTo(w io.Writer) (n int64, err error) - // Close closes the snapshot. - Close() error -} - -type txReadBufferCache struct { - mu sync.Mutex - buf *txReadBuffer - bufVersion uint64 -} - -type backend struct { - // size and commits are used with atomic operations so they must be - // 64-bit aligned, otherwise 32-bit tests will crash - - // size is the number of bytes allocated in the backend - size int64 - // sizeInUse is the number of bytes actually used in the backend - sizeInUse int64 - // commits counts number of commits since start - commits int64 - // openReadTxN is the number of currently open read transactions in the backend - openReadTxN int64 - // mlock prevents backend database file to be swapped - mlock bool - - mu sync.RWMutex - bopts *bolt.Options - db *bolt.DB - - batchInterval time.Duration - batchLimit int - batchTx *batchTxBuffered - - readTx *readTx - // txReadBufferCache mirrors "txReadBuffer" within "readTx" -- readTx.baseReadTx.buf. - // When creating "concurrentReadTx": - // - if the cache is up-to-date, "readTx.baseReadTx.buf" copy can be skipped - // - if the cache is empty or outdated, "readTx.baseReadTx.buf" copy is required - txReadBufferCache txReadBufferCache - - stopc chan struct{} - donec chan struct{} - - hooks Hooks - - // txPostLockInsideApplyHook is called each time right after locking the tx. 
- txPostLockInsideApplyHook func() - - lg *zap.Logger -} - -type BackendConfig struct { - // Path is the file path to the backend file. - Path string - // BatchInterval is the maximum time before flushing the BatchTx. - BatchInterval time.Duration - // BatchLimit is the maximum puts before flushing the BatchTx. - BatchLimit int - // BackendFreelistType is the backend boltdb's freelist type. - BackendFreelistType bolt.FreelistType - // MmapSize is the number of bytes to mmap for the backend. - MmapSize uint64 - // Logger logs backend-side operations. - Logger *zap.Logger - // UnsafeNoFsync disables all uses of fsync. - UnsafeNoFsync bool `json:"unsafe-no-fsync"` - // Mlock prevents backend database file to be swapped - Mlock bool - - // Hooks are getting executed during lifecycle of Backend's transactions. - Hooks Hooks -} - -func DefaultBackendConfig(lg *zap.Logger) BackendConfig { - return BackendConfig{ - BatchInterval: defaultBatchInterval, - BatchLimit: defaultBatchLimit, - MmapSize: initialMmapSize, - Logger: lg, - } -} - -func New(bcfg BackendConfig) Backend { - return newBackend(bcfg) -} - -func NewDefaultBackend(lg *zap.Logger, path string) Backend { - bcfg := DefaultBackendConfig(lg) - bcfg.Path = path - return newBackend(bcfg) -} - -func newBackend(bcfg BackendConfig) *backend { - bopts := &bolt.Options{} - if boltOpenOptions != nil { - *bopts = *boltOpenOptions - } - bopts.InitialMmapSize = bcfg.mmapSize() - bopts.FreelistType = bcfg.BackendFreelistType - bopts.NoSync = bcfg.UnsafeNoFsync - bopts.NoGrowSync = bcfg.UnsafeNoFsync - bopts.Mlock = bcfg.Mlock - - db, err := bolt.Open(bcfg.Path, 0600, bopts) - if err != nil { - bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err)) - } - - // In future, may want to make buffering optional for low-concurrency systems - // or dynamically swap between buffered/non-buffered depending on workload. 
- b := &backend{ - bopts: bopts, - db: db, - - batchInterval: bcfg.BatchInterval, - batchLimit: bcfg.BatchLimit, - mlock: bcfg.Mlock, - - readTx: &readTx{ - baseReadTx: baseReadTx{ - buf: txReadBuffer{ - txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)}, - bufVersion: 0, - }, - buckets: make(map[BucketID]*bolt.Bucket), - txWg: new(sync.WaitGroup), - txMu: new(sync.RWMutex), - }, - }, - txReadBufferCache: txReadBufferCache{ - mu: sync.Mutex{}, - bufVersion: 0, - buf: nil, - }, - - stopc: make(chan struct{}), - donec: make(chan struct{}), - - lg: bcfg.Logger, - } - - b.batchTx = newBatchTxBuffered(b) - // We set it after newBatchTxBuffered to skip the 'empty' commit. - b.hooks = bcfg.Hooks - - go b.run() - return b -} - -// BatchTx returns the current batch tx in coalescer. The tx can be used for read and -// write operations. The write result can be retrieved within the same tx immediately. -// The write result is isolated with other txs until the current one get committed. -func (b *backend) BatchTx() BatchTx { - return b.batchTx -} - -func (b *backend) SetTxPostLockInsideApplyHook(hook func()) { - // It needs to lock the batchTx, because the periodic commit - // may be accessing the txPostLockInsideApplyHook at the moment. - b.batchTx.lock() - defer b.batchTx.Unlock() - b.txPostLockInsideApplyHook = hook -} - -func (b *backend) ReadTx() ReadTx { return b.readTx } - -// ConcurrentReadTx creates and returns a new ReadTx, which: -// A) creates and keeps a copy of backend.readTx.txReadBuffer, -// B) references the boltdb read Tx (and its bucket cache) of current batch interval. -func (b *backend) ConcurrentReadTx() ReadTx { - b.readTx.RLock() - defer b.readTx.RUnlock() - // prevent boltdb read Tx from been rolled back until store read Tx is done. Needs to be called when holding readTx.RLock(). - b.readTx.txWg.Add(1) - - // TODO: might want to copy the read buffer lazily - create copy when A) end of a write transaction B) end of a batch interval. 
- - // inspect/update cache recency iff there's no ongoing update to the cache - // this falls through if there's no cache update - - // by this line, "ConcurrentReadTx" code path is already protected against concurrent "writeback" operations - // which requires write lock to update "readTx.baseReadTx.buf". - // Which means setting "buf *txReadBuffer" with "readTx.buf.unsafeCopy()" is guaranteed to be up-to-date, - // whereas "txReadBufferCache.buf" may be stale from concurrent "writeback" operations. - // We only update "txReadBufferCache.buf" if we know "buf *txReadBuffer" is up-to-date. - // The update to "txReadBufferCache.buf" will benefit the following "ConcurrentReadTx" creation - // by avoiding copying "readTx.baseReadTx.buf". - b.txReadBufferCache.mu.Lock() - - curCache := b.txReadBufferCache.buf - curCacheVer := b.txReadBufferCache.bufVersion - curBufVer := b.readTx.buf.bufVersion - - isEmptyCache := curCache == nil - isStaleCache := curCacheVer != curBufVer - - var buf *txReadBuffer - switch { - case isEmptyCache: - // perform safe copy of buffer while holding "b.txReadBufferCache.mu.Lock" - // this is only supposed to run once so there won't be much overhead - curBuf := b.readTx.buf.unsafeCopy() - buf = &curBuf - case isStaleCache: - // to maximize the concurrency, try unsafe copy of buffer - // release the lock while copying buffer -- cache may become stale again and - // get overwritten by someone else. 
- // therefore, we need to check the readTx buffer version again - b.txReadBufferCache.mu.Unlock() - curBuf := b.readTx.buf.unsafeCopy() - b.txReadBufferCache.mu.Lock() - buf = &curBuf - default: - // neither empty nor stale cache, just use the current buffer - buf = curCache - } - // txReadBufferCache.bufVersion can be modified when we doing an unsafeCopy() - // as a result, curCacheVer could be no longer the same as - // txReadBufferCache.bufVersion - // if !isEmptyCache && curCacheVer != b.txReadBufferCache.bufVersion - // then the cache became stale while copying "readTx.baseReadTx.buf". - // It is safe to not update "txReadBufferCache.buf", because the next following - // "ConcurrentReadTx" creation will trigger a new "readTx.baseReadTx.buf" copy - // and "buf" is still used for the current "concurrentReadTx.baseReadTx.buf". - if isEmptyCache || curCacheVer == b.txReadBufferCache.bufVersion { - // continue if the cache is never set or no one has modified the cache - b.txReadBufferCache.buf = buf - b.txReadBufferCache.bufVersion = curBufVer - } - - b.txReadBufferCache.mu.Unlock() - - // concurrentReadTx is not supposed to write to its txReadBuffer - return &concurrentReadTx{ - baseReadTx: baseReadTx{ - buf: *buf, - txMu: b.readTx.txMu, - tx: b.readTx.tx, - buckets: b.readTx.buckets, - txWg: b.readTx.txWg, - }, - } -} - -// ForceCommit forces the current batching tx to commit. -func (b *backend) ForceCommit() { - b.batchTx.Commit() -} - -func (b *backend) Snapshot() Snapshot { - b.batchTx.Commit() - - b.mu.RLock() - defer b.mu.RUnlock() - tx, err := b.db.Begin(false) - if err != nil { - b.lg.Fatal("failed to begin tx", zap.Error(err)) - } - - stopc, donec := make(chan struct{}), make(chan struct{}) - dbBytes := tx.Size() - go func() { - defer close(donec) - // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection - // assuming a min tcp throughput of 100MB/s. 
- var sendRateBytes int64 = 100 * 1024 * 1024 - warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second))) - if warningTimeout < minSnapshotWarningTimeout { - warningTimeout = minSnapshotWarningTimeout - } - start := time.Now() - ticker := time.NewTicker(warningTimeout) - defer ticker.Stop() - for { - select { - case <-ticker.C: - b.lg.Warn( - "snapshotting taking too long to transfer", - zap.Duration("taking", time.Since(start)), - zap.Int64("bytes", dbBytes), - zap.String("size", humanize.Bytes(uint64(dbBytes))), - ) - - case <-stopc: - snapshotTransferSec.Observe(time.Since(start).Seconds()) - return - } - } - }() - - return &snapshot{tx, stopc, donec} -} - -func (b *backend) Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error) { - h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - - b.mu.RLock() - defer b.mu.RUnlock() - err := b.db.View(func(tx *bolt.Tx) error { - c := tx.Cursor() - for next, _ := c.First(); next != nil; next, _ = c.Next() { - b := tx.Bucket(next) - if b == nil { - return fmt.Errorf("cannot get hash of bucket %s", string(next)) - } - h.Write(next) - b.ForEach(func(k, v []byte) error { - if ignores != nil && !ignores(next, k) { - h.Write(k) - h.Write(v) - } - return nil - }) - } - return nil - }) - - if err != nil { - return 0, err - } - - return h.Sum32(), nil -} - -func (b *backend) Size() int64 { - return atomic.LoadInt64(&b.size) -} - -func (b *backend) SizeInUse() int64 { - return atomic.LoadInt64(&b.sizeInUse) -} - -func (b *backend) run() { - defer close(b.donec) - t := time.NewTimer(b.batchInterval) - defer t.Stop() - for { - select { - case <-t.C: - case <-b.stopc: - b.batchTx.CommitAndStop() - return - } - if b.batchTx.safePending() != 0 { - b.batchTx.Commit() - } - t.Reset(b.batchInterval) - } -} - -func (b *backend) Close() error { - close(b.stopc) - <-b.donec - b.mu.Lock() - defer b.mu.Unlock() - return b.db.Close() -} - -// Commits returns total number of commits since 
start -func (b *backend) Commits() int64 { - return atomic.LoadInt64(&b.commits) -} - -func (b *backend) Defrag() error { - return b.defrag() -} - -func (b *backend) defrag() error { - now := time.Now() - isDefragActive.Set(1) - defer isDefragActive.Set(0) - - // TODO: make this non-blocking? - // lock batchTx to ensure nobody is using previous tx, and then - // close previous ongoing tx. - b.batchTx.LockOutsideApply() - defer b.batchTx.Unlock() - - // lock database after lock tx to avoid deadlock. - b.mu.Lock() - defer b.mu.Unlock() - - // block concurrent read requests while resetting tx - b.readTx.Lock() - defer b.readTx.Unlock() - - b.batchTx.unsafeCommit(true) - - b.batchTx.tx = nil - - // Create a temporary file to ensure we start with a clean slate. - // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup. - dir := filepath.Dir(b.db.Path()) - temp, err := os.CreateTemp(dir, "db.tmp.*") - if err != nil { - return err - } - options := bolt.Options{} - if boltOpenOptions != nil { - options = *boltOpenOptions - } - options.OpenFile = func(_ string, _ int, _ os.FileMode) (file *os.File, err error) { - return temp, nil - } - // Don't load tmp db into memory regardless of opening options - options.Mlock = false - tdbp := temp.Name() - tmpdb, err := bolt.Open(tdbp, 0600, &options) - if err != nil { - return err - } - - dbp := b.db.Path() - size1, sizeInUse1 := b.Size(), b.SizeInUse() - if b.lg != nil { - b.lg.Info( - "defragmenting", - zap.String("path", dbp), - zap.Int64("current-db-size-bytes", size1), - zap.String("current-db-size", humanize.Bytes(uint64(size1))), - zap.Int64("current-db-size-in-use-bytes", sizeInUse1), - zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))), - ) - } - // gofail: var defragBeforeCopy struct{} - err = defragdb(b.db, tmpdb, defragLimit) - if err != nil { - tmpdb.Close() - if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil { - b.lg.Error("failed to remove db.tmp after defragmentation 
completed", zap.Error(rmErr)) - } - return err - } - - err = b.db.Close() - if err != nil { - b.lg.Fatal("failed to close database", zap.Error(err)) - } - err = tmpdb.Close() - if err != nil { - b.lg.Fatal("failed to close tmp database", zap.Error(err)) - } - // gofail: var defragBeforeRename struct{} - err = os.Rename(tdbp, dbp) - if err != nil { - b.lg.Fatal("failed to rename tmp database", zap.Error(err)) - } - - b.db, err = bolt.Open(dbp, 0600, b.bopts) - if err != nil { - b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err)) - } - b.batchTx.tx = b.unsafeBegin(true) - - b.readTx.reset() - b.readTx.tx = b.unsafeBegin(false) - - size := b.readTx.tx.Size() - db := b.readTx.tx.DB() - atomic.StoreInt64(&b.size, size) - atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize))) - - took := time.Since(now) - defragSec.Observe(took.Seconds()) - - size2, sizeInUse2 := b.Size(), b.SizeInUse() - if b.lg != nil { - b.lg.Info( - "finished defragmenting directory", - zap.String("path", dbp), - zap.Int64("current-db-size-bytes-diff", size2-size1), - zap.Int64("current-db-size-bytes", size2), - zap.String("current-db-size", humanize.Bytes(uint64(size2))), - zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1), - zap.Int64("current-db-size-in-use-bytes", sizeInUse2), - zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))), - zap.Duration("took", took), - ) - } - return nil -} - -func defragdb(odb, tmpdb *bolt.DB, limit int) error { - // open a tx on tmpdb for writes - tmptx, err := tmpdb.Begin(true) - if err != nil { - return err - } - defer func() { - if err != nil { - tmptx.Rollback() - } - }() - - // open a tx on old db for read - tx, err := odb.Begin(false) - if err != nil { - return err - } - defer tx.Rollback() - - c := tx.Cursor() - - count := 0 - for next, _ := c.First(); next != nil; next, _ = c.Next() { - b := tx.Bucket(next) - if b == nil { - return fmt.Errorf("backend: 
cannot defrag bucket %s", string(next)) - } - - tmpb, berr := tmptx.CreateBucketIfNotExists(next) - if berr != nil { - return berr - } - tmpb.FillPercent = 0.9 // for bucket2seq write in for each - - if err = b.ForEach(func(k, v []byte) error { - count++ - if count > limit { - err = tmptx.Commit() - if err != nil { - return err - } - tmptx, err = tmpdb.Begin(true) - if err != nil { - return err - } - tmpb = tmptx.Bucket(next) - tmpb.FillPercent = 0.9 // for bucket2seq write in for each - - count = 0 - } - return tmpb.Put(k, v) - }); err != nil { - return err - } - } - - return tmptx.Commit() -} - -func (b *backend) begin(write bool) *bolt.Tx { - b.mu.RLock() - tx := b.unsafeBegin(write) - b.mu.RUnlock() - - size := tx.Size() - db := tx.DB() - stats := db.Stats() - atomic.StoreInt64(&b.size, size) - atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize))) - atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN)) - - return tx -} - -func (b *backend) unsafeBegin(write bool) *bolt.Tx { - // gofail: var beforeStartDBTxn struct{} - tx, err := b.db.Begin(write) - // gofail: var afterStartDBTxn struct{} - if err != nil { - b.lg.Fatal("failed to begin tx", zap.Error(err)) - } - return tx -} - -func (b *backend) OpenReadTxN() int64 { - return atomic.LoadInt64(&b.openReadTxN) -} - -type snapshot struct { - *bolt.Tx - stopc chan struct{} - donec chan struct{} -} - -func (s *snapshot) Close() error { - close(s.stopc) - <-s.donec - return s.Tx.Rollback() -} diff --git a/server/storage/backend/backend_bench_test.go b/server/storage/backend/backend_bench_test.go deleted file mode 100644 index 204d7de6157..00000000000 --- a/server/storage/backend/backend_bench_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend_test - -import ( - "crypto/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -func BenchmarkBackendPut(b *testing.B) { - backend, _ := betesting.NewTmpBackend(b, 100*time.Millisecond, 10000) - defer betesting.Close(b, backend) - - // prepare keys - keys := make([][]byte, b.N) - for i := 0; i < b.N; i++ { - keys[i] = make([]byte, 64) - _, err := rand.Read(keys[i]) - assert.NoError(b, err) - } - value := make([]byte, 128) - _, err := rand.Read(value) - assert.NoError(b, err) - - batchTx := backend.BatchTx() - - batchTx.Lock() - batchTx.UnsafeCreateBucket(schema.Test) - batchTx.Unlock() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - batchTx.Lock() - batchTx.UnsafePut(schema.Test, keys[i], value) - batchTx.Unlock() - } -} diff --git a/server/storage/backend/backend_test.go b/server/storage/backend/backend_test.go deleted file mode 100644 index 0b0b6b8ec9b..00000000000 --- a/server/storage/backend/backend_test.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend_test - -import ( - "fmt" - "os" - "reflect" - "testing" - "time" - - "github.com/stretchr/testify/assert" - bolt "go.etcd.io/bbolt" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.uber.org/zap/zaptest" -) - -func TestBackendClose(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - - // check close could work - done := make(chan struct{}, 1) - go func() { - err := b.Close() - if err != nil { - t.Errorf("close error = %v, want nil", err) - } - done <- struct{}{} - }() - select { - case <-done: - case <-time.After(10 * time.Second): - t.Errorf("failed to close database in 10s") - } -} - -func TestBackendSnapshot(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Test) - tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar")) - tx.Unlock() - b.ForceCommit() - - // write snapshot to a new file - f, err := os.CreateTemp(t.TempDir(), "etcd_backend_test") - if err != nil { - t.Fatal(err) - } - snap := b.Snapshot() - defer func() { assert.NoError(t, snap.Close()) }() - if _, err := snap.WriteTo(f); err != nil { - t.Fatal(err) - } - assert.NoError(t, f.Close()) - - // bootstrap new backend from the snapshot - bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t)) - bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = f.Name(), time.Hour, 10000 - nb := backend.New(bcfg) - defer betesting.Close(t, 
nb) - - newTx := nb.BatchTx() - newTx.Lock() - ks, _ := newTx.UnsafeRange(schema.Test, []byte("foo"), []byte("goo"), 0) - if len(ks) != 1 { - t.Errorf("len(kvs) = %d, want 1", len(ks)) - } - newTx.Unlock() -} - -func TestBackendBatchIntervalCommit(t *testing.T) { - // start backend with super short batch interval so - // we do not need to wait long before commit to happen. - b, _ := betesting.NewTmpBackend(t, time.Nanosecond, 10000) - defer betesting.Close(t, b) - - pc := backend.CommitsForTest(b) - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Test) - tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar")) - tx.Unlock() - - for i := 0; i < 10; i++ { - if backend.CommitsForTest(b) >= pc+1 { - break - } - time.Sleep(time.Duration(i*100) * time.Millisecond) - } - - // check whether put happens via db view - assert.NoError(t, backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte("test")) - if bucket == nil { - t.Errorf("bucket test does not exit") - return nil - } - v := bucket.Get([]byte("foo")) - if v == nil { - t.Errorf("foo key failed to written in backend") - } - return nil - })) -} - -func TestBackendDefrag(t *testing.T) { - bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t)) - // Make sure we change BackendFreelistType - // The goal is to verify that we restore config option after defrag. 
- if bcfg.BackendFreelistType == bolt.FreelistMapType { - bcfg.BackendFreelistType = bolt.FreelistArrayType - } else { - bcfg.BackendFreelistType = bolt.FreelistMapType - } - - b, _ := betesting.NewTmpBackendFromCfg(t, bcfg) - - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Test) - for i := 0; i < backend.DefragLimitForTest()+100; i++ { - tx.UnsafePut(schema.Test, []byte(fmt.Sprintf("foo_%d", i)), []byte("bar")) - } - tx.Unlock() - b.ForceCommit() - - // remove some keys to ensure the disk space will be reclaimed after defrag - tx = b.BatchTx() - tx.Lock() - for i := 0; i < 50; i++ { - tx.UnsafeDelete(schema.Test, []byte(fmt.Sprintf("foo_%d", i))) - } - tx.Unlock() - b.ForceCommit() - - size := b.Size() - - // shrink and check hash - oh, err := b.Hash(nil) - if err != nil { - t.Fatal(err) - } - - err = b.Defrag() - if err != nil { - t.Fatal(err) - } - - nh, err := b.Hash(nil) - if err != nil { - t.Fatal(err) - } - if oh != nh { - t.Errorf("hash = %v, want %v", nh, oh) - } - - nsize := b.Size() - if nsize >= size { - t.Errorf("new size = %v, want < %d", nsize, size) - } - db := backend.DbFromBackendForTest(b) - if db.FreelistType != bcfg.BackendFreelistType { - t.Errorf("db FreelistType = [%v], want [%v]", db.FreelistType, bcfg.BackendFreelistType) - } - - // try put more keys after shrink. - tx = b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Test) - tx.UnsafePut(schema.Test, []byte("more"), []byte("bar")) - tx.Unlock() - b.ForceCommit() -} - -// TestBackendWriteback ensures writes are stored to the read txn on write txn unlock. 
-func TestBackendWriteback(t *testing.T) { - b, _ := betesting.NewDefaultTmpBackend(t) - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Key) - tx.UnsafePut(schema.Key, []byte("abc"), []byte("bar")) - tx.UnsafePut(schema.Key, []byte("def"), []byte("baz")) - tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("1")) - tx.Unlock() - - // overwrites should be propagated too - tx.Lock() - tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("2")) - tx.Unlock() - - keys := []struct { - key []byte - end []byte - limit int64 - - wkey [][]byte - wval [][]byte - }{ - { - key: []byte("abc"), - end: nil, - - wkey: [][]byte{[]byte("abc")}, - wval: [][]byte{[]byte("bar")}, - }, - { - key: []byte("abc"), - end: []byte("def"), - - wkey: [][]byte{[]byte("abc")}, - wval: [][]byte{[]byte("bar")}, - }, - { - key: []byte("abc"), - end: []byte("deg"), - - wkey: [][]byte{[]byte("abc"), []byte("def")}, - wval: [][]byte{[]byte("bar"), []byte("baz")}, - }, - { - key: []byte("abc"), - end: []byte("\xff"), - limit: 1, - - wkey: [][]byte{[]byte("abc")}, - wval: [][]byte{[]byte("bar")}, - }, - { - key: []byte("abc"), - end: []byte("\xff"), - - wkey: [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")}, - wval: [][]byte{[]byte("bar"), []byte("baz"), []byte("2")}, - }, - } - rtx := b.ReadTx() - for i, tt := range keys { - func() { - rtx.RLock() - defer rtx.RUnlock() - k, v := rtx.UnsafeRange(schema.Key, tt.key, tt.end, tt.limit) - if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) { - t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v) - } - }() - } -} - -// TestConcurrentReadTx ensures that current read transaction can see all prior writes stored in read buffer -func TestConcurrentReadTx(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, b) - - wtx1 := b.BatchTx() - wtx1.Lock() - wtx1.UnsafeCreateBucket(schema.Key) - wtx1.UnsafePut(schema.Key, 
[]byte("abc"), []byte("ABC")) - wtx1.UnsafePut(schema.Key, []byte("overwrite"), []byte("1")) - wtx1.Unlock() - - wtx2 := b.BatchTx() - wtx2.Lock() - wtx2.UnsafePut(schema.Key, []byte("def"), []byte("DEF")) - wtx2.UnsafePut(schema.Key, []byte("overwrite"), []byte("2")) - wtx2.Unlock() - - rtx := b.ConcurrentReadTx() - rtx.RLock() // no-op - k, v := rtx.UnsafeRange(schema.Key, []byte("abc"), []byte("\xff"), 0) - rtx.RUnlock() - wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")} - wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")} - if !reflect.DeepEqual(wKey, k) || !reflect.DeepEqual(wVal, v) { - t.Errorf("want k=%+v, v=%+v; got k=%+v, v=%+v", wKey, wVal, k, v) - } -} - -// TestBackendWritebackForEach checks that partially written / buffered -// data is visited in the same order as fully committed data. -func TestBackendWritebackForEach(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Key) - for i := 0; i < 5; i++ { - k := []byte(fmt.Sprintf("%04d", i)) - tx.UnsafePut(schema.Key, k, []byte("bar")) - } - tx.Unlock() - - // writeback - b.ForceCommit() - - tx.Lock() - tx.UnsafeCreateBucket(schema.Key) - for i := 5; i < 20; i++ { - k := []byte(fmt.Sprintf("%04d", i)) - tx.UnsafePut(schema.Key, k, []byte("bar")) - } - tx.Unlock() - - seq := "" - getSeq := func(k, v []byte) error { - seq += string(k) - return nil - } - rtx := b.ReadTx() - rtx.RLock() - assert.NoError(t, rtx.UnsafeForEach(schema.Key, getSeq)) - rtx.RUnlock() - - partialSeq := seq - - seq = "" - b.ForceCommit() - - tx.Lock() - assert.NoError(t, tx.UnsafeForEach(schema.Key, getSeq)) - tx.Unlock() - - if seq != partialSeq { - t.Fatalf("expected %q, got %q", seq, partialSeq) - } -} diff --git a/server/storage/backend/batch_tx.go b/server/storage/backend/batch_tx.go deleted file mode 100644 index 0d12a0868dd..00000000000 --- a/server/storage/backend/batch_tx.go +++ 
/dev/null @@ -1,375 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "bytes" - "math" - "sync" - "sync/atomic" - "time" - - "go.uber.org/zap" - - bolt "go.etcd.io/bbolt" -) - -type BucketID int - -type Bucket interface { - // ID returns a unique identifier of a bucket. - // The id must NOT be persisted and can be used as lightweight identificator - // in the in-memory maps. - ID() BucketID - Name() []byte - // String implements Stringer (human readable name). - String() string - - // IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys; - // overwrites on a bucket should only fetch with limit=1, but safeRangeBucket - // is known to never overwrite any key so range is safe. - IsSafeRangeBucket() bool -} - -type BatchTx interface { - ReadTx - UnsafeCreateBucket(bucket Bucket) - UnsafeDeleteBucket(bucket Bucket) - UnsafePut(bucket Bucket, key []byte, value []byte) - UnsafeSeqPut(bucket Bucket, key []byte, value []byte) - UnsafeDelete(bucket Bucket, key []byte) - // Commit commits a previous tx and begins a new writable one. - Commit() - // CommitAndStop commits the previous tx and does not create a new one. - CommitAndStop() - LockInsideApply() - LockOutsideApply() -} - -type batchTx struct { - sync.Mutex - tx *bolt.Tx - backend *backend - - pending int -} - -// Lock is supposed to be called only by the unit test. 
-func (t *batchTx) Lock() { - ValidateCalledInsideUnittest(t.backend.lg) - t.lock() -} - -func (t *batchTx) lock() { - t.Mutex.Lock() -} - -func (t *batchTx) LockInsideApply() { - t.lock() - if t.backend.txPostLockInsideApplyHook != nil { - // The callers of some methods (i.e., (*RaftCluster).AddMember) - // can be coming from both InsideApply and OutsideApply, but the - // callers from OutsideApply will have a nil txPostLockInsideApplyHook. - // So we should check the txPostLockInsideApplyHook before validating - // the callstack. - ValidateCalledInsideApply(t.backend.lg) - t.backend.txPostLockInsideApplyHook() - } -} - -func (t *batchTx) LockOutsideApply() { - ValidateCalledOutSideApply(t.backend.lg) - t.lock() -} - -func (t *batchTx) Unlock() { - if t.pending >= t.backend.batchLimit { - t.commit(false) - } - t.Mutex.Unlock() -} - -// BatchTx interface embeds ReadTx interface. But RLock() and RUnlock() do not -// have appropriate semantics in BatchTx interface. Therefore should not be called. -// TODO: might want to decouple ReadTx and BatchTx - -func (t *batchTx) RLock() { - panic("unexpected RLock") -} - -func (t *batchTx) RUnlock() { - panic("unexpected RUnlock") -} - -func (t *batchTx) UnsafeCreateBucket(bucket Bucket) { - _, err := t.tx.CreateBucket(bucket.Name()) - if err != nil && err != bolt.ErrBucketExists { - t.backend.lg.Fatal( - "failed to create a bucket", - zap.Stringer("bucket-name", bucket), - zap.Error(err), - ) - } - t.pending++ -} - -func (t *batchTx) UnsafeDeleteBucket(bucket Bucket) { - err := t.tx.DeleteBucket(bucket.Name()) - if err != nil && err != bolt.ErrBucketNotFound { - t.backend.lg.Fatal( - "failed to delete a bucket", - zap.Stringer("bucket-name", bucket), - zap.Error(err), - ) - } - t.pending++ -} - -// UnsafePut must be called holding the lock on the tx. -func (t *batchTx) UnsafePut(bucket Bucket, key []byte, value []byte) { - t.unsafePut(bucket, key, value, false) -} - -// UnsafeSeqPut must be called holding the lock on the tx. 
-func (t *batchTx) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) { - t.unsafePut(bucket, key, value, true) -} - -func (t *batchTx) unsafePut(bucketType Bucket, key []byte, value []byte, seq bool) { - bucket := t.tx.Bucket(bucketType.Name()) - if bucket == nil { - t.backend.lg.Fatal( - "failed to find a bucket", - zap.Stringer("bucket-name", bucketType), - zap.Stack("stack"), - ) - } - if seq { - // it is useful to increase fill percent when the workloads are mostly append-only. - // this can delay the page split and reduce space usage. - bucket.FillPercent = 0.9 - } - if err := bucket.Put(key, value); err != nil { - t.backend.lg.Fatal( - "failed to write to a bucket", - zap.Stringer("bucket-name", bucketType), - zap.Error(err), - ) - } - t.pending++ -} - -// UnsafeRange must be called holding the lock on the tx. -func (t *batchTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - bucket := t.tx.Bucket(bucketType.Name()) - if bucket == nil { - t.backend.lg.Fatal( - "failed to find a bucket", - zap.Stringer("bucket-name", bucketType), - zap.Stack("stack"), - ) - } - return unsafeRange(bucket.Cursor(), key, endKey, limit) -} - -func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) { - if limit <= 0 { - limit = math.MaxInt64 - } - var isMatch func(b []byte) bool - if len(endKey) > 0 { - isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 } - } else { - isMatch = func(b []byte) bool { return bytes.Equal(b, key) } - limit = 1 - } - - for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() { - vs = append(vs, cv) - keys = append(keys, ck) - if limit == int64(len(keys)) { - break - } - } - return keys, vs -} - -// UnsafeDelete must be called holding the lock on the tx. 
-func (t *batchTx) UnsafeDelete(bucketType Bucket, key []byte) { - bucket := t.tx.Bucket(bucketType.Name()) - if bucket == nil { - t.backend.lg.Fatal( - "failed to find a bucket", - zap.Stringer("bucket-name", bucketType), - zap.Stack("stack"), - ) - } - err := bucket.Delete(key) - if err != nil { - t.backend.lg.Fatal( - "failed to delete a key", - zap.Stringer("bucket-name", bucketType), - zap.Error(err), - ) - } - t.pending++ -} - -// UnsafeForEach must be called holding the lock on the tx. -func (t *batchTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error { - return unsafeForEach(t.tx, bucket, visitor) -} - -func unsafeForEach(tx *bolt.Tx, bucket Bucket, visitor func(k, v []byte) error) error { - if b := tx.Bucket(bucket.Name()); b != nil { - return b.ForEach(visitor) - } - return nil -} - -// Commit commits a previous tx and begins a new writable one. -func (t *batchTx) Commit() { - t.lock() - t.commit(false) - t.Unlock() -} - -// CommitAndStop commits the previous tx and does not create a new one. 
-func (t *batchTx) CommitAndStop() { - t.lock() - t.commit(true) - t.Unlock() -} - -func (t *batchTx) safePending() int { - t.Mutex.Lock() - defer t.Mutex.Unlock() - return t.pending -} - -func (t *batchTx) commit(stop bool) { - // commit the last tx - if t.tx != nil { - if t.pending == 0 && !stop { - return - } - - start := time.Now() - - // gofail: var beforeCommit struct{} - err := t.tx.Commit() - // gofail: var afterCommit struct{} - - rebalanceSec.Observe(t.tx.Stats().RebalanceTime.Seconds()) - spillSec.Observe(t.tx.Stats().SpillTime.Seconds()) - writeSec.Observe(t.tx.Stats().WriteTime.Seconds()) - commitSec.Observe(time.Since(start).Seconds()) - atomic.AddInt64(&t.backend.commits, 1) - - t.pending = 0 - if err != nil { - t.backend.lg.Fatal("failed to commit tx", zap.Error(err)) - } - } - if !stop { - t.tx = t.backend.begin(true) - } -} - -type batchTxBuffered struct { - batchTx - buf txWriteBuffer -} - -func newBatchTxBuffered(backend *backend) *batchTxBuffered { - tx := &batchTxBuffered{ - batchTx: batchTx{backend: backend}, - buf: txWriteBuffer{ - txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)}, - bucket2seq: make(map[BucketID]bool), - }, - } - tx.Commit() - return tx -} - -func (t *batchTxBuffered) Unlock() { - if t.pending != 0 { - t.backend.readTx.Lock() // blocks txReadBuffer for writing. 
- // gofail: var beforeWritebackBuf struct{} - t.buf.writeback(&t.backend.readTx.buf) - // gofail: var afterWritebackBuf struct{} - t.backend.readTx.Unlock() - if t.pending >= t.backend.batchLimit { - t.commit(false) - } - } - t.batchTx.Unlock() -} - -func (t *batchTxBuffered) Commit() { - t.lock() - t.commit(false) - t.Unlock() -} - -func (t *batchTxBuffered) CommitAndStop() { - t.lock() - t.commit(true) - t.Unlock() -} - -func (t *batchTxBuffered) commit(stop bool) { - // all read txs must be closed to acquire boltdb commit rwlock - t.backend.readTx.Lock() - t.unsafeCommit(stop) - t.backend.readTx.Unlock() -} - -func (t *batchTxBuffered) unsafeCommit(stop bool) { - if t.backend.hooks != nil { - // gofail: var commitBeforePreCommitHook struct{} - t.backend.hooks.OnPreCommitUnsafe(t) - // gofail: var commitAfterPreCommitHook struct{} - } - - if t.backend.readTx.tx != nil { - // wait all store read transactions using the current boltdb tx to finish, - // then close the boltdb tx - go func(tx *bolt.Tx, wg *sync.WaitGroup) { - wg.Wait() - if err := tx.Rollback(); err != nil { - t.backend.lg.Fatal("failed to rollback tx", zap.Error(err)) - } - }(t.backend.readTx.tx, t.backend.readTx.txWg) - t.backend.readTx.reset() - } - - t.batchTx.commit(stop) - - if !stop { - t.backend.readTx.tx = t.backend.begin(false) - } -} - -func (t *batchTxBuffered) UnsafePut(bucket Bucket, key []byte, value []byte) { - t.batchTx.UnsafePut(bucket, key, value) - t.buf.put(bucket, key, value) -} - -func (t *batchTxBuffered) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) { - t.batchTx.UnsafeSeqPut(bucket, key, value) - t.buf.putSeq(bucket, key, value) -} diff --git a/server/storage/backend/batch_tx_test.go b/server/storage/backend/batch_tx_test.go deleted file mode 100644 index 6fd2bbae631..00000000000 --- a/server/storage/backend/batch_tx_test.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// 
you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend_test - -import ( - "reflect" - "testing" - "time" - - bolt "go.etcd.io/bbolt" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -func TestBatchTxPut(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, b) - - tx := b.BatchTx() - - tx.Lock() - - // create bucket - tx.UnsafeCreateBucket(schema.Test) - - // put - v := []byte("bar") - tx.UnsafePut(schema.Test, []byte("foo"), v) - - tx.Unlock() - - // check put result before and after tx is committed - for k := 0; k < 2; k++ { - tx.Lock() - _, gv := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0) - tx.Unlock() - if !reflect.DeepEqual(gv[0], v) { - t.Errorf("v = %s, want %s", string(gv[0]), string(v)) - } - tx.Commit() - } -} - -func TestBatchTxRange(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - defer tx.Unlock() - - tx.UnsafeCreateBucket(schema.Test) - // put keys - allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")} - allVals := [][]byte{[]byte("bar"), []byte("bar1"), []byte("bar2")} - for i := range allKeys { - tx.UnsafePut(schema.Test, allKeys[i], allVals[i]) - } - - tests := []struct { - key []byte - endKey []byte - limit int64 - - wkeys [][]byte - wvals [][]byte - }{ - // single key - { - []byte("foo"), nil, 0, - allKeys[:1], allVals[:1], - }, - // single 
key, bad - { - []byte("doo"), nil, 0, - nil, nil, - }, - // key range - { - []byte("foo"), []byte("foo1"), 0, - allKeys[:1], allVals[:1], - }, - // key range, get all keys - { - []byte("foo"), []byte("foo3"), 0, - allKeys, allVals, - }, - // key range, bad - { - []byte("goo"), []byte("goo3"), 0, - nil, nil, - }, - // key range with effective limit - { - []byte("foo"), []byte("foo3"), 1, - allKeys[:1], allVals[:1], - }, - // key range with limit - { - []byte("foo"), []byte("foo3"), 4, - allKeys, allVals, - }, - } - for i, tt := range tests { - keys, vals := tx.UnsafeRange(schema.Test, tt.key, tt.endKey, tt.limit) - if !reflect.DeepEqual(keys, tt.wkeys) { - t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys) - } - if !reflect.DeepEqual(vals, tt.wvals) { - t.Errorf("#%d: vals = %+v, want %+v", i, vals, tt.wvals) - } - } -} - -func TestBatchTxDelete(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - - tx.UnsafeCreateBucket(schema.Test) - tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar")) - - tx.UnsafeDelete(schema.Test, []byte("foo")) - - tx.Unlock() - - // check put result before and after tx is committed - for k := 0; k < 2; k++ { - tx.Lock() - ks, _ := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0) - tx.Unlock() - if len(ks) != 0 { - t.Errorf("keys on foo = %v, want nil", ks) - } - tx.Commit() - } -} - -func TestBatchTxCommit(t *testing.T) { - b, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Test) - tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar")) - tx.Unlock() - - tx.Commit() - - // check whether put happens via db view - backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(schema.Test.Name()) - if bucket == nil { - t.Errorf("bucket test does not exit") - return nil - } - v := bucket.Get([]byte("foo")) - if v == nil { - 
t.Errorf("foo key failed to written in backend") - } - return nil - }) -} - -func TestBatchTxBatchLimitCommit(t *testing.T) { - // start backend with batch limit 1 so one write can - // trigger a commit - b, _ := betesting.NewTmpBackend(t, time.Hour, 1) - defer betesting.Close(t, b) - - tx := b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(schema.Test) - tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar")) - tx.Unlock() - - // batch limit commit should have been triggered - // check whether put happens via db view - backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(schema.Test.Name()) - if bucket == nil { - t.Errorf("bucket test does not exit") - return nil - } - v := bucket.Get([]byte("foo")) - if v == nil { - t.Errorf("foo key failed to written in backend") - } - return nil - }) -} diff --git a/server/storage/backend/export_test.go b/server/storage/backend/export_test.go deleted file mode 100644 index e9f5ad38d6a..00000000000 --- a/server/storage/backend/export_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package backend - -import bolt "go.etcd.io/bbolt" - -func DbFromBackendForTest(b Backend) *bolt.DB { - return b.(*backend).db -} - -func DefragLimitForTest() int { - return defragLimit -} - -func CommitsForTest(b Backend) int64 { - return b.(*backend).Commits() -} diff --git a/server/storage/backend/hooks.go b/server/storage/backend/hooks.go deleted file mode 100644 index 9750828ef7b..00000000000 --- a/server/storage/backend/hooks.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -type HookFunc func(tx BatchTx) - -// Hooks allow to add additional logic executed during transaction lifetime. -type Hooks interface { - // OnPreCommitUnsafe is executed before Commit of transactions. - // The given transaction is already locked. 
- OnPreCommitUnsafe(tx BatchTx) -} - -type hooks struct { - onPreCommitUnsafe HookFunc -} - -func (h hooks) OnPreCommitUnsafe(tx BatchTx) { - h.onPreCommitUnsafe(tx) -} - -func NewHooks(onPreCommitUnsafe HookFunc) Hooks { - return hooks{onPreCommitUnsafe: onPreCommitUnsafe} -} diff --git a/server/storage/backend/hooks_test.go b/server/storage/backend/hooks_test.go deleted file mode 100644 index b77efbba492..00000000000 --- a/server/storage/backend/hooks_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend_test - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.uber.org/zap/zaptest" -) - -var ( - bucket = schema.Test - key = []byte("key") -) - -func TestBackendPreCommitHook(t *testing.T) { - be := newTestHooksBackend(t, backend.DefaultBackendConfig(zaptest.NewLogger(t))) - - tx := be.BatchTx() - prepareBuckenAndKey(tx) - tx.Commit() - - // Empty commit. 
- tx.Commit() - - assert.Equal(t, ">cc", getCommitsKey(t, be), "expected 2 explict commits") - tx.Commit() - assert.Equal(t, ">ccc", getCommitsKey(t, be), "expected 3 explict commits") -} - -func TestBackendAutoCommitLimitHook(t *testing.T) { - cfg := backend.DefaultBackendConfig(zaptest.NewLogger(t)) - cfg.BatchLimit = 3 - be := newTestHooksBackend(t, cfg) - - tx := be.BatchTx() - prepareBuckenAndKey(tx) // writes 2 entries. - - for i := 3; i <= 9; i++ { - write(tx, []byte("i"), []byte{byte(i)}) - } - - assert.Equal(t, ">ccc", getCommitsKey(t, be)) -} - -func write(tx backend.BatchTx, k, v []byte) { - tx.Lock() - defer tx.Unlock() - tx.UnsafePut(bucket, k, v) -} - -func TestBackendAutoCommitBatchIntervalHook(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - cfg := backend.DefaultBackendConfig(zaptest.NewLogger(t)) - cfg.BatchInterval = 10 * time.Millisecond - be := newTestHooksBackend(t, cfg) - tx := be.BatchTx() - prepareBuckenAndKey(tx) - - // Edits trigger an auto-commit - waitUntil(ctx, t, func() bool { return getCommitsKey(t, be) == ">c" }) - - time.Sleep(time.Second) - // No additional auto-commits, as there were no more edits - assert.Equal(t, ">c", getCommitsKey(t, be)) - - write(tx, []byte("foo"), []byte("bar1")) - - waitUntil(ctx, t, func() bool { return getCommitsKey(t, be) == ">cc" }) - - write(tx, []byte("foo"), []byte("bar1")) - - waitUntil(ctx, t, func() bool { return getCommitsKey(t, be) == ">ccc" }) -} - -func waitUntil(ctx context.Context, t testing.TB, f func() bool) { - for !f() { - select { - case <-ctx.Done(): - t.Fatalf("Context cancelled/timedout without condition met: %v", ctx.Err()) - default: - } - time.Sleep(10 * time.Millisecond) - } -} - -func prepareBuckenAndKey(tx backend.BatchTx) { - tx.Lock() - defer tx.Unlock() - tx.UnsafeCreateBucket(bucket) - tx.UnsafePut(bucket, key, []byte(">")) -} - -func newTestHooksBackend(t testing.TB, baseConfig backend.BackendConfig) 
backend.Backend { - cfg := baseConfig - cfg.Hooks = backend.NewHooks(func(tx backend.BatchTx) { - k, v := tx.UnsafeRange(bucket, key, nil, 1) - t.Logf("OnPreCommit executed: %v %v", string(k[0]), string(v[0])) - assert.Len(t, k, 1) - assert.Len(t, v, 1) - tx.UnsafePut(bucket, key, append(v[0], byte('c'))) - }) - - be, _ := betesting.NewTmpBackendFromCfg(t, cfg) - t.Cleanup(func() { - betesting.Close(t, be) - }) - return be -} - -func getCommitsKey(t testing.TB, be backend.Backend) string { - rtx := be.BatchTx() - rtx.Lock() - defer rtx.Unlock() - _, v := rtx.UnsafeRange(bucket, key, nil, 1) - assert.Len(t, v, 1) - return string(v[0]) -} diff --git a/server/storage/backend/metrics.go b/server/storage/backend/metrics.go deleted file mode 100644 index 9d58c00638b..00000000000 --- a/server/storage/backend/metrics.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package backend - -import "github.com/prometheus/client_golang/prometheus" - -var ( - commitSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "backend_commit_duration_seconds", - Help: "The latency distributions of commit called by backend.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - rebalanceSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "disk", - Name: "backend_commit_rebalance_duration_seconds", - Help: "The latency distributions of commit.rebalance called by bboltdb backend.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - spillSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "disk", - Name: "backend_commit_spill_duration_seconds", - Help: "The latency distributions of commit.spill called by bboltdb backend.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - writeSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "disk", - Name: "backend_commit_write_duration_seconds", - Help: "The latency distributions of commit.write called by bboltdb backend.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - defragSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "backend_defrag_duration_seconds", - Help: "The latency 
distribution of backend defragmentation.", - - // 100 MB usually takes 1 sec, so start with 10 MB of 100 ms - // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2 - // highest bucket start of 0.1 sec * 2^12 == 409.6 sec - Buckets: prometheus.ExponentialBuckets(.1, 2, 13), - }) - - snapshotTransferSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "backend_snapshot_duration_seconds", - Help: "The latency distribution of backend snapshots.", - - // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2 - // highest bucket start of 0.01 sec * 2^16 == 655.36 sec - Buckets: prometheus.ExponentialBuckets(.01, 2, 17), - }) - - isDefragActive = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "defrag_inflight", - Help: "Whether or not defrag is active on the member. 1 means active, 0 means not.", - }) -) - -func init() { - prometheus.MustRegister(commitSec) - prometheus.MustRegister(rebalanceSec) - prometheus.MustRegister(spillSec) - prometheus.MustRegister(writeSec) - prometheus.MustRegister(defragSec) - prometheus.MustRegister(snapshotTransferSec) - prometheus.MustRegister(isDefragActive) -} diff --git a/server/storage/backend/read_tx.go b/server/storage/backend/read_tx.go deleted file mode 100644 index 56327d52ae6..00000000000 --- a/server/storage/backend/read_tx.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "math" - "sync" - - bolt "go.etcd.io/bbolt" -) - -// IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys; -// overwrites on a bucket should only fetch with limit=1, but IsSafeRangeBucket -// is known to never overwrite any key so range is safe. - -type ReadTx interface { - Lock() - Unlock() - RLock() - RUnlock() - - UnsafeRange(bucket Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) - UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error -} - -// Base type for readTx and concurrentReadTx to eliminate duplicate functions between these -type baseReadTx struct { - // mu protects accesses to the txReadBuffer - mu sync.RWMutex - buf txReadBuffer - - // TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle. - // txMu protects accesses to buckets and tx on Range requests. - txMu *sync.RWMutex - tx *bolt.Tx - buckets map[BucketID]*bolt.Bucket - // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done. 
- txWg *sync.WaitGroup -} - -func (baseReadTx *baseReadTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error { - dups := make(map[string]struct{}) - getDups := func(k, v []byte) error { - dups[string(k)] = struct{}{} - return nil - } - visitNoDup := func(k, v []byte) error { - if _, ok := dups[string(k)]; ok { - return nil - } - return visitor(k, v) - } - if err := baseReadTx.buf.ForEach(bucket, getDups); err != nil { - return err - } - baseReadTx.txMu.Lock() - err := unsafeForEach(baseReadTx.tx, bucket, visitNoDup) - baseReadTx.txMu.Unlock() - if err != nil { - return err - } - return baseReadTx.buf.ForEach(bucket, visitor) -} - -func (baseReadTx *baseReadTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - if endKey == nil { - // forbid duplicates for single keys - limit = 1 - } - if limit <= 0 { - limit = math.MaxInt64 - } - if limit > 1 && !bucketType.IsSafeRangeBucket() { - panic("do not use unsafeRange on non-keys bucket") - } - keys, vals := baseReadTx.buf.Range(bucketType, key, endKey, limit) - if int64(len(keys)) == limit { - return keys, vals - } - - // find/cache bucket - bn := bucketType.ID() - baseReadTx.txMu.RLock() - bucket, ok := baseReadTx.buckets[bn] - baseReadTx.txMu.RUnlock() - lockHeld := false - if !ok { - baseReadTx.txMu.Lock() - lockHeld = true - bucket = baseReadTx.tx.Bucket(bucketType.Name()) - baseReadTx.buckets[bn] = bucket - } - - // ignore missing bucket since may have been created in this batch - if bucket == nil { - if lockHeld { - baseReadTx.txMu.Unlock() - } - return keys, vals - } - if !lockHeld { - baseReadTx.txMu.Lock() - } - c := bucket.Cursor() - baseReadTx.txMu.Unlock() - - k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys))) - return append(k2, keys...), append(v2, vals...) 
-} - -type readTx struct { - baseReadTx -} - -func (rt *readTx) Lock() { rt.mu.Lock() } -func (rt *readTx) Unlock() { rt.mu.Unlock() } -func (rt *readTx) RLock() { rt.mu.RLock() } -func (rt *readTx) RUnlock() { rt.mu.RUnlock() } - -func (rt *readTx) reset() { - rt.buf.reset() - rt.buckets = make(map[BucketID]*bolt.Bucket) - rt.tx = nil - rt.txWg = new(sync.WaitGroup) -} - -type concurrentReadTx struct { - baseReadTx -} - -func (rt *concurrentReadTx) Lock() {} -func (rt *concurrentReadTx) Unlock() {} - -// RLock is no-op. concurrentReadTx does not need to be locked after it is created. -func (rt *concurrentReadTx) RLock() {} - -// RUnlock signals the end of concurrentReadTx. -func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() } diff --git a/server/storage/backend/verify.go b/server/storage/backend/verify.go deleted file mode 100644 index c55279f8161..00000000000 --- a/server/storage/backend/verify.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package backend - -import ( - "runtime/debug" - "strings" - - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/verify" -) - -const ( - ENV_VERIFY_VALUE_LOCK verify.VerificationType = "lock" -) - -func ValidateCalledInsideApply(lg *zap.Logger) { - if !verifyLockEnabled() { - return - } - if !insideApply() { - lg.Panic("Called outside of APPLY!", zap.Stack("stacktrace")) - } -} - -func ValidateCalledOutSideApply(lg *zap.Logger) { - if !verifyLockEnabled() { - return - } - if insideApply() { - lg.Panic("Called inside of APPLY!", zap.Stack("stacktrace")) - } -} - -func ValidateCalledInsideUnittest(lg *zap.Logger) { - if !verifyLockEnabled() { - return - } - if !insideUnittest() { - lg.Fatal("Lock called outside of unit test!", zap.Stack("stacktrace")) - } -} - -func verifyLockEnabled() bool { - return verify.IsVerificationEnabled(ENV_VERIFY_VALUE_LOCK) -} - -func insideApply() bool { - stackTraceStr := string(debug.Stack()) - return strings.Contains(stackTraceStr, ".applyEntries") -} - -func insideUnittest() bool { - stackTraceStr := string(debug.Stack()) - return strings.Contains(stackTraceStr, "_test.go") && !strings.Contains(stackTraceStr, "tests/") -} diff --git a/server/storage/backend/verify_test.go b/server/storage/backend/verify_test.go deleted file mode 100644 index 914e24401a0..00000000000 --- a/server/storage/backend/verify_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package backend_test - -import ( - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/verify" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func TestLockVerify(t *testing.T) { - tcs := []struct { - name string - insideApply bool - lock func(tx backend.BatchTx) - txPostLockInsideApplyHook func() - expectPanic bool - }{ - { - name: "call lockInsideApply from inside apply", - insideApply: true, - lock: lockInsideApply, - expectPanic: false, - }, - { - name: "call lockInsideApply from outside apply (without txPostLockInsideApplyHook)", - insideApply: false, - lock: lockInsideApply, - expectPanic: false, - }, - { - name: "call lockInsideApply from outside apply (with txPostLockInsideApplyHook)", - insideApply: false, - lock: lockInsideApply, - txPostLockInsideApplyHook: func() {}, - expectPanic: true, - }, - { - name: "call lockOutsideApply from outside apply", - insideApply: false, - lock: lockOutsideApply, - expectPanic: false, - }, - { - name: "call lockOutsideApply from inside apply", - insideApply: true, - lock: lockOutsideApply, - expectPanic: true, - }, - { - name: "call Lock from unit test", - insideApply: false, - lock: lockFromUT, - expectPanic: false, - }, - } - revertVerifyFunc := verify.EnableVerifications(backend.ENV_VERIFY_VALUE_LOCK) - defer revertVerifyFunc() - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - - be, _ := betesting.NewTmpBackend(t, time.Hour, 10000) - be.SetTxPostLockInsideApplyHook(tc.txPostLockInsideApplyHook) - - hasPaniced := handlePanic(func() { - if tc.insideApply { - applyEntries(be, tc.lock) - } else { - tc.lock(be.BatchTx()) - } - }) != nil - if hasPaniced != tc.expectPanic { - t.Errorf("%v != %v", hasPaniced, tc.expectPanic) - } - }) - } -} - -func handlePanic(f func()) (result interface{}) { - defer func() { - result = recover() - 
}() - f() - return result -} - -func applyEntries(be backend.Backend, f func(tx backend.BatchTx)) { - f(be.BatchTx()) -} - -func lockInsideApply(tx backend.BatchTx) { tx.LockInsideApply() } -func lockOutsideApply(tx backend.BatchTx) { tx.LockOutsideApply() } -func lockFromUT(tx backend.BatchTx) { tx.Lock() } diff --git a/server/storage/datadir/datadir.go b/server/storage/datadir/datadir.go deleted file mode 100644 index fa4c51ad1a2..00000000000 --- a/server/storage/datadir/datadir.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package datadir - -import "path/filepath" - -const ( - memberDirSegment = "member" - snapDirSegment = "snap" - walDirSegment = "wal" - backendFileSegment = "db" -) - -func ToBackendFileName(dataDir string) string { - return filepath.Join(ToSnapDir(dataDir), backendFileSegment) -} - -func ToSnapDir(dataDir string) string { - return filepath.Join(ToMemberDir(dataDir), snapDirSegment) -} - -func ToWalDir(dataDir string) string { - return filepath.Join(ToMemberDir(dataDir), walDirSegment) -} - -func ToMemberDir(dataDir string) string { - return filepath.Join(dataDir, memberDirSegment) -} diff --git a/server/storage/datadir/datadir_test.go b/server/storage/datadir/datadir_test.go deleted file mode 100644 index 623614351bd..00000000000 --- a/server/storage/datadir/datadir_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package datadir_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "go.etcd.io/etcd/server/v3/storage/datadir" -) - -func TestToBackendFileName(t *testing.T) { - result := datadir.ToBackendFileName("/dir/data-dir") - assert.Equal(t, "/dir/data-dir/member/snap/db", result) -} - -func TestToMemberDir(t *testing.T) { - result := datadir.ToMemberDir("/dir/data-dir") - assert.Equal(t, "/dir/data-dir/member", result) -} - -func TestToSnapDir(t *testing.T) { - result := datadir.ToSnapDir("/dir/data-dir") - assert.Equal(t, "/dir/data-dir/member/snap", result) -} - -func TestToWalDir(t *testing.T) { - result := datadir.ToWalDir("/dir/data-dir") - assert.Equal(t, "/dir/data-dir/member/wal", result) -} - -func TestToWalDirSlash(t *testing.T) { - result := datadir.ToWalDir("/dir/data-dir/") - assert.Equal(t, "/dir/data-dir/member/wal", result) -} diff --git a/server/storage/datadir/doc.go b/server/storage/datadir/doc.go deleted file mode 100644 index 92ca4b253dd..00000000000 --- a/server/storage/datadir/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package datadir - -// datadir contains functions to navigate file-layout of etcd data-directory. 
diff --git a/server/storage/hooks.go b/server/storage/hooks.go deleted file mode 100644 index cf09e06b3a6..00000000000 --- a/server/storage/hooks.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "sync" - - "go.uber.org/zap" - - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/raft/v3/raftpb" -) - -type BackendHooks struct { - indexer cindex.ConsistentIndexer - lg *zap.Logger - - // confState to Be written in the next submitted Backend transaction (if dirty) - confState raftpb.ConfState - // first write changes it to 'dirty'. false by default, so - // not initialized `confState` is meaningless. 
- confStateDirty bool - confStateLock sync.Mutex -} - -func NewBackendHooks(lg *zap.Logger, indexer cindex.ConsistentIndexer) *BackendHooks { - return &BackendHooks{lg: lg, indexer: indexer} -} - -func (bh *BackendHooks) OnPreCommitUnsafe(tx backend.BatchTx) { - bh.indexer.UnsafeSave(tx) - bh.confStateLock.Lock() - defer bh.confStateLock.Unlock() - if bh.confStateDirty { - schema.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState) - // save bh.confState - bh.confStateDirty = false - } -} - -func (bh *BackendHooks) SetConfState(confState *raftpb.ConfState) { - bh.confStateLock.Lock() - defer bh.confStateLock.Unlock() - bh.confState = *confState - bh.confStateDirty = true -} diff --git a/server/storage/metrics.go b/server/storage/metrics.go deleted file mode 100644 index cb7f87057f9..00000000000 --- a/server/storage/metrics.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -var quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "quota_backend_bytes", - Help: "Current backend storage quota size in bytes.", -}) - -func init() { - prometheus.MustRegister(quotaBackendBytes) -} diff --git a/server/storage/mvcc/doc.go b/server/storage/mvcc/doc.go deleted file mode 100644 index ad5be03086f..00000000000 --- a/server/storage/mvcc/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mvcc defines etcd's stable MVCC storage. -package mvcc diff --git a/server/storage/mvcc/hash.go b/server/storage/mvcc/hash.go deleted file mode 100644 index 0c8e92240ca..00000000000 --- a/server/storage/mvcc/hash.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "hash" - "hash/crc32" - "sort" - "sync" - - "go.uber.org/zap" - - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -const ( - hashStorageMaxSize = 10 -) - -func unsafeHashByRev(tx backend.ReadTx, compactRevision, revision int64, keep map[revision]struct{}) (KeyValueHash, error) { - h := newKVHasher(compactRevision, revision, keep) - err := tx.UnsafeForEach(schema.Key, func(k, v []byte) error { - h.WriteKeyValue(k, v) - return nil - }) - return h.Hash(), err -} - -type kvHasher struct { - hash hash.Hash32 - compactRevision int64 - revision int64 - keep map[revision]struct{} -} - -func newKVHasher(compactRev, rev int64, keep map[revision]struct{}) kvHasher { - h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - h.Write(schema.Key.Name()) - return kvHasher{ - hash: h, - compactRevision: compactRev, - revision: rev, - keep: keep, - } -} - -func (h *kvHasher) WriteKeyValue(k, v []byte) { - kr := bytesToRev(k) - upper := revision{main: h.revision + 1} - if !upper.GreaterThan(kr) { - return - } - lower := revision{main: h.compactRevision + 1} - // skip revisions that are scheduled for deletion - // due to compacting; don't skip if there isn't one. - if lower.GreaterThan(kr) && len(h.keep) > 0 { - if _, ok := h.keep[kr]; !ok { - return - } - } - h.hash.Write(k) - h.hash.Write(v) -} - -func (h *kvHasher) Hash() KeyValueHash { - return KeyValueHash{Hash: h.hash.Sum32(), CompactRevision: h.compactRevision, Revision: h.revision} -} - -type KeyValueHash struct { - Hash uint32 - CompactRevision int64 - Revision int64 -} - -type HashStorage interface { - // Hash computes the hash of the KV's backend. - Hash() (hash uint32, revision int64, err error) - - // HashByRev computes the hash of all MVCC revisions up to a given revision. 
- HashByRev(rev int64) (hash KeyValueHash, currentRev int64, err error) - - // Store adds hash value in local cache, allowing it can be returned by HashByRev. - Store(valueHash KeyValueHash) - - // Hashes returns list of up to `hashStorageMaxSize` newest previously stored hashes. - Hashes() []KeyValueHash -} - -type hashStorage struct { - store *store - hashMu sync.RWMutex - hashes []KeyValueHash - lg *zap.Logger -} - -func newHashStorage(lg *zap.Logger, s *store) *hashStorage { - return &hashStorage{ - store: s, - lg: lg, - } -} - -func (s *hashStorage) Hash() (hash uint32, revision int64, err error) { - return s.store.hash() -} - -func (s *hashStorage) HashByRev(rev int64) (KeyValueHash, int64, error) { - s.hashMu.RLock() - for _, h := range s.hashes { - if rev == h.Revision { - s.hashMu.RUnlock() - - s.store.revMu.RLock() - currentRev := s.store.currentRev - s.store.revMu.RUnlock() - return h, currentRev, nil - } - } - s.hashMu.RUnlock() - - return s.store.hashByRev(rev) -} - -func (s *hashStorage) Store(hash KeyValueHash) { - s.lg.Info("storing new hash", - zap.Uint32("hash", hash.Hash), - zap.Int64("revision", hash.Revision), - zap.Int64("compact-revision", hash.CompactRevision), - ) - s.hashMu.Lock() - defer s.hashMu.Unlock() - s.hashes = append(s.hashes, hash) - sort.Slice(s.hashes, func(i, j int) bool { - return s.hashes[i].Revision < s.hashes[j].Revision - }) - if len(s.hashes) > hashStorageMaxSize { - s.hashes = s.hashes[len(s.hashes)-hashStorageMaxSize:] - } -} - -func (s *hashStorage) Hashes() []KeyValueHash { - s.hashMu.RLock() - // Copy out hashes under lock just to be safe - hashes := make([]KeyValueHash, 0, len(s.hashes)) - for _, hash := range s.hashes { - hashes = append(hashes, hash) - } - s.hashMu.RUnlock() - return hashes -} diff --git a/server/storage/mvcc/hash_test.go b/server/storage/mvcc/hash_test.go deleted file mode 100644 index 2c7a35f9a60..00000000000 --- a/server/storage/mvcc/hash_test.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 
2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/mvcc/testutil" -) - -// TestHashByRevValue test HashByRevValue values to ensure we don't change the -// output which would have catastrophic consequences. Expected output is just -// hardcoded, so please regenerate it every time you change input parameters. 
-func TestHashByRevValue(t *testing.T) { - b, _ := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - var totalRevisions int64 = 1210 - assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions) - assert.Less(t, int64(testutil.CompactionCycle*10), totalRevisions) - var rev int64 - var got []KeyValueHash - for ; rev < totalRevisions; rev += testutil.CompactionCycle { - putKVs(s, rev, testutil.CompactionCycle) - hash := testHashByRev(t, s, rev+testutil.CompactionCycle/2) - got = append(got, hash) - } - putKVs(s, rev, totalRevisions) - hash := testHashByRev(t, s, rev+totalRevisions/2) - got = append(got, hash) - assert.Equal(t, []KeyValueHash{ - {4082599214, -1, 35}, - {2279933401, 35, 106}, - {3284231217, 106, 177}, - {126286495, 177, 248}, - {900108730, 248, 319}, - {2475485232, 319, 390}, - {1226296507, 390, 461}, - {2503661030, 461, 532}, - {4155130747, 532, 603}, - {106915399, 603, 674}, - {406914006, 674, 745}, - {1882211381, 745, 816}, - {806177088, 816, 887}, - {664311366, 887, 958}, - {1496914449, 958, 1029}, - {2434525091, 1029, 1100}, - {3988652253, 1100, 1171}, - {1122462288, 1171, 1242}, - {724436716, 1242, 1883}, - }, got) -} - -func TestHashByRevValueLastRevision(t *testing.T) { - b, _ := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - var totalRevisions int64 = 1210 - assert.Less(t, int64(s.cfg.CompactionBatchLimit), totalRevisions) - assert.Less(t, int64(testutil.CompactionCycle*10), totalRevisions) - var rev int64 - var got []KeyValueHash - for ; rev < totalRevisions; rev += testutil.CompactionCycle { - putKVs(s, rev, testutil.CompactionCycle) - hash := testHashByRev(t, s, 0) - got = append(got, hash) - } - putKVs(s, rev, totalRevisions) - hash := testHashByRev(t, s, 0) - got = append(got, hash) - assert.Equal(t, []KeyValueHash{ - {1913897190, -1, 73}, - {224860069, 73, 145}, - {1565167519, 145, 217}, - 
{1566261620, 217, 289}, - {2037173024, 289, 361}, - {691659396, 361, 433}, - {2713730748, 433, 505}, - {3919322507, 505, 577}, - {769967540, 577, 649}, - {2909194793, 649, 721}, - {1576921157, 721, 793}, - {4067701532, 793, 865}, - {2226384237, 865, 937}, - {2923408134, 937, 1009}, - {2680329256, 1009, 1081}, - {1546717673, 1081, 1153}, - {2713657846, 1153, 1225}, - {1046575299, 1225, 1297}, - {2017735779, 1297, 2508}, - }, got) -} - -func putKVs(s *store, rev, count int64) { - for i := rev; i <= rev+count; i++ { - s.Put([]byte(testutil.PickKey(i)), []byte(fmt.Sprint(i)), 0) - } -} - -func testHashByRev(t *testing.T, s *store, rev int64) KeyValueHash { - if rev == 0 { - rev = s.Rev() - } - hash, _, err := s.hashByRev(rev) - assert.NoError(t, err, "error on rev %v", rev) - _, err = s.Compact(traceutil.TODO(), rev) - assert.NoError(t, err, "error on compact %v", rev) - return hash -} - -// TestCompactionHash tests compaction hash -// TODO: Change this to fuzz test -func TestCompactionHash(t *testing.T) { - b, _ := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - testutil.TestCompactionHash(context.Background(), t, hashTestCase{s}, s.cfg.CompactionBatchLimit) -} - -type hashTestCase struct { - *store -} - -func (tc hashTestCase) Put(ctx context.Context, key, value string) error { - tc.store.Put([]byte(key), []byte(value), 0) - return nil -} - -func (tc hashTestCase) Delete(ctx context.Context, key string) error { - tc.store.DeleteRange([]byte(key), nil) - return nil -} - -func (tc hashTestCase) HashByRev(ctx context.Context, rev int64) (testutil.KeyValueHash, error) { - hash, _, err := tc.store.HashStorage().HashByRev(rev) - return testutil.KeyValueHash{Hash: hash.Hash, CompactRevision: hash.CompactRevision, Revision: hash.Revision}, err -} - -func (tc hashTestCase) Defrag(ctx context.Context) error { - return tc.store.b.Defrag() -} - -func (tc hashTestCase) Compact(ctx context.Context, rev int64) error 
{ - done, err := tc.store.Compact(traceutil.TODO(), rev) - if err != nil { - return err - } - select { - case <-done: - case <-ctx.Done(): - return ctx.Err() - } - return nil -} - -func TestHasherStore(t *testing.T) { - lg := zaptest.NewLogger(t) - s := newHashStorage(lg, newFakeStore(lg)) - var hashes []KeyValueHash - for i := 0; i < hashStorageMaxSize; i++ { - hash := KeyValueHash{Hash: uint32(i), Revision: int64(i) + 10, CompactRevision: int64(i) + 100} - hashes = append(hashes, hash) - s.Store(hash) - } - - for _, want := range hashes { - got, _, err := s.HashByRev(want.Revision) - if err != nil { - t.Fatal(err) - } - if want.Hash != got.Hash { - t.Errorf("Expected stored hash to match, got: %d, expected: %d", want.Hash, got.Hash) - } - if want.Revision != got.Revision { - t.Errorf("Expected stored revision to match, got: %d, expected: %d", want.Revision, got.Revision) - } - if want.CompactRevision != got.CompactRevision { - t.Errorf("Expected stored compact revision to match, got: %d, expected: %d", want.CompactRevision, got.CompactRevision) - } - } -} - -func TestHasherStoreFull(t *testing.T) { - lg := zaptest.NewLogger(t) - s := newHashStorage(lg, newFakeStore(lg)) - var minRevision int64 = 100 - var maxRevision = minRevision + hashStorageMaxSize - for i := 0; i < hashStorageMaxSize; i++ { - s.Store(KeyValueHash{Revision: int64(i) + minRevision}) - } - - // Hash for old revision should be discarded as storage is already full - s.Store(KeyValueHash{Revision: minRevision - 1}) - hash, _, err := s.HashByRev(minRevision - 1) - if err == nil { - t.Errorf("Expected an error as old revision should be discarded, got: %v", hash) - } - // Hash for new revision should be stored even when storage is full - s.Store(KeyValueHash{Revision: maxRevision + 1}) - _, _, err = s.HashByRev(maxRevision + 1) - if err != nil { - t.Errorf("Didn't expect error for new revision, err: %v", err) - } -} diff --git a/server/storage/mvcc/index.go b/server/storage/mvcc/index.go deleted file 
mode 100644 index 6f92a4aeeea..00000000000 --- a/server/storage/mvcc/index.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "sync" - - "github.com/google/btree" - "go.uber.org/zap" -) - -type index interface { - Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) - Range(key, end []byte, atRev int64) ([][]byte, []revision) - Revisions(key, end []byte, atRev int64, limit int) ([]revision, int) - CountRevisions(key, end []byte, atRev int64) int - Put(key []byte, rev revision) - Tombstone(key []byte, rev revision) error - Compact(rev int64) map[revision]struct{} - Keep(rev int64) map[revision]struct{} - Equal(b index) bool - - Insert(ki *keyIndex) - KeyIndex(ki *keyIndex) *keyIndex -} - -type treeIndex struct { - sync.RWMutex - tree *btree.BTreeG[*keyIndex] - lg *zap.Logger -} - -func newTreeIndex(lg *zap.Logger) index { - return &treeIndex{ - tree: btree.NewG(32, func(aki *keyIndex, bki *keyIndex) bool { - return aki.Less(bki) - }), - lg: lg, - } -} - -func (ti *treeIndex) Put(key []byte, rev revision) { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - okeyi, ok := ti.tree.Get(keyi) - if !ok { - keyi.put(ti.lg, rev.main, rev.sub) - ti.tree.ReplaceOrInsert(keyi) - return - } - okeyi.put(ti.lg, rev.main, rev.sub) -} - -func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { - 
ti.RLock() - defer ti.RUnlock() - return ti.unsafeGet(key, atRev) -} - -func (ti *treeIndex) unsafeGet(key []byte, atRev int64) (modified, created revision, ver int64, err error) { - keyi := &keyIndex{key: key} - if keyi = ti.keyIndex(keyi); keyi == nil { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - return keyi.get(ti.lg, atRev) -} - -func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { - ti.RLock() - defer ti.RUnlock() - return ti.keyIndex(keyi) -} - -func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { - if ki, ok := ti.tree.Get(keyi); ok { - return ki - } - return nil -} - -func (ti *treeIndex) unsafeVisit(key, end []byte, f func(ki *keyIndex) bool) { - keyi, endi := &keyIndex{key: key}, &keyIndex{key: end} - - ti.tree.AscendGreaterOrEqual(keyi, func(item *keyIndex) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - if !f(item) { - return false - } - return true - }) -} - -// Revisions returns limited number of revisions from key(included) to end(excluded) -// at the given rev. The returned slice is sorted in the order of key. There is no limit if limit <= 0. -// The second return parameter isn't capped by the limit and reflects the total number of revisions. -func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []revision, total int) { - ti.RLock() - defer ti.RUnlock() - - if end == nil { - rev, _, _, err := ti.unsafeGet(key, atRev) - if err != nil { - return nil, 0 - } - return []revision{rev}, 1 - } - ti.unsafeVisit(key, end, func(ki *keyIndex) bool { - if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { - if limit <= 0 || len(revs) < limit { - revs = append(revs, rev) - } - total++ - } - return true - }) - return revs, total -} - -// CountRevisions returns the number of revisions -// from key(included) to end(excluded) at the given rev. 
-func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64) int { - ti.RLock() - defer ti.RUnlock() - - if end == nil { - _, _, _, err := ti.unsafeGet(key, atRev) - if err != nil { - return 0 - } - return 1 - } - total := 0 - ti.unsafeVisit(key, end, func(ki *keyIndex) bool { - if _, _, _, err := ki.get(ti.lg, atRev); err == nil { - total++ - } - return true - }) - return total -} - -func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { - ti.RLock() - defer ti.RUnlock() - - if end == nil { - rev, _, _, err := ti.unsafeGet(key, atRev) - if err != nil { - return nil, nil - } - return [][]byte{key}, []revision{rev} - } - ti.unsafeVisit(key, end, func(ki *keyIndex) bool { - if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { - revs = append(revs, rev) - keys = append(keys, ki.key) - } - return true - }) - return keys, revs -} - -func (ti *treeIndex) Tombstone(key []byte, rev revision) error { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - ki, ok := ti.tree.Get(keyi) - if !ok { - return ErrRevisionNotFound - } - - return ki.tombstone(ti.lg, rev.main, rev.sub) -} - -func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { - available := make(map[revision]struct{}) - ti.lg.Info("compact tree index", zap.Int64("revision", rev)) - ti.Lock() - clone := ti.tree.Clone() - ti.Unlock() - - clone.Ascend(func(keyi *keyIndex) bool { - // Lock is needed here to prevent modification to the keyIndex while - // compaction is going on or revision added to empty before deletion - ti.Lock() - keyi.compact(ti.lg, rev, available) - if keyi.isEmpty() { - _, ok := ti.tree.Delete(keyi) - if !ok { - ti.lg.Panic("failed to delete during compaction") - } - } - ti.Unlock() - return true - }) - return available -} - -// Keep finds all revisions to be kept for a Compaction at the given rev. 
-func (ti *treeIndex) Keep(rev int64) map[revision]struct{} { - available := make(map[revision]struct{}) - ti.RLock() - defer ti.RUnlock() - ti.tree.Ascend(func(keyi *keyIndex) bool { - keyi.keep(rev, available) - return true - }) - return available -} - -func (ti *treeIndex) Equal(bi index) bool { - b := bi.(*treeIndex) - - if ti.tree.Len() != b.tree.Len() { - return false - } - - equal := true - - ti.tree.Ascend(func(aki *keyIndex) bool { - bki, _ := b.tree.Get(aki) - if !aki.equal(bki) { - equal = false - return false - } - return true - }) - - return equal -} - -func (ti *treeIndex) Insert(ki *keyIndex) { - ti.Lock() - defer ti.Unlock() - ti.tree.ReplaceOrInsert(ki) -} diff --git a/server/storage/mvcc/index_bench_test.go b/server/storage/mvcc/index_bench_test.go deleted file mode 100644 index 008a7d2ae91..00000000000 --- a/server/storage/mvcc/index_bench_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "testing" - - "go.uber.org/zap" -) - -func BenchmarkIndexCompact1(b *testing.B) { benchmarkIndexCompact(b, 1) } -func BenchmarkIndexCompact100(b *testing.B) { benchmarkIndexCompact(b, 100) } -func BenchmarkIndexCompact10000(b *testing.B) { benchmarkIndexCompact(b, 10000) } -func BenchmarkIndexCompact100000(b *testing.B) { benchmarkIndexCompact(b, 100000) } -func BenchmarkIndexCompact1000000(b *testing.B) { benchmarkIndexCompact(b, 1000000) } - -func benchmarkIndexCompact(b *testing.B, size int) { - log := zap.NewNop() - kvindex := newTreeIndex(log) - - bytesN := 64 - keys := createBytesSlice(bytesN, size) - for i := 1; i < size; i++ { - kvindex.Put(keys[i], revision{main: int64(i), sub: int64(i)}) - } - b.ResetTimer() - for i := 1; i < b.N; i++ { - kvindex.Compact(int64(i)) - } -} - -func BenchmarkIndexPut(b *testing.B) { - log := zap.NewNop() - kvindex := newTreeIndex(log) - - bytesN := 64 - keys := createBytesSlice(bytesN, b.N) - b.ResetTimer() - for i := 1; i < b.N; i++ { - kvindex.Put(keys[i], revision{main: int64(i), sub: int64(i)}) - } -} - -func BenchmarkIndexGet(b *testing.B) { - log := zap.NewNop() - kvindex := newTreeIndex(log) - - bytesN := 64 - keys := createBytesSlice(bytesN, b.N) - for i := 1; i < b.N; i++ { - kvindex.Put(keys[i], revision{main: int64(i), sub: int64(i)}) - } - b.ResetTimer() - for i := 1; i < b.N; i++ { - kvindex.Get(keys[i], int64(i)) - } -} diff --git a/server/storage/mvcc/index_test.go b/server/storage/mvcc/index_test.go deleted file mode 100644 index 7d947670d76..00000000000 --- a/server/storage/mvcc/index_test.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "reflect" - "testing" - - "github.com/google/btree" - "go.uber.org/zap/zaptest" -) - -func TestIndexGet(t *testing.T) { - ti := newTreeIndex(zaptest.NewLogger(t)) - ti.Put([]byte("foo"), revision{main: 2}) - ti.Put([]byte("foo"), revision{main: 4}) - ti.Tombstone([]byte("foo"), revision{main: 6}) - - tests := []struct { - rev int64 - - wrev revision - wcreated revision - wver int64 - werr error - }{ - {0, revision{}, revision{}, 0, ErrRevisionNotFound}, - {1, revision{}, revision{}, 0, ErrRevisionNotFound}, - {2, revision{main: 2}, revision{main: 2}, 1, nil}, - {3, revision{main: 2}, revision{main: 2}, 1, nil}, - {4, revision{main: 4}, revision{main: 2}, 2, nil}, - {5, revision{main: 4}, revision{main: 2}, 2, nil}, - {6, revision{}, revision{}, 0, ErrRevisionNotFound}, - } - for i, tt := range tests { - rev, created, ver, err := ti.Get([]byte("foo"), tt.rev) - if err != tt.werr { - t.Errorf("#%d: err = %v, want %v", i, err, tt.werr) - } - if rev != tt.wrev { - t.Errorf("#%d: rev = %+v, want %+v", i, rev, tt.wrev) - } - if created != tt.wcreated { - t.Errorf("#%d: created = %+v, want %+v", i, created, tt.wcreated) - } - if ver != tt.wver { - t.Errorf("#%d: ver = %d, want %d", i, ver, tt.wver) - } - } -} - -func TestIndexRange(t *testing.T) { - allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")} - allRevs := []revision{{main: 1}, {main: 2}, {main: 3}} - - ti := newTreeIndex(zaptest.NewLogger(t)) - for i := range allKeys { - ti.Put(allKeys[i], allRevs[i]) - } - - atRev := int64(3) - tests := []struct { - 
key, end []byte - wkeys [][]byte - wrevs []revision - }{ - // single key that not found - { - []byte("bar"), nil, nil, nil, - }, - // single key that found - { - []byte("foo"), nil, allKeys[:1], allRevs[:1], - }, - // range keys, return first member - { - []byte("foo"), []byte("foo1"), allKeys[:1], allRevs[:1], - }, - // range keys, return first two members - { - []byte("foo"), []byte("foo2"), allKeys[:2], allRevs[:2], - }, - // range keys, return all members - { - []byte("foo"), []byte("fop"), allKeys, allRevs, - }, - // range keys, return last two members - { - []byte("foo1"), []byte("fop"), allKeys[1:], allRevs[1:], - }, - // range keys, return last member - { - []byte("foo2"), []byte("fop"), allKeys[2:], allRevs[2:], - }, - // range keys, return nothing - { - []byte("foo3"), []byte("fop"), nil, nil, - }, - } - for i, tt := range tests { - keys, revs := ti.Range(tt.key, tt.end, atRev) - if !reflect.DeepEqual(keys, tt.wkeys) { - t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys) - } - if !reflect.DeepEqual(revs, tt.wrevs) { - t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs) - } - } -} - -func TestIndexTombstone(t *testing.T) { - ti := newTreeIndex(zaptest.NewLogger(t)) - ti.Put([]byte("foo"), revision{main: 1}) - - err := ti.Tombstone([]byte("foo"), revision{main: 2}) - if err != nil { - t.Errorf("tombstone error = %v, want nil", err) - } - - _, _, _, err = ti.Get([]byte("foo"), 2) - if err != ErrRevisionNotFound { - t.Errorf("get error = %v, want ErrRevisionNotFound", err) - } - err = ti.Tombstone([]byte("foo"), revision{main: 3}) - if err != ErrRevisionNotFound { - t.Errorf("tombstone error = %v, want %v", err, ErrRevisionNotFound) - } -} - -func TestIndexRevision(t *testing.T) { - allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2"), []byte("foo2"), []byte("foo1"), []byte("foo")} - allRevs := []revision{{main: 1}, {main: 2}, {main: 3}, {main: 4}, {main: 5}, {main: 6}} - - ti := newTreeIndex(zaptest.NewLogger(t)) - for i := range 
allKeys { - ti.Put(allKeys[i], allRevs[i]) - } - - tests := []struct { - key, end []byte - atRev int64 - limit int - wrevs []revision - wcounts int - }{ - // single key that not found - { - []byte("bar"), nil, 6, 0, nil, 0, - }, - // single key that found - { - []byte("foo"), nil, 6, 0, []revision{{main: 6}}, 1, - }, - // various range keys, fixed atRev, unlimited - { - []byte("foo"), []byte("foo1"), 6, 0, []revision{{main: 6}}, 1, - }, - { - []byte("foo"), []byte("foo2"), 6, 0, []revision{{main: 6}, {main: 5}}, 2, - }, - { - []byte("foo"), []byte("fop"), 6, 0, []revision{{main: 6}, {main: 5}, {main: 4}}, 3, - }, - { - []byte("foo1"), []byte("fop"), 6, 0, []revision{{main: 5}, {main: 4}}, 2, - }, - { - []byte("foo2"), []byte("fop"), 6, 0, []revision{{main: 4}}, 1, - }, - { - []byte("foo3"), []byte("fop"), 6, 0, nil, 0, - }, - // fixed range keys, various atRev, unlimited - { - []byte("foo1"), []byte("fop"), 1, 0, nil, 0, - }, - { - []byte("foo1"), []byte("fop"), 2, 0, []revision{{main: 2}}, 1, - }, - { - []byte("foo1"), []byte("fop"), 3, 0, []revision{{main: 2}, {main: 3}}, 2, - }, - { - []byte("foo1"), []byte("fop"), 4, 0, []revision{{main: 2}, {main: 4}}, 2, - }, - { - []byte("foo1"), []byte("fop"), 5, 0, []revision{{main: 5}, {main: 4}}, 2, - }, - { - []byte("foo1"), []byte("fop"), 6, 0, []revision{{main: 5}, {main: 4}}, 2, - }, - // fixed range keys, fixed atRev, various limit - { - []byte("foo"), []byte("fop"), 6, 1, []revision{{main: 6}}, 3, - }, - { - []byte("foo"), []byte("fop"), 6, 2, []revision{{main: 6}, {main: 5}}, 3, - }, - { - []byte("foo"), []byte("fop"), 6, 3, []revision{{main: 6}, {main: 5}, {main: 4}}, 3, - }, - { - []byte("foo"), []byte("fop"), 3, 1, []revision{{main: 1}}, 3, - }, - { - []byte("foo"), []byte("fop"), 3, 2, []revision{{main: 1}, {main: 2}}, 3, - }, - { - []byte("foo"), []byte("fop"), 3, 3, []revision{{main: 1}, {main: 2}, {main: 3}}, 3, - }, - } - for i, tt := range tests { - revs, _ := ti.Revisions(tt.key, tt.end, tt.atRev, 
tt.limit) - if !reflect.DeepEqual(revs, tt.wrevs) { - t.Errorf("#%d limit %d: revs = %+v, want %+v", i, tt.limit, revs, tt.wrevs) - } - count := ti.CountRevisions(tt.key, tt.end, tt.atRev) - if count != tt.wcounts { - t.Errorf("#%d: count = %d, want %v", i, count, tt.wcounts) - } - } -} - -func TestIndexCompactAndKeep(t *testing.T) { - maxRev := int64(20) - tests := []struct { - key []byte - remove bool - rev revision - created revision - ver int64 - }{ - {[]byte("foo"), false, revision{main: 1}, revision{main: 1}, 1}, - {[]byte("foo1"), false, revision{main: 2}, revision{main: 2}, 1}, - {[]byte("foo2"), false, revision{main: 3}, revision{main: 3}, 1}, - {[]byte("foo2"), false, revision{main: 4}, revision{main: 3}, 2}, - {[]byte("foo"), false, revision{main: 5}, revision{main: 1}, 2}, - {[]byte("foo1"), false, revision{main: 6}, revision{main: 2}, 2}, - {[]byte("foo1"), true, revision{main: 7}, revision{}, 0}, - {[]byte("foo2"), true, revision{main: 8}, revision{}, 0}, - {[]byte("foo"), true, revision{main: 9}, revision{}, 0}, - {[]byte("foo"), false, revision{10, 0}, revision{10, 0}, 1}, - {[]byte("foo1"), false, revision{10, 1}, revision{10, 1}, 1}, - } - - // Continuous Compact and Keep - ti := newTreeIndex(zaptest.NewLogger(t)) - for _, tt := range tests { - if tt.remove { - ti.Tombstone(tt.key, tt.rev) - } else { - ti.Put(tt.key, tt.rev) - } - } - for i := int64(1); i < maxRev; i++ { - am := ti.Compact(i) - keep := ti.Keep(i) - if !(reflect.DeepEqual(am, keep)) { - t.Errorf("#%d: compact keep %v != Keep keep %v", i, am, keep) - } - wti := &treeIndex{tree: btree.NewG(32, func(aki *keyIndex, bki *keyIndex) bool { - return aki.Less(bki) - })} - for _, tt := range tests { - if _, ok := am[tt.rev]; ok || tt.rev.GreaterThan(revision{main: i}) { - if tt.remove { - wti.Tombstone(tt.key, tt.rev) - } else { - restore(wti, tt.key, tt.created, tt.rev, tt.ver) - } - } - } - if !ti.Equal(wti) { - t.Errorf("#%d: not equal ti", i) - } - } - - // Once Compact and Keep - for i 
:= int64(1); i < maxRev; i++ { - ti := newTreeIndex(zaptest.NewLogger(t)) - for _, tt := range tests { - if tt.remove { - ti.Tombstone(tt.key, tt.rev) - } else { - ti.Put(tt.key, tt.rev) - } - } - am := ti.Compact(i) - keep := ti.Keep(i) - if !(reflect.DeepEqual(am, keep)) { - t.Errorf("#%d: compact keep %v != Keep keep %v", i, am, keep) - } - wti := &treeIndex{tree: btree.NewG(32, func(aki *keyIndex, bki *keyIndex) bool { - return aki.Less(bki) - })} - for _, tt := range tests { - if _, ok := am[tt.rev]; ok || tt.rev.GreaterThan(revision{main: i}) { - if tt.remove { - wti.Tombstone(tt.key, tt.rev) - } else { - restore(wti, tt.key, tt.created, tt.rev, tt.ver) - } - } - } - if !ti.Equal(wti) { - t.Errorf("#%d: not equal ti", i) - } - } -} - -func restore(ti *treeIndex, key []byte, created, modified revision, ver int64) { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - okeyi, _ := ti.tree.Get(keyi) - if okeyi == nil { - keyi.restore(ti.lg, created, modified, ver) - ti.tree.ReplaceOrInsert(keyi) - return - } - okeyi.put(ti.lg, modified.main, modified.sub) -} diff --git a/server/storage/mvcc/key_index.go b/server/storage/mvcc/key_index.go deleted file mode 100644 index e7aac273c9e..00000000000 --- a/server/storage/mvcc/key_index.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "bytes" - "errors" - "fmt" - - "go.uber.org/zap" -) - -var ( - ErrRevisionNotFound = errors.New("mvcc: revision not found") -) - -// keyIndex stores the revisions of a key in the backend. -// Each keyIndex has at least one key generation. -// Each generation might have several key versions. -// Tombstone on a key appends an tombstone version at the end -// of the current generation and creates a new empty generation. -// Each version of a key has an index pointing to the backend. -// -// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo" -// generate a keyIndex: -// key: "foo" -// modified: 5 -// generations: -// -// {empty} -// {4.0, 5.0(t)} -// {1.0, 2.0, 3.0(t)} -// -// Compact a keyIndex removes the versions with smaller or equal to -// rev except the largest one. If the generation becomes empty -// during compaction, it will be removed. if all the generations get -// removed, the keyIndex should be removed. -// -// For example: -// compact(2) on the previous example -// generations: -// -// {empty} -// {4.0, 5.0(t)} -// {2.0, 3.0(t)} -// -// compact(4) -// generations: -// -// {empty} -// {4.0, 5.0(t)} -// -// compact(5): -// generations: -// -// {empty} -> key SHOULD be removed. -// -// compact(6): -// generations: -// -// {empty} -> key SHOULD be removed. -type keyIndex struct { - key []byte - modified revision // the main rev of the last modification - generations []generation -} - -// put puts a revision to the keyIndex. 
-func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) { - rev := revision{main: main, sub: sub} - - if !rev.GreaterThan(ki.modified) { - lg.Panic( - "'put' with an unexpected smaller revision", - zap.Int64("given-revision-main", rev.main), - zap.Int64("given-revision-sub", rev.sub), - zap.Int64("modified-revision-main", ki.modified.main), - zap.Int64("modified-revision-sub", ki.modified.sub), - ) - } - if len(ki.generations) == 0 { - ki.generations = append(ki.generations, generation{}) - } - g := &ki.generations[len(ki.generations)-1] - if len(g.revs) == 0 { // create a new key - keysGauge.Inc() - g.created = rev - } - g.revs = append(g.revs, rev) - g.ver++ - ki.modified = rev -} - -func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) { - if len(ki.generations) != 0 { - lg.Panic( - "'restore' got an unexpected non-empty generations", - zap.Int("generations-size", len(ki.generations)), - ) - } - - ki.modified = modified - g := generation{created: created, ver: ver, revs: []revision{modified}} - ki.generations = append(ki.generations, g) - keysGauge.Inc() -} - -// tombstone puts a revision, pointing to a tombstone, to the keyIndex. -// It also creates a new empty generation in the keyIndex. -// It returns ErrRevisionNotFound when tombstone on an empty generation. -func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error { - if ki.isEmpty() { - lg.Panic( - "'tombstone' got an unexpected empty keyIndex", - zap.String("key", string(ki.key)), - ) - } - if ki.generations[len(ki.generations)-1].isEmpty() { - return ErrRevisionNotFound - } - ki.put(lg, main, sub) - ki.generations = append(ki.generations, generation{}) - keysGauge.Dec() - return nil -} - -// get gets the modified, created revision and version of the key that satisfies the given atRev. -// Rev must be smaller than or equal to the given atRev. 
-func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) { - if ki.isEmpty() { - lg.Panic( - "'get' got an unexpected empty keyIndex", - zap.String("key", string(ki.key)), - ) - } - g := ki.findGeneration(atRev) - if g.isEmpty() { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - - n := g.walk(func(rev revision) bool { return rev.main > atRev }) - if n != -1 { - return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil - } - - return revision{}, revision{}, 0, ErrRevisionNotFound -} - -// since returns revisions since the given rev. Only the revision with the -// largest sub revision will be returned if multiple revisions have the same -// main revision. -func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { - if ki.isEmpty() { - lg.Panic( - "'since' got an unexpected empty keyIndex", - zap.String("key", string(ki.key)), - ) - } - since := revision{rev, 0} - var gi int - // find the generations to start checking - for gi = len(ki.generations) - 1; gi > 0; gi-- { - g := ki.generations[gi] - if g.isEmpty() { - continue - } - if since.GreaterThan(g.created) { - break - } - } - - var revs []revision - var last int64 - for ; gi < len(ki.generations); gi++ { - for _, r := range ki.generations[gi].revs { - if since.GreaterThan(r) { - continue - } - if r.main == last { - // replace the revision with a new one that has higher sub value, - // because the original one should not be seen by external - revs[len(revs)-1] = r - continue - } - revs = append(revs, r) - last = r.main - } - } - return revs -} - -// compact compacts a keyIndex by removing the versions with smaller or equal -// revision than the given atRev except the largest one (If the largest one is -// a tombstone, it will not be kept). -// If a generation becomes empty during compaction, it will be removed. 
-func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) { - if ki.isEmpty() { - lg.Panic( - "'compact' got an unexpected empty keyIndex", - zap.String("key", string(ki.key)), - ) - } - - genIdx, revIndex := ki.doCompact(atRev, available) - - g := &ki.generations[genIdx] - if !g.isEmpty() { - // remove the previous contents. - if revIndex != -1 { - g.revs = g.revs[revIndex:] - } - // remove any tombstone - if len(g.revs) == 1 && genIdx != len(ki.generations)-1 { - delete(available, g.revs[0]) - genIdx++ - } - } - - // remove the previous generations. - ki.generations = ki.generations[genIdx:] -} - -// keep finds the revision to be kept if compact is called at given atRev. -func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) { - if ki.isEmpty() { - return - } - - genIdx, revIndex := ki.doCompact(atRev, available) - g := &ki.generations[genIdx] - if !g.isEmpty() { - // remove any tombstone - if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 { - delete(available, g.revs[revIndex]) - } - } -} - -func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) { - // walk until reaching the first revision smaller or equal to "atRev", - // and add the revision to the available map - f := func(rev revision) bool { - if rev.main <= atRev { - available[rev] = struct{}{} - return false - } - return true - } - - genIdx, g := 0, &ki.generations[0] - // find first generation includes atRev or created after atRev - for genIdx < len(ki.generations)-1 { - if tomb := g.revs[len(g.revs)-1].main; tomb > atRev { - break - } - genIdx++ - g = &ki.generations[genIdx] - } - - revIndex = g.walk(f) - - return genIdx, revIndex -} - -func (ki *keyIndex) isEmpty() bool { - return len(ki.generations) == 1 && ki.generations[0].isEmpty() -} - -// findGeneration finds out the generation of the keyIndex that the -// given rev belongs to. 
If the given rev is at the gap of two generations, -// which means that the key does not exist at the given rev, it returns nil. -func (ki *keyIndex) findGeneration(rev int64) *generation { - lastg := len(ki.generations) - 1 - cg := lastg - - for cg >= 0 { - if len(ki.generations[cg].revs) == 0 { - cg-- - continue - } - g := ki.generations[cg] - if cg != lastg { - if tomb := g.revs[len(g.revs)-1].main; tomb <= rev { - return nil - } - } - if g.revs[0].main <= rev { - return &ki.generations[cg] - } - cg-- - } - return nil -} - -func (ki *keyIndex) Less(bki *keyIndex) bool { - return bytes.Compare(ki.key, bki.key) == -1 -} - -func (ki *keyIndex) equal(b *keyIndex) bool { - if !bytes.Equal(ki.key, b.key) { - return false - } - if ki.modified != b.modified { - return false - } - if len(ki.generations) != len(b.generations) { - return false - } - for i := range ki.generations { - ag, bg := ki.generations[i], b.generations[i] - if !ag.equal(bg) { - return false - } - } - return true -} - -func (ki *keyIndex) String() string { - var s string - for _, g := range ki.generations { - s += g.String() - } - return s -} - -// generation contains multiple revisions of a key. -type generation struct { - ver int64 - created revision // when the generation is created (put in first revision). - revs []revision -} - -func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } - -// walk walks through the revisions in the generation in descending order. -// It passes the revision to the given function. -// walk returns until: 1. it finishes walking all pairs 2. the function returns false. -// walk returns the position at where it stopped. If it stopped after -// finishing walking, -1 will be returned. 
-func (g *generation) walk(f func(rev revision) bool) int { - l := len(g.revs) - for i := range g.revs { - ok := f(g.revs[l-i-1]) - if !ok { - return l - i - 1 - } - } - return -1 -} - -func (g *generation) String() string { - return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) -} - -func (g generation) equal(b generation) bool { - if g.ver != b.ver { - return false - } - if len(g.revs) != len(b.revs) { - return false - } - - for i := range g.revs { - ar, br := g.revs[i], b.revs[i] - if ar != br { - return false - } - } - return true -} diff --git a/server/storage/mvcc/key_index_test.go b/server/storage/mvcc/key_index_test.go deleted file mode 100644 index bb47d5f1e89..00000000000 --- a/server/storage/mvcc/key_index_test.go +++ /dev/null @@ -1,702 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "reflect" - "testing" - - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -func TestKeyIndexGet(t *testing.T) { - // key: "foo" - // modified: 16 - // generations: - // {empty} - // {{14, 0}[1], {14, 1}[2], {16, 0}(t)[3]} - // {{8, 0}[1], {10, 0}[2], {12, 0}(t)[3]} - // {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]} - ki := newTestKeyIndex(zaptest.NewLogger(t)) - ki.compact(zaptest.NewLogger(t), 4, make(map[revision]struct{})) - - tests := []struct { - rev int64 - - wmod revision - wcreat revision - wver int64 - werr error - }{ - {17, revision{}, revision{}, 0, ErrRevisionNotFound}, - {16, revision{}, revision{}, 0, ErrRevisionNotFound}, - - // get on generation 3 - {15, revision{14, 1}, revision{14, 0}, 2, nil}, - {14, revision{14, 1}, revision{14, 0}, 2, nil}, - - {13, revision{}, revision{}, 0, ErrRevisionNotFound}, - {12, revision{}, revision{}, 0, ErrRevisionNotFound}, - - // get on generation 2 - {11, revision{10, 0}, revision{8, 0}, 2, nil}, - {10, revision{10, 0}, revision{8, 0}, 2, nil}, - {9, revision{8, 0}, revision{8, 0}, 1, nil}, - {8, revision{8, 0}, revision{8, 0}, 1, nil}, - - {7, revision{}, revision{}, 0, ErrRevisionNotFound}, - {6, revision{}, revision{}, 0, ErrRevisionNotFound}, - - // get on generation 1 - {5, revision{4, 0}, revision{2, 0}, 2, nil}, - {4, revision{4, 0}, revision{2, 0}, 2, nil}, - - {3, revision{}, revision{}, 0, ErrRevisionNotFound}, - {2, revision{}, revision{}, 0, ErrRevisionNotFound}, - {1, revision{}, revision{}, 0, ErrRevisionNotFound}, - {0, revision{}, revision{}, 0, ErrRevisionNotFound}, - } - - for i, tt := range tests { - mod, creat, ver, err := ki.get(zaptest.NewLogger(t), tt.rev) - if err != tt.werr { - t.Errorf("#%d: err = %v, want %v", i, err, tt.werr) - } - if mod != tt.wmod { - t.Errorf("#%d: modified = %+v, want %+v", i, mod, tt.wmod) - } - if creat != tt.wcreat { - t.Errorf("#%d: created = %+v, want %+v", i, creat, tt.wcreat) - } - if ver != tt.wver { - t.Errorf("#%d: version = %d, 
want %d", i, ver, tt.wver) - } - } -} - -func TestKeyIndexSince(t *testing.T) { - ki := newTestKeyIndex(zaptest.NewLogger(t)) - ki.compact(zaptest.NewLogger(t), 4, make(map[revision]struct{})) - - allRevs := []revision{{4, 0}, {6, 0}, {8, 0}, {10, 0}, {12, 0}, {14, 1}, {16, 0}} - tests := []struct { - rev int64 - - wrevs []revision - }{ - {17, nil}, - {16, allRevs[6:]}, - {15, allRevs[6:]}, - {14, allRevs[5:]}, - {13, allRevs[5:]}, - {12, allRevs[4:]}, - {11, allRevs[4:]}, - {10, allRevs[3:]}, - {9, allRevs[3:]}, - {8, allRevs[2:]}, - {7, allRevs[2:]}, - {6, allRevs[1:]}, - {5, allRevs[1:]}, - {4, allRevs}, - {3, allRevs}, - {2, allRevs}, - {1, allRevs}, - {0, allRevs}, - } - - for i, tt := range tests { - revs := ki.since(zaptest.NewLogger(t), tt.rev) - if !reflect.DeepEqual(revs, tt.wrevs) { - t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs) - } - } -} - -func TestKeyIndexPut(t *testing.T) { - ki := &keyIndex{key: []byte("foo")} - ki.put(zaptest.NewLogger(t), 5, 0) - - wki := &keyIndex{ - key: []byte("foo"), - modified: revision{5, 0}, - generations: []generation{{created: revision{5, 0}, ver: 1, revs: []revision{{main: 5}}}}, - } - if !reflect.DeepEqual(ki, wki) { - t.Errorf("ki = %+v, want %+v", ki, wki) - } - - ki.put(zaptest.NewLogger(t), 7, 0) - - wki = &keyIndex{ - key: []byte("foo"), - modified: revision{7, 0}, - generations: []generation{{created: revision{5, 0}, ver: 2, revs: []revision{{main: 5}, {main: 7}}}}, - } - if !reflect.DeepEqual(ki, wki) { - t.Errorf("ki = %+v, want %+v", ki, wki) - } -} - -func TestKeyIndexRestore(t *testing.T) { - ki := &keyIndex{key: []byte("foo")} - ki.restore(zaptest.NewLogger(t), revision{5, 0}, revision{7, 0}, 2) - - wki := &keyIndex{ - key: []byte("foo"), - modified: revision{7, 0}, - generations: []generation{{created: revision{5, 0}, ver: 2, revs: []revision{{main: 7}}}}, - } - if !reflect.DeepEqual(ki, wki) { - t.Errorf("ki = %+v, want %+v", ki, wki) - } -} - -func TestKeyIndexTombstone(t *testing.T) { - ki 
:= &keyIndex{key: []byte("foo")} - ki.put(zaptest.NewLogger(t), 5, 0) - - err := ki.tombstone(zaptest.NewLogger(t), 7, 0) - if err != nil { - t.Errorf("unexpected tombstone error: %v", err) - } - - wki := &keyIndex{ - key: []byte("foo"), - modified: revision{7, 0}, - generations: []generation{{created: revision{5, 0}, ver: 2, revs: []revision{{main: 5}, {main: 7}}}, {}}, - } - if !reflect.DeepEqual(ki, wki) { - t.Errorf("ki = %+v, want %+v", ki, wki) - } - - ki.put(zaptest.NewLogger(t), 8, 0) - ki.put(zaptest.NewLogger(t), 9, 0) - err = ki.tombstone(zaptest.NewLogger(t), 15, 0) - if err != nil { - t.Errorf("unexpected tombstone error: %v", err) - } - - wki = &keyIndex{ - key: []byte("foo"), - modified: revision{15, 0}, - generations: []generation{ - {created: revision{5, 0}, ver: 2, revs: []revision{{main: 5}, {main: 7}}}, - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 9}, {main: 15}}}, - {}, - }, - } - if !reflect.DeepEqual(ki, wki) { - t.Errorf("ki = %+v, want %+v", ki, wki) - } - - err = ki.tombstone(zaptest.NewLogger(t), 16, 0) - if err != ErrRevisionNotFound { - t.Errorf("tombstone error = %v, want %v", err, ErrRevisionNotFound) - } -} - -func TestKeyIndexCompactAndKeep(t *testing.T) { - tests := []struct { - compact int64 - - wki *keyIndex - wam map[revision]struct{} - }{ - { - 1, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{2, 0}, ver: 3, revs: []revision{{main: 2}, {main: 4}, {main: 6}}}, - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{}, - }, - { - 2, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{2, 0}, ver: 3, revs: []revision{{main: 2}, {main: 4}, {main: 6}}}, - {created: revision{8, 0}, ver: 3, revs: 
[]revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 2}: {}, - }, - }, - { - 3, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{2, 0}, ver: 3, revs: []revision{{main: 2}, {main: 4}, {main: 6}}}, - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 2}: {}, - }, - }, - { - 4, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{2, 0}, ver: 3, revs: []revision{{main: 4}, {main: 6}}}, - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 4}: {}, - }, - }, - { - 5, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{2, 0}, ver: 3, revs: []revision{{main: 4}, {main: 6}}}, - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 4}: {}, - }, - }, - { - 6, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{}, - }, - { - 7, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: 
revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{}, - }, - { - 8, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 8}: {}, - }, - }, - { - 9, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 8}, {main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 8}: {}, - }, - }, - { - 10, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 10}: {}, - }, - }, - { - 11, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{8, 0}, ver: 3, revs: []revision{{main: 10}, {main: 12}}}, - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 10}: {}, - }, - }, - { - 12, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{}, - }, - { - 13, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: 
[]generation{ - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14}, {main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{}, - }, - { - 14, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 14, sub: 1}: {}, - }, - }, - { - 15, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {created: revision{14, 0}, ver: 3, revs: []revision{{main: 14, sub: 1}, {main: 16}}}, - {}, - }, - }, - map[revision]struct{}{ - {main: 14, sub: 1}: {}, - }, - }, - { - 16, - &keyIndex{ - key: []byte("foo"), - modified: revision{16, 0}, - generations: []generation{ - {}, - }, - }, - map[revision]struct{}{}, - }, - } - - // Continuous Compaction and finding Keep - ki := newTestKeyIndex(zaptest.NewLogger(t)) - for i, tt := range tests { - am := make(map[revision]struct{}) - kiclone := cloneKeyIndex(ki) - ki.keep(tt.compact, am) - if !reflect.DeepEqual(ki, kiclone) { - t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiclone) - } - if !reflect.DeepEqual(am, tt.wam) { - t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam) - } - am = make(map[revision]struct{}) - ki.compact(zaptest.NewLogger(t), tt.compact, am) - if !reflect.DeepEqual(ki, tt.wki) { - t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki) - } - if !reflect.DeepEqual(am, tt.wam) { - t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam) - } - } - - // Jump Compaction and finding Keep - ki = newTestKeyIndex(zaptest.NewLogger(t)) - for i, tt := range tests { - if (i%2 == 0 && i < 6) || (i%2 == 1 && i > 6) { - am := make(map[revision]struct{}) - kiclone := cloneKeyIndex(ki) - ki.keep(tt.compact, am) - if !reflect.DeepEqual(ki, kiclone) { - t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiclone) - } - if !reflect.DeepEqual(am, tt.wam) { - t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam) - } - am = 
make(map[revision]struct{}) - ki.compact(zaptest.NewLogger(t), tt.compact, am) - if !reflect.DeepEqual(ki, tt.wki) { - t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki) - } - if !reflect.DeepEqual(am, tt.wam) { - t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam) - } - } - } - - kiClone := newTestKeyIndex(zaptest.NewLogger(t)) - // Once Compaction and finding Keep - for i, tt := range tests { - ki := newTestKeyIndex(zaptest.NewLogger(t)) - am := make(map[revision]struct{}) - ki.keep(tt.compact, am) - if !reflect.DeepEqual(ki, kiClone) { - t.Errorf("#%d: ki = %+v, want %+v", i, ki, kiClone) - } - if !reflect.DeepEqual(am, tt.wam) { - t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam) - } - am = make(map[revision]struct{}) - ki.compact(zaptest.NewLogger(t), tt.compact, am) - if !reflect.DeepEqual(ki, tt.wki) { - t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki) - } - if !reflect.DeepEqual(am, tt.wam) { - t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam) - } - } -} - -func cloneKeyIndex(ki *keyIndex) *keyIndex { - generations := make([]generation, len(ki.generations)) - for i, gen := range ki.generations { - generations[i] = *cloneGeneration(&gen) - } - return &keyIndex{ki.key, ki.modified, generations} -} - -func cloneGeneration(g *generation) *generation { - if g.revs == nil { - return &generation{g.ver, g.created, nil} - } - tmp := make([]revision, len(g.revs)) - copy(tmp, g.revs) - return &generation{g.ver, g.created, tmp} -} - -// TestKeyIndexCompactOnFurtherRev tests that compact on version that -// higher than last modified version works well -func TestKeyIndexCompactOnFurtherRev(t *testing.T) { - ki := &keyIndex{key: []byte("foo")} - ki.put(zaptest.NewLogger(t), 1, 0) - ki.put(zaptest.NewLogger(t), 2, 0) - am := make(map[revision]struct{}) - ki.compact(zaptest.NewLogger(t), 3, am) - - wki := &keyIndex{ - key: []byte("foo"), - modified: revision{2, 0}, - generations: []generation{ - {created: revision{1, 0}, ver: 2, revs: []revision{{main: 2}}}, - }, - } - 
wam := map[revision]struct{}{ - {main: 2}: {}, - } - if !reflect.DeepEqual(ki, wki) { - t.Errorf("ki = %+v, want %+v", ki, wki) - } - if !reflect.DeepEqual(am, wam) { - t.Errorf("am = %+v, want %+v", am, wam) - } -} - -func TestKeyIndexIsEmpty(t *testing.T) { - tests := []struct { - ki *keyIndex - w bool - }{ - { - &keyIndex{ - key: []byte("foo"), - generations: []generation{{}}, - }, - true, - }, - { - &keyIndex{ - key: []byte("foo"), - modified: revision{2, 0}, - generations: []generation{ - {created: revision{1, 0}, ver: 2, revs: []revision{{main: 2}}}, - }, - }, - false, - }, - } - for i, tt := range tests { - g := tt.ki.isEmpty() - if g != tt.w { - t.Errorf("#%d: isEmpty = %v, want %v", i, g, tt.w) - } - } -} - -func TestKeyIndexFindGeneration(t *testing.T) { - ki := newTestKeyIndex(zaptest.NewLogger(t)) - - tests := []struct { - rev int64 - wg *generation - }{ - {0, nil}, - {1, nil}, - {2, &ki.generations[0]}, - {3, &ki.generations[0]}, - {4, &ki.generations[0]}, - {5, &ki.generations[0]}, - {6, nil}, - {7, nil}, - {8, &ki.generations[1]}, - {9, &ki.generations[1]}, - {10, &ki.generations[1]}, - {11, &ki.generations[1]}, - {12, nil}, - {13, nil}, - } - for i, tt := range tests { - g := ki.findGeneration(tt.rev) - if g != tt.wg { - t.Errorf("#%d: generation = %+v, want %+v", i, g, tt.wg) - } - } -} - -func TestKeyIndexLess(t *testing.T) { - ki := &keyIndex{key: []byte("foo")} - - tests := []struct { - ki *keyIndex - w bool - }{ - {&keyIndex{key: []byte("doo")}, false}, - {&keyIndex{key: []byte("foo")}, false}, - {&keyIndex{key: []byte("goo")}, true}, - } - for i, tt := range tests { - g := ki.Less(tt.ki) - if g != tt.w { - t.Errorf("#%d: Less = %v, want %v", i, g, tt.w) - } - } -} - -func TestGenerationIsEmpty(t *testing.T) { - tests := []struct { - g *generation - w bool - }{ - {nil, true}, - {&generation{}, true}, - {&generation{revs: []revision{{main: 1}}}, false}, - } - for i, tt := range tests { - g := tt.g.isEmpty() - if g != tt.w { - t.Errorf("#%d: 
isEmpty = %v, want %v", i, g, tt.w) - } - } -} - -func TestGenerationWalk(t *testing.T) { - g := &generation{ - ver: 3, - created: revision{2, 0}, - revs: []revision{{main: 2}, {main: 4}, {main: 6}}, - } - tests := []struct { - f func(rev revision) bool - wi int - }{ - {func(rev revision) bool { return rev.main >= 7 }, 2}, - {func(rev revision) bool { return rev.main >= 6 }, 1}, - {func(rev revision) bool { return rev.main >= 5 }, 1}, - {func(rev revision) bool { return rev.main >= 4 }, 0}, - {func(rev revision) bool { return rev.main >= 3 }, 0}, - {func(rev revision) bool { return rev.main >= 2 }, -1}, - } - for i, tt := range tests { - idx := g.walk(tt.f) - if idx != tt.wi { - t.Errorf("#%d: index = %d, want %d", i, idx, tt.wi) - } - } -} - -func newTestKeyIndex(lg *zap.Logger) *keyIndex { - // key: "foo" - // modified: 16 - // generations: - // {empty} - // {{14, 0}[1], {14, 1}[2], {16, 0}(t)[3]} - // {{8, 0}[1], {10, 0}[2], {12, 0}(t)[3]} - // {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]} - - ki := &keyIndex{key: []byte("foo")} - ki.put(lg, 2, 0) - ki.put(lg, 4, 0) - ki.tombstone(lg, 6, 0) - ki.put(lg, 8, 0) - ki.put(lg, 10, 0) - ki.tombstone(lg, 12, 0) - ki.put(lg, 14, 0) - ki.put(lg, 14, 1) - ki.tombstone(lg, 16, 0) - return ki -} diff --git a/server/storage/mvcc/kv.go b/server/storage/mvcc/kv.go deleted file mode 100644 index 6250bb91198..00000000000 --- a/server/storage/mvcc/kv.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "context" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" -) - -type RangeOptions struct { - Limit int64 - Rev int64 - Count bool -} - -type RangeResult struct { - KVs []mvccpb.KeyValue - Rev int64 - Count int -} - -type ReadView interface { - // FirstRev returns the first KV revision at the time of opening the txn. - // After a compaction, the first revision increases to the compaction - // revision. - FirstRev() int64 - - // Rev returns the revision of the KV at the time of opening the txn. - Rev() int64 - - // Range gets the keys in the range at rangeRev. - // The returned rev is the current revision of the KV when the operation is executed. - // If rangeRev <=0, range gets the keys at currentRev. - // If `end` is nil, the request returns the key. - // If `end` is not nil and not empty, it gets the keys in range [key, range_end). - // If `end` is not nil and empty, it gets the keys greater than or equal to key. - // Limit limits the number of keys returned. - // If the required rev is compacted, ErrCompacted will be returned. - Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) -} - -// TxnRead represents a read-only transaction with operations that will not -// block other read transactions. -type TxnRead interface { - ReadView - // End marks the transaction is complete and ready to commit. - End() -} - -type WriteView interface { - // DeleteRange deletes the given range from the store. - // A deleteRange increases the rev of the store if any key in the range exists. - // The number of key deleted will be returned. - // The returned rev is the current revision of the KV when the operation is executed. - // It also generates one event for each key delete in the event history. 
- // if the `end` is nil, deleteRange deletes the key. - // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). - DeleteRange(key, end []byte) (n, rev int64) - - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. - // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. - Put(key, value []byte, lease lease.LeaseID) (rev int64) -} - -// TxnWrite represents a transaction that can modify the store. -type TxnWrite interface { - TxnRead - WriteView - // Changes gets the changes made since opening the write txn. - Changes() []mvccpb.KeyValue -} - -// txnReadWrite coerces a read txn to a write, panicking on any write operation. -type txnReadWrite struct{ TxnRead } - -func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } -func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - panic("unexpected Put") -} -func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } - -func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } - -type ReadTxMode uint32 - -const ( - // Use ConcurrentReadTx and the txReadBuffer is copied - ConcurrentReadTxMode = ReadTxMode(1) - // Use backend ReadTx and txReadBuffer is not copied - SharedBufReadTxMode = ReadTxMode(2) -) - -type KV interface { - ReadView - WriteView - - // Read creates a read transaction. - Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead - - // Write creates a write transaction. - Write(trace *traceutil.Trace) TxnWrite - - // HashStorage returns HashStorage interface for KV storage. - HashStorage() HashStorage - - // Compact frees all superseded keys with revisions less than rev. 
- Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) - - // Commit commits outstanding txns into the underlying backend. - Commit() - - // Restore restores the KV store from a backend. - Restore(b backend.Backend) error - Close() error -} - -// WatchableKV is a KV that can be watched. -type WatchableKV interface { - KV - Watchable -} - -// Watchable is the interface that wraps the NewWatchStream function. -type Watchable interface { - // NewWatchStream returns a WatchStream that can be used to - // watch events happened or happening on the KV. - NewWatchStream() WatchStream -} diff --git a/server/storage/mvcc/kv_test.go b/server/storage/mvcc/kv_test.go deleted file mode 100644 index bc2081b02ec..00000000000 --- a/server/storage/mvcc/kv_test.go +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "context" - "fmt" - "os" - "reflect" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" -) - -// Functional tests for features implemented in v3 store. 
It treats v3 store -// as a black box, and tests it by feeding the input and validating the output. - -// TODO: add similar tests on operations in one txn/rev - -type ( - rangeFunc func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) - putFunc func(kv KV, key, value []byte, lease lease.LeaseID) int64 - deleteRangeFunc func(kv KV, key, end []byte) (n, rev int64) -) - -var ( - normalRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) { - return kv.Range(context.TODO(), key, end, ro) - } - txnRangeFunc = func(kv KV, key, end []byte, ro RangeOptions) (*RangeResult, error) { - txn := kv.Read(ConcurrentReadTxMode, traceutil.TODO()) - defer txn.End() - return txn.Range(context.TODO(), key, end, ro) - } - - normalPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 { - return kv.Put(key, value, lease) - } - txnPutFunc = func(kv KV, key, value []byte, lease lease.LeaseID) int64 { - txn := kv.Write(traceutil.TODO()) - defer txn.End() - return txn.Put(key, value, lease) - } - - normalDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) { - return kv.DeleteRange(key, end) - } - txnDeleteRangeFunc = func(kv KV, key, end []byte) (n, rev int64) { - txn := kv.Write(traceutil.TODO()) - defer txn.End() - return txn.DeleteRange(key, end) - } -) - -func TestKVRange(t *testing.T) { testKVRange(t, normalRangeFunc) } -func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) } - -func testKVRange(t *testing.T, f rangeFunc) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - kvs := put3TestKVs(s) - - wrev := int64(4) - tests := []struct { - key, end []byte - wkvs []mvccpb.KeyValue - }{ - // get no keys - { - []byte("doo"), []byte("foo"), - nil, - }, - // get no keys when key == end - { - []byte("foo"), []byte("foo"), - nil, - }, - // get no keys when ranging single key - { - []byte("doo"), nil, - nil, - }, 
- // get all keys - { - []byte("foo"), []byte("foo3"), - kvs, - }, - // get partial keys - { - []byte("foo"), []byte("foo1"), - kvs[:1], - }, - // get single key - { - []byte("foo"), nil, - kvs[:1], - }, - // get entire keyspace - { - []byte(""), []byte(""), - kvs, - }, - } - - for i, tt := range tests { - r, err := f(s, tt.key, tt.end, RangeOptions{}) - if err != nil { - t.Fatal(err) - } - if r.Rev != wrev { - t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev) - } - if !reflect.DeepEqual(r.KVs, tt.wkvs) { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs) - } - } -} - -func TestKVRangeRev(t *testing.T) { testKVRangeRev(t, normalRangeFunc) } -func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) } - -func testKVRangeRev(t *testing.T, f rangeFunc) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - kvs := put3TestKVs(s) - - tests := []struct { - rev int64 - wrev int64 - wkvs []mvccpb.KeyValue - }{ - {-1, 4, kvs}, - {0, 4, kvs}, - {2, 4, kvs[:1]}, - {3, 4, kvs[:2]}, - {4, 4, kvs}, - } - - for i, tt := range tests { - r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev}) - if err != nil { - t.Fatal(err) - } - if r.Rev != tt.wrev { - t.Errorf("#%d: rev = %d, want %d", i, r.Rev, tt.wrev) - } - if !reflect.DeepEqual(r.KVs, tt.wkvs) { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs) - } - } -} - -func TestKVRangeBadRev(t *testing.T) { testKVRangeBadRev(t, normalRangeFunc) } -func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) } - -func testKVRangeBadRev(t *testing.T, f rangeFunc) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - put3TestKVs(s) - if _, err := s.Compact(traceutil.TODO(), 4); err != nil { - t.Fatalf("compact error (%v)", err) - } - - tests := []struct { - rev 
int64 - werr error - }{ - {-1, nil}, // <= 0 is most recent store - {0, nil}, - {1, ErrCompacted}, - {2, ErrCompacted}, - {4, nil}, - {5, ErrFutureRev}, - {100, ErrFutureRev}, - } - for i, tt := range tests { - _, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Rev: tt.rev}) - if err != tt.werr { - t.Errorf("#%d: error = %v, want %v", i, err, tt.werr) - } - } -} - -func TestKVRangeLimit(t *testing.T) { testKVRangeLimit(t, normalRangeFunc) } -func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) } - -func testKVRangeLimit(t *testing.T, f rangeFunc) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - kvs := put3TestKVs(s) - - wrev := int64(4) - tests := []struct { - limit int64 - wcounts int64 - wkvs []mvccpb.KeyValue - }{ - // no limit - {-1, 3, kvs}, - // no limit - {0, 3, kvs}, - {1, 3, kvs[:1]}, - {2, 3, kvs[:2]}, - {3, 3, kvs}, - {100, 3, kvs}, - } - for i, tt := range tests { - r, err := f(s, []byte("foo"), []byte("foo3"), RangeOptions{Limit: tt.limit}) - if err != nil { - t.Fatalf("#%d: range error (%v)", i, err) - } - if !reflect.DeepEqual(r.KVs, tt.wkvs) { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs) - } - if r.Rev != wrev { - t.Errorf("#%d: rev = %d, want %d", i, r.Rev, wrev) - } - if tt.limit <= 0 || int(tt.limit) > len(kvs) { - if r.Count != len(kvs) { - t.Errorf("#%d: count = %d, want %d", i, r.Count, len(kvs)) - } - } else if r.Count != int(tt.wcounts) { - t.Errorf("#%d: count = %d, want %d", i, r.Count, tt.limit) - } - } -} - -func TestKVPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, normalPutFunc) } -func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutFunc) } - -func testKVPutMultipleTimes(t *testing.T, f putFunc) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, 
tmpPath) - - for i := 0; i < 10; i++ { - base := int64(i + 1) - - rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(base)) - if rev != base+1 { - t.Errorf("#%d: rev = %d, want %d", i, rev, base+1) - } - - r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{}) - if err != nil { - t.Fatal(err) - } - wkvs := []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: base + 1, Version: base, Lease: base}, - } - if !reflect.DeepEqual(r.KVs, wkvs) { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs) - } - } -} - -func TestKVDeleteRange(t *testing.T) { testKVDeleteRange(t, normalDeleteRangeFunc) } -func TestKVTxnDeleteRange(t *testing.T) { testKVDeleteRange(t, txnDeleteRangeFunc) } - -func testKVDeleteRange(t *testing.T, f deleteRangeFunc) { - tests := []struct { - key, end []byte - - wrev int64 - wN int64 - }{ - { - []byte("foo"), nil, - 5, 1, - }, - { - []byte("foo"), []byte("foo1"), - 5, 1, - }, - { - []byte("foo"), []byte("foo2"), - 5, 2, - }, - { - []byte("foo"), []byte("foo3"), - 5, 3, - }, - { - []byte("foo3"), []byte("foo8"), - 4, 0, - }, - { - []byte("foo3"), nil, - 4, 0, - }, - } - - for i, tt := range tests { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - s.Put([]byte("foo"), []byte("bar"), lease.NoLease) - s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease) - s.Put([]byte("foo2"), []byte("bar2"), lease.NoLease) - - n, rev := f(s, tt.key, tt.end) - if n != tt.wN || rev != tt.wrev { - t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, tt.wN, tt.wrev) - } - - cleanup(s, b, tmpPath) - } -} - -func TestKVDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, normalDeleteRangeFunc) } -func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, txnDeleteRangeFunc) } - -func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - 
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - s.Put([]byte("foo"), []byte("bar"), lease.NoLease) - - n, rev := f(s, []byte("foo"), nil) - if n != 1 || rev != 3 { - t.Fatalf("n = %d, rev = %d, want (%d, %d)", n, rev, 1, 3) - } - - for i := 0; i < 10; i++ { - n, rev := f(s, []byte("foo"), nil) - if n != 0 || rev != 3 { - t.Fatalf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 0, 3) - } - } -} - -func TestKVPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, normalPutFunc) } -func TestKVTxnPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, txnPutFunc) } - -func testKVPutWithSameLease(t *testing.T, f putFunc) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - leaseID := int64(1) - - // put foo - rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(leaseID)) - if rev != 2 { - t.Errorf("rev = %d, want %d", 2, rev) - } - - // put foo with same lease again - rev2 := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(leaseID)) - if rev2 != 3 { - t.Errorf("rev = %d, want %d", 3, rev2) - } - - // check leaseID - r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{}) - if err != nil { - t.Fatal(err) - } - wkvs := []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2, Lease: leaseID}, - } - if !reflect.DeepEqual(r.KVs, wkvs) { - t.Errorf("kvs = %+v, want %+v", r.KVs, wkvs) - } -} - -// TestKVOperationInSequence tests that range, put, delete on single key in -// sequence repeatedly works correctly. 
-func TestKVOperationInSequence(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - for i := 0; i < 10; i++ { - base := int64(i*2 + 1) - - // put foo - rev := s.Put([]byte("foo"), []byte("bar"), lease.NoLease) - if rev != base+1 { - t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1) - } - - r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1}) - if err != nil { - t.Fatal(err) - } - wkvs := []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)}, - } - if !reflect.DeepEqual(r.KVs, wkvs) { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs) - } - if r.Rev != base+1 { - t.Errorf("#%d: range rev = %d, want %d", i, rev, base+1) - } - - // delete foo - n, rev := s.DeleteRange([]byte("foo"), nil) - if n != 1 || rev != base+2 { - t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+2) - } - - r, err = s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 2}) - if err != nil { - t.Fatal(err) - } - if r.KVs != nil { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil) - } - if r.Rev != base+2 { - t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+2) - } - } -} - -func TestKVTxnBlockWriteOperations(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - tests := []func(){ - func() { s.Put([]byte("foo"), nil, lease.NoLease) }, - func() { s.DeleteRange([]byte("foo"), nil) }, - } - for i, tt := range tests { - tf := tt - txn := s.Write(traceutil.TODO()) - done := make(chan struct{}, 1) - go func() { - tf() - done <- struct{}{} - }() - select { - case <-done: - t.Fatalf("#%d: operation failed to be blocked", i) - case <-time.After(10 * time.Millisecond): - } - - txn.End() - select { - case <-done: - 
case <-time.After(10 * time.Second): - testutil.FatalStack(t, fmt.Sprintf("#%d: operation failed to be unblocked", i)) - } - } - - // only close backend when we know all the tx are finished - cleanup(s, b, tmpPath) -} - -func TestKVTxnNonBlockRange(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - txn := s.Write(traceutil.TODO()) - defer txn.End() - - donec := make(chan struct{}) - go func() { - defer close(donec) - s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{}) - }() - select { - case <-donec: - case <-time.After(100 * time.Millisecond): - t.Fatalf("range operation blocked on write txn") - } -} - -// TestKVTxnOperationInSequence tests that txn range, put, delete on single key -// in sequence repeatedly works correctly. -func TestKVTxnOperationInSequence(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - for i := 0; i < 10; i++ { - txn := s.Write(traceutil.TODO()) - base := int64(i + 1) - - // put foo - rev := txn.Put([]byte("foo"), []byte("bar"), lease.NoLease) - if rev != base+1 { - t.Errorf("#%d: put rev = %d, want %d", i, rev, base+1) - } - - r, err := txn.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1}) - if err != nil { - t.Fatal(err) - } - wkvs := []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: base + 1, ModRevision: base + 1, Version: 1, Lease: int64(lease.NoLease)}, - } - if !reflect.DeepEqual(r.KVs, wkvs) { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, wkvs) - } - if r.Rev != base+1 { - t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1) - } - - // delete foo - n, rev := txn.DeleteRange([]byte("foo"), nil) - if n != 1 || rev != base+1 { - t.Errorf("#%d: n = %d, rev = %d, want (%d, %d)", i, n, rev, 1, base+1) - } - - r, err = 
txn.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: base + 1}) - if err != nil { - t.Errorf("#%d: range error (%v)", i, err) - } - if r.KVs != nil { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, nil) - } - if r.Rev != base+1 { - t.Errorf("#%d: range rev = %d, want %d", i, r.Rev, base+1) - } - - txn.End() - } -} - -func TestKVCompactReserveLastValue(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - s.Put([]byte("foo"), []byte("bar0"), 1) - s.Put([]byte("foo"), []byte("bar1"), 2) - s.DeleteRange([]byte("foo"), nil) - s.Put([]byte("foo"), []byte("bar2"), 3) - - // rev in tests will be called in Compact() one by one on the same store - tests := []struct { - rev int64 - // wanted kvs right after the compacted rev - wkvs []mvccpb.KeyValue - }{ - { - 1, - []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar0"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1}, - }, - }, - { - 2, - []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar1"), CreateRevision: 2, ModRevision: 3, Version: 2, Lease: 2}, - }, - }, - { - 3, - nil, - }, - { - 4, - []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar2"), CreateRevision: 5, ModRevision: 5, Version: 1, Lease: 3}, - }, - }, - } - for i, tt := range tests { - _, err := s.Compact(traceutil.TODO(), tt.rev) - if err != nil { - t.Errorf("#%d: unexpect compact error %v", i, err) - } - r, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: tt.rev + 1}) - if err != nil { - t.Errorf("#%d: unexpect range error %v", i, err) - } - if !reflect.DeepEqual(r.KVs, tt.wkvs) { - t.Errorf("#%d: kvs = %+v, want %+v", i, r.KVs, tt.wkvs) - } - } -} - -func TestKVCompactBad(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - s.Put([]byte("foo"), 
[]byte("bar0"), lease.NoLease) - s.Put([]byte("foo"), []byte("bar1"), lease.NoLease) - s.Put([]byte("foo"), []byte("bar2"), lease.NoLease) - - // rev in tests will be called in Compact() one by one on the same store - tests := []struct { - rev int64 - werr error - }{ - {0, nil}, - {1, nil}, - {1, ErrCompacted}, - {4, nil}, - {5, ErrFutureRev}, - {100, ErrFutureRev}, - } - for i, tt := range tests { - _, err := s.Compact(traceutil.TODO(), tt.rev) - if err != tt.werr { - t.Errorf("#%d: compact error = %v, want %v", i, err, tt.werr) - } - } -} - -func TestKVHash(t *testing.T) { - hashes := make([]uint32, 3) - - for i := 0; i < len(hashes); i++ { - var err error - b, tmpPath := betesting.NewDefaultTmpBackend(t) - kv := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease) - kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease) - hashes[i], _, err = kv.hash() - if err != nil { - t.Fatalf("failed to get hash: %v", err) - } - cleanup(kv, b, tmpPath) - } - - for i := 1; i < len(hashes); i++ { - if hashes[i-1] != hashes[i] { - t.Errorf("hash[%d](%d) != hash[%d](%d)", i-1, hashes[i-1], i, hashes[i]) - } - } -} - -func TestKVRestore(t *testing.T) { - tests := []func(kv KV){ - func(kv KV) { - kv.Put([]byte("foo"), []byte("bar0"), 1) - kv.Put([]byte("foo"), []byte("bar1"), 2) - kv.Put([]byte("foo"), []byte("bar2"), 3) - kv.Put([]byte("foo2"), []byte("bar0"), 1) - }, - func(kv KV) { - kv.Put([]byte("foo"), []byte("bar0"), 1) - kv.DeleteRange([]byte("foo"), nil) - kv.Put([]byte("foo"), []byte("bar1"), 2) - }, - func(kv KV) { - kv.Put([]byte("foo"), []byte("bar0"), 1) - kv.Put([]byte("foo"), []byte("bar1"), 2) - kv.Compact(traceutil.TODO(), 1) - }, - } - for i, tt := range tests { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - tt(s) - var kvss [][]mvccpb.KeyValue - for k := int64(0); k < 10; k++ { - r, _ := 
s.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k}) - kvss = append(kvss, r.KVs) - } - - keysBefore := readGaugeInt(keysGauge) - s.Close() - - // ns should recover the previous state from backend. - ns := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore { - t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore) - } - - // wait for possible compaction to finish - testutil.WaitSchedule() - var nkvss [][]mvccpb.KeyValue - for k := int64(0); k < 10; k++ { - r, _ := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{Rev: k}) - nkvss = append(nkvss, r.KVs) - } - cleanup(ns, b, tmpPath) - - if !reflect.DeepEqual(nkvss, kvss) { - t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss) - } - } -} - -func readGaugeInt(g prometheus.Gauge) int { - ch := make(chan prometheus.Metric, 1) - g.Collect(ch) - m := <-ch - mm := &dto.Metric{} - m.Write(mm) - return int(mm.GetGauge().GetValue()) -} - -func TestKVSnapshot(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - wkvs := put3TestKVs(s) - - newPath := "new_test" - f, err := os.Create(newPath) - if err != nil { - t.Fatal(err) - } - defer os.Remove(newPath) - - snap := s.b.Snapshot() - defer snap.Close() - _, err = snap.WriteTo(f) - if err != nil { - t.Fatal(err) - } - f.Close() - - ns := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer ns.Close() - r, err := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{}) - if err != nil { - t.Errorf("unexpect range error (%v)", err) - } - if !reflect.DeepEqual(r.KVs, wkvs) { - t.Errorf("kvs = %+v, want %+v", r.KVs, wkvs) - } - if r.Rev != 4 { - t.Errorf("rev = %d, want %d", r.Rev, 4) - } -} - -func TestWatchableKVWatch(t *testing.T) { - b, tmpPath := 
betesting.NewDefaultTmpBackend(t) - s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})) - defer cleanup(s, b, tmpPath) - - w := s.NewWatchStream() - defer w.Close() - - wid, _ := w.Watch(0, []byte("foo"), []byte("fop"), 0) - - wev := []mvccpb.Event{ - {Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 2, - ModRevision: 2, - Version: 1, - Lease: 1, - }, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{ - Key: []byte("foo1"), - Value: []byte("bar1"), - CreateRevision: 3, - ModRevision: 3, - Version: 1, - Lease: 2, - }, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{ - Key: []byte("foo1"), - Value: []byte("bar11"), - CreateRevision: 3, - ModRevision: 4, - Version: 2, - Lease: 3, - }, - }, - } - - s.Put([]byte("foo"), []byte("bar"), 1) - select { - case resp := <-w.Chan(): - if resp.WatchID != wid { - t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid) - } - ev := resp.Events[0] - if !reflect.DeepEqual(ev, wev[0]) { - t.Errorf("watched event = %+v, want %+v", ev, wev[0]) - } - case <-time.After(5 * time.Second): - // CPU might be too slow, and the routine is not able to switch around - testutil.FatalStack(t, "failed to watch the event") - } - - s.Put([]byte("foo1"), []byte("bar1"), 2) - select { - case resp := <-w.Chan(): - if resp.WatchID != wid { - t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid) - } - ev := resp.Events[0] - if !reflect.DeepEqual(ev, wev[1]) { - t.Errorf("watched event = %+v, want %+v", ev, wev[1]) - } - case <-time.After(5 * time.Second): - testutil.FatalStack(t, "failed to watch the event") - } - - w = s.NewWatchStream() - wid, _ = w.Watch(0, []byte("foo1"), []byte("foo2"), 3) - - select { - case resp := <-w.Chan(): - if resp.WatchID != wid { - t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid) - } - ev := resp.Events[0] - if !reflect.DeepEqual(ev, wev[1]) { - t.Errorf("watched event = %+v, want %+v", 
ev, wev[1]) - } - case <-time.After(5 * time.Second): - testutil.FatalStack(t, "failed to watch the event") - } - - s.Put([]byte("foo1"), []byte("bar11"), 3) - select { - case resp := <-w.Chan(): - if resp.WatchID != wid { - t.Errorf("resp.WatchID got = %d, want = %d", resp.WatchID, wid) - } - ev := resp.Events[0] - if !reflect.DeepEqual(ev, wev[2]) { - t.Errorf("watched event = %+v, want %+v", ev, wev[2]) - } - case <-time.After(5 * time.Second): - testutil.FatalStack(t, "failed to watch the event") - } -} - -func cleanup(s KV, b backend.Backend, path string) { - s.Close() - b.Close() - os.Remove(path) -} - -func put3TestKVs(s KV) []mvccpb.KeyValue { - s.Put([]byte("foo"), []byte("bar"), 1) - s.Put([]byte("foo1"), []byte("bar1"), 2) - s.Put([]byte("foo2"), []byte("bar2"), 3) - return []mvccpb.KeyValue{ - {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1, Lease: 1}, - {Key: []byte("foo1"), Value: []byte("bar1"), CreateRevision: 3, ModRevision: 3, Version: 1, Lease: 2}, - {Key: []byte("foo2"), Value: []byte("bar2"), CreateRevision: 4, ModRevision: 4, Version: 1, Lease: 3}, - } -} diff --git a/server/storage/mvcc/kv_view.go b/server/storage/mvcc/kv_view.go deleted file mode 100644 index 56260e7599a..00000000000 --- a/server/storage/mvcc/kv_view.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "context" - - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" -) - -type readView struct{ kv KV } - -func (rv *readView) FirstRev() int64 { - tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO()) - defer tr.End() - return tr.FirstRev() -} - -func (rv *readView) Rev() int64 { - tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO()) - defer tr.End() - return tr.Rev() -} - -func (rv *readView) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO()) - defer tr.End() - return tr.Range(ctx, key, end, ro) -} - -type writeView struct{ kv KV } - -func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { - tw := wv.kv.Write(traceutil.TODO()) - defer tw.End() - return tw.DeleteRange(key, end) -} - -func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw := wv.kv.Write(traceutil.TODO()) - defer tw.End() - return tw.Put(key, value, lease) -} diff --git a/server/storage/mvcc/kvstore.go b/server/storage/mvcc/kvstore.go deleted file mode 100644 index 8bc1b07d997..00000000000 --- a/server/storage/mvcc/kvstore.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "context" - "errors" - "fmt" - "math" - "sync" - "time" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/schedule" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" - - "go.uber.org/zap" -) - -var ( - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") -) - -const ( - // markedRevBytesLen is the byte length of marked revision. - // The first `revBytesLen` bytes represents a normal revision. The last - // one byte is the mark. - markedRevBytesLen = revBytesLen + 1 - markBytePosition = markedRevBytesLen - 1 - markTombstone byte = 't' -) - -var restoreChunkKeys = 10000 // non-const for testing -var defaultCompactBatchLimit = 1000 -var minimumBatchInterval = 10 * time.Millisecond - -type StoreConfig struct { - CompactionBatchLimit int - CompactionSleepInterval time.Duration -} - -type store struct { - ReadView - WriteView - - cfg StoreConfig - - // mu read locks for txns and write locks for non-txn store changes. - mu sync.RWMutex - - b backend.Backend - kvindex index - - le lease.Lessor - - // revMuLock protects currentRev and compactMainRev. - // Locked at end of write txn and released after write txn unlock lock. - // Locked before locking read txn and released after locking. - revMu sync.RWMutex - // currentRev is the revision of the last completed transaction. - currentRev int64 - // compactMainRev is the main revision of the last compaction. - compactMainRev int64 - - fifoSched schedule.Scheduler - - stopc chan struct{} - - lg *zap.Logger - hashes HashStorage -} - -// NewStore returns a new store. It is useful to create a store inside -// mvcc pkg. It should only be used for testing externally. 
-func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store { - if lg == nil { - lg = zap.NewNop() - } - if cfg.CompactionBatchLimit == 0 { - cfg.CompactionBatchLimit = defaultCompactBatchLimit - } - if cfg.CompactionSleepInterval == 0 { - cfg.CompactionSleepInterval = minimumBatchInterval - } - s := &store{ - cfg: cfg, - b: b, - kvindex: newTreeIndex(lg), - - le: le, - - currentRev: 1, - compactMainRev: -1, - - fifoSched: schedule.NewFIFOScheduler(lg), - - stopc: make(chan struct{}), - - lg: lg, - } - s.hashes = newHashStorage(lg, s) - s.ReadView = &readView{s} - s.WriteView = &writeView{s} - if s.le != nil { - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) }) - } - - tx := s.b.BatchTx() - tx.LockOutsideApply() - tx.UnsafeCreateBucket(schema.Key) - schema.UnsafeCreateMetaBucket(tx) - tx.Unlock() - s.b.ForceCommit() - - s.mu.Lock() - defer s.mu.Unlock() - if err := s.restore(); err != nil { - // TODO: return the error instead of panic here? - panic("failed to recover store from backend") - } - - return s -} - -func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { - if ctx == nil || ctx.Err() != nil { - select { - case <-s.stopc: - default: - // fix deadlock in mvcc, for more information, please refer to pr 11817. - // s.stopc is only updated in restore operation, which is called by apply - // snapshot call, compaction and apply snapshot requests are serialized by - // raft, and do not happen at the same time. 
- s.mu.Lock() - f := schedule.NewJob("kvstore_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) }) - s.fifoSched.Schedule(f) - s.mu.Unlock() - } - return - } - close(ch) -} - -func (s *store) hash() (hash uint32, revision int64, err error) { - // TODO: hash and revision could be inconsistent, one possible fix is to add s.revMu.RLock() at the beginning of function, which is costly - start := time.Now() - - s.b.ForceCommit() - h, err := s.b.Hash(schema.DefaultIgnores) - - hashSec.Observe(time.Since(start).Seconds()) - return h, s.currentRev, err -} - -func (s *store) hashByRev(rev int64) (hash KeyValueHash, currentRev int64, err error) { - var compactRev int64 - start := time.Now() - - s.mu.RLock() - s.revMu.RLock() - compactRev, currentRev = s.compactMainRev, s.currentRev - s.revMu.RUnlock() - - if rev > 0 && rev < compactRev { - s.mu.RUnlock() - return KeyValueHash{}, 0, ErrCompacted - } else if rev > 0 && rev > currentRev { - s.mu.RUnlock() - return KeyValueHash{}, currentRev, ErrFutureRev - } - if rev == 0 { - rev = currentRev - } - keep := s.kvindex.Keep(rev) - - tx := s.b.ReadTx() - tx.RLock() - defer tx.RUnlock() - s.mu.RUnlock() - hash, err = unsafeHashByRev(tx, compactRev, rev, keep) - hashRevSec.Observe(time.Since(start).Seconds()) - return hash, currentRev, err -} - -func (s *store) updateCompactRev(rev int64) (<-chan struct{}, int64, error) { - s.revMu.Lock() - if rev <= s.compactMainRev { - ch := make(chan struct{}) - f := schedule.NewJob("kvstore_updateCompactRev_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) }) - s.fifoSched.Schedule(f) - s.revMu.Unlock() - return ch, 0, ErrCompacted - } - if rev > s.currentRev { - s.revMu.Unlock() - return nil, 0, ErrFutureRev - } - compactMainRev := s.compactMainRev - s.compactMainRev = rev - - SetScheduledCompact(s.b.BatchTx(), rev) - // ensure that desired compaction is persisted - // gofail: var compactBeforeCommitScheduledCompact struct{} - s.b.ForceCommit() - // 
gofail: var compactAfterCommitScheduledCompact struct{} - - s.revMu.Unlock() - - return nil, compactMainRev, nil -} - -func (s *store) compact(trace *traceutil.Trace, rev, prevCompactRev int64) (<-chan struct{}, error) { - ch := make(chan struct{}) - j := schedule.NewJob("kvstore_compact", func(ctx context.Context) { - if ctx.Err() != nil { - s.compactBarrier(ctx, ch) - return - } - hash, err := s.scheduleCompaction(rev, prevCompactRev) - if err != nil { - s.lg.Warn("Failed compaction", zap.Error(err)) - s.compactBarrier(context.TODO(), ch) - return - } - s.hashes.Store(hash) - close(ch) - }) - - s.fifoSched.Schedule(j) - trace.Step("schedule compaction") - return ch, nil -} - -func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) { - ch, prevCompactRev, err := s.updateCompactRev(rev) - if err != nil { - return ch, err - } - - return s.compact(traceutil.TODO(), rev, prevCompactRev) -} - -func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) { - s.mu.Lock() - - ch, prevCompactRev, err := s.updateCompactRev(rev) - trace.Step("check and update compact revision") - if err != nil { - s.mu.Unlock() - return ch, err - } - s.mu.Unlock() - - return s.compact(trace, rev, prevCompactRev) -} - -func (s *store) Commit() { - s.mu.Lock() - defer s.mu.Unlock() - s.b.ForceCommit() -} - -func (s *store) Restore(b backend.Backend) error { - s.mu.Lock() - defer s.mu.Unlock() - - close(s.stopc) - s.fifoSched.Stop() - - s.b = b - s.kvindex = newTreeIndex(s.lg) - - { - // During restore the metrics might report 'special' values - s.revMu.Lock() - s.currentRev = 1 - s.compactMainRev = -1 - s.revMu.Unlock() - } - - s.fifoSched = schedule.NewFIFOScheduler(s.lg) - s.stopc = make(chan struct{}) - - return s.restore() -} - -func (s *store) restore() error { - s.setupMetricsReporter() - - min, max := newRevBytes(), newRevBytes() - revToBytes(revision{main: 1}, min) - revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) - - keyToLease := 
make(map[string]lease.LeaseID) - - // restore index - tx := s.b.ReadTx() - tx.Lock() - - finishedCompact, found := UnsafeReadFinishedCompact(tx) - if found { - s.revMu.Lock() - s.compactMainRev = finishedCompact - - s.lg.Info( - "restored last compact revision", - zap.String("meta-bucket-name-key", string(schema.FinishedCompactKeyName)), - zap.Int64("restored-compact-revision", s.compactMainRev), - ) - s.revMu.Unlock() - } - scheduledCompact, _ := UnsafeReadScheduledCompact(tx) - // index keys concurrently as they're loaded in from tx - keysGauge.Set(0) - rkvc, revc := restoreIntoIndex(s.lg, s.kvindex) - for { - keys, vals := tx.UnsafeRange(schema.Key, min, max, int64(restoreChunkKeys)) - if len(keys) == 0 { - break - } - // rkvc blocks if the total pending keys exceeds the restore - // chunk size to keep keys from consuming too much memory. - restoreChunk(s.lg, rkvc, keys, vals, keyToLease) - if len(keys) < restoreChunkKeys { - // partial set implies final set - break - } - // next set begins after where this one ended - newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) - newMin.sub++ - revToBytes(newMin, min) - } - close(rkvc) - - { - s.revMu.Lock() - s.currentRev = <-revc - - // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. - // the correct revision should be set to compaction revision in the case, not the largest revision - // we have seen. 
- if s.currentRev < s.compactMainRev { - s.currentRev = s.compactMainRev - } - s.revMu.Unlock() - } - - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - - for key, lid := range keyToLease { - if s.le == nil { - tx.Unlock() - panic("no lessor to attach lease") - } - err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) - if err != nil { - s.lg.Error( - "failed to attach a lease", - zap.String("lease-id", fmt.Sprintf("%016x", lid)), - zap.Error(err), - ) - } - } - - tx.Unlock() - - s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev)) - - if scheduledCompact != 0 { - if _, err := s.compactLockfree(scheduledCompact); err != nil { - s.lg.Warn("compaction encountered error", zap.Error(err)) - } - - s.lg.Info( - "resume scheduled compaction", - zap.Int64("scheduled-compact-revision", scheduledCompact), - ) - } - - return nil -} - -type revKeyValue struct { - key []byte - kv mvccpb.KeyValue - kstr string -} - -func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) { - rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) - go func() { - currentRev := int64(1) - defer func() { revc <- currentRev }() - // restore the tree index from streaming the unordered index. 
- kiCache := make(map[string]*keyIndex, restoreChunkKeys) - for rkv := range rkvc { - ki, ok := kiCache[rkv.kstr] - // purge kiCache if many keys but still missing in the cache - if !ok && len(kiCache) >= restoreChunkKeys { - i := 10 - for k := range kiCache { - delete(kiCache, k) - if i--; i == 0 { - break - } - } - } - // cache miss, fetch from tree index if there - if !ok { - ki = &keyIndex{key: rkv.kv.Key} - if idxKey := idx.KeyIndex(ki); idxKey != nil { - kiCache[rkv.kstr], ki = idxKey, idxKey - ok = true - } - } - rev := bytesToRev(rkv.key) - currentRev = rev.main - if ok { - if isTombstone(rkv.key) { - if err := ki.tombstone(lg, rev.main, rev.sub); err != nil { - lg.Warn("tombstone encountered error", zap.Error(err)) - } - continue - } - ki.put(lg, rev.main, rev.sub) - } else if !isTombstone(rkv.key) { - ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) - idx.Insert(ki) - kiCache[rkv.kstr] = ki - } - } - }() - return rkvc, revc -} - -func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { - for i, key := range keys { - rkv := revKeyValue{key: key} - if err := rkv.kv.Unmarshal(vals[i]); err != nil { - lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err)) - } - rkv.kstr = string(rkv.kv.Key) - if isTombstone(key) { - delete(keyToLease, rkv.kstr) - } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { - keyToLease[rkv.kstr] = lid - } else { - delete(keyToLease, rkv.kstr) - } - kvc <- rkv - } -} - -func (s *store) Close() error { - close(s.stopc) - s.fifoSched.Stop() - return nil -} - -func (s *store) setupMetricsReporter() { - b := s.b - reportDbTotalSizeInBytesMu.Lock() - reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } - reportDbTotalSizeInBytesMu.Unlock() - reportDbTotalSizeInUseInBytesMu.Lock() - reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) } - reportDbTotalSizeInUseInBytesMu.Unlock() - 
reportDbOpenReadTxNMu.Lock() - reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) } - reportDbOpenReadTxNMu.Unlock() - reportCurrentRevMu.Lock() - reportCurrentRev = func() float64 { - s.revMu.RLock() - defer s.revMu.RUnlock() - return float64(s.currentRev) - } - reportCurrentRevMu.Unlock() - reportCompactRevMu.Lock() - reportCompactRev = func() float64 { - s.revMu.RLock() - defer s.revMu.RUnlock() - return float64(s.compactMainRev) - } - reportCompactRevMu.Unlock() -} - -// appendMarkTombstone appends tombstone mark to normal revision bytes. -func appendMarkTombstone(lg *zap.Logger, b []byte) []byte { - if len(b) != revBytesLen { - lg.Panic( - "cannot append tombstone mark to non-normal revision bytes", - zap.Int("expected-revision-bytes-size", revBytesLen), - zap.Int("given-revision-bytes-size", len(b)), - ) - } - return append(b, markTombstone) -} - -// isTombstone checks whether the revision bytes is a tombstone. -func isTombstone(b []byte) bool { - return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone -} - -func (s *store) HashStorage() HashStorage { - return s.hashes -} diff --git a/server/storage/mvcc/kvstore_bench_test.go b/server/storage/mvcc/kvstore_bench_test.go deleted file mode 100644 index 8b9a1456a24..00000000000 --- a/server/storage/mvcc/kvstore_bench_test.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -func BenchmarkStorePut(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, be, tmpPath) - - // arbitrary number of bytes - bytesN := 64 - keys := createBytesSlice(bytesN, b.N) - vals := createBytesSlice(bytesN, b.N) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Put(keys[i], vals[i], lease.NoLease) - } -} - -func BenchmarkStoreRangeKey1(b *testing.B) { benchmarkStoreRange(b, 1) } -func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) } - -func benchmarkStoreRange(b *testing.B, n int) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, be, tmpPath) - - // 64 byte key/val - keys, val := createBytesSlice(64, n), createBytesSlice(64, 1) - for i := range keys { - s.Put(keys[i], val[0], lease.NoLease) - } - // Force into boltdb tx instead of backend read tx. - s.Commit() - - var begin, end []byte - if n == 1 { - begin, end = keys[0], nil - } else { - begin, end = []byte{}, []byte{} - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Range(context.TODO(), begin, end, RangeOptions{}) - } -} - -func BenchmarkConsistentIndex(b *testing.B) { - be, _ := betesting.NewDefaultTmpBackend(b) - ci := cindex.NewConsistentIndex(be) - defer betesting.Close(b, be) - - // This will force the index to be reread from scratch on each call. 
- ci.SetConsistentIndex(0, 0) - - tx := be.BatchTx() - tx.Lock() - schema.UnsafeCreateMetaBucket(tx) - ci.UnsafeSave(tx) - tx.Unlock() - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - ci.ConsistentIndex() - } -} - -// BenchmarkStorePutUpdate is same as above, but instead updates single key -func BenchmarkStorePutUpdate(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, be, tmpPath) - - // arbitrary number of bytes - keys := createBytesSlice(64, 1) - vals := createBytesSlice(1024, 1) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.Put(keys[0], vals[0], lease.NoLease) - } -} - -// BenchmarkStoreTxnPut benchmarks the Put operation -// with transaction begin and end, where transaction involves -// some synchronization operations, such as mutex locking. -func BenchmarkStoreTxnPut(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, be, tmpPath) - - // arbitrary number of bytes - bytesN := 64 - keys := createBytesSlice(bytesN, b.N) - vals := createBytesSlice(bytesN, b.N) - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - txn := s.Write(traceutil.TODO()) - txn.Put(keys[i], vals[i], lease.NoLease) - txn.End() - } -} - -// benchmarkStoreRestore benchmarks the restore operation -func benchmarkStoreRestore(revsPerKey int, b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - // use closure to capture 's' to pick up the reassignment - defer func() { cleanup(s, be, tmpPath) }() - - // arbitrary number of bytes - bytesN := 64 - keys := createBytesSlice(bytesN, b.N) - vals := createBytesSlice(bytesN, b.N) - - for i := 0; i < b.N; i++ { - for j := 0; j < revsPerKey; j++ { - txn := s.Write(traceutil.TODO()) - txn.Put(keys[i], 
vals[i], lease.NoLease) - txn.End() - } - } - assert.NoError(b, s.Close()) - - b.ReportAllocs() - b.ResetTimer() - s = NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) -} - -func BenchmarkStoreRestoreRevs1(b *testing.B) { - benchmarkStoreRestore(1, b) -} - -func BenchmarkStoreRestoreRevs10(b *testing.B) { - benchmarkStoreRestore(10, b) -} - -func BenchmarkStoreRestoreRevs20(b *testing.B) { - benchmarkStoreRestore(20, b) -} diff --git a/server/storage/mvcc/kvstore_compaction.go b/server/storage/mvcc/kvstore_compaction.go deleted file mode 100644 index 9a0163697a7..00000000000 --- a/server/storage/mvcc/kvstore_compaction.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "encoding/binary" - "fmt" - "time" - - "go.uber.org/zap" - - "go.etcd.io/etcd/server/v3/storage/schema" -) - -func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyValueHash, error) { - totalStart := time.Now() - keep := s.kvindex.Compact(compactMainRev) - indexCompactionPauseMs.Observe(float64(time.Since(totalStart) / time.Millisecond)) - - totalStart = time.Now() - defer func() { dbCompactionTotalMs.Observe(float64(time.Since(totalStart) / time.Millisecond)) }() - keyCompactions := 0 - defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }() - defer func() { dbCompactionLast.Set(float64(time.Now().Unix())) }() - - end := make([]byte, 8) - binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) - - batchNum := s.cfg.CompactionBatchLimit - batchTicker := time.NewTicker(s.cfg.CompactionSleepInterval) - defer batchTicker.Stop() - h := newKVHasher(prevCompactRev, compactMainRev, keep) - last := make([]byte, 8+1+8) - for { - var rev revision - - start := time.Now() - - tx := s.b.BatchTx() - tx.LockOutsideApply() - keys, values := tx.UnsafeRange(schema.Key, last, end, int64(batchNum)) - for i := range keys { - rev = bytesToRev(keys[i]) - if _, ok := keep[rev]; !ok { - tx.UnsafeDelete(schema.Key, keys[i]) - keyCompactions++ - } - h.WriteKeyValue(keys[i], values[i]) - } - - if len(keys) < batchNum { - // gofail: var compactBeforeSetFinishedCompact struct{} - UnsafeSetFinishedCompact(tx, compactMainRev) - tx.Unlock() - // gofail: var compactAfterSetFinishedCompact struct{} - hash := h.Hash() - s.lg.Info( - "finished scheduled compaction", - zap.Int64("compact-revision", compactMainRev), - zap.Duration("took", time.Since(totalStart)), - zap.Uint32("hash", hash.Hash), - ) - return hash, nil - } - - tx.Unlock() - // update last - revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) - // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer - // gofail: var 
compactBeforeCommitBatch struct{} - s.b.ForceCommit() - // gofail: var compactAfterCommitBatch struct{} - dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond)) - - select { - case <-batchTicker.C: - case <-s.stopc: - return KeyValueHash{}, fmt.Errorf("interrupted due to stop signal") - } - } -} diff --git a/server/storage/mvcc/kvstore_compaction_test.go b/server/storage/mvcc/kvstore_compaction_test.go deleted file mode 100644 index 2f8fac83c77..00000000000 --- a/server/storage/mvcc/kvstore_compaction_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "context" - "os" - "reflect" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -func TestScheduleCompaction(t *testing.T) { - revs := []revision{{1, 0}, {2, 0}, {3, 0}} - - tests := []struct { - rev int64 - keep map[revision]struct{} - wrevs []revision - }{ - // compact at 1 and discard all history - { - 1, - nil, - revs[1:], - }, - // compact at 3 and discard all history - { - 3, - nil, - nil, - }, - // compact at 1 and keeps history one step earlier - { - 1, - map[revision]struct{}{ - {main: 1}: {}, - }, - revs, - }, - // compact at 1 and keeps history two steps earlier - { - 3, - map[revision]struct{}{ - {main: 2}: {}, - {main: 3}: {}, - }, - revs[1:], - }, - } - for i, tt := range tests { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - fi := newFakeIndex() - fi.indexCompactRespc <- tt.keep - s.kvindex = fi - - tx := s.b.BatchTx() - - tx.Lock() - ibytes := newRevBytes() - for _, rev := range revs { - revToBytes(rev, ibytes) - tx.UnsafePut(schema.Key, ibytes, []byte("bar")) - } - tx.Unlock() - - _, err := s.scheduleCompaction(tt.rev, 0) - if err != nil { - t.Error(err) - } - - tx.Lock() - for _, rev := range tt.wrevs { - revToBytes(rev, ibytes) - keys, _ := tx.UnsafeRange(schema.Key, ibytes, nil, 0) - if len(keys) != 1 { - t.Errorf("#%d: range on %v = %d, want 1", i, rev, len(keys)) - } - } - vals, _ := UnsafeReadFinishedCompact(tx) - if !reflect.DeepEqual(vals, tt.rev) { - t.Errorf("#%d: finished compact equal %+v, want %+v", i, vals, tt.rev) - } - tx.Unlock() - - cleanup(s, b, tmpPath) - } -} - -func TestCompactAllAndRestore(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) 
- defer os.Remove(tmpPath) - - s0.Put([]byte("foo"), []byte("bar"), lease.NoLease) - s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease) - s0.Put([]byte("foo"), []byte("bar2"), lease.NoLease) - s0.DeleteRange([]byte("foo"), nil) - - rev := s0.Rev() - // compact all keys - done, err := s0.Compact(traceutil.TODO(), rev) - if err != nil { - t.Fatal(err) - } - - select { - case <-done: - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for compaction to finish") - } - - err = s0.Close() - if err != nil { - t.Fatal(err) - } - - s1 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - if s1.Rev() != rev { - t.Errorf("rev = %v, want %v", s1.Rev(), rev) - } - _, err = s1.Range(context.TODO(), []byte("foo"), nil, RangeOptions{}) - if err != nil { - t.Errorf("unexpect range error %v", err) - } -} diff --git a/server/storage/mvcc/kvstore_test.go b/server/storage/mvcc/kvstore_test.go deleted file mode 100644 index c755827ce68..00000000000 --- a/server/storage/mvcc/kvstore_test.go +++ /dev/null @@ -1,1068 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/binary" - "fmt" - "math" - mrand "math/rand" - "os" - "reflect" - "sort" - "strconv" - "sync" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/pkg/v3/schedule" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" - - "go.uber.org/zap" -) - -func TestStoreRev(t *testing.T) { - b, _ := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer s.Close() - - for i := 1; i <= 3; i++ { - s.Put([]byte("foo"), []byte("bar"), lease.NoLease) - if r := s.Rev(); r != int64(i+1) { - t.Errorf("#%d: rev = %d, want %d", i, r, i+1) - } - } -} - -func TestStorePut(t *testing.T) { - lg := zaptest.NewLogger(t) - kv := mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 1, - ModRevision: 2, - Version: 1, - } - kvb, err := kv.Marshal() - if err != nil { - t.Fatal(err) - } - - tests := []struct { - rev revision - r indexGetResp - rr *rangeResp - - wrev revision - wkey []byte - wkv mvccpb.KeyValue - wputrev revision - }{ - { - revision{1, 0}, - indexGetResp{revision{}, revision{}, 0, ErrRevisionNotFound}, - nil, - - revision{2, 0}, - newTestKeyBytes(lg, revision{2, 0}, false), - mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 2, - ModRevision: 2, - Version: 1, - Lease: 1, - }, - revision{2, 0}, - }, - { - revision{1, 1}, - indexGetResp{revision{2, 0}, revision{2, 0}, 1, nil}, - &rangeResp{[][]byte{newTestKeyBytes(lg, revision{2, 1}, false)}, [][]byte{kvb}}, - - revision{2, 0}, - newTestKeyBytes(lg, revision{2, 0}, false), - mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 2, - ModRevision: 
2, - Version: 2, - Lease: 2, - }, - revision{2, 0}, - }, - { - revision{2, 0}, - indexGetResp{revision{2, 1}, revision{2, 0}, 2, nil}, - &rangeResp{[][]byte{newTestKeyBytes(lg, revision{2, 1}, false)}, [][]byte{kvb}}, - - revision{3, 0}, - newTestKeyBytes(lg, revision{3, 0}, false), - mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 2, - ModRevision: 3, - Version: 3, - Lease: 3, - }, - revision{3, 0}, - }, - } - for i, tt := range tests { - s := newFakeStore(lg) - b := s.b.(*fakeBackend) - fi := s.kvindex.(*fakeIndex) - - s.currentRev = tt.rev.main - fi.indexGetRespc <- tt.r - if tt.rr != nil { - b.tx.rangeRespc <- *tt.rr - } - - s.Put([]byte("foo"), []byte("bar"), lease.LeaseID(i+1)) - - data, err := tt.wkv.Marshal() - if err != nil { - t.Errorf("#%d: marshal err = %v, want nil", i, err) - } - - wact := []testutil.Action{ - {Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}}, - } - - if tt.rr != nil { - wact = []testutil.Action{ - {Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}}, - } - } - - if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact) - } - wact = []testutil.Action{ - {Name: "get", Params: []interface{}{[]byte("foo"), tt.wputrev.main}}, - {Name: "put", Params: []interface{}{[]byte("foo"), tt.wputrev}}, - } - if g := fi.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("#%d: index action = %+v, want %+v", i, g, wact) - } - if s.currentRev != tt.wrev.main { - t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev) - } - - s.Close() - } -} - -func TestStoreRange(t *testing.T) { - lg := zaptest.NewLogger(t) - key := newTestKeyBytes(lg, revision{2, 0}, false) - kv := mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 1, - ModRevision: 2, - Version: 1, - } - kvb, err := kv.Marshal() - if err != nil { - t.Fatal(err) - } - wrev := int64(2) - - tests := []struct { - idxr indexRangeResp - r rangeResp - }{ - 
{ - indexRangeResp{[][]byte{[]byte("foo")}, []revision{{2, 0}}}, - rangeResp{[][]byte{key}, [][]byte{kvb}}, - }, - { - indexRangeResp{[][]byte{[]byte("foo"), []byte("foo1")}, []revision{{2, 0}, {3, 0}}}, - rangeResp{[][]byte{key}, [][]byte{kvb}}, - }, - } - - ro := RangeOptions{Limit: 1, Rev: 0, Count: false} - for i, tt := range tests { - s := newFakeStore(lg) - b := s.b.(*fakeBackend) - fi := s.kvindex.(*fakeIndex) - - s.currentRev = 2 - b.tx.rangeRespc <- tt.r - fi.indexRangeRespc <- tt.idxr - - ret, err := s.Range(context.TODO(), []byte("foo"), []byte("goo"), ro) - if err != nil { - t.Errorf("#%d: err = %v, want nil", i, err) - } - if w := []mvccpb.KeyValue{kv}; !reflect.DeepEqual(ret.KVs, w) { - t.Errorf("#%d: kvs = %+v, want %+v", i, ret.KVs, w) - } - if ret.Rev != wrev { - t.Errorf("#%d: rev = %d, want %d", i, ret.Rev, wrev) - } - - wstart := newRevBytes() - revToBytes(tt.idxr.revs[0], wstart) - wact := []testutil.Action{ - {Name: "range", Params: []interface{}{schema.Key, wstart, []byte(nil), int64(0)}}, - } - if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact) - } - wact = []testutil.Action{ - {Name: "range", Params: []interface{}{[]byte("foo"), []byte("goo"), wrev}}, - } - if g := fi.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("#%d: index action = %+v, want %+v", i, g, wact) - } - if s.currentRev != 2 { - t.Errorf("#%d: current rev = %+v, want %+v", i, s.currentRev, 2) - } - - s.Close() - } -} - -func TestStoreDeleteRange(t *testing.T) { - lg := zaptest.NewLogger(t) - key := newTestKeyBytes(lg, revision{2, 0}, false) - kv := mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 1, - ModRevision: 2, - Version: 1, - } - kvb, err := kv.Marshal() - if err != nil { - t.Fatal(err) - } - - tests := []struct { - rev revision - r indexRangeResp - rr rangeResp - - wkey []byte - wrev revision - wrrev int64 - wdelrev revision - }{ - { - revision{2, 0}, - 
indexRangeResp{[][]byte{[]byte("foo")}, []revision{{2, 0}}}, - rangeResp{[][]byte{key}, [][]byte{kvb}}, - - newTestKeyBytes(lg, revision{3, 0}, true), - revision{3, 0}, - 2, - revision{3, 0}, - }, - } - for i, tt := range tests { - s := newFakeStore(lg) - b := s.b.(*fakeBackend) - fi := s.kvindex.(*fakeIndex) - - s.currentRev = tt.rev.main - fi.indexRangeRespc <- tt.r - b.tx.rangeRespc <- tt.rr - - n, _ := s.DeleteRange([]byte("foo"), []byte("goo")) - if n != 1 { - t.Errorf("#%d: n = %d, want 1", i, n) - } - - data, err := (&mvccpb.KeyValue{ - Key: []byte("foo"), - }).Marshal() - if err != nil { - t.Errorf("#%d: marshal err = %v, want nil", i, err) - } - wact := []testutil.Action{ - {Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}}, - } - if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact) - } - wact = []testutil.Action{ - {Name: "range", Params: []interface{}{[]byte("foo"), []byte("goo"), tt.wrrev}}, - {Name: "tombstone", Params: []interface{}{[]byte("foo"), tt.wdelrev}}, - } - if g := fi.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("#%d: index action = %+v, want %+v", i, g, wact) - } - if s.currentRev != tt.wrev.main { - t.Errorf("#%d: rev = %+v, want %+v", i, s.currentRev, tt.wrev) - } - } -} - -func TestStoreCompact(t *testing.T) { - lg := zaptest.NewLogger(t) - s := newFakeStore(lg) - defer s.Close() - b := s.b.(*fakeBackend) - fi := s.kvindex.(*fakeIndex) - - s.currentRev = 3 - fi.indexCompactRespc <- map[revision]struct{}{{1, 0}: {}} - key1 := newTestKeyBytes(lg, revision{1, 0}, false) - key2 := newTestKeyBytes(lg, revision{2, 0}, false) - b.tx.rangeRespc <- rangeResp{[][]byte{key1, key2}, [][]byte{[]byte("alice"), []byte("bob")}} - - s.Compact(traceutil.TODO(), 3) - s.fifoSched.WaitFinish(1) - - if s.compactMainRev != 3 { - t.Errorf("compact main rev = %d, want 3", s.compactMainRev) - } - end := make([]byte, 8) - binary.BigEndian.PutUint64(end, uint64(4)) - wact := 
[]testutil.Action{ - {Name: "put", Params: []interface{}{schema.Meta, schema.ScheduledCompactKeyName, newTestRevBytes(revision{3, 0})}}, - {Name: "range", Params: []interface{}{schema.Key, make([]byte, 17), end, int64(10000)}}, - {Name: "delete", Params: []interface{}{schema.Key, key2}}, - {Name: "put", Params: []interface{}{schema.Meta, schema.FinishedCompactKeyName, newTestRevBytes(revision{3, 0})}}, - } - if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("tx actions = %+v, want %+v", g, wact) - } - wact = []testutil.Action{ - {Name: "compact", Params: []interface{}{int64(3)}}, - } - if g := fi.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("index action = %+v, want %+v", g, wact) - } -} - -func TestStoreRestore(t *testing.T) { - lg := zaptest.NewLogger(t) - s := newFakeStore(lg) - b := s.b.(*fakeBackend) - fi := s.kvindex.(*fakeIndex) - - putkey := newTestKeyBytes(lg, revision{3, 0}, false) - putkv := mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 4, - ModRevision: 4, - Version: 1, - } - putkvb, err := putkv.Marshal() - if err != nil { - t.Fatal(err) - } - delkey := newTestKeyBytes(lg, revision{5, 0}, true) - delkv := mvccpb.KeyValue{ - Key: []byte("foo"), - } - delkvb, err := delkv.Marshal() - if err != nil { - t.Fatal(err) - } - b.tx.rangeRespc <- rangeResp{[][]byte{schema.FinishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}} - b.tx.rangeRespc <- rangeResp{[][]byte{schema.ScheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}} - - b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}} - b.tx.rangeRespc <- rangeResp{nil, nil} - - s.restore() - - if s.compactMainRev != 3 { - t.Errorf("compact rev = %d, want 3", s.compactMainRev) - } - if s.currentRev != 5 { - t.Errorf("current rev = %v, want 5", s.currentRev) - } - wact := []testutil.Action{ - {Name: "range", Params: []interface{}{schema.Meta, schema.FinishedCompactKeyName, []byte(nil), int64(0)}}, - {Name: 
"range", Params: []interface{}{schema.Meta, schema.ScheduledCompactKeyName, []byte(nil), int64(0)}}, - {Name: "range", Params: []interface{}{schema.Key, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}}, - } - if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("tx actions = %+v, want %+v", g, wact) - } - - gens := []generation{ - {created: revision{4, 0}, ver: 2, revs: []revision{{3, 0}, {5, 0}}}, - {created: revision{0, 0}, ver: 0, revs: nil}, - } - ki := &keyIndex{key: []byte("foo"), modified: revision{5, 0}, generations: gens} - wact = []testutil.Action{ - {Name: "keyIndex", Params: []interface{}{ki}}, - {Name: "insert", Params: []interface{}{ki}}, - } - if g := fi.Action(); !reflect.DeepEqual(g, wact) { - t.Errorf("index action = %+v, want %+v", g, wact) - } -} - -func TestRestoreDelete(t *testing.T) { - oldChunk := restoreChunkKeys - restoreChunkKeys = mrand.Intn(3) + 2 - defer func() { restoreChunkKeys = oldChunk }() - - b, _ := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - keys := make(map[string]struct{}) - for i := 0; i < 20; i++ { - ks := fmt.Sprintf("foo-%d", i) - k := []byte(ks) - s.Put(k, []byte("bar"), lease.NoLease) - keys[ks] = struct{}{} - switch mrand.Intn(3) { - case 0: - // put random key from past via random range on map - ks = fmt.Sprintf("foo-%d", mrand.Intn(i+1)) - s.Put([]byte(ks), []byte("baz"), lease.NoLease) - keys[ks] = struct{}{} - case 1: - // delete random key via random range on map - for k := range keys { - s.DeleteRange([]byte(k), nil) - delete(keys, k) - break - } - } - } - s.Close() - - s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer s.Close() - for i := 0; i < 20; i++ { - ks := fmt.Sprintf("foo-%d", i) - r, err := s.Range(context.TODO(), []byte(ks), nil, RangeOptions{}) - if err != nil { - t.Fatal(err) - } - if _, ok := keys[ks]; ok { - if 
len(r.KVs) == 0 { - t.Errorf("#%d: expected %q, got deleted", i, ks) - } - } else if len(r.KVs) != 0 { - t.Errorf("#%d: expected deleted, got %q", i, ks) - } - } -} - -func TestRestoreContinueUnfinishedCompaction(t *testing.T) { - tests := []string{"recreate", "restore"} - for _, test := range tests { - b, _ := betesting.NewDefaultTmpBackend(t) - s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - s0.Put([]byte("foo"), []byte("bar"), lease.NoLease) - s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease) - s0.Put([]byte("foo"), []byte("bar2"), lease.NoLease) - - // write scheduled compaction, but not do compaction - rbytes := newRevBytes() - revToBytes(revision{main: 2}, rbytes) - tx := s0.b.BatchTx() - tx.Lock() - UnsafeSetScheduledCompact(tx, 2) - tx.Unlock() - - s0.Close() - - var s *store - switch test { - case "recreate": - s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - case "restore": - s0.Restore(b) - s = s0 - } - - // wait for scheduled compaction to be finished - time.Sleep(100 * time.Millisecond) - - if _, err := s.Range(context.TODO(), []byte("foo"), nil, RangeOptions{Rev: 1}); err != ErrCompacted { - t.Errorf("range on compacted rev error = %v, want %v", err, ErrCompacted) - } - // check the key in backend is deleted - revbytes := newRevBytes() - revToBytes(revision{main: 1}, revbytes) - - // The disk compaction is done asynchronously and requires more time on slow disk. - // try 5 times for CI with slow IO. - for i := 0; i < 5; i++ { - tx := s.b.BatchTx() - tx.Lock() - ks, _ := tx.UnsafeRange(schema.Key, revbytes, nil, 0) - tx.Unlock() - if len(ks) != 0 { - time.Sleep(100 * time.Millisecond) - continue - } - return - } - - t.Errorf("key for rev %+v still exists, want deleted", bytesToRev(revbytes)) - } -} - -type hashKVResult struct { - hash uint32 - compactRev int64 -} - -// TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting. 
-func TestHashKVWhenCompacting(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - rev := 10000 - for i := 2; i <= rev; i++ { - s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease) - } - - hashCompactc := make(chan hashKVResult, 1) - var wg sync.WaitGroup - donec := make(chan struct{}) - - // Call HashByRev(10000) in multiple goroutines until donec is closed - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for { - hash, _, err := s.HashStorage().HashByRev(int64(rev)) - if err != nil { - t.Error(err) - } - select { - case <-donec: - return - case hashCompactc <- hashKVResult{hash.Hash, hash.CompactRevision}: - } - } - }() - } - - // Check computed hashes by HashByRev are correct in a goroutine, until donec is closed - wg.Add(1) - go func() { - defer wg.Done() - revHash := make(map[int64]uint32) - for { - r := <-hashCompactc - if revHash[r.compactRev] == 0 { - revHash[r.compactRev] = r.hash - } - if r.hash != revHash[r.compactRev] { - t.Errorf("Hashes differ (current %v) != (saved %v)", r.hash, revHash[r.compactRev]) - } - - select { - case <-donec: - return - default: - } - } - }() - - // Compact the store in a goroutine, using revision 9900 to 10000 and close donec when finished - go func() { - defer close(donec) - for i := 100; i >= 0; i-- { - _, err := s.Compact(traceutil.TODO(), int64(rev-i)) - if err != nil { - t.Error(err) - } - // Wait for the compaction job to finish - s.fifoSched.WaitFinish(1) - // Leave time for calls to HashByRev to take place after each compaction - time.Sleep(10 * time.Millisecond) - } - }() - - select { - case <-donec: - wg.Wait() - case <-time.After(10 * time.Second): - testutil.FatalStack(t, "timeout") - } -} - -// TestHashKVWithCompactedAndFutureRevisions ensures that HashKV returns a correct hash when called -// with a past revision (lower than compacted), a future 
revision, and the exact compacted revision -func TestHashKVWithCompactedAndFutureRevisions(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - rev := 10000 - compactRev := rev / 2 - - for i := 2; i <= rev; i++ { - s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease) - } - if _, err := s.Compact(traceutil.TODO(), int64(compactRev)); err != nil { - t.Fatal(err) - } - - _, _, errFutureRev := s.HashStorage().HashByRev(int64(rev + 1)) - if errFutureRev != ErrFutureRev { - t.Error(errFutureRev) - } - - _, _, errPastRev := s.HashStorage().HashByRev(int64(compactRev - 1)) - if errPastRev != ErrCompacted { - t.Error(errPastRev) - } - - _, _, errCompactRev := s.HashStorage().HashByRev(int64(compactRev)) - if errCompactRev != nil { - t.Error(errCompactRev) - } -} - -// TestHashKVZeroRevision ensures that "HashByRev(0)" computes -// correct hash value with latest revision. 
-func TestHashKVZeroRevision(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - rev := 10000 - for i := 2; i <= rev; i++ { - s.Put([]byte("foo"), []byte(fmt.Sprintf("bar%d", i)), lease.NoLease) - } - if _, err := s.Compact(traceutil.TODO(), int64(rev/2)); err != nil { - t.Fatal(err) - } - - hash1, _, err := s.HashStorage().HashByRev(int64(rev)) - if err != nil { - t.Fatal(err) - } - var hash2 KeyValueHash - hash2, _, err = s.HashStorage().HashByRev(0) - if err != nil { - t.Fatal(err) - } - if hash1 != hash2 { - t.Errorf("hash %d (rev %d) != hash %d (rev 0)", hash1, rev, hash2) - } -} - -func TestTxnPut(t *testing.T) { - // assign arbitrary size - bytesN := 30 - sliceN := 100 - keys := createBytesSlice(bytesN, sliceN) - vals := createBytesSlice(bytesN, sliceN) - - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - for i := 0; i < sliceN; i++ { - txn := s.Write(traceutil.TODO()) - base := int64(i + 2) - if rev := txn.Put(keys[i], vals[i], lease.NoLease); rev != base { - t.Errorf("#%d: rev = %d, want %d", i, rev, base) - } - txn.End() - } -} - -// TestConcurrentReadNotBlockingWrite ensures Read does not blocking Write after its creation -func TestConcurrentReadNotBlockingWrite(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer os.Remove(tmpPath) - - // write something to read later - s.Put([]byte("foo"), []byte("bar"), lease.NoLease) - - // readTx simulates a long read request - readTx1 := s.Read(ConcurrentReadTxMode, traceutil.TODO()) - - // write should not be blocked by reads - done := make(chan struct{}, 1) - go func() { - s.Put([]byte("foo"), []byte("newBar"), lease.NoLease) // this is a write Txn - done <- struct{}{} - }() - select 
{ - case <-done: - case <-time.After(1 * time.Second): - t.Fatalf("write should not be blocked by read") - } - - // readTx2 simulates a short read request - readTx2 := s.Read(ConcurrentReadTxMode, traceutil.TODO()) - ro := RangeOptions{Limit: 1, Rev: 0, Count: false} - ret, err := readTx2.Range(context.TODO(), []byte("foo"), nil, ro) - if err != nil { - t.Fatalf("failed to range: %v", err) - } - // readTx2 should see the result of new write - w := mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("newBar"), - CreateRevision: 2, - ModRevision: 3, - Version: 2, - } - if !reflect.DeepEqual(ret.KVs[0], w) { - t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w) - } - readTx2.End() - - ret, err = readTx1.Range(context.TODO(), []byte("foo"), nil, ro) - if err != nil { - t.Fatalf("failed to range: %v", err) - } - // readTx1 should not see the result of new write - w = mvccpb.KeyValue{ - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 2, - ModRevision: 2, - Version: 1, - } - if !reflect.DeepEqual(ret.KVs[0], w) { - t.Fatalf("range result = %+v, want = %+v", ret.KVs[0], w) - } - readTx1.End() -} - -// TestConcurrentReadTxAndWrite creates random concurrent Reads and Writes, and ensures Reads always see latest Writes -func TestConcurrentReadTxAndWrite(t *testing.T) { - var ( - numOfReads = 100 - numOfWrites = 100 - maxNumOfPutsPerWrite = 10 - committedKVs kvs // committedKVs records the key-value pairs written by the finished Write Txns - mu sync.Mutex // mu protects committedKVs - ) - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer b.Close() - defer s.Close() - defer os.Remove(tmpPath) - - var wg sync.WaitGroup - wg.Add(numOfWrites) - for i := 0; i < numOfWrites; i++ { - go func() { - defer wg.Done() - time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time - - tx := s.Write(traceutil.TODO()) - numOfPuts := 
mrand.Intn(maxNumOfPutsPerWrite) + 1 - var pendingKvs kvs - for j := 0; j < numOfPuts; j++ { - k := []byte(strconv.Itoa(mrand.Int())) - v := []byte(strconv.Itoa(mrand.Int())) - tx.Put(k, v, lease.NoLease) - pendingKvs = append(pendingKvs, kv{k, v}) - } - // reads should not see above Puts until write is finished - mu.Lock() - committedKVs = merge(committedKVs, pendingKvs) // update shared data structure - tx.End() - mu.Unlock() - }() - } - - wg.Add(numOfReads) - for i := 0; i < numOfReads; i++ { - go func() { - defer wg.Done() - time.Sleep(time.Duration(mrand.Intn(100)) * time.Millisecond) // random starting time - - mu.Lock() - wKVs := make(kvs, len(committedKVs)) - copy(wKVs, committedKVs) - tx := s.Read(ConcurrentReadTxMode, traceutil.TODO()) - mu.Unlock() - // get all keys in backend store, and compare with wKVs - ret, err := tx.Range(context.TODO(), []byte("\x00000000"), []byte("\xffffffff"), RangeOptions{}) - tx.End() - if err != nil { - t.Errorf("failed to range keys: %v", err) - return - } - if len(wKVs) == 0 && len(ret.KVs) == 0 { // no committed KVs yet - return - } - var result kvs - for _, keyValue := range ret.KVs { - result = append(result, kv{keyValue.Key, keyValue.Value}) - } - if !reflect.DeepEqual(wKVs, result) { - t.Errorf("unexpected range result") // too many key value pairs, skip printing them - } - }() - } - - // wait until goroutines finish or timeout - doneC := make(chan struct{}) - go func() { - wg.Wait() - close(doneC) - }() - select { - case <-doneC: - case <-time.After(5 * time.Minute): - testutil.FatalStack(t, "timeout") - } -} - -type kv struct { - key []byte - val []byte -} - -type kvs []kv - -func (kvs kvs) Len() int { return len(kvs) } -func (kvs kvs) Less(i, j int) bool { return bytes.Compare(kvs[i].key, kvs[j].key) < 0 } -func (kvs kvs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } - -func merge(dst, src kvs) kvs { - dst = append(dst, src...) 
- sort.Stable(dst) - // remove duplicates, using only the newest value - // ref: tx_buffer.go - widx := 0 - for ridx := 1; ridx < len(dst); ridx++ { - if !bytes.Equal(dst[widx].key, dst[ridx].key) { - widx++ - } - dst[widx] = dst[ridx] - } - return dst[:widx+1] -} - -// TODO: test attach key to lessor - -func newTestRevBytes(rev revision) []byte { - bytes := newRevBytes() - revToBytes(rev, bytes) - return bytes -} - -func newTestKeyBytes(lg *zap.Logger, rev revision, tombstone bool) []byte { - bytes := newRevBytes() - revToBytes(rev, bytes) - if tombstone { - bytes = appendMarkTombstone(lg, bytes) - } - return bytes -} - -func newFakeStore(lg *zap.Logger) *store { - b := &fakeBackend{&fakeBatchTx{ - Recorder: &testutil.RecorderBuffered{}, - rangeRespc: make(chan rangeResp, 5)}} - s := &store{ - cfg: StoreConfig{ - CompactionBatchLimit: 10000, - CompactionSleepInterval: minimumBatchInterval, - }, - b: b, - le: &lease.FakeLessor{}, - kvindex: newFakeIndex(), - currentRev: 0, - compactMainRev: -1, - fifoSched: schedule.NewFIFOScheduler(lg), - stopc: make(chan struct{}), - lg: lg, - } - s.ReadView, s.WriteView = &readView{s}, &writeView{s} - s.hashes = newHashStorage(lg, s) - return s -} - -func newFakeIndex() *fakeIndex { - return &fakeIndex{ - Recorder: &testutil.RecorderBuffered{}, - indexGetRespc: make(chan indexGetResp, 1), - indexRangeRespc: make(chan indexRangeResp, 1), - indexRangeEventsRespc: make(chan indexRangeEventsResp, 1), - indexCompactRespc: make(chan map[revision]struct{}, 1), - } -} - -type rangeResp struct { - keys [][]byte - vals [][]byte -} - -type fakeBatchTx struct { - testutil.Recorder - rangeRespc chan rangeResp -} - -func (b *fakeBatchTx) LockInsideApply() {} -func (b *fakeBatchTx) LockOutsideApply() {} -func (b *fakeBatchTx) Lock() {} -func (b *fakeBatchTx) Unlock() {} -func (b *fakeBatchTx) RLock() {} -func (b *fakeBatchTx) RUnlock() {} -func (b *fakeBatchTx) UnsafeCreateBucket(bucket backend.Bucket) {} -func (b *fakeBatchTx) 
UnsafeDeleteBucket(bucket backend.Bucket) {} -func (b *fakeBatchTx) UnsafePut(bucket backend.Bucket, key []byte, value []byte) { - b.Recorder.Record(testutil.Action{Name: "put", Params: []interface{}{bucket, key, value}}) -} -func (b *fakeBatchTx) UnsafeSeqPut(bucket backend.Bucket, key []byte, value []byte) { - b.Recorder.Record(testutil.Action{Name: "seqput", Params: []interface{}{bucket, key, value}}) -} -func (b *fakeBatchTx) UnsafeRange(bucket backend.Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { - b.Recorder.Record(testutil.Action{Name: "range", Params: []interface{}{bucket, key, endKey, limit}}) - r := <-b.rangeRespc - return r.keys, r.vals -} -func (b *fakeBatchTx) UnsafeDelete(bucket backend.Bucket, key []byte) { - b.Recorder.Record(testutil.Action{Name: "delete", Params: []interface{}{bucket, key}}) -} -func (b *fakeBatchTx) UnsafeForEach(bucket backend.Bucket, visitor func(k, v []byte) error) error { - return nil -} -func (b *fakeBatchTx) Commit() {} -func (b *fakeBatchTx) CommitAndStop() {} - -type fakeBackend struct { - tx *fakeBatchTx -} - -func (b *fakeBackend) BatchTx() backend.BatchTx { return b.tx } -func (b *fakeBackend) ReadTx() backend.ReadTx { return b.tx } -func (b *fakeBackend) ConcurrentReadTx() backend.ReadTx { return b.tx } -func (b *fakeBackend) Hash(func(bucketName, keyName []byte) bool) (uint32, error) { return 0, nil } -func (b *fakeBackend) Size() int64 { return 0 } -func (b *fakeBackend) SizeInUse() int64 { return 0 } -func (b *fakeBackend) OpenReadTxN() int64 { return 0 } -func (b *fakeBackend) Snapshot() backend.Snapshot { return nil } -func (b *fakeBackend) ForceCommit() {} -func (b *fakeBackend) Defrag() error { return nil } -func (b *fakeBackend) Close() error { return nil } -func (b *fakeBackend) SetTxPostLockInsideApplyHook(func()) {} - -type indexGetResp struct { - rev revision - created revision - ver int64 - err error -} - -type indexRangeResp struct { - keys [][]byte - revs []revision -} - 
-type indexRangeEventsResp struct { - revs []revision -} - -type fakeIndex struct { - testutil.Recorder - indexGetRespc chan indexGetResp - indexRangeRespc chan indexRangeResp - indexRangeEventsRespc chan indexRangeEventsResp - indexCompactRespc chan map[revision]struct{} -} - -func (i *fakeIndex) Revisions(key, end []byte, atRev int64, limit int) ([]revision, int) { - _, rev := i.Range(key, end, atRev) - if len(rev) >= limit { - rev = rev[:limit] - } - return rev, len(rev) -} - -func (i *fakeIndex) CountRevisions(key, end []byte, atRev int64) int { - _, rev := i.Range(key, end, atRev) - return len(rev) -} - -func (i *fakeIndex) Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) { - i.Recorder.Record(testutil.Action{Name: "get", Params: []interface{}{key, atRev}}) - r := <-i.indexGetRespc - return r.rev, r.created, r.ver, r.err -} -func (i *fakeIndex) Range(key, end []byte, atRev int64) ([][]byte, []revision) { - i.Recorder.Record(testutil.Action{Name: "range", Params: []interface{}{key, end, atRev}}) - r := <-i.indexRangeRespc - return r.keys, r.revs -} -func (i *fakeIndex) Put(key []byte, rev revision) { - i.Recorder.Record(testutil.Action{Name: "put", Params: []interface{}{key, rev}}) -} -func (i *fakeIndex) Tombstone(key []byte, rev revision) error { - i.Recorder.Record(testutil.Action{Name: "tombstone", Params: []interface{}{key, rev}}) - return nil -} -func (i *fakeIndex) RangeSince(key, end []byte, rev int64) []revision { - i.Recorder.Record(testutil.Action{Name: "rangeEvents", Params: []interface{}{key, end, rev}}) - r := <-i.indexRangeEventsRespc - return r.revs -} -func (i *fakeIndex) Compact(rev int64) map[revision]struct{} { - i.Recorder.Record(testutil.Action{Name: "compact", Params: []interface{}{rev}}) - return <-i.indexCompactRespc -} -func (i *fakeIndex) Keep(rev int64) map[revision]struct{} { - i.Recorder.Record(testutil.Action{Name: "keep", Params: []interface{}{rev}}) - return <-i.indexCompactRespc -} -func (i *fakeIndex) 
Equal(b index) bool { return false } - -func (i *fakeIndex) Insert(ki *keyIndex) { - i.Recorder.Record(testutil.Action{Name: "insert", Params: []interface{}{ki}}) -} - -func (i *fakeIndex) KeyIndex(ki *keyIndex) *keyIndex { - i.Recorder.Record(testutil.Action{Name: "keyIndex", Params: []interface{}{ki}}) - return nil -} - -func createBytesSlice(bytesN, sliceN int) [][]byte { - var rs [][]byte - for len(rs) != sliceN { - v := make([]byte, bytesN) - if _, err := rand.Read(v); err != nil { - panic(err) - } - rs = append(rs, v) - } - return rs -} diff --git a/server/storage/mvcc/kvstore_txn.go b/server/storage/mvcc/kvstore_txn.go deleted file mode 100644 index b93fcbe64da..00000000000 --- a/server/storage/mvcc/kvstore_txn.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "context" - "fmt" - - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -type storeTxnRead struct { - s *store - tx backend.ReadTx - - firstRev int64 - rev int64 - - trace *traceutil.Trace -} - -func (s *store) Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead { - s.mu.RLock() - s.revMu.RLock() - // For read-only workloads, we use shared buffer by copying transaction read buffer - // for higher concurrency with ongoing blocking writes. - // For write/write-read transactions, we use the shared buffer - // rather than duplicating transaction read buffer to avoid transaction overhead. - var tx backend.ReadTx - if mode == ConcurrentReadTxMode { - tx = s.b.ConcurrentReadTx() - } else { - tx = s.b.ReadTx() - } - - tx.RLock() // RLock is no-op. concurrentReadTx does not need to be locked after it is created. 
- firstRev, rev := s.compactMainRev, s.currentRev - s.revMu.RUnlock() - return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev, trace}) -} - -func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } -func (tr *storeTxnRead) Rev() int64 { return tr.rev } - -func (tr *storeTxnRead) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - return tr.rangeKeys(ctx, key, end, tr.Rev(), ro) -} - -func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { - rev := ro.Rev - if rev > curRev { - return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev - } - if rev <= 0 { - rev = curRev - } - if rev < tr.s.compactMainRev { - return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted - } - if ro.Count { - total := tr.s.kvindex.CountRevisions(key, end, rev) - tr.trace.Step("count revisions from in-memory index tree") - return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil - } - revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit)) - tr.trace.Step("range keys from in-memory index tree") - if len(revpairs) == 0 { - return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil - } - - limit := int(ro.Limit) - if limit <= 0 || limit > len(revpairs) { - limit = len(revpairs) - } - - kvs := make([]mvccpb.KeyValue, limit) - revBytes := newRevBytes() - for i, revpair := range revpairs[:len(kvs)] { - select { - case <-ctx.Done(): - return nil, fmt.Errorf("rangeKeys: context cancelled: %w", ctx.Err()) - default: - } - revToBytes(revpair, revBytes) - _, vs := tr.tx.UnsafeRange(schema.Key, revBytes, nil, 0) - if len(vs) != 1 { - tr.s.lg.Fatal( - "range failed to find revision pair", - zap.Int64("revision-main", revpair.main), - zap.Int64("revision-sub", revpair.sub), - zap.Int64("revision-current", curRev), - zap.Int64("range-option-rev", ro.Rev), - zap.Int64("range-option-limit", ro.Limit), - zap.Binary("key", key), - zap.Binary("end", 
end), - zap.Int("len-revpairs", len(revpairs)), - zap.Int("len-values", len(vs)), - ) - } - if err := kvs[i].Unmarshal(vs[0]); err != nil { - tr.s.lg.Fatal( - "failed to unmarshal mvccpb.KeyValue", - zap.Error(err), - ) - } - } - tr.trace.Step("range keys from bolt db") - return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil -} - -func (tr *storeTxnRead) End() { - tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx. - tr.s.mu.RUnlock() -} - -type storeTxnWrite struct { - storeTxnRead - tx backend.BatchTx - // beginRev is the revision where the txn begins; it will write to the next revision. - beginRev int64 - changes []mvccpb.KeyValue -} - -func (s *store) Write(trace *traceutil.Trace) TxnWrite { - s.mu.RLock() - tx := s.b.BatchTx() - tx.LockInsideApply() - tw := &storeTxnWrite{ - storeTxnRead: storeTxnRead{s, tx, 0, 0, trace}, - tx: tx, - beginRev: s.currentRev, - changes: make([]mvccpb.KeyValue, 0, 4), - } - return newMetricsTxnWrite(tw) -} - -func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } - -func (tw *storeTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - rev := tw.beginRev - if len(tw.changes) > 0 { - rev++ - } - return tw.rangeKeys(ctx, key, end, rev, ro) -} - -func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { - if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { - return n, tw.beginRev + 1 - } - return 0, tw.beginRev -} - -func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { - tw.put(key, value, lease) - return tw.beginRev + 1 -} - -func (tw *storeTxnWrite) End() { - // only update index if the txn modifies the mvcc state. - if len(tw.changes) != 0 { - // hold revMu lock to prevent new read txns from opening until writeback. 
- tw.s.revMu.Lock() - tw.s.currentRev++ - } - tw.tx.Unlock() - if len(tw.changes) != 0 { - tw.s.revMu.Unlock() - } - tw.s.mu.RUnlock() -} - -func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { - rev := tw.beginRev + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := tw.s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) - tw.trace.Step("get key's previous created_revision and leaseID") - } - ibytes := newRevBytes() - idxRev := revision{main: rev, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { - tw.storeTxnRead.s.lg.Fatal( - "failed to marshal mvccpb.KeyValue", - zap.Error(err), - ) - } - - tw.trace.Step("marshal mvccpb.KeyValue") - tw.tx.UnsafeSeqPut(schema.Key, ibytes, d) - tw.s.kvindex.Put(key, idxRev) - tw.changes = append(tw.changes, kv) - tw.trace.Step("store kv pair into bolt db") - - if oldLease == leaseID { - tw.trace.Step("attach lease to kv pair") - return - } - - if oldLease != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to detach lease") - } - err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - tw.storeTxnRead.s.lg.Error( - "failed to detach old lease from a key", - zap.Error(err), - ) - } - } - if leaseID != lease.NoLease { - if tw.s.le == nil { - panic("no lessor to attach lease") - } - err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } - tw.trace.Step("attach lease to kv pair") -} - -func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { - rrev := tw.beginRev - if len(tw.changes) > 0 { - rrev++ - 
} - keys, _ := tw.s.kvindex.Range(key, end, rrev) - if len(keys) == 0 { - return 0 - } - for _, key := range keys { - tw.delete(key) - } - return int64(len(keys)) -} - -func (tw *storeTxnWrite) delete(key []byte) { - ibytes := newRevBytes() - idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} - revToBytes(idxRev, ibytes) - - ibytes = appendMarkTombstone(tw.storeTxnRead.s.lg, ibytes) - - kv := mvccpb.KeyValue{Key: key} - - d, err := kv.Marshal() - if err != nil { - tw.storeTxnRead.s.lg.Fatal( - "failed to marshal mvccpb.KeyValue", - zap.Error(err), - ) - } - - tw.tx.UnsafeSeqPut(schema.Key, ibytes, d) - err = tw.s.kvindex.Tombstone(key, idxRev) - if err != nil { - tw.storeTxnRead.s.lg.Fatal( - "failed to tombstone an existing key", - zap.String("key", string(key)), - zap.Error(err), - ) - } - tw.changes = append(tw.changes, kv) - - item := lease.LeaseItem{Key: string(key)} - leaseID := tw.s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - tw.storeTxnRead.s.lg.Error( - "failed to detach old lease from a key", - zap.Error(err), - ) - } - } -} - -func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/server/storage/mvcc/metrics.go b/server/storage/mvcc/metrics.go deleted file mode 100644 index b75abbcc089..00000000000 --- a/server/storage/mvcc/metrics.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "sync" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - rangeCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "range_total", - Help: "Total number of ranges seen by this member.", - }) - - putCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "put_total", - Help: "Total number of puts seen by this member.", - }) - - deleteCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "delete_total", - Help: "Total number of deletes seen by this member.", - }) - - txnCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "txn_total", - Help: "Total number of txns seen by this member.", - }) - - keysGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "keys_total", - Help: "Total number of keys.", - }) - - watchStreamGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watch_stream_total", - Help: "Total number of watch streams.", - }) - - watcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watcher_total", - Help: "Total number of watchers.", - }) - - slowWatcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "slow_watcher_total", - Help: "Total number of unsynced slow watchers.", - }) - - totalEventsCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "events_total", - Help: "Total number of events sent by this member.", - }) - - pendingEventsGauge = prometheus.NewGauge( - 
prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "pending_events_total", - Help: "Total number of pending events to be sent.", - }) - - indexCompactionPauseMs = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "index_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of index compaction pause duration.", - - // lowest bucket start of upper bound 0.5 ms with factor 2 - // highest bucket start of 0.5 ms * 2^13 == 4.096 sec - Buckets: prometheus.ExponentialBuckets(0.5, 2, 14), - }) - - dbCompactionPauseMs = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of db compaction pause duration.", - - // lowest bucket start of upper bound 1 ms with factor 2 - // highest bucket start of 1 ms * 2^12 == 4.096 sec - Buckets: prometheus.ExponentialBuckets(1, 2, 13), - }) - - dbCompactionTotalMs = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_total_duration_milliseconds", - Help: "Bucketed histogram of db compaction total duration.", - - // lowest bucket start of upper bound 100 ms with factor 2 - // highest bucket start of 100 ms * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(100, 2, 14), - }) - - dbCompactionLast = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_last", - Help: "The unix time of the last db compaction. 
Resets to 0 on start.", - }) - - dbCompactionKeysCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_keys_total", - Help: "Total number of db keys compacted.", - }) - - dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "db_total_size_in_bytes", - Help: "Total size of the underlying database physically allocated in bytes.", - }, - func() float64 { - reportDbTotalSizeInBytesMu.RLock() - defer reportDbTotalSizeInBytesMu.RUnlock() - return reportDbTotalSizeInBytes() - }, - ) - // overridden by mvcc initialization - reportDbTotalSizeInBytesMu sync.RWMutex - reportDbTotalSizeInBytes = func() float64 { return 0 } - - dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "db_total_size_in_use_in_bytes", - Help: "Total size of the underlying database logically in use in bytes.", - }, - func() float64 { - reportDbTotalSizeInUseInBytesMu.RLock() - defer reportDbTotalSizeInUseInBytesMu.RUnlock() - return reportDbTotalSizeInUseInBytes() - }, - ) - // overridden by mvcc initialization - reportDbTotalSizeInUseInBytesMu sync.RWMutex - reportDbTotalSizeInUseInBytes = func() float64 { return 0 } - - dbOpenReadTxN = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "db_open_read_transactions", - Help: "The number of currently open read transactions", - }, - - func() float64 { - reportDbOpenReadTxNMu.RLock() - defer reportDbOpenReadTxNMu.RUnlock() - return reportDbOpenReadTxN() - }, - ) - // overridden by mvcc initialization - reportDbOpenReadTxNMu sync.RWMutex - reportDbOpenReadTxN = func() float64 { return 0 } - - hashSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "hash_duration_seconds", - Help: "The latency distribution of storage hash operation.", - - // 100 MB usually takes 
100 ms, so start with 10 MB of 10 ms - // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2 - // highest bucket start of 0.01 sec * 2^14 == 163.84 sec - Buckets: prometheus.ExponentialBuckets(.01, 2, 15), - }) - - hashRevSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "mvcc", - Name: "hash_rev_duration_seconds", - Help: "The latency distribution of storage hash by revision operation.", - - // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms - // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2 - // highest bucket start of 0.01 sec * 2^14 == 163.84 sec - Buckets: prometheus.ExponentialBuckets(.01, 2, 15), - }) - - currentRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "current_revision", - Help: "The current revision of store.", - }, - func() float64 { - reportCurrentRevMu.RLock() - defer reportCurrentRevMu.RUnlock() - return reportCurrentRev() - }, - ) - // overridden by mvcc initialization - reportCurrentRevMu sync.RWMutex - reportCurrentRev = func() float64 { return 0 } - - compactRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "compact_revision", - Help: "The revision of the last compaction in store.", - }, - func() float64 { - reportCompactRevMu.RLock() - defer reportCompactRevMu.RUnlock() - return reportCompactRev() - }, - ) - // overridden by mvcc initialization - reportCompactRevMu sync.RWMutex - reportCompactRev = func() float64 { return 0 } - - totalPutSizeGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "total_put_size_in_bytes", - Help: "The total size of put kv pairs seen by this member.", - }) -) - -func init() { - prometheus.MustRegister(rangeCounter) - prometheus.MustRegister(putCounter) - prometheus.MustRegister(deleteCounter) - prometheus.MustRegister(txnCounter) - 
prometheus.MustRegister(keysGauge) - prometheus.MustRegister(watchStreamGauge) - prometheus.MustRegister(watcherGauge) - prometheus.MustRegister(slowWatcherGauge) - prometheus.MustRegister(totalEventsCounter) - prometheus.MustRegister(pendingEventsGauge) - prometheus.MustRegister(indexCompactionPauseMs) - prometheus.MustRegister(dbCompactionPauseMs) - prometheus.MustRegister(dbCompactionTotalMs) - prometheus.MustRegister(dbCompactionLast) - prometheus.MustRegister(dbCompactionKeysCounter) - prometheus.MustRegister(dbTotalSize) - prometheus.MustRegister(dbTotalSizeInUse) - prometheus.MustRegister(dbOpenReadTxN) - prometheus.MustRegister(hashSec) - prometheus.MustRegister(hashRevSec) - prometheus.MustRegister(currentRev) - prometheus.MustRegister(compactRev) - prometheus.MustRegister(totalPutSizeGauge) -} - -// ReportEventReceived reports that an event is received. -// This function should be called when the external systems received an -// event from mvcc.Watcher. -func ReportEventReceived(n int) { - pendingEventsGauge.Sub(float64(n)) - totalEventsCounter.Add(float64(n)) -} diff --git a/server/storage/mvcc/metrics_txn.go b/server/storage/mvcc/metrics_txn.go deleted file mode 100644 index aef877a1c15..00000000000 --- a/server/storage/mvcc/metrics_txn.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "context" - - "go.etcd.io/etcd/server/v3/lease" -) - -type metricsTxnWrite struct { - TxnWrite - ranges uint - puts uint - deletes uint - putSize int64 -} - -func newMetricsTxnRead(tr TxnRead) TxnRead { - return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0, 0} -} - -func newMetricsTxnWrite(tw TxnWrite) TxnWrite { - return &metricsTxnWrite{tw, 0, 0, 0, 0} -} - -func (tw *metricsTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (*RangeResult, error) { - tw.ranges++ - return tw.TxnWrite.Range(ctx, key, end, ro) -} - -func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { - tw.deletes++ - return tw.TxnWrite.DeleteRange(key, end) -} - -func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw.puts++ - size := int64(len(key) + len(value)) - tw.putSize += size - return tw.TxnWrite.Put(key, value, lease) -} - -func (tw *metricsTxnWrite) End() { - defer tw.TxnWrite.End() - if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 { - txnCounter.Inc() - } - - ranges := float64(tw.ranges) - rangeCounter.Add(ranges) - - puts := float64(tw.puts) - putCounter.Add(puts) - totalPutSizeGauge.Add(float64(tw.putSize)) - - deletes := float64(tw.deletes) - deleteCounter.Add(deletes) -} diff --git a/server/storage/mvcc/revision.go b/server/storage/mvcc/revision.go deleted file mode 100644 index a910e177aef..00000000000 --- a/server/storage/mvcc/revision.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import "encoding/binary" - -// revBytesLen is the byte length of a normal revision. -// First 8 bytes is the revision.main in big-endian format. The 9th byte -// is a '_'. The last 8 bytes is the revision.sub in big-endian format. -const revBytesLen = 8 + 1 + 8 - -// A revision indicates modification of the key-value space. -// The set of changes that share same main revision changes the key-value space atomically. -type revision struct { - // main is the main revision of a set of changes that happen atomically. - main int64 - - // sub is the sub revision of a change in a set of changes that happen - // atomically. Each change has different increasing sub revision in that - // set. - sub int64 -} - -func (a revision) GreaterThan(b revision) bool { - if a.main > b.main { - return true - } - if a.main < b.main { - return false - } - return a.sub > b.sub -} - -func newRevBytes() []byte { - return make([]byte, revBytesLen, markedRevBytesLen) -} - -func revToBytes(rev revision, bytes []byte) { - binary.BigEndian.PutUint64(bytes, uint64(rev.main)) - bytes[8] = '_' - binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub)) -} - -func bytesToRev(bytes []byte) revision { - return revision{ - main: int64(binary.BigEndian.Uint64(bytes[0:8])), - sub: int64(binary.BigEndian.Uint64(bytes[9:])), - } -} diff --git a/server/storage/mvcc/revision_test.go b/server/storage/mvcc/revision_test.go deleted file mode 100644 index 46fcb483cf0..00000000000 --- a/server/storage/mvcc/revision_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "math" - "reflect" - "testing" -) - -// TestRevision tests that revision could be encoded to and decoded from -// bytes slice. Moreover, the lexicographical order of its byte slice representation -// follows the order of (main, sub). -func TestRevision(t *testing.T) { - tests := []revision{ - // order in (main, sub) - {}, - {main: 1, sub: 0}, - {main: 1, sub: 1}, - {main: 2, sub: 0}, - {main: math.MaxInt64, sub: math.MaxInt64}, - } - - bs := make([][]byte, len(tests)) - for i, tt := range tests { - b := newRevBytes() - revToBytes(tt, b) - bs[i] = b - - if grev := bytesToRev(b); !reflect.DeepEqual(grev, tt) { - t.Errorf("#%d: revision = %+v, want %+v", i, grev, tt) - } - } - - for i := 0; i < len(tests)-1; i++ { - if bytes.Compare(bs[i], bs[i+1]) >= 0 { - t.Errorf("#%d: %v (%+v) should be smaller than %v (%+v)", i, bs[i], tests[i], bs[i+1], tests[i+1]) - } - } -} diff --git a/server/storage/mvcc/store.go b/server/storage/mvcc/store.go deleted file mode 100644 index a002ada7177..00000000000 --- a/server/storage/mvcc/store.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -func UnsafeReadFinishedCompact(tx backend.ReadTx) (finishedComact int64, found bool) { - _, finishedCompactBytes := tx.UnsafeRange(schema.Meta, schema.FinishedCompactKeyName, nil, 0) - if len(finishedCompactBytes) != 0 { - return bytesToRev(finishedCompactBytes[0]).main, true - } - return 0, false -} - -func UnsafeReadScheduledCompact(tx backend.ReadTx) (scheduledComact int64, found bool) { - _, scheduledCompactBytes := tx.UnsafeRange(schema.Meta, schema.ScheduledCompactKeyName, nil, 0) - if len(scheduledCompactBytes) != 0 { - return bytesToRev(scheduledCompactBytes[0]).main, true - } - return 0, false -} - -func SetScheduledCompact(tx backend.BatchTx, value int64) { - tx.LockInsideApply() - defer tx.Unlock() - UnsafeSetScheduledCompact(tx, value) -} - -func UnsafeSetScheduledCompact(tx backend.BatchTx, value int64) { - rbytes := newRevBytes() - revToBytes(revision{main: value}, rbytes) - tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes) -} - -func SetFinishedCompact(tx backend.BatchTx, value int64) { - tx.LockInsideApply() - defer tx.Unlock() - UnsafeSetFinishedCompact(tx, value) -} - -func UnsafeSetFinishedCompact(tx backend.BatchTx, value int64) { - rbytes := newRevBytes() - revToBytes(revision{main: value}, rbytes) - tx.UnsafePut(schema.Meta, schema.FinishedCompactKeyName, rbytes) -} diff --git a/server/storage/mvcc/store_test.go b/server/storage/mvcc/store_test.go deleted file mode 100644 index 
bd6d25e171f..00000000000 --- a/server/storage/mvcc/store_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "fmt" - "math" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" - "go.etcd.io/etcd/server/v3/storage/schema" -) - -// TestScheduledCompact ensures that UnsafeSetScheduledCompact&UnsafeReadScheduledCompact work well together. -func TestScheduledCompact(t *testing.T) { - tcs := []struct { - value int64 - }{ - { - value: 1, - }, - { - value: 0, - }, - { - value: math.MaxInt64, - }, - { - value: math.MinInt64, - }, - } - for _, tc := range tcs { - t.Run(fmt.Sprint(tc.value), func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - tx := be.BatchTx() - if tx == nil { - t.Fatal("batch tx is nil") - } - tx.Lock() - tx.UnsafeCreateBucket(schema.Meta) - UnsafeSetScheduledCompact(tx, tc.value) - tx.Unlock() - be.ForceCommit() - be.Close() - - b := backend.NewDefaultBackend(lg, tmpPath) - defer b.Close() - v, found := UnsafeReadScheduledCompact(b.BatchTx()) - assert.Equal(t, true, found) - assert.Equal(t, tc.value, v) - }) - } -} - -// TestFinishedCompact ensures that UnsafeSetFinishedCompact&UnsafeReadFinishedCompact work well together. 
-func TestFinishedCompact(t *testing.T) { - tcs := []struct { - value int64 - }{ - { - value: 1, - }, - { - value: 0, - }, - { - value: math.MaxInt64, - }, - { - value: math.MinInt64, - }, - } - for _, tc := range tcs { - t.Run(fmt.Sprint(tc.value), func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - tx := be.BatchTx() - if tx == nil { - t.Fatal("batch tx is nil") - } - tx.Lock() - tx.UnsafeCreateBucket(schema.Meta) - UnsafeSetFinishedCompact(tx, tc.value) - tx.Unlock() - be.ForceCommit() - be.Close() - - b := backend.NewDefaultBackend(lg, tmpPath) - defer b.Close() - v, found := UnsafeReadFinishedCompact(b.BatchTx()) - assert.Equal(t, true, found) - assert.Equal(t, tc.value, v) - }) - } -} diff --git a/server/storage/mvcc/testutil/hash.go b/server/storage/mvcc/testutil/hash.go deleted file mode 100644 index e9d43b2046c..00000000000 --- a/server/storage/mvcc/testutil/hash.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testutil - -import ( - "context" - "errors" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/bbolt" - "go.etcd.io/etcd/api/v3/mvccpb" -) - -const ( - // CompactionCycle is high prime used to test hash calculation between compactions. 
- CompactionCycle = 71 -) - -func TestCompactionHash(ctx context.Context, t *testing.T, h CompactionHashTestCase, compactionBatchLimit int) { - var totalRevisions int64 = 1210 - assert.Less(t, int64(compactionBatchLimit), totalRevisions) - assert.Less(t, int64(CompactionCycle*10), totalRevisions) - var rev int64 - for ; rev < totalRevisions; rev += CompactionCycle { - testCompactionHash(ctx, t, h, rev, rev+CompactionCycle) - } - testCompactionHash(ctx, t, h, rev, rev+totalRevisions) -} - -type CompactionHashTestCase interface { - Put(ctx context.Context, key, value string) error - Delete(ctx context.Context, key string) error - HashByRev(ctx context.Context, rev int64) (KeyValueHash, error) - Defrag(ctx context.Context) error - Compact(ctx context.Context, rev int64) error -} - -type KeyValueHash struct { - Hash uint32 - CompactRevision int64 - Revision int64 -} - -func testCompactionHash(ctx context.Context, t *testing.T, h CompactionHashTestCase, start, stop int64) { - for i := start; i <= stop; i++ { - if i%67 == 0 { - err := h.Delete(ctx, PickKey(i+83)) - assert.NoError(t, err, "error on delete") - } else { - err := h.Put(ctx, PickKey(i), fmt.Sprint(i)) - assert.NoError(t, err, "error on put") - } - } - hash1, err := h.HashByRev(ctx, stop) - assert.NoError(t, err, "error on hash (rev %v)", stop) - - err = h.Compact(ctx, stop) - assert.NoError(t, err, "error on compact (rev %v)", stop) - - err = h.Defrag(ctx) - assert.NoError(t, err, "error on defrag") - - hash2, err := h.HashByRev(ctx, stop) - assert.NoError(t, err, "error on hash (rev %v)", stop) - assert.Equal(t, hash1, hash2, "hashes do not match on rev %v", stop) -} - -func PickKey(i int64) string { - if i%(CompactionCycle*2) == 30 { - return "zenek" - } - if i%CompactionCycle == 30 { - return "xavery" - } - // Use low prime number to ensure repeats without alignment - switch i % 7 { - case 0: - return "alice" - case 1: - return "bob" - case 2: - return "celine" - case 3: - return "dominik" - case 4: - 
return "eve" - case 5: - return "frederica" - case 6: - return "gorge" - default: - panic("Can't count") - } -} - -func CorruptBBolt(fpath string) error { - db, derr := bbolt.Open(fpath, os.ModePerm, &bbolt.Options{}) - if derr != nil { - return derr - } - defer db.Close() - - return db.Update(func(tx *bbolt.Tx) error { - b := tx.Bucket([]byte("key")) - if b == nil { - return errors.New("got nil bucket for 'key'") - } - var vals [][]byte - var keys [][]byte - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - keys = append(keys, k) - var kv mvccpb.KeyValue - if uerr := kv.Unmarshal(v); uerr != nil { - return uerr - } - kv.Key[0]++ - kv.Value[0]++ - v2, v2err := kv.Marshal() - if v2err != nil { - return v2err - } - vals = append(vals, v2) - } - for i := range keys { - if perr := b.Put(keys[i], vals[i]); perr != nil { - return perr - } - } - return nil - }) -} diff --git a/server/storage/mvcc/watchable_store.go b/server/storage/mvcc/watchable_store.go deleted file mode 100644 index adf07f7755b..00000000000 --- a/server/storage/mvcc/watchable_store.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "sync" - "time" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" - - "go.uber.org/zap" -) - -// non-const so modifiable by tests -var ( - // chanBufLen is the length of the buffered chan - // for sending out watched events. - // See https://github.com/etcd-io/etcd/issues/11906 for more detail. - chanBufLen = 128 - - // maxWatchersPerSync is the number of watchers to sync in a single batch - maxWatchersPerSync = 512 -) - -type watchable interface { - watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) - progress(w *watcher) - rev() int64 -} - -type watchableStore struct { - *store - - // mu protects watcher groups and batches. It should never be locked - // before locking store.mu to avoid deadlock. - mu sync.RWMutex - - // victims are watcher batches that were blocked on the watch channel - victims []watcherBatch - victimc chan struct{} - - // contains all unsynced watchers that needs to sync with events that have happened - unsynced watcherGroup - - // contains all synced watchers that are in sync with the progress of the store. - // The key of the map is the key that the watcher watches on. - synced watcherGroup - - stopc chan struct{} - wg sync.WaitGroup -} - -// cancelFunc updates unsynced and synced maps when running -// cancel operations. 
-type cancelFunc func() - -func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) WatchableKV { - return newWatchableStore(lg, b, le, cfg) -} - -func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore { - if lg == nil { - lg = zap.NewNop() - } - s := &watchableStore{ - store: NewStore(lg, b, le, cfg), - victimc: make(chan struct{}, 1), - unsynced: newWatcherGroup(), - synced: newWatcherGroup(), - stopc: make(chan struct{}), - } - s.store.ReadView = &readView{s} - s.store.WriteView = &writeView{s} - if s.le != nil { - // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) }) - } - s.wg.Add(2) - go s.syncWatchersLoop() - go s.syncVictimsLoop() - return s -} - -func (s *watchableStore) Close() error { - close(s.stopc) - s.wg.Wait() - return s.store.Close() -} - -func (s *watchableStore) NewWatchStream() WatchStream { - watchStreamGauge.Inc() - return &watchStream{ - watchable: s, - ch: make(chan WatchResponse, chanBufLen), - cancels: make(map[WatchID]cancelFunc), - watchers: make(map[WatchID]*watcher), - } -} - -func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { - wa := &watcher{ - key: key, - end: end, - minRev: startRev, - id: id, - ch: ch, - fcs: fcs, - } - - s.mu.Lock() - s.revMu.RLock() - synced := startRev > s.store.currentRev || startRev == 0 - if synced { - wa.minRev = s.store.currentRev + 1 - if startRev > wa.minRev { - wa.minRev = startRev - } - s.synced.add(wa) - } else { - slowWatcherGauge.Inc() - s.unsynced.add(wa) - } - s.revMu.RUnlock() - s.mu.Unlock() - - watcherGauge.Inc() - - return wa, func() { s.cancelWatcher(wa) } -} - -// cancelWatcher removes references of the watcher from the watchableStore -func (s *watchableStore) cancelWatcher(wa *watcher) { - for { - s.mu.Lock() - if 
s.unsynced.delete(wa) { - slowWatcherGauge.Dec() - watcherGauge.Dec() - break - } else if s.synced.delete(wa) { - watcherGauge.Dec() - break - } else if wa.compacted { - watcherGauge.Dec() - break - } else if wa.ch == nil { - // already canceled (e.g., cancel/close race) - break - } - - if !wa.victim { - s.mu.Unlock() - panic("watcher not victim but not in watch groups") - } - - var victimBatch watcherBatch - for _, wb := range s.victims { - if wb[wa] != nil { - victimBatch = wb - break - } - } - if victimBatch != nil { - slowWatcherGauge.Dec() - watcherGauge.Dec() - delete(victimBatch, wa) - break - } - - // victim being processed so not accessible; retry - s.mu.Unlock() - time.Sleep(time.Millisecond) - } - - wa.ch = nil - s.mu.Unlock() -} - -func (s *watchableStore) Restore(b backend.Backend) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.store.Restore(b) - if err != nil { - return err - } - - for wa := range s.synced.watchers { - wa.restore = true - s.unsynced.add(wa) - } - s.synced = newWatcherGroup() - return nil -} - -// syncWatchersLoop syncs the watcher in the unsynced map every 100ms. -func (s *watchableStore) syncWatchersLoop() { - defer s.wg.Done() - - waitDuration := 100 * time.Millisecond - delayTicker := time.NewTicker(waitDuration) - defer delayTicker.Stop() - - for { - s.mu.RLock() - st := time.Now() - lastUnsyncedWatchers := s.unsynced.size() - s.mu.RUnlock() - - unsyncedWatchers := 0 - if lastUnsyncedWatchers > 0 { - unsyncedWatchers = s.syncWatchers() - } - syncDuration := time.Since(st) - - delayTicker.Reset(waitDuration) - // more work pending? 
- if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { - // be fair to other store operations by yielding time taken - delayTicker.Reset(syncDuration) - } - - select { - case <-delayTicker.C: - case <-s.stopc: - return - } - } -} - -// syncVictimsLoop tries to write precomputed watcher responses to -// watchers that had a blocked watcher channel -func (s *watchableStore) syncVictimsLoop() { - defer s.wg.Done() - - for { - for s.moveVictims() != 0 { - // try to update all victim watchers - } - s.mu.RLock() - isEmpty := len(s.victims) == 0 - s.mu.RUnlock() - - var tickc <-chan time.Time - if !isEmpty { - tickc = time.After(10 * time.Millisecond) - } - - select { - case <-tickc: - case <-s.victimc: - case <-s.stopc: - return - } - } -} - -// moveVictims tries to update watches with already pending event data -func (s *watchableStore) moveVictims() (moved int) { - s.mu.Lock() - victims := s.victims - s.victims = nil - s.mu.Unlock() - - var newVictim watcherBatch - for _, wb := range victims { - // try to send responses again - for w, eb := range wb { - // watcher has observed the store up to, but not including, w.minRev - rev := w.minRev - 1 - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - if newVictim == nil { - newVictim = make(watcherBatch) - } - newVictim[w] = eb - continue - } - moved++ - } - - // assign completed victim watchers to unsync/sync - s.mu.Lock() - s.store.revMu.RLock() - curRev := s.store.currentRev - for w, eb := range wb { - if newVictim != nil && newVictim[w] != nil { - // couldn't send watch response; stays victim - continue - } - w.victim = false - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - if w.minRev <= curRev { - s.unsynced.add(w) - } else { - slowWatcherGauge.Dec() - s.synced.add(w) - } - } - s.store.revMu.RUnlock() - s.mu.Unlock() - } - - if len(newVictim) > 0 { - s.mu.Lock() - s.victims = append(s.victims, newVictim) - 
s.mu.Unlock() - } - - return moved -} - -// syncWatchers syncs unsynced watchers by: -// 1. choose a set of watchers from the unsynced watcher group -// 2. iterate over the set to get the minimum revision and remove compacted watchers -// 3. use minimum revision to get all key-value pairs and send those events to watchers -// 4. remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() int { - s.mu.Lock() - defer s.mu.Unlock() - - if s.unsynced.size() == 0 { - return 0 - } - - s.store.revMu.RLock() - defer s.store.revMu.RUnlock() - - // in order to find key-value pairs from unsynced watchers, we need to - // find min revision index, and these revisions can be used to - // query the backend store of key-value pairs - curRev := s.store.currentRev - compactionRev := s.store.compactMainRev - - wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) - minBytes, maxBytes := newRevBytes(), newRevBytes() - revToBytes(revision{main: minRev}, minBytes) - revToBytes(revision{main: curRev + 1}, maxBytes) - - // UnsafeRange returns keys and values. And in boltdb, keys are revisions. - // values are actual key-value pairs in backend. - tx := s.store.b.ReadTx() - tx.RLock() - revs, vs := tx.UnsafeRange(schema.Key, minBytes, maxBytes, 0) - evs := kvsToEvents(s.store.lg, wg, revs, vs) - // Must unlock after kvsToEvents, because vs (come from boltdb memory) is not deep copy. - // We can only unlock after Unmarshal, which will do deep copy. - // Otherwise we will trigger SIGSEGV during boltdb re-mmap. 
- tx.RUnlock() - - victims := make(watcherBatch) - wb := newWatcherBatch(wg, evs) - for w := range wg.watchers { - w.minRev = curRev + 1 - - eb, ok := wb[w] - if !ok { - // bring un-notified watcher to synced - s.synced.add(w) - s.unsynced.delete(w) - continue - } - - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - w.victim = true - } - - if w.victim { - victims[w] = eb - } else { - if eb.moreRev != 0 { - // stay unsynced; more to read - continue - } - s.synced.add(w) - } - s.unsynced.delete(w) - } - s.addVictim(victims) - - vsz := 0 - for _, v := range s.victims { - vsz += len(v) - } - slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) - - return s.unsynced.size() -} - -// kvsToEvents gets all events for the watchers from all key-value pairs -func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { - for i, v := range vals { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(v); err != nil { - lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err)) - } - - if !wg.contains(string(kv.Key)) { - continue - } - - ty := mvccpb.PUT - if isTombstone(revs[i]) { - ty = mvccpb.DELETE - // patch in mod revision so watchers won't skip - kv.ModRevision = bytesToRev(revs[i]).main - } - evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) - } - return evs -} - -// notify notifies the fact that given event at the given rev just happened to -// watchers that watch on the key of the event. 
-func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { - victim := make(watcherBatch) - for w, eb := range newWatcherBatch(&s.synced, evs) { - if eb.revs != 1 { - s.store.lg.Panic( - "unexpected multiple revisions in watch notification", - zap.Int("number-of-revisions", eb.revs), - ) - } - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - // move slow watcher to victims - w.minRev = rev + 1 - w.victim = true - victim[w] = eb - s.synced.delete(w) - slowWatcherGauge.Inc() - } - } - s.addVictim(victim) -} - -func (s *watchableStore) addVictim(victim watcherBatch) { - if len(victim) == 0 { - return - } - s.victims = append(s.victims, victim) - select { - case s.victimc <- struct{}{}: - default: - } -} - -func (s *watchableStore) rev() int64 { return s.store.Rev() } - -func (s *watchableStore) progress(w *watcher) { - s.mu.RLock() - defer s.mu.RUnlock() - - if _, ok := s.synced.watchers[w]; ok { - w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) - // If the ch is full, this watcher is receiving events. - // We do not need to send progress at all. - } -} - -type watcher struct { - // the watcher key - key []byte - // end indicates the end of the range to watch. - // If end is set, the watcher is on a range. 
- end []byte - - // victim is set when ch is blocked and undergoing victim processing - victim bool - - // compacted is set when the watcher is removed because of compaction - compacted bool - - // restore is true when the watcher is being restored from leader snapshot - // which means that this watcher has just been moved from "synced" to "unsynced" - // watcher group, possibly with a future revision when it was first added - // to the synced watcher - // "unsynced" watcher revision must always be <= current revision, - // except when the watcher were to be moved from "synced" watcher group - restore bool - - // minRev is the minimum revision update the watcher will accept - minRev int64 - id WatchID - - fcs []FilterFunc - // a chan to send out the watch response. - // The chan might be shared with other watchers. - ch chan<- WatchResponse -} - -func (w *watcher) send(wr WatchResponse) bool { - progressEvent := len(wr.Events) == 0 - - if len(w.fcs) != 0 { - ne := make([]mvccpb.Event, 0, len(wr.Events)) - for i := range wr.Events { - filtered := false - for _, filter := range w.fcs { - if filter(wr.Events[i]) { - filtered = true - break - } - } - if !filtered { - ne = append(ne, wr.Events[i]) - } - } - wr.Events = ne - } - - // if all events are filtered out, we should send nothing. - if !progressEvent && len(wr.Events) == 0 { - return true - } - select { - case w.ch <- wr: - return true - default: - return false - } -} diff --git a/server/storage/mvcc/watchable_store_bench_test.go b/server/storage/mvcc/watchable_store_bench_test.go deleted file mode 100644 index 9329dce7635..00000000000 --- a/server/storage/mvcc/watchable_store_bench_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "math/rand" - "os" - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func BenchmarkWatchableStorePut(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, be, tmpPath) - - // arbitrary number of bytes - bytesN := 64 - keys := createBytesSlice(bytesN, b.N) - vals := createBytesSlice(bytesN, b.N) - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - s.Put(keys[i], vals[i], lease.NoLease) - } -} - -// BenchmarkWatchableStoreTxnPut benchmarks the Put operation -// with transaction begin and end, where transaction involves -// some synchronization operations, such as mutex locking. -func BenchmarkWatchableStoreTxnPut(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, be, tmpPath) - - // arbitrary number of bytes - bytesN := 64 - keys := createBytesSlice(bytesN, b.N) - vals := createBytesSlice(bytesN, b.N) - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - txn := s.Write(traceutil.TODO()) - txn.Put(keys[i], vals[i], lease.NoLease) - txn.End() - } -} - -// BenchmarkWatchableStoreWatchPutSync benchmarks the case of -// many synced watchers receiving a Put notification. 
-func BenchmarkWatchableStoreWatchPutSync(b *testing.B) { - benchmarkWatchableStoreWatchPut(b, true) -} - -// BenchmarkWatchableStoreWatchPutUnsync benchmarks the case of -// many unsynced watchers receiving a Put notification. -func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) { - benchmarkWatchableStoreWatchPut(b, false) -} - -func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, be, tmpPath) - - k := []byte("testkey") - v := []byte("testval") - - rev := int64(0) - if !synced { - // non-0 value to keep watchers in unsynced - rev = 1 - } - - w := s.NewWatchStream() - defer w.Close() - watchIDs := make([]WatchID, b.N) - for i := range watchIDs { - watchIDs[i], _ = w.Watch(0, k, nil, rev) - } - - b.ResetTimer() - b.ReportAllocs() - - // trigger watchers - s.Put(k, v, lease.NoLease) - for range watchIDs { - <-w.Chan() - } - select { - case wc := <-w.Chan(): - b.Fatalf("unexpected data %v", wc) - default: - } -} - -// BenchmarkWatchableStoreUnsyncedCancel benchmarks on cancel function -// performance for unsynced watchers in a WatchableStore. It creates -// k*N watchers to populate unsynced with a reasonably large number of -// watchers. And measures the time it takes to cancel N watchers out -// of k*N watchers. The performance is expected to differ depending on -// the unsynced member implementation. -// TODO: k is an arbitrary constant. We need to figure out what factor -// we should put to simulate the real-world use cases. -func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - - // manually create watchableStore instead of newWatchableStore - // because newWatchableStore periodically calls syncWatchersLoop - // method to sync watchers in unsynced map. 
We want to keep watchers - // in unsynced for this benchmark. - ws := &watchableStore{ - store: s, - unsynced: newWatcherGroup(), - - // to make the test not crash from assigning to nil map. - // 'synced' doesn't get populated in this test. - synced: newWatcherGroup(), - } - - defer func() { - ws.store.Close() - os.Remove(tmpPath) - }() - - // Put a key so that we can spawn watchers on that key - // (testKey in this test). This increases the rev to 1, - // and later we can we set the watcher's startRev to 1, - // and force watchers to be in unsynced. - testKey := []byte("foo") - testValue := []byte("bar") - s.Put(testKey, testValue, lease.NoLease) - - w := ws.NewWatchStream() - - const k int = 2 - benchSampleN := b.N - watcherN := k * benchSampleN - - watchIDs := make([]WatchID, watcherN) - for i := 0; i < watcherN; i++ { - // non-0 value to keep watchers in unsynced - watchIDs[i], _ = w.Watch(0, testKey, nil, 1) - } - - // random-cancel N watchers to make it not biased towards - // data structures with an order, such as slice. 
- ix := rand.Perm(watcherN) - - b.ResetTimer() - b.ReportAllocs() - - // cancel N watchers - for _, idx := range ix[:benchSampleN] { - if err := w.Cancel(watchIDs[idx]); err != nil { - b.Error(err) - } - } -} - -func BenchmarkWatchableStoreSyncedCancel(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - s.store.Close() - os.Remove(tmpPath) - }() - - // Put a key so that we can spawn watchers on that key - testKey := []byte("foo") - testValue := []byte("bar") - s.Put(testKey, testValue, lease.NoLease) - - w := s.NewWatchStream() - - // put 1 million watchers on the same key - const watcherN = 1000000 - - watchIDs := make([]WatchID, watcherN) - for i := 0; i < watcherN; i++ { - // 0 for startRev to keep watchers in synced - watchIDs[i], _ = w.Watch(0, testKey, nil, 0) - } - - // randomly cancel watchers to make it not biased towards - // data structures with an order, such as slice. - ix := rand.Perm(watcherN) - - b.ResetTimer() - b.ReportAllocs() - - for _, idx := range ix { - if err := w.Cancel(watchIDs[idx]); err != nil { - b.Error(err) - } - } -} diff --git a/server/storage/mvcc/watchable_store_test.go b/server/storage/mvcc/watchable_store_test.go deleted file mode 100644 index a36c3ee1430..00000000000 --- a/server/storage/mvcc/watchable_store_test.go +++ /dev/null @@ -1,659 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "fmt" - "os" - "reflect" - "sync" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func TestWatch(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - b.Close() - s.Close() - os.Remove(tmpPath) - }() - - testKey := []byte("foo") - testValue := []byte("bar") - s.Put(testKey, testValue, lease.NoLease) - - w := s.NewWatchStream() - w.Watch(0, testKey, nil, 0) - - if !s.synced.contains(string(testKey)) { - // the key must have had an entry in synced - t.Errorf("existence = false, want true") - } -} - -func TestNewWatcherCancel(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - s.store.Close() - os.Remove(tmpPath) - }() - testKey := []byte("foo") - testValue := []byte("bar") - s.Put(testKey, testValue, lease.NoLease) - - w := s.NewWatchStream() - wt, _ := w.Watch(0, testKey, nil, 0) - - if err := w.Cancel(wt); err != nil { - t.Error(err) - } - - if s.synced.contains(string(testKey)) { - // the key shoud have been deleted - t.Errorf("existence = true, want false") - } -} - -// TestCancelUnsynced tests if running CancelFunc removes watchers from unsynced. -func TestCancelUnsynced(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - - // manually create watchableStore instead of newWatchableStore - // because newWatchableStore automatically calls syncWatchers - // method to sync watchers in unsynced map. We want to keep watchers - // in unsynced to test if syncWatchers works as expected. 
- s := &watchableStore{ - store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}), - unsynced: newWatcherGroup(), - - // to make the test not crash from assigning to nil map. - // 'synced' doesn't get populated in this test. - synced: newWatcherGroup(), - } - - defer func() { - s.store.Close() - os.Remove(tmpPath) - }() - - // Put a key so that we can spawn watchers on that key. - // (testKey in this test). This increases the rev to 1, - // and later we can we set the watcher's startRev to 1, - // and force watchers to be in unsynced. - testKey := []byte("foo") - testValue := []byte("bar") - s.Put(testKey, testValue, lease.NoLease) - - w := s.NewWatchStream() - - // arbitrary number for watchers - watcherN := 100 - - // create watcherN of watch ids to cancel - watchIDs := make([]WatchID, watcherN) - for i := 0; i < watcherN; i++ { - // use 1 to keep watchers in unsynced - watchIDs[i], _ = w.Watch(0, testKey, nil, 1) - } - - for _, idx := range watchIDs { - if err := w.Cancel(idx); err != nil { - t.Error(err) - } - } - - // After running CancelFunc - // - // unsynced should be empty - // because cancel removes watcher from unsynced - if size := s.unsynced.size(); size != 0 { - t.Errorf("unsynced size = %d, want 0", size) - } -} - -// TestSyncWatchers populates unsynced watcher map and tests syncWatchers -// method to see if it correctly sends events to channel of unsynced watchers -// and moves these watchers to synced. 
-func TestSyncWatchers(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - - s := &watchableStore{ - store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}), - unsynced: newWatcherGroup(), - synced: newWatcherGroup(), - } - - defer func() { - s.store.Close() - os.Remove(tmpPath) - }() - - testKey := []byte("foo") - testValue := []byte("bar") - s.Put(testKey, testValue, lease.NoLease) - - w := s.NewWatchStream() - - // arbitrary number for watchers - watcherN := 100 - - for i := 0; i < watcherN; i++ { - // specify rev as 1 to keep watchers in unsynced - w.Watch(0, testKey, nil, 1) - } - - // Before running s.syncWatchers() synced should be empty because we manually - // populate unsynced only - sws := s.synced.watcherSetByKey(string(testKey)) - uws := s.unsynced.watcherSetByKey(string(testKey)) - - if len(sws) != 0 { - t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws)) - } - // unsynced should not be empty because we manually populated unsynced only - if len(uws) != watcherN { - t.Errorf("unsynced size = %d, want %d", len(uws), watcherN) - } - - // this should move all unsynced watchers to synced ones - s.syncWatchers() - - sws = s.synced.watcherSetByKey(string(testKey)) - uws = s.unsynced.watcherSetByKey(string(testKey)) - - // After running s.syncWatchers(), synced should not be empty because syncwatchers - // populates synced in this test case - if len(sws) != watcherN { - t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN) - } - - // unsynced should be empty because syncwatchers is expected to move all watchers - // from unsynced to synced in this test case - if len(uws) != 0 { - t.Errorf("unsynced size = %d, want 0", len(uws)) - } - - for w := range sws { - if w.minRev != s.Rev()+1 { - t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1) - } - } - - if len(w.(*watchStream).ch) != watcherN { - t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN) - } - - evs := 
(<-w.(*watchStream).ch).Events - if len(evs) != 1 { - t.Errorf("len(evs) got = %d, want = 1", len(evs)) - } - if evs[0].Type != mvccpb.PUT { - t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT) - } - if !bytes.Equal(evs[0].Kv.Key, testKey) { - t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey) - } - if !bytes.Equal(evs[0].Kv.Value, testValue) { - t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue) - } -} - -// TestWatchCompacted tests a watcher that watches on a compacted revision. -func TestWatchCompacted(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - s.store.Close() - os.Remove(tmpPath) - }() - testKey := []byte("foo") - testValue := []byte("bar") - - maxRev := 10 - compactRev := int64(5) - for i := 0; i < maxRev; i++ { - s.Put(testKey, testValue, lease.NoLease) - } - _, err := s.Compact(traceutil.TODO(), compactRev) - if err != nil { - t.Fatalf("failed to compact kv (%v)", err) - } - - w := s.NewWatchStream() - wt, _ := w.Watch(0, testKey, nil, compactRev-1) - - select { - case resp := <-w.Chan(): - if resp.WatchID != wt { - t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt) - } - if resp.CompactRevision == 0 { - t.Errorf("resp.Compacted = %v, want %v", resp.CompactRevision, compactRev) - } - case <-time.After(1 * time.Second): - t.Fatalf("failed to receive response (timeout)") - } -} - -func TestWatchFutureRev(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - b.Close() - s.Close() - os.Remove(tmpPath) - }() - - testKey := []byte("foo") - testValue := []byte("bar") - - w := s.NewWatchStream() - wrev := int64(10) - w.Watch(0, testKey, nil, wrev) - - for i := 0; i < 10; i++ { - rev := s.Put(testKey, testValue, lease.NoLease) - if rev >= wrev { - break - } - } - - select { - case resp := 
<-w.Chan(): - if resp.Revision != wrev { - t.Fatalf("rev = %d, want %d", resp.Revision, wrev) - } - if len(resp.Events) != 1 { - t.Fatalf("failed to get events from the response") - } - if resp.Events[0].Kv.ModRevision != wrev { - t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev) - } - case <-time.After(time.Second): - t.Fatal("failed to receive event in 1 second.") - } -} - -func TestWatchRestore(t *testing.T) { - test := func(delay time.Duration) func(t *testing.T) { - return func(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s, b, tmpPath) - - testKey := []byte("foo") - testValue := []byte("bar") - rev := s.Put(testKey, testValue, lease.NoLease) - - newBackend, newPath := betesting.NewDefaultTmpBackend(t) - newStore := newWatchableStore(zaptest.NewLogger(t), newBackend, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(newStore, newBackend, newPath) - - w := newStore.NewWatchStream() - w.Watch(0, testKey, nil, rev-1) - - time.Sleep(delay) - - newStore.Restore(b) - select { - case resp := <-w.Chan(): - if resp.Revision != rev { - t.Fatalf("rev = %d, want %d", resp.Revision, rev) - } - if len(resp.Events) != 1 { - t.Fatalf("failed to get events from the response") - } - if resp.Events[0].Kv.ModRevision != rev { - t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev) - } - case <-time.After(time.Second): - t.Fatal("failed to receive event in 1 second.") - } - } - } - - t.Run("Normal", test(0)) - t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration -} - -// TestWatchRestoreSyncedWatcher tests such a case that: -// 1. watcher is created with a future revision "math.MaxInt64 - 2" -// 2. watcher with a future revision is added to "synced" watcher group -// 3. restore/overwrite storage with snapshot of a higher lasat revision -// 4. 
restore operation moves "synced" to "unsynced" watcher group -// 5. choose the watcher from step 1, without panic -func TestWatchRestoreSyncedWatcher(t *testing.T) { - b1, b1Path := betesting.NewDefaultTmpBackend(t) - s1 := newWatchableStore(zaptest.NewLogger(t), b1, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s1, b1, b1Path) - - b2, b2Path := betesting.NewDefaultTmpBackend(t) - s2 := newWatchableStore(zaptest.NewLogger(t), b2, &lease.FakeLessor{}, StoreConfig{}) - defer cleanup(s2, b2, b2Path) - - testKey, testValue := []byte("foo"), []byte("bar") - rev := s1.Put(testKey, testValue, lease.NoLease) - startRev := rev + 2 - - // create a watcher with a future revision - // add to "synced" watcher group (startRev > s.store.currentRev) - w1 := s1.NewWatchStream() - w1.Watch(0, testKey, nil, startRev) - - // make "s2" ends up with a higher last revision - s2.Put(testKey, testValue, lease.NoLease) - s2.Put(testKey, testValue, lease.NoLease) - - // overwrite storage with higher revisions - if err := s1.Restore(b2); err != nil { - t.Fatal(err) - } - - // wait for next "syncWatchersLoop" iteration - // and the unsynced watcher should be chosen - time.Sleep(2 * time.Second) - - // trigger events for "startRev" - s1.Put(testKey, testValue, lease.NoLease) - - select { - case resp := <-w1.Chan(): - if resp.Revision != startRev { - t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision) - } - if len(resp.Events) != 1 { - t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events)) - } - if resp.Events[0].Kv.ModRevision != startRev { - t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision) - } - case <-time.After(time.Second): - t.Fatal("failed to receive event in 1 second") - } -} - -// TestWatchBatchUnsynced tests batching on unsynced watchers -func TestWatchBatchUnsynced(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, 
StoreConfig{}) - - oldMaxRevs := watchBatchMaxRevs - defer func() { - watchBatchMaxRevs = oldMaxRevs - s.store.Close() - os.Remove(tmpPath) - }() - batches := 3 - watchBatchMaxRevs = 4 - - v := []byte("foo") - for i := 0; i < watchBatchMaxRevs*batches; i++ { - s.Put(v, v, lease.NoLease) - } - - w := s.NewWatchStream() - w.Watch(0, v, nil, 1) - for i := 0; i < batches; i++ { - if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs { - t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs) - } - } - - s.store.revMu.Lock() - defer s.store.revMu.Unlock() - if size := s.synced.size(); size != 1 { - t.Errorf("synced size = %d, want 1", size) - } -} - -func TestNewMapwatcherToEventMap(t *testing.T) { - k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2") - v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2") - - ws := []*watcher{{key: k0}, {key: k1}, {key: k2}} - - evs := []mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: k0, Value: v0}, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: k1, Value: v1}, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: k2, Value: v2}, - }, - } - - tests := []struct { - sync []*watcher - evs []mvccpb.Event - - wwe map[*watcher][]mvccpb.Event - }{ - // no watcher in sync, some events should return empty wwe - { - nil, - evs, - map[*watcher][]mvccpb.Event{}, - }, - - // one watcher in sync, one event that does not match the key of that - // watcher should return empty wwe - { - []*watcher{ws[2]}, - evs[:1], - map[*watcher][]mvccpb.Event{}, - }, - - // one watcher in sync, one event that matches the key of that - // watcher should return wwe with that matching watcher - { - []*watcher{ws[1]}, - evs[1:2], - map[*watcher][]mvccpb.Event{ - ws[1]: evs[1:2], - }, - }, - - // two watchers in sync that watches two different keys, one event - // that matches the key of only one of the watcher should return wwe - // with the matching watcher - { - []*watcher{ws[0], ws[2]}, - 
evs[2:], - map[*watcher][]mvccpb.Event{ - ws[2]: evs[2:], - }, - }, - - // two watchers in sync that watches the same key, two events that - // match the keys should return wwe with those two watchers - { - []*watcher{ws[0], ws[1]}, - evs[:2], - map[*watcher][]mvccpb.Event{ - ws[0]: evs[:1], - ws[1]: evs[1:2], - }, - }, - } - - for i, tt := range tests { - wg := newWatcherGroup() - for _, w := range tt.sync { - wg.add(w) - } - - gwe := newWatcherBatch(&wg, tt.evs) - if len(gwe) != len(tt.wwe) { - t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe)) - } - // compare gwe and tt.wwe - for w, eb := range gwe { - if len(eb.evs) != len(tt.wwe[w]) { - t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w])) - } - if !reflect.DeepEqual(eb.evs, tt.wwe[w]) { - t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false) - } - } - } -} - -// TestWatchVictims tests that watchable store delivers watch events -// when the watch channel is temporarily clogged with too many events. 
-func TestWatchVictims(t *testing.T) { - oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync - - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - b.Close() - s.Close() - os.Remove(tmpPath) - chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync - }() - - chanBufLen, maxWatchersPerSync = 1, 2 - numPuts := chanBufLen * 64 - testKey, testValue := []byte("foo"), []byte("bar") - - var wg sync.WaitGroup - numWatches := maxWatchersPerSync * 128 - errc := make(chan error, numWatches) - wg.Add(numWatches) - for i := 0; i < numWatches; i++ { - go func() { - w := s.NewWatchStream() - w.Watch(0, testKey, nil, 1) - defer func() { - w.Close() - wg.Done() - }() - tc := time.After(10 * time.Second) - evs, nextRev := 0, int64(2) - for evs < numPuts { - select { - case <-tc: - errc <- fmt.Errorf("time out") - return - case wr := <-w.Chan(): - evs += len(wr.Events) - for _, ev := range wr.Events { - if ev.Kv.ModRevision != nextRev { - errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision) - return - } - nextRev++ - } - time.Sleep(time.Millisecond) - } - } - if evs != numPuts { - errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs) - return - } - select { - case <-w.Chan(): - errc <- fmt.Errorf("unexpected response") - default: - } - }() - time.Sleep(time.Millisecond) - } - - var wgPut sync.WaitGroup - wgPut.Add(numPuts) - for i := 0; i < numPuts; i++ { - go func() { - defer wgPut.Done() - s.Put(testKey, testValue, lease.NoLease) - }() - } - wgPut.Wait() - - wg.Wait() - select { - case err := <-errc: - t.Fatal(err) - default: - } -} - -// TestStressWatchCancelClose tests closing a watch stream while -// canceling its watches. 
-func TestStressWatchCancelClose(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - b.Close() - s.Close() - os.Remove(tmpPath) - }() - - testKey, testValue := []byte("foo"), []byte("bar") - var wg sync.WaitGroup - readyc := make(chan struct{}) - wg.Add(100) - for i := 0; i < 100; i++ { - go func() { - defer wg.Done() - w := s.NewWatchStream() - ids := make([]WatchID, 10) - for i := range ids { - ids[i], _ = w.Watch(0, testKey, nil, 0) - } - <-readyc - wg.Add(1 + len(ids)/2) - for i := range ids[:len(ids)/2] { - go func(n int) { - defer wg.Done() - w.Cancel(ids[n]) - }(i) - } - go func() { - defer wg.Done() - w.Close() - }() - }() - } - - close(readyc) - for i := 0; i < 100; i++ { - s.Put(testKey, testValue, lease.NoLease) - } - - wg.Wait() -} diff --git a/server/storage/mvcc/watchable_store_txn.go b/server/storage/mvcc/watchable_store_txn.go deleted file mode 100644 index b70d8ceca47..00000000000 --- a/server/storage/mvcc/watchable_store_txn.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/traceutil" -) - -func (tw *watchableStoreTxnWrite) End() { - changes := tw.Changes() - if len(changes) == 0 { - tw.TxnWrite.End() - return - } - - rev := tw.Rev() + 1 - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - evs[i].Kv = &changes[i] - if change.CreateRevision == 0 { - evs[i].Type = mvccpb.DELETE - evs[i].Kv.ModRevision = rev - } else { - evs[i].Type = mvccpb.PUT - } - } - - // end write txn under watchable store lock so the updates are visible - // when asynchronous event posting checks the current store revision - tw.s.mu.Lock() - tw.s.notify(rev, evs) - tw.TxnWrite.End() - tw.s.mu.Unlock() -} - -type watchableStoreTxnWrite struct { - TxnWrite - s *watchableStore -} - -func (s *watchableStore) Write(trace *traceutil.Trace) TxnWrite { - return &watchableStoreTxnWrite{s.store.Write(trace), s} -} diff --git a/server/storage/mvcc/watcher.go b/server/storage/mvcc/watcher.go deleted file mode 100644 index 7d2490b1d6e..00000000000 --- a/server/storage/mvcc/watcher.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mvcc - -import ( - "bytes" - "errors" - "sync" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" -) - -var ( - ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") - ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty") - ErrWatcherDuplicateID = errors.New("mvcc: duplicate watch ID provided on the WatchStream") -) - -type WatchID int64 - -// FilterFunc returns true if the given event should be filtered out. -type FilterFunc func(e mvccpb.Event) bool - -type WatchStream interface { - // Watch creates a watcher. The watcher watches the events happening or - // happened on the given key or range [key, end) from the given startRev. - // - // The whole event history can be watched unless compacted. - // If "startRev" <=0, watch observes events after currentRev. - // - // The returned "id" is the ID of this watcher. It appears as WatchID - // in events that are sent to the created watcher through stream channel. - // The watch ID is used when it's not equal to AutoWatchID. Otherwise, - // an auto-generated watch ID is returned. - Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) - - // Chan returns a chan. All watch response will be sent to the returned chan. - Chan() <-chan WatchResponse - - // RequestProgress requests the progress of the watcher with given ID. The response - // will only be sent if the watcher is currently synced. - // The responses will be sent through the WatchRespone Chan attached - // with this stream to ensure correct ordering. - // The responses contains no events. The revision in the response is the progress - // of the watchers since the watcher is currently synced. - RequestProgress(id WatchID) - - // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be - // returned. - Cancel(id WatchID) error - - // Close closes Chan and release all related resources. 
- Close() - - // Rev returns the current revision of the KV the stream watches on. - Rev() int64 -} - -type WatchResponse struct { - // WatchID is the WatchID of the watcher this response sent to. - WatchID WatchID - - // Events contains all the events that needs to send. - Events []mvccpb.Event - - // Revision is the revision of the KV when the watchResponse is created. - // For a normal response, the revision should be the same as the last - // modified revision inside Events. For a delayed response to a unsynced - // watcher, the revision is greater than the last modified revision - // inside Events. - Revision int64 - - // CompactRevision is set when the watcher is cancelled due to compaction. - CompactRevision int64 -} - -// watchStream contains a collection of watchers that share -// one streaming chan to send out watched events and other control events. -type watchStream struct { - watchable watchable - ch chan WatchResponse - - mu sync.Mutex // guards fields below it - // nextID is the ID pre-allocated for next new watcher in this stream - nextID WatchID - closed bool - cancels map[WatchID]cancelFunc - watchers map[WatchID]*watcher -} - -// Watch creates a new watcher in the stream and returns its WatchID. -func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) { - // prevent wrong range where key >= end lexicographically - // watch request with 'WithFromKey' has empty-byte range end - if len(end) != 0 && bytes.Compare(key, end) != -1 { - return -1, ErrEmptyWatcherRange - } - - ws.mu.Lock() - defer ws.mu.Unlock() - if ws.closed { - return -1, ErrEmptyWatcherRange - } - - if id == clientv3.AutoWatchID { - for ws.watchers[ws.nextID] != nil { - ws.nextID++ - } - id = ws.nextID - ws.nextID++ - } else if _, ok := ws.watchers[id]; ok { - return -1, ErrWatcherDuplicateID - } - - w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...) 
- - ws.cancels[id] = c - ws.watchers[id] = w - return id, nil -} - -func (ws *watchStream) Chan() <-chan WatchResponse { - return ws.ch -} - -func (ws *watchStream) Cancel(id WatchID) error { - ws.mu.Lock() - cancel, ok := ws.cancels[id] - w := ws.watchers[id] - ok = ok && !ws.closed - ws.mu.Unlock() - - if !ok { - return ErrWatcherNotExist - } - cancel() - - ws.mu.Lock() - // The watch isn't removed until cancel so that if Close() is called, - // it will wait for the cancel. Otherwise, Close() could close the - // watch channel while the store is still posting events. - if ww := ws.watchers[id]; ww == w { - delete(ws.cancels, id) - delete(ws.watchers, id) - } - ws.mu.Unlock() - - return nil -} - -func (ws *watchStream) Close() { - ws.mu.Lock() - defer ws.mu.Unlock() - - for _, cancel := range ws.cancels { - cancel() - } - ws.closed = true - close(ws.ch) - watchStreamGauge.Dec() -} - -func (ws *watchStream) Rev() int64 { - ws.mu.Lock() - defer ws.mu.Unlock() - return ws.watchable.rev() -} - -func (ws *watchStream) RequestProgress(id WatchID) { - ws.mu.Lock() - w, ok := ws.watchers[id] - ws.mu.Unlock() - if !ok { - return - } - ws.watchable.progress(w) -} diff --git a/server/storage/mvcc/watcher_bench_test.go b/server/storage/mvcc/watcher_bench_test.go deleted file mode 100644 index 264369d75eb..00000000000 --- a/server/storage/mvcc/watcher_bench_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "fmt" - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func BenchmarkKVWatcherMemoryUsage(b *testing.B) { - be, tmpPath := betesting.NewDefaultTmpBackend(b) - watchable := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{}) - - defer cleanup(watchable, be, tmpPath) - - w := watchable.NewWatchStream() - - b.ReportAllocs() - b.StartTimer() - for i := 0; i < b.N; i++ { - w.Watch(0, []byte(fmt.Sprint("foo", i)), nil, 0) - } -} diff --git a/server/storage/mvcc/watcher_group.go b/server/storage/mvcc/watcher_group.go deleted file mode 100644 index 356b49e6413..00000000000 --- a/server/storage/mvcc/watcher_group.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "fmt" - "math" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/pkg/v3/adt" -) - -var ( - // watchBatchMaxRevs is the maximum distinct revisions that - // may be sent to an unsynced watcher at a time. Declared as - // var instead of const for testing purposes. 
- watchBatchMaxRevs = 1000 -) - -type eventBatch struct { - // evs is a batch of revision-ordered events - evs []mvccpb.Event - // revs is the minimum unique revisions observed for this batch - revs int - // moreRev is first revision with more events following this batch - moreRev int64 -} - -func (eb *eventBatch) add(ev mvccpb.Event) { - if eb.revs > watchBatchMaxRevs { - // maxed out batch size - return - } - - if len(eb.evs) == 0 { - // base case - eb.revs = 1 - eb.evs = append(eb.evs, ev) - return - } - - // revision accounting - ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision - evRev := ev.Kv.ModRevision - if evRev > ebRev { - eb.revs++ - if eb.revs > watchBatchMaxRevs { - eb.moreRev = evRev - return - } - } - - eb.evs = append(eb.evs, ev) -} - -type watcherBatch map[*watcher]*eventBatch - -func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) { - eb := wb[w] - if eb == nil { - eb = &eventBatch{} - wb[w] = eb - } - eb.add(ev) -} - -// newWatcherBatch maps watchers to their matched events. It enables quick -// events look up by watcher. 
-func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch { - if len(wg.watchers) == 0 { - return nil - } - - wb := make(watcherBatch) - for _, ev := range evs { - for w := range wg.watcherSetByKey(string(ev.Kv.Key)) { - if ev.Kv.ModRevision >= w.minRev { - // don't double notify - wb.add(w, ev) - } - } - } - return wb -} - -type watcherSet map[*watcher]struct{} - -func (w watcherSet) add(wa *watcher) { - if _, ok := w[wa]; ok { - panic("add watcher twice!") - } - w[wa] = struct{}{} -} - -func (w watcherSet) union(ws watcherSet) { - for wa := range ws { - w.add(wa) - } -} - -func (w watcherSet) delete(wa *watcher) { - if _, ok := w[wa]; !ok { - panic("removing missing watcher!") - } - delete(w, wa) -} - -type watcherSetByKey map[string]watcherSet - -func (w watcherSetByKey) add(wa *watcher) { - set := w[string(wa.key)] - if set == nil { - set = make(watcherSet) - w[string(wa.key)] = set - } - set.add(wa) -} - -func (w watcherSetByKey) delete(wa *watcher) bool { - k := string(wa.key) - if v, ok := w[k]; ok { - if _, ok := v[wa]; ok { - delete(v, wa) - if len(v) == 0 { - // remove the set; nothing left - delete(w, k) - } - return true - } - } - return false -} - -// watcherGroup is a collection of watchers organized by their ranges -type watcherGroup struct { - // keyWatchers has the watchers that watch on a single key - keyWatchers watcherSetByKey - // ranges has the watchers that watch a range; it is sorted by interval - ranges adt.IntervalTree - // watchers is the set of all watchers - watchers watcherSet -} - -func newWatcherGroup() watcherGroup { - return watcherGroup{ - keyWatchers: make(watcherSetByKey), - ranges: adt.NewIntervalTree(), - watchers: make(watcherSet), - } -} - -// add puts a watcher in the group. -func (wg *watcherGroup) add(wa *watcher) { - wg.watchers.add(wa) - if wa.end == nil { - wg.keyWatchers.add(wa) - return - } - - // interval already registered? 
- ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - if iv := wg.ranges.Find(ivl); iv != nil { - iv.Val.(watcherSet).add(wa) - return - } - - // not registered, put in interval tree - ws := make(watcherSet) - ws.add(wa) - wg.ranges.Insert(ivl, ws) -} - -// contains is whether the given key has a watcher in the group. -func (wg *watcherGroup) contains(key string) bool { - _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) -} - -// size gives the number of unique watchers in the group. -func (wg *watcherGroup) size() int { return len(wg.watchers) } - -// delete removes a watcher from the group. -func (wg *watcherGroup) delete(wa *watcher) bool { - if _, ok := wg.watchers[wa]; !ok { - return false - } - wg.watchers.delete(wa) - if wa.end == nil { - wg.keyWatchers.delete(wa) - return true - } - - ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - iv := wg.ranges.Find(ivl) - if iv == nil { - return false - } - - ws := iv.Val.(watcherSet) - delete(ws, wa) - if len(ws) == 0 { - // remove interval missing watchers - if ok := wg.ranges.Delete(ivl); !ok { - panic("could not remove watcher from interval tree") - } - } - - return true -} - -// choose selects watchers from the watcher group to update -func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) { - if len(wg.watchers) < maxWatchers { - return wg, wg.chooseAll(curRev, compactRev) - } - ret := newWatcherGroup() - for w := range wg.watchers { - if maxWatchers <= 0 { - break - } - maxWatchers-- - ret.add(w) - } - return &ret, ret.chooseAll(curRev, compactRev) -} - -func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 { - minRev := int64(math.MaxInt64) - for w := range wg.watchers { - if w.minRev > curRev { - // after network partition, possibly choosing future revision watcher from restore operation - // with watch key "proxy-namespace__lostleader" and revision "math.MaxInt64 - 2" - // 
do not panic when such watcher had been moved from "synced" watcher during restore operation - if !w.restore { - panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev)) - } - - // mark 'restore' done, since it's chosen - w.restore = false - } - if w.minRev < compactRev { - select { - case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}: - w.compacted = true - wg.delete(w) - default: - // retry next time - } - continue - } - if minRev > w.minRev { - minRev = w.minRev - } - } - return minRev -} - -// watcherSetByKey gets the set of watchers that receive events on the given key. -func (wg *watcherGroup) watcherSetByKey(key string) watcherSet { - wkeys := wg.keyWatchers[key] - wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key)) - - // zero-copy cases - switch { - case len(wranges) == 0: - // no need to merge ranges or copy; reuse single-key set - return wkeys - case len(wranges) == 0 && len(wkeys) == 0: - return nil - case len(wranges) == 1 && len(wkeys) == 0: - return wranges[0].Val.(watcherSet) - } - - // copy case - ret := make(watcherSet) - ret.union(wg.keyWatchers[key]) - for _, item := range wranges { - ret.union(item.Val.(watcherSet)) - } - return ret -} diff --git a/server/storage/mvcc/watcher_test.go b/server/storage/mvcc/watcher_test.go deleted file mode 100644 index cbe39402224..00000000000 --- a/server/storage/mvcc/watcher_test.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "fmt" - "os" - "reflect" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/server/v3/lease" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -// TestWatcherWatchID tests that each watcher provides unique watchID, -// and the watched event attaches the correct watchID. -func TestWatcherWatchID(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})) - defer cleanup(s, b, tmpPath) - - w := s.NewWatchStream() - defer w.Close() - - idm := make(map[WatchID]struct{}) - - for i := 0; i < 10; i++ { - id, _ := w.Watch(0, []byte("foo"), nil, 0) - if _, ok := idm[id]; ok { - t.Errorf("#%d: id %d exists", i, id) - } - idm[id] = struct{}{} - - s.Put([]byte("foo"), []byte("bar"), lease.NoLease) - - resp := <-w.Chan() - if resp.WatchID != id { - t.Errorf("#%d: watch id in event = %d, want %d", i, resp.WatchID, id) - } - - if err := w.Cancel(id); err != nil { - t.Error(err) - } - } - - s.Put([]byte("foo2"), []byte("bar"), lease.NoLease) - - // unsynced watchers - for i := 10; i < 20; i++ { - id, _ := w.Watch(0, []byte("foo2"), nil, 1) - if _, ok := idm[id]; ok { - t.Errorf("#%d: id %d exists", i, id) - } - idm[id] = struct{}{} - - resp := <-w.Chan() - if resp.WatchID != id { - t.Errorf("#%d: watch id in event = %d, want %d", i, resp.WatchID, id) - } - - if err := w.Cancel(id); err != nil { - t.Error(err) - } - } -} - -func TestWatcherRequestsCustomID(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})) - defer cleanup(s, b, tmpPath) - - w := s.NewWatchStream() - defer w.Close() - - // - Request specifically ID #1 - // - Try to duplicate it, get 
an error - // - Make sure the auto-assignment skips over things we manually assigned - - tt := []struct { - givenID WatchID - expectedID WatchID - expectedErr error - }{ - {1, 1, nil}, - {1, 0, ErrWatcherDuplicateID}, - {0, 0, nil}, - {0, 2, nil}, - } - - for i, tcase := range tt { - id, err := w.Watch(tcase.givenID, []byte("foo"), nil, 0) - if tcase.expectedErr != nil || err != nil { - if err != tcase.expectedErr { - t.Errorf("expected get error %q in test case %q, got %q", tcase.expectedErr, i, err) - } - } else if tcase.expectedID != id { - t.Errorf("expected to create ID %d, got %d in test case %d", tcase.expectedID, id, i) - } - } -} - -// TestWatcherWatchPrefix tests if Watch operation correctly watches -// and returns events with matching prefixes. -func TestWatcherWatchPrefix(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})) - defer cleanup(s, b, tmpPath) - - w := s.NewWatchStream() - defer w.Close() - - idm := make(map[WatchID]struct{}) - - val := []byte("bar") - keyWatch, keyEnd, keyPut := []byte("foo"), []byte("fop"), []byte("foobar") - - for i := 0; i < 10; i++ { - id, _ := w.Watch(0, keyWatch, keyEnd, 0) - if _, ok := idm[id]; ok { - t.Errorf("#%d: unexpected duplicated id %x", i, id) - } - idm[id] = struct{}{} - - s.Put(keyPut, val, lease.NoLease) - - resp := <-w.Chan() - if resp.WatchID != id { - t.Errorf("#%d: watch id in event = %d, want %d", i, resp.WatchID, id) - } - - if err := w.Cancel(id); err != nil { - t.Errorf("#%d: unexpected cancel error %v", i, err) - } - - if len(resp.Events) != 1 { - t.Errorf("#%d: len(resp.Events) got = %d, want = 1", i, len(resp.Events)) - } - if len(resp.Events) == 1 { - if !bytes.Equal(resp.Events[0].Kv.Key, keyPut) { - t.Errorf("#%d: resp.Events got = %s, want = %s", i, resp.Events[0].Kv.Key, keyPut) - } - } - } - - keyWatch1, keyEnd1, keyPut1 := []byte("foo1"), []byte("foo2"), []byte("foo1bar") - 
s.Put(keyPut1, val, lease.NoLease) - - // unsynced watchers - for i := 10; i < 15; i++ { - id, _ := w.Watch(0, keyWatch1, keyEnd1, 1) - if _, ok := idm[id]; ok { - t.Errorf("#%d: id %d exists", i, id) - } - idm[id] = struct{}{} - - resp := <-w.Chan() - if resp.WatchID != id { - t.Errorf("#%d: watch id in event = %d, want %d", i, resp.WatchID, id) - } - - if err := w.Cancel(id); err != nil { - t.Error(err) - } - - if len(resp.Events) != 1 { - t.Errorf("#%d: len(resp.Events) got = %d, want = 1", i, len(resp.Events)) - } - if len(resp.Events) == 1 { - if !bytes.Equal(resp.Events[0].Kv.Key, keyPut1) { - t.Errorf("#%d: resp.Events got = %s, want = %s", i, resp.Events[0].Kv.Key, keyPut1) - } - } - } -} - -// TestWatcherWatchWrongRange ensures that watcher with wrong 'end' range -// does not create watcher, which panics when canceling in range tree. -func TestWatcherWatchWrongRange(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})) - defer cleanup(s, b, tmpPath) - - w := s.NewWatchStream() - defer w.Close() - - if _, err := w.Watch(0, []byte("foa"), []byte("foa"), 1); err != ErrEmptyWatcherRange { - t.Fatalf("key == end range given; expected ErrEmptyWatcherRange, got %+v", err) - } - if _, err := w.Watch(0, []byte("fob"), []byte("foa"), 1); err != ErrEmptyWatcherRange { - t.Fatalf("key > end range given; expected ErrEmptyWatcherRange, got %+v", err) - } - // watch request with 'WithFromKey' has empty-byte range end - if id, _ := w.Watch(0, []byte("foo"), []byte{}, 1); id != 0 { - t.Fatalf("\x00 is range given; id expected 0, got %d", id) - } -} - -func TestWatchDeleteRange(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}) - - defer func() { - b.Close() - s.Close() - os.Remove(tmpPath) - }() - - testKeyPrefix := []byte("foo") - - for i := 0; i < 3; i++ { - 
s.Put([]byte(fmt.Sprintf("%s_%d", testKeyPrefix, i)), []byte("bar"), lease.NoLease) - } - - w := s.NewWatchStream() - from, to := testKeyPrefix, []byte(fmt.Sprintf("%s_%d", testKeyPrefix, 99)) - w.Watch(0, from, to, 0) - - s.DeleteRange(from, to) - - we := []mvccpb.Event{ - {Type: mvccpb.DELETE, Kv: &mvccpb.KeyValue{Key: []byte("foo_0"), ModRevision: 5}}, - {Type: mvccpb.DELETE, Kv: &mvccpb.KeyValue{Key: []byte("foo_1"), ModRevision: 5}}, - {Type: mvccpb.DELETE, Kv: &mvccpb.KeyValue{Key: []byte("foo_2"), ModRevision: 5}}, - } - - select { - case r := <-w.Chan(): - if !reflect.DeepEqual(r.Events, we) { - t.Errorf("event = %v, want %v", r.Events, we) - } - case <-time.After(10 * time.Second): - t.Fatal("failed to receive event after 10 seconds!") - } -} - -// TestWatchStreamCancelWatcherByID ensures cancel calls the cancel func of the watcher -// with given id inside watchStream. -func TestWatchStreamCancelWatcherByID(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})) - defer cleanup(s, b, tmpPath) - - w := s.NewWatchStream() - defer w.Close() - - id, _ := w.Watch(0, []byte("foo"), nil, 0) - - tests := []struct { - cancelID WatchID - werr error - }{ - // no error should be returned when cancel the created watcher. - {id, nil}, - // not exist error should be returned when cancel again. - {id, ErrWatcherNotExist}, - // not exist error should be returned when cancel a bad id. - {id + 1, ErrWatcherNotExist}, - } - - for i, tt := range tests { - gerr := w.Cancel(tt.cancelID) - - if gerr != tt.werr { - t.Errorf("#%d: err = %v, want %v", i, gerr, tt.werr) - } - } - - if l := len(w.(*watchStream).cancels); l != 0 { - t.Errorf("cancels = %d, want 0", l) - } -} - -// TestWatcherRequestProgress ensures synced watcher can correctly -// report its correct progress. 
-func TestWatcherRequestProgress(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - - // manually create watchableStore instead of newWatchableStore - // because newWatchableStore automatically calls syncWatchers - // method to sync watchers in unsynced map. We want to keep watchers - // in unsynced to test if syncWatchers works as expected. - s := &watchableStore{ - store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}), - unsynced: newWatcherGroup(), - synced: newWatcherGroup(), - } - - defer func() { - s.store.Close() - os.Remove(tmpPath) - }() - - testKey := []byte("foo") - notTestKey := []byte("bad") - testValue := []byte("bar") - s.Put(testKey, testValue, lease.NoLease) - - w := s.NewWatchStream() - - badID := WatchID(1000) - w.RequestProgress(badID) - select { - case resp := <-w.Chan(): - t.Fatalf("unexpected %+v", resp) - default: - } - - id, _ := w.Watch(0, notTestKey, nil, 1) - w.RequestProgress(id) - select { - case resp := <-w.Chan(): - t.Fatalf("unexpected %+v", resp) - default: - } - - s.syncWatchers() - - w.RequestProgress(id) - wrs := WatchResponse{WatchID: id, Revision: 2} - select { - case resp := <-w.Chan(): - if !reflect.DeepEqual(resp, wrs) { - t.Fatalf("got %+v, expect %+v", resp, wrs) - } - case <-time.After(time.Second): - t.Fatal("failed to receive progress") - } -} - -func TestWatcherWatchWithFilter(t *testing.T) { - b, tmpPath := betesting.NewDefaultTmpBackend(t) - s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})) - defer cleanup(s, b, tmpPath) - - w := s.NewWatchStream() - defer w.Close() - - filterPut := func(e mvccpb.Event) bool { - return e.Type == mvccpb.PUT - } - - w.Watch(0, []byte("foo"), nil, 0, filterPut) - done := make(chan struct{}, 1) - - go func() { - <-w.Chan() - done <- struct{}{} - }() - - s.Put([]byte("foo"), []byte("bar"), 0) - - select { - case <-done: - t.Fatal("failed to filter put request") - case <-time.After(100 * 
time.Millisecond): - } - - s.DeleteRange([]byte("foo"), nil) - - select { - case <-done: - case <-time.After(100 * time.Millisecond): - t.Fatal("failed to receive delete request") - } -} diff --git a/server/storage/quota.go b/server/storage/quota.go deleted file mode 100644 index f24ca987cb4..00000000000 --- a/server/storage/quota.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "sync" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/storage/backend" - - humanize "github.com/dustin/go-humanize" - "go.uber.org/zap" -) - -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB -) - -// Quota represents an arbitrary quota against arbitrary requests. Each request -// costs some charge; if there is not enough remaining charge, then there are -// too few resources available within the quota to apply the request. -type Quota interface { - // Available judges whether the given request fits within the quota. - Available(req interface{}) bool - // Cost computes the charge against the quota for a given request. 
- Cost(req interface{}) int - // Remaining is the amount of charge left for the quota. - Remaining() int64 -} - -type passthroughQuota struct{} - -func (*passthroughQuota) Available(interface{}) bool { return true } -func (*passthroughQuota) Cost(interface{}) int { return 0 } -func (*passthroughQuota) Remaining() int64 { return 1 } - -type BackendQuota struct { - be backend.Backend - maxBackendBytes int64 -} - -const ( - // leaseOverhead is an estimate for the cost of storing a lease - leaseOverhead = 64 - // kvOverhead is an estimate for the cost of storing a key's Metadata - kvOverhead = 256 -) - -var ( - // only log once - quotaLogOnce sync.Once - - DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes)) - maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes)) -) - -// NewBackendQuota creates a quota layer with the given storage limit. -func NewBackendQuota(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, name string) Quota { - quotaBackendBytes.Set(float64(quotaBackendBytesCfg)) - if quotaBackendBytesCfg < 0 { - // disable quotas if negative - quotaLogOnce.Do(func() { - lg.Info( - "disabled backend quota", - zap.String("quota-name", name), - zap.Int64("quota-size-bytes", quotaBackendBytesCfg), - ) - }) - return &passthroughQuota{} - } - - if quotaBackendBytesCfg == 0 { - // use default size if no quota size given - quotaLogOnce.Do(func() { - if lg != nil { - lg.Info( - "enabled backend quota with default value", - zap.String("quota-name", name), - zap.Int64("quota-size-bytes", DefaultQuotaBytes), - zap.String("quota-size", DefaultQuotaSize), - ) - } - }) - quotaBackendBytes.Set(float64(DefaultQuotaBytes)) - return &BackendQuota{be, DefaultQuotaBytes} - } - - quotaLogOnce.Do(func() { - if quotaBackendBytesCfg > MaxQuotaBytes { - lg.Warn( - "quota exceeds the maximum value", - zap.String("quota-name", name), - zap.Int64("quota-size-bytes", quotaBackendBytesCfg), - zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))), - 
zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes), - zap.String("quota-maximum-size", maxQuotaSize), - ) - } - lg.Info( - "enabled backend quota", - zap.String("quota-name", name), - zap.Int64("quota-size-bytes", quotaBackendBytesCfg), - zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))), - ) - }) - return &BackendQuota{be, quotaBackendBytesCfg} -} - -func (b *BackendQuota) Available(v interface{}) bool { - cost := b.Cost(v) - // if there are no mutating requests, it's safe to pass through - if cost == 0 { - return true - } - // TODO: maybe optimize Backend.Size() - return b.be.Size()+int64(cost) < b.maxBackendBytes -} - -func (b *BackendQuota) Cost(v interface{}) int { - switch r := v.(type) { - case *pb.PutRequest: - return costPut(r) - case *pb.TxnRequest: - return costTxn(r) - case *pb.LeaseGrantRequest: - return leaseOverhead - default: - panic("unexpected cost") - } -} - -func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) } - -func costTxnReq(u *pb.RequestOp) int { - r := u.GetRequestPut() - if r == nil { - return 0 - } - return costPut(r) -} - -func costTxn(r *pb.TxnRequest) int { - sizeSuccess := 0 - for _, u := range r.Success { - sizeSuccess += costTxnReq(u) - } - sizeFailure := 0 - for _, u := range r.Failure { - sizeFailure += costTxnReq(u) - } - if sizeFailure > sizeSuccess { - return sizeFailure - } - return sizeSuccess -} - -func (b *BackendQuota) Remaining() int64 { - return b.maxBackendBytes - b.be.Size() -} diff --git a/server/storage/schema/actions.go b/server/storage/schema/actions.go deleted file mode 100644 index fb161560502..00000000000 --- a/server/storage/schema/actions.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "go.uber.org/zap" - - "go.etcd.io/etcd/server/v3/storage/backend" -) - -type action interface { - // unsafeDo executes the action and returns revert action, when executed - // should restore the state from before. - unsafeDo(tx backend.BatchTx) (revert action, err error) -} - -type setKeyAction struct { - Bucket backend.Bucket - FieldName []byte - FieldValue []byte -} - -func (a setKeyAction) unsafeDo(tx backend.BatchTx) (action, error) { - revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName) - tx.UnsafePut(a.Bucket, a.FieldName, a.FieldValue) - return revert, nil -} - -type deleteKeyAction struct { - Bucket backend.Bucket - FieldName []byte -} - -func (a deleteKeyAction) unsafeDo(tx backend.BatchTx) (action, error) { - revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName) - tx.UnsafeDelete(a.Bucket, a.FieldName) - return revert, nil -} - -func restoreFieldValueAction(tx backend.BatchTx, bucket backend.Bucket, fieldName []byte) action { - _, vs := tx.UnsafeRange(bucket, fieldName, nil, 1) - if len(vs) == 1 { - return &setKeyAction{ - Bucket: bucket, - FieldName: fieldName, - FieldValue: vs[0], - } - } - return &deleteKeyAction{ - Bucket: bucket, - FieldName: fieldName, - } -} - -type ActionList []action - -// unsafeExecute executes actions one by one. If one of actions returns error, -// it will revert them. 
-func (as ActionList) unsafeExecute(lg *zap.Logger, tx backend.BatchTx) error { - var revertActions = make(ActionList, 0, len(as)) - for _, a := range as { - revert, err := a.unsafeDo(tx) - - if err != nil { - revertActions.unsafeExecuteInReversedOrder(lg, tx) - return err - } - revertActions = append(revertActions, revert) - } - return nil -} - -// unsafeExecuteInReversedOrder executes actions in revered order. Will panic on -// action error. Should be used when reverting. -func (as ActionList) unsafeExecuteInReversedOrder(lg *zap.Logger, tx backend.BatchTx) { - for j := len(as) - 1; j >= 0; j-- { - _, err := as[j].unsafeDo(tx) - if err != nil { - lg.Panic("Cannot recover from revert error", zap.Error(err)) - } - } -} diff --git a/server/storage/schema/actions_test.go b/server/storage/schema/actions_test.go deleted file mode 100644 index 5a3ef28fd97..00000000000 --- a/server/storage/schema/actions_test.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func TestActionIsReversible(t *testing.T) { - tcs := []struct { - name string - action action - state map[string]string - }{ - { - name: "setKeyAction empty state", - action: setKeyAction{ - Bucket: Meta, - FieldName: []byte("/test"), - FieldValue: []byte("1"), - }, - }, - { - name: "setKeyAction with key", - action: setKeyAction{ - Bucket: Meta, - FieldName: []byte("/test"), - FieldValue: []byte("1"), - }, - state: map[string]string{"/test": "2"}, - }, - { - name: "deleteKeyAction empty state", - action: deleteKeyAction{ - Bucket: Meta, - FieldName: []byte("/test"), - }, - }, - { - name: "deleteKeyAction with key", - action: deleteKeyAction{ - Bucket: Meta, - FieldName: []byte("/test"), - }, - state: map[string]string{"/test": "2"}, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10) - defer be.Close() - tx := be.BatchTx() - if tx == nil { - t.Fatal("batch tx is nil") - } - tx.Lock() - defer tx.Unlock() - UnsafeCreateMetaBucket(tx) - putKeyValues(tx, Meta, tc.state) - - assertBucketState(t, tx, Meta, tc.state) - reverse, err := tc.action.unsafeDo(tx) - if err != nil { - t.Errorf("Failed to upgrade, err: %v", err) - } - _, err = reverse.unsafeDo(tx) - if err != nil { - t.Errorf("Failed to downgrade, err: %v", err) - } - assertBucketState(t, tx, Meta, tc.state) - }) - } -} - -func TestActionListRevert(t *testing.T) { - tcs := []struct { - name string - - actions ActionList - expectState map[string]string - expectError error - }{ - { - name: "Apply multiple actions", - actions: ActionList{ - setKeyAction{Meta, []byte("/testKey1"), []byte("testValue1")}, - setKeyAction{Meta, []byte("/testKey2"), []byte("testValue2")}, - }, - expectState: 
map[string]string{"/testKey1": "testValue1", "/testKey2": "testValue2"}, - }, - { - name: "Broken action should result in changes reverted", - actions: ActionList{ - setKeyAction{Meta, []byte("/testKey1"), []byte("testValue1")}, - brokenAction{}, - setKeyAction{Meta, []byte("/testKey2"), []byte("testValue2")}, - }, - expectState: map[string]string{}, - expectError: errBrokenAction, - }, - } - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - - be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10) - defer be.Close() - tx := be.BatchTx() - if tx == nil { - t.Fatal("batch tx is nil") - } - tx.Lock() - defer tx.Unlock() - - UnsafeCreateMetaBucket(tx) - err := tc.actions.unsafeExecute(lg, tx) - if err != tc.expectError { - t.Errorf("Unexpected error or lack thereof, expected: %v, got: %v", tc.expectError, err) - } - assertBucketState(t, tx, Meta, tc.expectState) - }) - } -} - -type brokenAction struct{} - -var errBrokenAction = fmt.Errorf("broken action error") - -func (c brokenAction) unsafeDo(tx backend.BatchTx) (action, error) { - return nil, errBrokenAction -} - -func putKeyValues(tx backend.BatchTx, bucket backend.Bucket, kvs map[string]string) { - for k, v := range kvs { - tx.UnsafePut(bucket, []byte(k), []byte(v)) - } -} - -func assertBucketState(t *testing.T, tx backend.BatchTx, bucket backend.Bucket, expect map[string]string) { - t.Helper() - got := map[string]string{} - ks, vs := tx.UnsafeRange(bucket, []byte("\x00"), []byte("\xff"), 0) - for i := 0; i < len(ks); i++ { - got[string(ks[i])] = string(vs[i]) - } - if expect == nil { - expect = map[string]string{} - } - assert.Equal(t, expect, got) -} diff --git a/server/storage/schema/alarm.go b/server/storage/schema/alarm.go deleted file mode 100644 index f1d80b27a6b..00000000000 --- a/server/storage/schema/alarm.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you 
may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/storage/backend" -) - -type alarmBackend struct { - lg *zap.Logger - be backend.Backend -} - -func NewAlarmBackend(lg *zap.Logger, be backend.Backend) *alarmBackend { - return &alarmBackend{ - lg: lg, - be: be, - } -} - -func (s *alarmBackend) CreateAlarmBucket() { - tx := s.be.BatchTx() - tx.LockOutsideApply() - defer tx.Unlock() - tx.UnsafeCreateBucket(Alarm) -} - -func (s *alarmBackend) MustPutAlarm(alarm *etcdserverpb.AlarmMember) { - tx := s.be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - s.mustUnsafePutAlarm(tx, alarm) -} - -func (s *alarmBackend) mustUnsafePutAlarm(tx backend.BatchTx, alarm *etcdserverpb.AlarmMember) { - v, err := alarm.Marshal() - if err != nil { - s.lg.Panic("failed to marshal alarm member", zap.Error(err)) - } - - tx.UnsafePut(Alarm, v, nil) -} - -func (s *alarmBackend) MustDeleteAlarm(alarm *etcdserverpb.AlarmMember) { - tx := s.be.BatchTx() - tx.LockInsideApply() - defer tx.Unlock() - s.mustUnsafeDeleteAlarm(tx, alarm) -} - -func (s *alarmBackend) mustUnsafeDeleteAlarm(tx backend.BatchTx, alarm *etcdserverpb.AlarmMember) { - v, err := alarm.Marshal() - if err != nil { - s.lg.Panic("failed to marshal alarm member", zap.Error(err)) - } - - tx.UnsafeDelete(Alarm, v) -} - -func (s *alarmBackend) GetAllAlarms() ([]*etcdserverpb.AlarmMember, error) { - tx := s.be.ReadTx() - tx.Lock() - defer tx.Unlock() - return s.unsafeGetAllAlarms(tx) 
-} - -func (s *alarmBackend) unsafeGetAllAlarms(tx backend.ReadTx) ([]*etcdserverpb.AlarmMember, error) { - var ms []*etcdserverpb.AlarmMember - err := tx.UnsafeForEach(Alarm, func(k, v []byte) error { - var m etcdserverpb.AlarmMember - if err := m.Unmarshal(k); err != nil { - return err - } - ms = append(ms, &m) - return nil - }) - return ms, err -} - -func (s alarmBackend) ForceCommit() { - s.be.ForceCommit() -} diff --git a/server/storage/schema/auth.go b/server/storage/schema/auth.go deleted file mode 100644 index 2375f066654..00000000000 --- a/server/storage/schema/auth.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "bytes" - "encoding/binary" - - "go.uber.org/zap" - - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/storage/backend" -) - -const ( - revBytesLen = 8 -) - -var ( - authEnabled = []byte{1} - authDisabled = []byte{0} -) - -type authBackend struct { - be backend.Backend - lg *zap.Logger -} - -var _ auth.AuthBackend = (*authBackend)(nil) - -func NewAuthBackend(lg *zap.Logger, be backend.Backend) *authBackend { - return &authBackend{ - be: be, - lg: lg, - } -} - -func (abe *authBackend) CreateAuthBuckets() { - tx := abe.be.BatchTx() - tx.LockOutsideApply() - defer tx.Unlock() - tx.UnsafeCreateBucket(Auth) - tx.UnsafeCreateBucket(AuthUsers) - tx.UnsafeCreateBucket(AuthRoles) -} - -func (abe *authBackend) ForceCommit() { - abe.be.ForceCommit() -} - -func (abe *authBackend) ReadTx() auth.AuthReadTx { - return &authReadTx{tx: abe.be.ReadTx(), lg: abe.lg} -} - -func (abe *authBackend) BatchTx() auth.AuthBatchTx { - return &authBatchTx{tx: abe.be.BatchTx(), lg: abe.lg} -} - -type authReadTx struct { - tx backend.ReadTx - lg *zap.Logger -} - -type authBatchTx struct { - tx backend.BatchTx - lg *zap.Logger -} - -var _ auth.AuthReadTx = (*authReadTx)(nil) -var _ auth.AuthBatchTx = (*authBatchTx)(nil) - -func (atx *authBatchTx) UnsafeSaveAuthEnabled(enabled bool) { - if enabled { - atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authEnabled) - } else { - atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authDisabled) - } -} - -func (atx *authBatchTx) UnsafeSaveAuthRevision(rev uint64) { - revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, rev) - atx.tx.UnsafePut(Auth, AuthRevisionKeyName, revBytes) -} - -func (atx *authBatchTx) UnsafeReadAuthEnabled() bool { - arx := &authReadTx{tx: atx.tx, lg: atx.lg} - return arx.UnsafeReadAuthEnabled() -} - -func (atx *authBatchTx) UnsafeReadAuthRevision() uint64 { - arx := &authReadTx{tx: atx.tx, lg: atx.lg} - return arx.UnsafeReadAuthRevision() -} - -func (atx *authBatchTx) Lock() 
{ - atx.tx.LockInsideApply() -} - -func (atx *authBatchTx) Unlock() { - atx.tx.Unlock() - // Calling Commit() for defensive purpose. If the number of pending writes doesn't exceed batchLimit, - // ReadTx can miss some writes issued by its predecessor BatchTx. - atx.tx.Commit() -} - -func (atx *authReadTx) UnsafeReadAuthEnabled() bool { - _, vs := atx.tx.UnsafeRange(Auth, AuthEnabledKeyName, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - return true - } - } - return false -} - -func (atx *authReadTx) UnsafeReadAuthRevision() uint64 { - _, vs := atx.tx.UnsafeRange(Auth, AuthRevisionKeyName, nil, 0) - if len(vs) != 1 { - // this can happen in the initialization phase - return 0 - } - return binary.BigEndian.Uint64(vs[0]) -} - -func (atx *authReadTx) Lock() { - atx.tx.RLock() -} - -func (atx *authReadTx) Unlock() { - atx.tx.RUnlock() -} diff --git a/server/storage/schema/auth_roles.go b/server/storage/schema/auth_roles.go deleted file mode 100644 index e0fea6d4f3f..00000000000 --- a/server/storage/schema/auth_roles.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/server/v3/storage/backend" -) - -func UnsafeCreateAuthRolesBucket(tx backend.BatchTx) { - tx.UnsafeCreateBucket(AuthRoles) -} - -func (abe *authBackend) GetRole(roleName string) *authpb.Role { - tx := abe.BatchTx() - tx.Lock() - defer tx.Unlock() - return tx.UnsafeGetRole(roleName) -} - -func (atx *authBatchTx) UnsafeGetRole(roleName string) *authpb.Role { - arx := &authReadTx{tx: atx.tx, lg: atx.lg} - return arx.UnsafeGetRole(roleName) -} - -func (abe *authBackend) GetAllRoles() []*authpb.Role { - tx := abe.BatchTx() - tx.Lock() - defer tx.Unlock() - return tx.UnsafeGetAllRoles() -} - -func (atx *authBatchTx) UnsafeGetAllRoles() []*authpb.Role { - arx := &authReadTx{tx: atx.tx, lg: atx.lg} - return arx.UnsafeGetAllRoles() -} - -func (atx *authBatchTx) UnsafePutRole(role *authpb.Role) { - b, err := role.Marshal() - if err != nil { - atx.lg.Panic( - "failed to marshal 'authpb.Role'", - zap.String("role-name", string(role.Name)), - zap.Error(err), - ) - } - - atx.tx.UnsafePut(AuthRoles, role.Name, b) -} - -func (atx *authBatchTx) UnsafeDeleteRole(rolename string) { - atx.tx.UnsafeDelete(AuthRoles, []byte(rolename)) -} - -func (atx *authReadTx) UnsafeGetRole(roleName string) *authpb.Role { - _, vs := atx.tx.UnsafeRange(AuthRoles, []byte(roleName), nil, 0) - if len(vs) == 0 { - return nil - } - - role := &authpb.Role{} - err := role.Unmarshal(vs[0]) - if err != nil { - atx.lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) - } - return role -} - -func (atx *authReadTx) UnsafeGetAllRoles() []*authpb.Role { - _, vs := atx.tx.UnsafeRange(AuthRoles, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - roles := make([]*authpb.Role, len(vs)) - for i := range vs { - role := &authpb.Role{} - err := role.Unmarshal(vs[i]) - if err != nil { - atx.lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) - } - roles[i] = role - } - return roles 
-} diff --git a/server/storage/schema/auth_roles_test.go b/server/storage/schema/auth_roles_test.go deleted file mode 100644 index 31c3ff60500..00000000000 --- a/server/storage/schema/auth_roles_test.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func TestGetAllRoles(t *testing.T) { - tcs := []struct { - name string - setup func(tx auth.AuthBatchTx) - want []*authpb.Role - }{ - { - name: "Empty by default", - setup: func(tx auth.AuthBatchTx) {}, - want: nil, - }, - { - name: "Returns data put before", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("readKey"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READ, - Key: []byte("key"), - RangeEnd: []byte("end"), - }, - }, - }) - }, - want: []*authpb.Role{ - { - Name: []byte("readKey"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READ, - Key: []byte("key"), - RangeEnd: []byte("end"), - }, - }, - }, - }, - }, - { - name: "Skips deleted", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role1"), - }) - tx.UnsafePutRole(&authpb.Role{ - 
Name: []byte("role2"), - }) - tx.UnsafeDeleteRole("role1") - }, - want: []*authpb.Role{{Name: []byte("role2")}}, - }, - { - name: "Returns data overriden by put", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role1"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READ, - }, - }, - }) - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role2"), - }) - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role1"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READWRITE, - }, - }, - }) - }, - want: []*authpb.Role{ - {Name: []byte("role1"), KeyPermission: []*authpb.Permission{{PermType: authpb.READWRITE}}}, - {Name: []byte("role2")}, - }, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - abe := NewAuthBackend(lg, be) - abe.CreateAuthBuckets() - - tx := abe.BatchTx() - tx.Lock() - tc.setup(tx) - tx.Unlock() - - abe.ForceCommit() - be.Close() - - be2 := backend.NewDefaultBackend(lg, tmpPath) - defer be2.Close() - abe2 := NewAuthBackend(lg, be2) - users := abe2.GetAllRoles() - - assert.Equal(t, tc.want, users) - }) - } -} - -func TestGetRole(t *testing.T) { - tcs := []struct { - name string - setup func(tx auth.AuthBatchTx) - want *authpb.Role - }{ - { - name: "Returns nil for missing", - setup: func(tx auth.AuthBatchTx) {}, - want: nil, - }, - { - name: "Returns data put before", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role1"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READ, - Key: []byte("key"), - RangeEnd: []byte("end"), - }, - }, - }) - }, - want: &authpb.Role{ - Name: []byte("role1"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READ, - Key: []byte("key"), - RangeEnd: []byte("end"), - }, - }, - }, - }, - { - name: "Return nil for deleted", - setup: func(tx auth.AuthBatchTx) { - 
tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role1"), - }) - tx.UnsafeDeleteRole("role1") - }, - want: nil, - }, - { - name: "Returns data overriden by put", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role1"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READ, - }, - }, - }) - tx.UnsafePutRole(&authpb.Role{ - Name: []byte("role1"), - KeyPermission: []*authpb.Permission{ - { - PermType: authpb.READWRITE, - }, - }, - }) - }, - want: &authpb.Role{ - Name: []byte("role1"), - KeyPermission: []*authpb.Permission{{PermType: authpb.READWRITE}}, - }, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - abe := NewAuthBackend(lg, be) - abe.CreateAuthBuckets() - - tx := abe.BatchTx() - tx.Lock() - tc.setup(tx) - tx.Unlock() - - abe.ForceCommit() - be.Close() - - be2 := backend.NewDefaultBackend(lg, tmpPath) - defer be2.Close() - abe2 := NewAuthBackend(lg, be2) - users := abe2.GetRole("role1") - - assert.Equal(t, tc.want, users) - }) - } -} diff --git a/server/storage/schema/auth_test.go b/server/storage/schema/auth_test.go deleted file mode 100644 index 96174e50ffc..00000000000 --- a/server/storage/schema/auth_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "math" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -// TestAuthEnabled ensures that UnsafeSaveAuthEnabled&UnsafeReadAuthEnabled work well together. -func TestAuthEnabled(t *testing.T) { - tcs := []struct { - name string - skipSetting bool - setEnabled bool - wantEnabled bool - }{ - { - name: "Returns true after setting true", - setEnabled: true, - wantEnabled: true, - }, - { - name: "Returns false after setting false", - setEnabled: false, - wantEnabled: false, - }, - { - name: "Returns false by default", - skipSetting: true, - wantEnabled: false, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - abe := NewAuthBackend(lg, be) - tx := abe.BatchTx() - abe.CreateAuthBuckets() - - tx.Lock() - if !tc.skipSetting { - tx.UnsafeSaveAuthEnabled(tc.setEnabled) - } - tx.Unlock() - abe.ForceCommit() - be.Close() - - be2 := backend.NewDefaultBackend(lg, tmpPath) - defer be2.Close() - abe2 := NewAuthBackend(lg, be2) - tx = abe2.BatchTx() - tx.Lock() - defer tx.Unlock() - v := tx.UnsafeReadAuthEnabled() - - assert.Equal(t, tc.wantEnabled, v) - }) - } -} - -// TestAuthRevision ensures that UnsafeSaveAuthRevision&UnsafeReadAuthRevision work well together. 
-func TestAuthRevision(t *testing.T) { - tcs := []struct { - name string - setRevision uint64 - wantRevision uint64 - }{ - { - name: "Returns 0 by default", - wantRevision: 0, - }, - { - name: "Returns 1 after setting 1", - setRevision: 1, - wantRevision: 1, - }, - { - name: "Returns max int after setting max int", - setRevision: math.MaxUint64, - wantRevision: math.MaxUint64, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - abe := NewAuthBackend(lg, be) - abe.CreateAuthBuckets() - - if tc.setRevision != 0 { - tx := abe.BatchTx() - tx.Lock() - tx.UnsafeSaveAuthRevision(tc.setRevision) - tx.Unlock() - } - abe.ForceCommit() - be.Close() - - be2 := backend.NewDefaultBackend(lg, tmpPath) - defer be2.Close() - abe2 := NewAuthBackend(lg, be2) - tx := abe2.BatchTx() - tx.Lock() - defer tx.Unlock() - v := tx.UnsafeReadAuthRevision() - - assert.Equal(t, tc.wantRevision, v) - }) - } -} diff --git a/server/storage/schema/auth_users.go b/server/storage/schema/auth_users.go deleted file mode 100644 index 762eaf30179..00000000000 --- a/server/storage/schema/auth_users.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/authpb" -) - -func (abe *authBackend) GetUser(username string) *authpb.User { - tx := abe.BatchTx() - tx.Lock() - defer tx.Unlock() - return tx.UnsafeGetUser(username) -} - -func (atx *authBatchTx) UnsafeGetUser(username string) *authpb.User { - arx := &authReadTx{tx: atx.tx, lg: atx.lg} - return arx.UnsafeGetUser(username) -} - -func (atx *authBatchTx) UnsafeGetAllUsers() []*authpb.User { - arx := &authReadTx{tx: atx.tx, lg: atx.lg} - return arx.UnsafeGetAllUsers() -} - -func (atx *authBatchTx) UnsafePutUser(user *authpb.User) { - b, err := user.Marshal() - if err != nil { - atx.lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) - } - atx.tx.UnsafePut(AuthUsers, user.Name, b) -} - -func (atx *authBatchTx) UnsafeDeleteUser(username string) { - atx.tx.UnsafeDelete(AuthUsers, []byte(username)) -} - -func (atx *authReadTx) UnsafeGetUser(username string) *authpb.User { - _, vs := atx.tx.UnsafeRange(AuthUsers, []byte(username), nil, 0) - if len(vs) == 0 { - return nil - } - - user := &authpb.User{} - err := user.Unmarshal(vs[0]) - if err != nil { - atx.lg.Panic( - "failed to unmarshal 'authpb.User'", - zap.String("user-name", username), - zap.Error(err), - ) - } - return user -} - -func (abe *authBackend) GetAllUsers() []*authpb.User { - tx := abe.BatchTx() - tx.Lock() - defer tx.Unlock() - return tx.UnsafeGetAllUsers() -} - -func (atx *authReadTx) UnsafeGetAllUsers() []*authpb.User { - var vs [][]byte - err := atx.tx.UnsafeForEach(AuthUsers, func(k []byte, v []byte) error { - vs = append(vs, v) - return nil - }) - if err != nil { - atx.lg.Panic("failed to get users", - zap.Error(err)) - } - if len(vs) == 0 { - return nil - } - - users := make([]*authpb.User, len(vs)) - for i := range vs { - user := &authpb.User{} - err := user.Unmarshal(vs[i]) - if err != nil { - atx.lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) - } - users[i] = user - } - return users -} diff --git 
a/server/storage/schema/auth_users_test.go b/server/storage/schema/auth_users_test.go deleted file mode 100644 index ed0d0f5b801..00000000000 --- a/server/storage/schema/auth_users_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/storage/backend" - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func TestGetAllUsers(t *testing.T) { - tcs := []struct { - name string - setup func(tx auth.AuthBatchTx) - want []*authpb.User - }{ - { - name: "Empty by default", - setup: func(tx auth.AuthBatchTx) {}, - want: nil, - }, - { - name: "Returns user put before", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - Password: []byte("alicePassword"), - Roles: []string{"aliceRole1", "aliceRole2"}, - Options: &authpb.UserAddOptions{ - NoPassword: true, - }, - }) - }, - want: []*authpb.User{ - { - Name: []byte("alice"), - Password: []byte("alicePassword"), - Roles: []string{"aliceRole1", "aliceRole2"}, - Options: &authpb.UserAddOptions{ - NoPassword: true, - }, - }, - }, - }, - { - name: "Skips deleted user", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - }) - 
tx.UnsafePutUser(&authpb.User{ - Name: []byte("bob"), - }) - tx.UnsafeDeleteUser("alice") - }, - want: []*authpb.User{{Name: []byte("bob")}}, - }, - { - name: "Returns data overriden by put", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - Password: []byte("oldPassword"), - }) - tx.UnsafePutUser(&authpb.User{ - Name: []byte("bob"), - }) - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - Password: []byte("newPassword"), - }) - }, - want: []*authpb.User{ - {Name: []byte("alice"), Password: []byte("newPassword")}, - {Name: []byte("bob")}, - }, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - abe := NewAuthBackend(lg, be) - abe.CreateAuthBuckets() - - tx := abe.BatchTx() - tx.Lock() - tc.setup(tx) - tx.Unlock() - - abe.ForceCommit() - be.Close() - - be2 := backend.NewDefaultBackend(lg, tmpPath) - defer be2.Close() - abe2 := NewAuthBackend(lg, be2) - users := abe2.ReadTx().UnsafeGetAllUsers() - - assert.Equal(t, tc.want, users) - }) - } -} - -func TestGetUser(t *testing.T) { - tcs := []struct { - name string - setup func(tx auth.AuthBatchTx) - want *authpb.User - }{ - { - name: "Returns nil for missing user", - setup: func(tx auth.AuthBatchTx) {}, - want: nil, - }, - { - name: "Returns data put before", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - Password: []byte("alicePassword"), - Roles: []string{"aliceRole1", "aliceRole2"}, - Options: &authpb.UserAddOptions{ - NoPassword: true, - }, - }) - }, - want: &authpb.User{ - Name: []byte("alice"), - Password: []byte("alicePassword"), - Roles: []string{"aliceRole1", "aliceRole2"}, - Options: &authpb.UserAddOptions{ - NoPassword: true, - }, - }, - }, - { - name: "Skips deleted", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - }) - 
tx.UnsafeDeleteUser("alice") - }, - want: nil, - }, - { - name: "Returns data overriden by put", - setup: func(tx auth.AuthBatchTx) { - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - Password: []byte("oldPassword"), - }) - tx.UnsafePutUser(&authpb.User{ - Name: []byte("alice"), - Password: []byte("newPassword"), - }) - }, - want: &authpb.User{ - Name: []byte("alice"), - Password: []byte("newPassword"), - }, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - be, tmpPath := betesting.NewTmpBackend(t, time.Microsecond, 10) - abe := NewAuthBackend(lg, be) - abe.CreateAuthBuckets() - - tx := abe.BatchTx() - tx.Lock() - tc.setup(tx) - tx.Unlock() - - abe.ForceCommit() - be.Close() - - be2 := backend.NewDefaultBackend(lg, tmpPath) - defer be2.Close() - abe2 := NewAuthBackend(lg, be2) - users := abe2.GetUser("alice") - - assert.Equal(t, tc.want, users) - }) - } -} diff --git a/server/storage/schema/bucket.go b/server/storage/schema/bucket.go deleted file mode 100644 index e5eda721b53..00000000000 --- a/server/storage/schema/bucket.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "bytes" - - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/storage/backend" -) - -var ( - keyBucketName = []byte("key") - metaBucketName = []byte("meta") - leaseBucketName = []byte("lease") - alarmBucketName = []byte("alarm") - - clusterBucketName = []byte("cluster") - - membersBucketName = []byte("members") - membersRemovedBucketName = []byte("members_removed") - - authBucketName = []byte("auth") - authUsersBucketName = []byte("authUsers") - authRolesBucketName = []byte("authRoles") - - testBucketName = []byte("test") -) - -var ( - Key = backend.Bucket(bucket{id: 1, name: keyBucketName, safeRangeBucket: true}) - Meta = backend.Bucket(bucket{id: 2, name: metaBucketName, safeRangeBucket: false}) - Lease = backend.Bucket(bucket{id: 3, name: leaseBucketName, safeRangeBucket: false}) - Alarm = backend.Bucket(bucket{id: 4, name: alarmBucketName, safeRangeBucket: false}) - Cluster = backend.Bucket(bucket{id: 5, name: clusterBucketName, safeRangeBucket: false}) - - Members = backend.Bucket(bucket{id: 10, name: membersBucketName, safeRangeBucket: false}) - MembersRemoved = backend.Bucket(bucket{id: 11, name: membersRemovedBucketName, safeRangeBucket: false}) - - Auth = backend.Bucket(bucket{id: 20, name: authBucketName, safeRangeBucket: false}) - AuthUsers = backend.Bucket(bucket{id: 21, name: authUsersBucketName, safeRangeBucket: false}) - AuthRoles = backend.Bucket(bucket{id: 22, name: authRolesBucketName, safeRangeBucket: false}) - - Test = backend.Bucket(bucket{id: 100, name: testBucketName, safeRangeBucket: false}) -) - -type bucket struct { - id backend.BucketID - name []byte - safeRangeBucket bool -} - -func (b bucket) ID() backend.BucketID { return b.id } -func (b bucket) Name() []byte { return b.name } -func (b bucket) String() string { return string(b.Name()) } -func (b bucket) IsSafeRangeBucket() bool { return b.safeRangeBucket } - -var ( - // Pre v3.5 - ScheduledCompactKeyName = []byte("scheduledCompactRev") - 
FinishedCompactKeyName = []byte("finishedCompactRev") - MetaConsistentIndexKeyName = []byte("consistent_index") - AuthEnabledKeyName = []byte("authEnabled") - AuthRevisionKeyName = []byte("authRevision") - // Since v3.5 - MetaTermKeyName = []byte("term") - MetaConfStateName = []byte("confState") - ClusterClusterVersionKeyName = []byte("clusterVersion") - ClusterDowngradeKeyName = []byte("downgrade") - // Since v3.6 - MetaStorageVersionName = []byte("storageVersion") - // Before adding new meta key please update server/etcdserver/version -) - -// DefaultIgnores defines buckets & keys to ignore in hash checking. -func DefaultIgnores(bucket, key []byte) bool { - // consistent index & term might be changed due to v2 internal sync, which - // is not controllable by the user. - // storage version might change after wal snapshot and is not controller by user. - return bytes.Compare(bucket, Meta.Name()) == 0 && - (bytes.Compare(key, MetaTermKeyName) == 0 || bytes.Compare(key, MetaConsistentIndexKeyName) == 0 || bytes.Compare(key, MetaStorageVersionName) == 0) -} - -func BackendMemberKey(id types.ID) []byte { - return []byte(id.String()) -} diff --git a/server/storage/schema/changes.go b/server/storage/schema/changes.go deleted file mode 100644 index 6eb0b751209..00000000000 --- a/server/storage/schema/changes.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import "go.etcd.io/etcd/server/v3/storage/backend" - -type schemaChange interface { - upgradeAction() action - downgradeAction() action -} - -// addNewField represents adding new field when upgrading. Downgrade will remove the field. -func addNewField(bucket backend.Bucket, fieldName []byte, fieldValue []byte) schemaChange { - return simpleSchemaChange{ - upgrade: setKeyAction{ - Bucket: bucket, - FieldName: fieldName, - FieldValue: fieldValue, - }, - downgrade: deleteKeyAction{ - Bucket: bucket, - FieldName: fieldName, - }, - } -} - -type simpleSchemaChange struct { - upgrade action - downgrade action -} - -func (c simpleSchemaChange) upgradeAction() action { - return c.upgrade -} - -func (c simpleSchemaChange) downgradeAction() action { - return c.downgrade -} diff --git a/server/storage/schema/changes_test.go b/server/storage/schema/changes_test.go deleted file mode 100644 index 05b8d49cf44..00000000000 --- a/server/storage/schema/changes_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "testing" - "time" - - betesting "go.etcd.io/etcd/server/v3/storage/backend/testing" -) - -func TestUpgradeDowngrade(t *testing.T) { - tcs := []struct { - name string - change schemaChange - expectStateAfterUpgrade map[string]string - expectStateAfterDowngrade map[string]string - }{ - { - name: "addNewField empty", - change: addNewField(Meta, []byte("/test"), []byte("1")), - expectStateAfterUpgrade: map[string]string{"/test": "1"}, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - be, _ := betesting.NewTmpBackend(t, time.Microsecond, 10) - defer be.Close() - tx := be.BatchTx() - if tx == nil { - t.Fatal("batch tx is nil") - } - tx.Lock() - defer tx.Unlock() - UnsafeCreateMetaBucket(tx) - - _, err := tc.change.upgradeAction().unsafeDo(tx) - if err != nil { - t.Errorf("Failed to upgrade, err: %v", err) - } - assertBucketState(t, tx, Meta, tc.expectStateAfterUpgrade) - _, err = tc.change.downgradeAction().unsafeDo(tx) - if err != nil { - t.Errorf("Failed to downgrade, err: %v", err) - } - assertBucketState(t, tx, Meta, tc.expectStateAfterDowngrade) - }) - } -} diff --git a/server/storage/schema/cindex.go b/server/storage/schema/cindex.go deleted file mode 100644 index a2d15b2788c..00000000000 --- a/server/storage/schema/cindex.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package schema - -import ( - "encoding/binary" - "fmt" - - "go.etcd.io/etcd/client/pkg/v3/verify" - "go.etcd.io/etcd/server/v3/storage/backend" -) - -// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exist yet). -func UnsafeCreateMetaBucket(tx backend.BatchTx) { - tx.UnsafeCreateBucket(Meta) -} - -// CreateMetaBucket creates the `meta` bucket (if it does not exist yet). -func CreateMetaBucket(tx backend.BatchTx) { - tx.LockOutsideApply() - defer tx.Unlock() - tx.UnsafeCreateBucket(Meta) -} - -// UnsafeReadConsistentIndex loads consistent index & term from given transaction. -// returns 0,0 if the data are not found. -// Term is persisted since v3.5. -func UnsafeReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { - _, vs := tx.UnsafeRange(Meta, MetaConsistentIndexKeyName, nil, 0) - if len(vs) == 0 { - return 0, 0 - } - v := binary.BigEndian.Uint64(vs[0]) - _, ts := tx.UnsafeRange(Meta, MetaTermKeyName, nil, 0) - if len(ts) == 0 { - return v, 0 - } - t := binary.BigEndian.Uint64(ts[0]) - return v, t -} - -// ReadConsistentIndex loads consistent index and term from given transaction. -// returns 0 if the data are not found. -func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { - tx.RLock() - defer tx.RUnlock() - return UnsafeReadConsistentIndex(tx) -} - -func UnsafeUpdateConsistentIndexForce(tx backend.BatchTx, index uint64, term uint64) { - unsafeUpdateConsistentIndex(tx, index, term, true) -} - -func UnsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64) { - unsafeUpdateConsistentIndex(tx, index, term, false) -} - -func unsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, allowDecreasing bool) { - if index == 0 { - // Never save 0 as it means that we didn't load the real index yet. 
- return - } - bs1 := make([]byte, 8) - binary.BigEndian.PutUint64(bs1, index) - - if !allowDecreasing { - verify.Verify(func() { - previousIndex, _ := UnsafeReadConsistentIndex(tx) - if index < previousIndex { - panic(fmt.Errorf("update of consistent index not advancing: previous: %v new: %v", previousIndex, index)) - } - }) - } - - // put the index into the underlying backend - // tx has been locked in TxnBegin, so there is no need to lock it again - tx.UnsafePut(Meta, MetaConsistentIndexKeyName, bs1) - if term > 0 { - bs2 := make([]byte, 8) - binary.BigEndian.PutUint64(bs2, term) - tx.UnsafePut(Meta, MetaTermKeyName, bs2) - } -} diff --git a/server/storage/schema/confstate.go b/server/storage/schema/confstate.go deleted file mode 100644 index ead2e527d68..00000000000 --- a/server/storage/schema/confstate.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - "encoding/json" - "log" - - "go.uber.org/zap" - - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/raft/v3/raftpb" -) - -// MustUnsafeSaveConfStateToBackend persists confState using given transaction (tx). -// confState in backend is persisted since etcd v3.5. 
-func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.BatchTx, confState *raftpb.ConfState) { - confStateBytes, err := json.Marshal(confState) - if err != nil { - lg.Panic("Cannot marshal raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err)) - } - - tx.UnsafePut(Meta, MetaConfStateName, confStateBytes) -} - -// UnsafeConfStateFromBackend retrieves ConfState from the backend. -// Returns nil if confState in backend is not persisted (e.g. backend writen by depending on the deprecation stage, warns or report an error -// if the v2store contains custom content. -func AssertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error { - metaOnly, err := membership.IsMetaStoreOnly(st) - if err != nil { - return err - } - if metaOnly { - return nil - } - if deprecationStage.IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) { - return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage) - } - lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. Please remove the content.") - return nil -} - -// CreateConfigChangeEnts creates a series of Raft entries (i.e. -// EntryConfChange) to remove the set of given IDs from the cluster. The ID -// `self` is _not_ removed, even if present in the set. -// If `self` is not inside the given ids, it creates a Raft entry to add a -// default member with the given `self`. -func CreateConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry { - found := false - for _, id := range ids { - if id == self { - found = true - } - } - - var ents []raftpb.Entry - next := index + 1 - - // NB: always add self first, then remove other nodes. Raft will panic if the - // set of voters ever becomes empty. 
- if !found { - m := membership.Member{ - ID: types.ID(self), - RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, - } - ctx, err := json.Marshal(m) - if err != nil { - lg.Panic("failed to marshal member", zap.Error(err)) - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: self, - Context: ctx, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - - for _, id := range ids { - if id == self { - continue - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - - return ents -} - -// GetEffectiveNodeIDsFromWalEntries returns an ordered set of IDs included in the given snapshot and -// the entries. The given snapshot/entries can contain three kinds of -// ID-related entry: -// - ConfChangeAddNode, in which case the contained ID will Be added into the set. -// - ConfChangeRemoveNode, in which case the contained ID will Be removed from the set. -// - ConfChangeAddLearnerNode, in which the contained ID will Be added into the set. 
-func GetEffectiveNodeIDsFromWalEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { - ids := make(map[uint64]bool) - if snap != nil { - for _, id := range snap.Metadata.ConfState.Voters { - ids[id] = true - } - } - for _, e := range ents { - if e.Type != raftpb.EntryConfChange { - continue - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - switch cc.Type { - case raftpb.ConfChangeAddLearnerNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeAddNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeRemoveNode: - delete(ids, cc.NodeID) - case raftpb.ConfChangeUpdateNode: - // do nothing - default: - lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String())) - } - } - sids := make(types.Uint64Slice, 0, len(ids)) - for id := range ids { - sids = append(sids, id) - } - sort.Sort(sids) - return sids -} diff --git a/server/storage/wal/decoder.go b/server/storage/wal/decoder.go deleted file mode 100644 index 0f47b72fd3e..00000000000 --- a/server/storage/wal/decoder.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package wal - -import ( - "encoding/binary" - "fmt" - "hash" - "io" - "sync" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/crc" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" -) - -const minSectorSize = 512 - -// frameSizeBytes is frame size in bytes, including record size and padding size. -const frameSizeBytes = 8 - -type Decoder interface { - Decode(rec *walpb.Record) error - LastOffset() int64 - LastCRC() uint32 - UpdateCRC(prevCrc uint32) -} - -type decoder struct { - mu sync.Mutex - brs []*fileutil.FileBufReader - - // lastValidOff file offset following the last valid decoded record - lastValidOff int64 - crc hash.Hash32 - - // continueOnCrcError - causes the decoder to continue working even in case of crc mismatch. - // This is a desired mode for tools performing inspection of the corrupted WAL logs. - // See comments on 'Decode' method for semantic. - continueOnCrcError bool -} - -func NewDecoderAdvanced(continueOnCrcError bool, r ...fileutil.FileReader) Decoder { - readers := make([]*fileutil.FileBufReader, len(r)) - for i := range r { - readers[i] = fileutil.NewFileBufReader(r[i]) - } - return &decoder{ - brs: readers, - crc: crc.New(0, crcTable), - continueOnCrcError: continueOnCrcError, - } -} - -func NewDecoder(r ...fileutil.FileReader) Decoder { - return NewDecoderAdvanced(false, r...) -} - -// Decode reads the next record out of the file. -// In the success path, fills 'rec' and returns nil. -// When it fails, it returns err and usually resets 'rec' to the defaults. -// When continueOnCrcError is set, the method may return ErrUnexpectedEOF or ErrCRCMismatch, but preserve the read -// (potentially corrupted) record content. 
-func (d *decoder) Decode(rec *walpb.Record) error { - rec.Reset() - d.mu.Lock() - defer d.mu.Unlock() - return d.decodeRecord(rec) -} - -func (d *decoder) decodeRecord(rec *walpb.Record) error { - if len(d.brs) == 0 { - return io.EOF - } - - fileBufReader := d.brs[0] - l, err := readInt64(fileBufReader) - if err == io.EOF || (err == nil && l == 0) { - // hit end of file or preallocated space - d.brs = d.brs[1:] - if len(d.brs) == 0 { - return io.EOF - } - d.lastValidOff = 0 - return d.decodeRecord(rec) - } - if err != nil { - return err - } - - recBytes, padBytes := decodeFrameSize(l) - // The length of current WAL entry must be less than the remaining file size. - maxEntryLimit := fileBufReader.FileInfo().Size() - d.lastValidOff - padBytes - if recBytes > maxEntryLimit { - return fmt.Errorf("%w: [wal] max entry size limit exceeded when reading %q, recBytes: %d, fileSize(%d) - offset(%d) - padBytes(%d) = entryLimit(%d)", - io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), recBytes, fileBufReader.FileInfo().Size(), d.lastValidOff, padBytes, maxEntryLimit) - } - - data := make([]byte, recBytes+padBytes) - if _, err = io.ReadFull(fileBufReader, data); err != nil { - // ReadFull returns io.EOF only if no bytes were read - // the decoder should treat this as an ErrUnexpectedEOF instead. 
- if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - if err := rec.Unmarshal(data[:recBytes]); err != nil { - if d.isTornEntry(data) { - return io.ErrUnexpectedEOF - } - return err - } - - // skip crc checking if the record type is CrcType - if rec.Type != CrcType { - _, err := d.crc.Write(rec.Data) - if err != nil { - return err - } - if err := rec.Validate(d.crc.Sum32()); err != nil { - if !d.continueOnCrcError { - rec.Reset() - } else { - // If we continue, we want to update lastValidOff, such that following errors are consistent - defer func() { d.lastValidOff += frameSizeBytes + recBytes + padBytes }() - } - - if d.isTornEntry(data) { - return fmt.Errorf("%w: in file '%s' at position: %d", io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), d.lastValidOff) - } - return fmt.Errorf("%w: in file '%s' at position: %d", err, fileBufReader.FileInfo().Name(), d.lastValidOff) - } - } - // record decoded as valid; point last valid offset to end of record - d.lastValidOff += frameSizeBytes + recBytes + padBytes - return nil -} - -func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) { - // the record size is stored in the lower 56 bits of the 64-bit length - recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56)) - // non-zero padding is indicated by set MSb / a negative length - if lenField < 0 { - // padding is stored in lower 3 bits of length MSB - padBytes = int64((uint64(lenField) >> 56) & 0x7) - } - return recBytes, padBytes -} - -// isTornEntry determines whether the last entry of the WAL was partially written -// and corrupted because of a torn write. 
-func (d *decoder) isTornEntry(data []byte) bool { - if len(d.brs) != 1 { - return false - } - - fileOff := d.lastValidOff + frameSizeBytes - curOff := 0 - var chunks [][]byte - // split data on sector boundaries - for curOff < len(data) { - chunkLen := int(minSectorSize - (fileOff % minSectorSize)) - if chunkLen > len(data)-curOff { - chunkLen = len(data) - curOff - } - chunks = append(chunks, data[curOff:curOff+chunkLen]) - fileOff += int64(chunkLen) - curOff += chunkLen - } - - // if any data for a sector chunk is all 0, it's a torn write - for _, sect := range chunks { - isZero := true - for _, v := range sect { - if v != 0 { - isZero = false - break - } - } - if isZero { - return true - } - } - return false -} - -func (d *decoder) UpdateCRC(prevCrc uint32) { - d.crc = crc.New(prevCrc, crcTable) -} - -func (d *decoder) LastCRC() uint32 { - return d.crc.Sum32() -} - -func (d *decoder) LastOffset() int64 { return d.lastValidOff } - -func MustUnmarshalEntry(d []byte) raftpb.Entry { - var e raftpb.Entry - pbutil.MustUnmarshal(&e, d) - return e -} - -func MustUnmarshalState(d []byte) raftpb.HardState { - var s raftpb.HardState - pbutil.MustUnmarshal(&s, d) - return s -} - -func readInt64(r io.Reader) (int64, error) { - var n int64 - err := binary.Read(r, binary.LittleEndian, &n) - return n, err -} diff --git a/server/storage/wal/doc.go b/server/storage/wal/doc.go deleted file mode 100644 index 0f7ef8527be..00000000000 --- a/server/storage/wal/doc.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package wal provides an implementation of write ahead log that is used by -etcd. - -A WAL is created at a particular directory and is made up of a number of -segmented WAL files. Inside each file the raft state and entries are appended -to it with the Save method: - - metadata := []byte{} - w, err := wal.Create(zap.NewExample(), "/var/lib/etcd", metadata) - ... - err := w.Save(s, ents) - -After saving a raft snapshot to disk, SaveSnapshot method should be called to -record it. So WAL can match with the saved snapshot when restarting. - - err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2}) - -When a user has finished using a WAL it must be closed: - - w.Close() - -Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record -protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a -64-bit packed structure holding the length of the remaining logical record data in its lower -56 bits and its physical padding in the first three bits of the most significant byte. Each -record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32 -value of all record protobufs preceding the current record. - -WAL files are placed inside the directory in the following format: -$seq-$index.wal - -The first WAL file to be created will be 0000000000000000-0000000000000000.wal -indicating an initial sequence of 0 and an initial raft index of 0. The first -entry written to WAL MUST have raft index 0. - -WAL will cut its current tail wal file if its size exceeds 64 MB. This will increment an internal -sequence number and cause a new file to be created. If the last raft index saved -was 0x20 and this is the first time cut has been called on this WAL then the sequence will -increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal. 
-If a second cut issues 0x10 entries with incremental index later, then the file will be called: -0000000000000002-0000000000000031.wal. - -At a later time a WAL can be opened at a particular snapshot. If there is no -snapshot, an empty snapshot should be passed in. - - w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2}) - ... - -The snapshot must have been written to the WAL. - -Additional items cannot be Saved to this WAL until all the items from the given -snapshot to the end of the WAL are read first: - - metadata, state, ents, err := w.ReadAll() - -This will give you the metadata, the last raft.State and the slice of -raft.Entry items in the log. -*/ -package wal diff --git a/server/storage/wal/encoder.go b/server/storage/wal/encoder.go deleted file mode 100644 index 6d1f97ad647..00000000000 --- a/server/storage/wal/encoder.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "encoding/binary" - "hash" - "io" - "os" - "sync" - - "go.etcd.io/etcd/pkg/v3/crc" - "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" -) - -// walPageBytes is the alignment for flushing records to the backing Writer. -// It should be a multiple of the minimum sector size so that WAL can safely -// distinguish between torn writes and ordinary data corruption. 
-const walPageBytes = 8 * minSectorSize - -type encoder struct { - mu sync.Mutex - bw *ioutil.PageWriter - - crc hash.Hash32 - buf []byte - uint64buf []byte -} - -func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { - return &encoder{ - bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset), - crc: crc.New(prevCrc, crcTable), - // 1MB buffer - buf: make([]byte, 1024*1024), - uint64buf: make([]byte, 8), - } -} - -// newFileEncoder creates a new encoder with current file offset for the page writer. -func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { - offset, err := f.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - return newEncoder(f, prevCrc, int(offset)), nil -} - -func (e *encoder) encode(rec *walpb.Record) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.crc.Write(rec.Data) - rec.Crc = e.crc.Sum32() - var ( - data []byte - err error - n int - ) - - if rec.Size() > len(e.buf) { - data, err = rec.Marshal() - if err != nil { - return err - } - } else { - n, err = rec.MarshalTo(e.buf) - if err != nil { - return err - } - data = e.buf[:n] - } - - lenField, padBytes := encodeFrameSize(len(data)) - if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil { - return err - } - - if padBytes != 0 { - data = append(data, make([]byte, padBytes)...) 
- } - n, err = e.bw.Write(data) - walWriteBytes.Add(float64(n)) - return err -} - -func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) { - lenField = uint64(dataBytes) - // force 8 byte alignment so length never gets a torn write - padBytes = (8 - (dataBytes % 8)) % 8 - if padBytes != 0 { - lenField |= uint64(0x80|padBytes) << 56 - } - return lenField, padBytes -} - -func (e *encoder) flush() error { - e.mu.Lock() - defer e.mu.Unlock() - return e.bw.Flush() -} - -func writeUint64(w io.Writer, n uint64, buf []byte) error { - // http://golang.org/src/encoding/binary/binary.go - binary.LittleEndian.PutUint64(buf, n) - nv, err := w.Write(buf) - walWriteBytes.Add(float64(nv)) - return err -} diff --git a/server/storage/wal/file_pipeline.go b/server/storage/wal/file_pipeline.go deleted file mode 100644 index 9d3a78104f2..00000000000 --- a/server/storage/wal/file_pipeline.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package wal - -import ( - "fmt" - "os" - "path/filepath" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - - "go.uber.org/zap" -) - -// filePipeline pipelines allocating disk space -type filePipeline struct { - lg *zap.Logger - - // dir to put files - dir string - // size of files to make, in bytes - size int64 - // count number of files generated - count int - - filec chan *fileutil.LockedFile - errc chan error - donec chan struct{} -} - -func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline { - if lg == nil { - lg = zap.NewNop() - } - fp := &filePipeline{ - lg: lg, - dir: dir, - size: fileSize, - filec: make(chan *fileutil.LockedFile), - errc: make(chan error, 1), - donec: make(chan struct{}), - } - go fp.run() - return fp -} - -// Open returns a fresh file for writing. Rename the file before calling -// Open again or there will be file collisions. -// it will 'block' if the tmp file lock is already taken. -func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) { - select { - case f = <-fp.filec: - case err = <-fp.errc: - } - return f, err -} - -func (fp *filePipeline) Close() error { - close(fp.donec) - return <-fp.errc -} - -func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) { - // count % 2 so this file isn't the same as the one last published - fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2)) - if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil { - return nil, err - } - if err = fileutil.Preallocate(f.File, fp.size, true); err != nil { - fp.lg.Error("failed to preallocate space when creating a new WAL", zap.Int64("size", fp.size), zap.Error(err)) - f.Close() - return nil, err - } - fp.count++ - return f, nil -} - -func (fp *filePipeline) run() { - defer close(fp.errc) - for { - f, err := fp.alloc() - if err != nil { - fp.errc <- err - return - } - select { - case fp.filec <- f: - case <-fp.donec: - os.Remove(f.Name()) - f.Close() - return - 
} - } -} diff --git a/server/storage/wal/file_pipeline_test.go b/server/storage/wal/file_pipeline_test.go deleted file mode 100644 index bb59270bf3a..00000000000 --- a/server/storage/wal/file_pipeline_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "math" - "testing" - - "go.uber.org/zap/zaptest" -) - -func TestFilePipeline(t *testing.T) { - tdir := t.TempDir() - - fp := newFilePipeline(zaptest.NewLogger(t), tdir, SegmentSizeBytes) - defer fp.Close() - - f, ferr := fp.Open() - if ferr != nil { - t.Fatal(ferr) - } - f.Close() -} - -func TestFilePipelineFailPreallocate(t *testing.T) { - tdir := t.TempDir() - - fp := newFilePipeline(zaptest.NewLogger(t), tdir, math.MaxInt64) - defer fp.Close() - - f, ferr := fp.Open() - if f != nil || ferr == nil { // no space left on device - t.Fatal("expected error on invalid pre-allocate size, but no error") - } -} diff --git a/server/storage/wal/metrics.go b/server/storage/wal/metrics.go deleted file mode 100644 index 814d654cdd3..00000000000 --- a/server/storage/wal/metrics.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import "github.com/prometheus/client_golang/prometheus" - -var ( - walFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "wal_fsync_duration_seconds", - Help: "The latency distributions of fsync called by WAL.", - - // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 - // highest bucket start of 0.001 sec * 2^13 == 8.192 sec - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "wal_write_bytes_total", - Help: "Total number of bytes written in WAL.", - }) -) - -func init() { - prometheus.MustRegister(walFsyncSec) - prometheus.MustRegister(walWriteBytes) -} diff --git a/server/storage/wal/record_test.go b/server/storage/wal/record_test.go deleted file mode 100644 index 85ceebed9c1..00000000000 --- a/server/storage/wal/record_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "bytes" - "errors" - "hash/crc32" - "io" - "os" - "reflect" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" -) - -var ( - infoData = []byte("\b\xef\xfd\x02") - infoRecord = append([]byte("\x0e\x00\x00\x00\x00\x00\x00\x00\b\x01\x10\x99\xb5\xe4\xd0\x03\x1a\x04"), infoData...) -) - -func TestReadRecord(t *testing.T) { - badInfoRecord := make([]byte, len(infoRecord)) - copy(badInfoRecord, infoRecord) - badInfoRecord[len(badInfoRecord)-1] = 'a' - - tests := []struct { - data []byte - wr *walpb.Record - we error - }{ - {infoRecord, &walpb.Record{Type: 1, Crc: crc32.Checksum(infoData, crcTable), Data: infoData}, nil}, - {[]byte(""), &walpb.Record{}, io.EOF}, - {infoRecord[:14], &walpb.Record{}, io.ErrUnexpectedEOF}, - {infoRecord[:len(infoRecord)-len(infoData)], &walpb.Record{}, io.ErrUnexpectedEOF}, - {infoRecord[:len(infoRecord)-8], &walpb.Record{}, io.ErrUnexpectedEOF}, - {badInfoRecord, &walpb.Record{}, walpb.ErrCRCMismatch}, - } - - rec := &walpb.Record{} - for i, tt := range tests { - buf := bytes.NewBuffer(tt.data) - f, err := createFileWithData(t, buf) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - decoder := NewDecoder(fileutil.NewFileReader(f)) - e := decoder.Decode(rec) - if !reflect.DeepEqual(rec, tt.wr) { - t.Errorf("#%d: block = %v, want %v", i, rec, tt.wr) - } - if !errors.Is(e, tt.we) { - t.Errorf("#%d: err = %v, want %v", i, e, tt.we) - } - rec = &walpb.Record{} - } -} - -func TestWriteRecord(t *testing.T) { - b := &walpb.Record{} - typ := int64(0xABCD) - d := []byte("Hello world!") - buf := new(bytes.Buffer) - e := newEncoder(buf, 0, 0) - e.encode(&walpb.Record{Type: typ, Data: d}) - e.flush() - f, err := createFileWithData(t, buf) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - decoder := NewDecoder(fileutil.NewFileReader(f)) - err 
= decoder.Decode(b) - if err != nil { - t.Errorf("err = %v, want nil", err) - } - if b.Type != typ { - t.Errorf("type = %d, want %d", b.Type, typ) - } - if !reflect.DeepEqual(b.Data, d) { - t.Errorf("data = %v, want %v", b.Data, d) - } -} - -func createFileWithData(t *testing.T, bf *bytes.Buffer) (*os.File, error) { - f, err := os.CreateTemp(t.TempDir(), "wal") - if err != nil { - return nil, err - } - if _, err := f.Write(bf.Bytes()); err != nil { - return nil, err - } - f.Seek(0, 0) - return f, nil -} diff --git a/server/storage/wal/repair_test.go b/server/storage/wal/repair_test.go deleted file mode 100644 index b1fd9d25d39..00000000000 --- a/server/storage/wal/repair_test.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package wal - -import ( - "fmt" - "io" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" -) - -type corruptFunc func(string, int64) error - -// TestRepairTruncate ensures a truncated file can be repaired -func TestRepairTruncate(t *testing.T) { - corruptf := func(p string, offset int64) error { - f, err := openLast(zaptest.NewLogger(t), p) - if err != nil { - return err - } - defer f.Close() - return f.Truncate(offset - 4) - } - - testRepair(t, makeEnts(10), corruptf, 9) -} - -func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expectedEnts int) { - lg := zaptest.NewLogger(t) - p := t.TempDir() - - // create WAL - w, err := Create(lg, p, nil) - defer func() { - // The Close might fail. - _ = w.Close() - }() - require.NoError(t, err) - - for _, es := range ents { - require.NoError(t, w.Save(raftpb.HardState{}, es)) - } - - offset, err := w.tail().Seek(0, io.SeekCurrent) - require.NoError(t, err) - require.NoError(t, w.Close()) - - require.NoError(t, corrupt(p, offset)) - - // verify we broke the wal - w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - require.NoError(t, err) - - _, _, _, err = w.ReadAll() - require.ErrorIs(t, err, io.ErrUnexpectedEOF) - require.NoError(t, w.Close()) - - // repair the wal - require.True(t, Repair(lg, p), "'Repair' returned 'false', want 'true'") - - // read it back - w, err = Open(lg, p, walpb.Snapshot{}) - require.NoError(t, err) - - _, _, walEnts, err := w.ReadAll() - require.NoError(t, err) - assert.Len(t, walEnts, expectedEnts) - - // write some more entries to repaired log - for i := 1; i <= 10; i++ { - es := []raftpb.Entry{{Index: uint64(expectedEnts + i)}} - require.NoError(t, w.Save(raftpb.HardState{}, es)) - } - require.NoError(t, w.Close()) - - // read back entries following repair, ensure it's all there - w, err = Open(lg, p, 
walpb.Snapshot{}) - require.NoError(t, err) - _, _, walEnts, err = w.ReadAll() - require.NoError(t, err) - assert.Len(t, walEnts, expectedEnts+10) -} - -func makeEnts(ents int) (ret [][]raftpb.Entry) { - for i := 1; i <= ents; i++ { - ret = append(ret, []raftpb.Entry{{Index: uint64(i)}}) - } - return ret -} - -// TestRepairWriteTearLast repairs the WAL in case the last record is a torn write -// that straddled two sectors. -func TestRepairWriteTearLast(t *testing.T) { - corruptf := func(p string, offset int64) error { - f, err := openLast(zaptest.NewLogger(t), p) - if err != nil { - return err - } - defer f.Close() - // 512 bytes perfectly aligns the last record, so use 1024 - if offset < 1024 { - return fmt.Errorf("got offset %d, expected >1024", offset) - } - if terr := f.Truncate(1024); terr != nil { - return terr - } - return f.Truncate(offset) - } - testRepair(t, makeEnts(50), corruptf, 40) -} - -// TestRepairWriteTearMiddle repairs the WAL when there is write tearing -// in the middle of a record. 
-func TestRepairWriteTearMiddle(t *testing.T) { - corruptf := func(p string, offset int64) error { - f, err := openLast(zaptest.NewLogger(t), p) - if err != nil { - return err - } - defer f.Close() - // corrupt middle of 2nd record - _, werr := f.WriteAt(make([]byte, 512), 4096+512) - return werr - } - ents := makeEnts(5) - // 4096 bytes of data so a middle sector is easy to corrupt - dat := make([]byte, 4096) - for i := range dat { - dat[i] = byte(i) - } - for i := range ents { - ents[i][0].Data = dat - } - testRepair(t, ents, corruptf, 1) -} - -func TestRepairFailDeleteDir(t *testing.T) { - p := t.TempDir() - - w, err := Create(zaptest.NewLogger(t), p, nil) - if err != nil { - t.Fatal(err) - } - - oldSegmentSizeBytes := SegmentSizeBytes - SegmentSizeBytes = 64 - defer func() { - SegmentSizeBytes = oldSegmentSizeBytes - }() - for _, es := range makeEnts(50) { - if err = w.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - } - - _, serr := w.tail().Seek(0, io.SeekCurrent) - if serr != nil { - t.Fatal(serr) - } - w.Close() - - f, err := openLast(zaptest.NewLogger(t), p) - if err != nil { - t.Fatal(err) - } - if terr := f.Truncate(20); terr != nil { - t.Fatal(err) - } - f.Close() - - w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - _, _, _, err = w.ReadAll() - if err != io.ErrUnexpectedEOF { - t.Fatalf("err = %v, want error %v", err, io.ErrUnexpectedEOF) - } - w.Close() - - os.RemoveAll(p) - if Repair(zaptest.NewLogger(t), p) { - t.Fatal("expect 'Repair' fail on unexpected directory deletion") - } -} diff --git a/server/storage/wal/testing/waltesting.go b/server/storage/wal/testing/waltesting.go deleted file mode 100644 index ba093030363..00000000000 --- a/server/storage/wal/testing/waltesting.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testing - -import ( - "os" - "path/filepath" - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" -) - -func NewTmpWAL(t testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL, string) { - t.Helper() - dir, err := os.MkdirTemp(t.TempDir(), "etcd_wal_test") - if err != nil { - panic(err) - } - tmpPath := filepath.Join(dir, "wal") - lg := zaptest.NewLogger(t) - w, err := wal.Create(lg, tmpPath, nil) - if err != nil { - t.Fatalf("Failed to create WAL: %v", err) - } - err = w.Close() - if err != nil { - t.Fatalf("Failed to close WAL: %v", err) - } - if len(reqs) != 0 { - w, err = wal.Open(lg, tmpPath, walpb.Snapshot{}) - if err != nil { - t.Fatalf("Failed to open WAL: %v", err) - } - _, state, _, err := w.ReadAll() - if err != nil { - t.Fatalf("Failed to read WAL: %v", err) - } - var entries []raftpb.Entry - for _, req := range reqs { - entries = append(entries, raftpb.Entry{ - Term: 1, - Index: 1, - Type: raftpb.EntryNormal, - Data: pbutil.MustMarshal(&req), - }) - } - err = w.Save(state, entries) - if err != nil { - t.Fatalf("Failed to save WAL: %v", err) - } - err = w.Close() - if err != nil { - t.Fatalf("Failed to close WAL: %v", err) - } - } - - w, err = wal.OpenForRead(lg, tmpPath, walpb.Snapshot{}) - if err != nil { - t.Fatalf("Failed to open WAL: %v", err) - } - return w, tmpPath -} - -func Reopen(t testing.TB, walPath string) *wal.WAL { - 
t.Helper() - lg := zaptest.NewLogger(t) - w, err := wal.OpenForRead(lg, walPath, walpb.Snapshot{}) - if err != nil { - t.Fatalf("Failed to open WAL: %v", err) - } - return w -} diff --git a/server/storage/wal/util.go b/server/storage/wal/util.go deleted file mode 100644 index 7da3d35177d..00000000000 --- a/server/storage/wal/util.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "errors" - "fmt" - "strings" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - - "go.uber.org/zap" -) - -var errBadWALName = errors.New("bad wal name") - -// Exist returns true if there are any files in a given directory. -func Exist(dir string) bool { - names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal")) - if err != nil { - return false - } - return len(names) != 0 -} - -// searchIndex returns the last array index of names whose raft index section is -// equal to or smaller than the given index. -// The given names MUST be sorted. -func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) { - for i := len(names) - 1; i >= 0; i-- { - name := names[i] - _, curIndex, err := parseWALName(name) - if err != nil { - lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err)) - } - if index >= curIndex { - return i, true - } - } - return -1, false -} - -// names should have been sorted based on sequence number. 
-// isValidSeq checks whether seq increases continuously. -func isValidSeq(lg *zap.Logger, names []string) bool { - var lastSeq uint64 - for _, name := range names { - curSeq, _, err := parseWALName(name) - if err != nil { - lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err)) - } - if lastSeq != 0 && lastSeq != curSeq-1 { - return false - } - lastSeq = curSeq - } - return true -} - -func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) { - names, err := fileutil.ReadDir(dirpath) - if err != nil { - return nil, fmt.Errorf("[readWALNames] fileutil.ReadDir failed: %w", err) - } - wnames := checkWalNames(lg, names) - if len(wnames) == 0 { - return nil, ErrFileNotFound - } - return wnames, nil -} - -func checkWalNames(lg *zap.Logger, names []string) []string { - wnames := make([]string, 0) - for _, name := range names { - if _, _, err := parseWALName(name); err != nil { - // don't complain about left over tmp files - if !strings.HasSuffix(name, ".tmp") { - lg.Warn( - "ignored file in WAL directory", - zap.String("path", name), - ) - } - continue - } - wnames = append(wnames, name) - } - return wnames -} - -func parseWALName(str string) (seq, index uint64, err error) { - if !strings.HasSuffix(str, ".wal") { - return 0, 0, errBadWALName - } - _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) - return seq, index, err -} - -func walName(seq, index uint64) string { - return fmt.Sprintf("%016x-%016x.wal", seq, index) -} diff --git a/server/storage/wal/version.go b/server/storage/wal/version.go deleted file mode 100644 index 5c90af18a80..00000000000 --- a/server/storage/wal/version.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "fmt" - "strings" - - "github.com/coreos/go-semver/semver" - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/descriptorpb" - - "go.etcd.io/etcd/api/v3/version" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/raft/v3/raftpb" -) - -// ReadWALVersion reads remaining entries from opened WAL and returns struct -// that implements schema.WAL interface. -func ReadWALVersion(w *WAL) (*walVersion, error) { - _, _, ents, err := w.ReadAll() - if err != nil { - return nil, err - } - return &walVersion{entries: ents}, nil -} - -type walVersion struct { - entries []raftpb.Entry -} - -// MinimalEtcdVersion returns minimal etcd able to interpret entries from WAL log, -func (w *walVersion) MinimalEtcdVersion() *semver.Version { - return MinimalEtcdVersion(w.entries) -} - -// MinimalEtcdVersion returns minimal etcd able to interpret entries from WAL log, -// determined by looking at entries since the last snapshot and returning the highest -// etcd version annotation from used messages, fields, enums and their values. 
-func MinimalEtcdVersion(ents []raftpb.Entry) *semver.Version { - var maxVer *semver.Version - for _, ent := range ents { - err := visitEntry(ent, func(path protoreflect.FullName, ver *semver.Version) error { - maxVer = maxVersion(maxVer, ver) - return nil - }) - if err != nil { - panic(err) - } - } - return maxVer -} - -type Visitor func(path protoreflect.FullName, ver *semver.Version) error - -// VisitFileDescriptor calls visitor on each field and enum value with etcd version read from proto definition. -// If field/enum value is not annotated, visitor will be called with nil. -// Upon encountering invalid annotation, will immediately exit with error. -func VisitFileDescriptor(file protoreflect.FileDescriptor, visitor Visitor) error { - msgs := file.Messages() - for i := 0; i < msgs.Len(); i++ { - err := visitMessageDescriptor(msgs.Get(i), visitor) - if err != nil { - return err - } - } - enums := file.Enums() - for i := 0; i < enums.Len(); i++ { - err := visitEnumDescriptor(enums.Get(i), visitor) - if err != nil { - return err - } - } - return nil -} - -func visitEntry(ent raftpb.Entry, visitor Visitor) error { - err := visitMessage(proto.MessageReflect(&ent), visitor) - if err != nil { - return err - } - return visitEntryData(ent.Type, ent.Data, visitor) -} - -func visitEntryData(entryType raftpb.EntryType, data []byte, visitor Visitor) error { - var msg protoreflect.Message - switch entryType { - case raftpb.EntryNormal: - var raftReq etcdserverpb.InternalRaftRequest - if err := pbutil.Unmarshaler(&raftReq).Unmarshal(data); err != nil { - // try V2 Request - var r etcdserverpb.Request - if pbutil.Unmarshaler(&r).Unmarshal(data) != nil { - // return original error - return err - } - msg = proto.MessageReflect(&r) - break - } - msg = proto.MessageReflect(&raftReq) - if raftReq.ClusterVersionSet != nil { - ver, err := semver.NewVersion(raftReq.ClusterVersionSet.Ver) - if err != nil { - return err - } - err = visitor(msg.Descriptor().FullName(), ver) - if err != 
nil { - return err - } - } - case raftpb.EntryConfChange: - var confChange raftpb.ConfChange - err := pbutil.Unmarshaler(&confChange).Unmarshal(data) - if err != nil { - return nil - } - msg = proto.MessageReflect(&confChange) - return visitor(msg.Descriptor().FullName(), &version.V3_0) - case raftpb.EntryConfChangeV2: - var confChange raftpb.ConfChangeV2 - err := pbutil.Unmarshaler(&confChange).Unmarshal(data) - if err != nil { - return nil - } - msg = proto.MessageReflect(&confChange) - return visitor(msg.Descriptor().FullName(), &version.V3_4) - default: - panic("unhandled") - } - return visitMessage(msg, visitor) -} - -func visitMessageDescriptor(md protoreflect.MessageDescriptor, visitor Visitor) error { - err := visitDescriptor(md, visitor) - if err != nil { - return err - } - fields := md.Fields() - for i := 0; i < fields.Len(); i++ { - fd := fields.Get(i) - err = visitDescriptor(fd, visitor) - if err != nil { - return err - } - } - - enums := md.Enums() - for i := 0; i < enums.Len(); i++ { - err := visitEnumDescriptor(enums.Get(i), visitor) - if err != nil { - return err - } - } - return err -} - -func visitMessage(m protoreflect.Message, visitor Visitor) error { - md := m.Descriptor() - err := visitDescriptor(md, visitor) - if err != nil { - return err - } - m.Range(func(field protoreflect.FieldDescriptor, value protoreflect.Value) bool { - fd := md.Fields().Get(field.Index()) - err = visitDescriptor(fd, visitor) - if err != nil { - return false - } - - switch m := value.Interface().(type) { - case protoreflect.Message: - err = visitMessage(m, visitor) - case protoreflect.EnumNumber: - err = visitEnumNumber(fd.Enum(), m, visitor) - } - if err != nil { - return false - } - return true - }) - return err -} - -func visitEnumDescriptor(enum protoreflect.EnumDescriptor, visitor Visitor) error { - err := visitDescriptor(enum, visitor) - if err != nil { - return err - } - fields := enum.Values() - for i := 0; i < fields.Len(); i++ { - fd := fields.Get(i) - err = 
visitDescriptor(fd, visitor) - if err != nil { - return err - } - } - return err -} - -func visitEnumNumber(enum protoreflect.EnumDescriptor, number protoreflect.EnumNumber, visitor Visitor) error { - err := visitDescriptor(enum, visitor) - if err != nil { - return err - } - intNumber := int(number) - fields := enum.Values() - if intNumber >= fields.Len() || intNumber < 0 { - return fmt.Errorf("could not visit EnumNumber [%d]", intNumber) - } - return visitEnumValue(fields.Get(intNumber), visitor) -} - -func visitEnumValue(enum protoreflect.EnumValueDescriptor, visitor Visitor) error { - valueOpts := enum.Options().(*descriptorpb.EnumValueOptions) - if valueOpts != nil { - ver, _ := etcdVersionFromOptionsString(valueOpts.String()) - err := visitor(enum.FullName(), ver) - if err != nil { - return err - } - } - return nil -} - -func visitDescriptor(md protoreflect.Descriptor, visitor Visitor) error { - opts, ok := md.Options().(fmt.Stringer) - if !ok { - return nil - } - ver, err := etcdVersionFromOptionsString(opts.String()) - if err != nil { - return fmt.Errorf("%s: %s", md.FullName(), err) - } - return visitor(md.FullName(), ver) -} - -func maxVersion(a *semver.Version, b *semver.Version) *semver.Version { - if a != nil && (b == nil || b.LessThan(*a)) { - return a - } - return b -} - -func etcdVersionFromOptionsString(opts string) (*semver.Version, error) { - // TODO: Use proto.GetExtention when gogo/protobuf is usable with protoreflect - msgs := []string{"[versionpb.etcd_version_msg]:", "[versionpb.etcd_version_field]:", "[versionpb.etcd_version_enum]:", "[versionpb.etcd_version_enum_value]:"} - var end, index int - for _, msg := range msgs { - index = strings.Index(opts, msg) - end = index + len(msg) - if index != -1 { - break - } - } - if index == -1 { - return nil, nil - } - var verStr string - _, err := fmt.Sscanf(opts[end:], "%q", &verStr) - if err != nil { - return nil, err - } - if strings.Count(verStr, ".") == 1 { - verStr = verStr + ".0" - } - ver, err 
:= semver.NewVersion(verStr) - if err != nil { - return nil, err - } - return ver, nil -} diff --git a/server/storage/wal/version_test.go b/server/storage/wal/version_test.go deleted file mode 100644 index 8b5420e94aa..00000000000 --- a/server/storage/wal/version_test.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "fmt" - "testing" - - "github.com/coreos/go-semver/semver" - "github.com/golang/protobuf/proto" - "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/reflect/protoreflect" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/raft/v3/raftpb" -) - -func TestEtcdVersionFromEntry(t *testing.T) { - raftReq := etcdserverpb.InternalRaftRequest{Header: &etcdserverpb.RequestHeader{AuthRevision: 1}} - normalRequestData := pbutil.MustMarshal(&raftReq) - - clusterVersionV3_6Req := etcdserverpb.InternalRaftRequest{ClusterVersionSet: &membershippb.ClusterVersionSetRequest{Ver: "3.6.0"}} - clusterVersionV3_6Data := pbutil.MustMarshal(&clusterVersionV3_6Req) - - confChange := raftpb.ConfChange{Type: raftpb.ConfChangeAddLearnerNode} - confChangeData := pbutil.MustMarshal(&confChange) - - confChangeV2 := raftpb.ConfChangeV2{Transition: raftpb.ConfChangeTransitionJointExplicit} - confChangeV2Data := pbutil.MustMarshal(&confChangeV2) - - tcs 
:= []struct { - name string - input raftpb.Entry - expect *semver.Version - }{ - { - name: "Using RequestHeader AuthRevision in NormalEntry implies v3.1", - input: raftpb.Entry{ - Term: 1, - Index: 2, - Type: raftpb.EntryNormal, - Data: normalRequestData, - }, - expect: &version.V3_1, - }, - { - name: "Setting cluster version implies version within", - input: raftpb.Entry{ - Term: 1, - Index: 2, - Type: raftpb.EntryNormal, - Data: clusterVersionV3_6Data, - }, - expect: &version.V3_6, - }, - { - name: "Using ConfigChange implies v3.0", - input: raftpb.Entry{ - Term: 1, - Index: 2, - Type: raftpb.EntryConfChange, - Data: confChangeData, - }, - expect: &version.V3_0, - }, - { - name: "Using ConfigChangeV2 implies v3.4", - input: raftpb.Entry{ - Term: 1, - Index: 2, - Type: raftpb.EntryConfChangeV2, - Data: confChangeV2Data, - }, - expect: &version.V3_4, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - var maxVer *semver.Version - err := visitEntry(tc.input, func(path protoreflect.FullName, ver *semver.Version) error { - maxVer = maxVersion(maxVer, ver) - return nil - }) - assert.NoError(t, err) - assert.Equal(t, tc.expect, maxVer) - }) - } -} - -func TestEtcdVersionFromMessage(t *testing.T) { - tcs := []struct { - name string - input proto.Message - expect *semver.Version - }{ - { - name: "Empty RequestHeader impies v3.0", - input: &etcdserverpb.RequestHeader{}, - expect: &version.V3_0, - }, - { - name: "RequestHeader AuthRevision field set implies v3.5", - input: &etcdserverpb.RequestHeader{AuthRevision: 1}, - expect: &version.V3_1, - }, - { - name: "RequestHeader Username set implies v3.0", - input: &etcdserverpb.RequestHeader{Username: "Alice"}, - expect: &version.V3_0, - }, - { - name: "When two fields are set take higher version", - input: &etcdserverpb.RequestHeader{AuthRevision: 1, Username: "Alice"}, - expect: &version.V3_1, - }, - { - name: "Setting a RequestHeader AuthRevision in subfield implies v3.1", - input: 
&etcdserverpb.InternalRaftRequest{Header: &etcdserverpb.RequestHeader{AuthRevision: 1}}, - expect: &version.V3_1, - }, - { - name: "Setting a DowngradeInfoSetRequest implies v3.5", - input: &etcdserverpb.InternalRaftRequest{DowngradeInfoSet: &membershippb.DowngradeInfoSetRequest{}}, - expect: &version.V3_5, - }, - { - name: "Enum CompareResult set to EQUAL implies v3.0", - input: &etcdserverpb.Compare{Result: etcdserverpb.Compare_EQUAL}, - expect: &version.V3_0, - }, - { - name: "Enum CompareResult set to NOT_EQUAL implies v3.1", - input: &etcdserverpb.Compare{Result: etcdserverpb.Compare_NOT_EQUAL}, - expect: &version.V3_1, - }, - { - name: "Oneof Compare version set implies v3.1", - input: &etcdserverpb.Compare{TargetUnion: &etcdserverpb.Compare_Version{}}, - expect: &version.V3_0, - }, - { - name: "Oneof Compare lease set implies v3.3", - input: &etcdserverpb.Compare{TargetUnion: &etcdserverpb.Compare_Lease{}}, - expect: &version.V3_3, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - var maxVer *semver.Version - err := visitMessage(proto.MessageReflect(tc.input), func(path protoreflect.FullName, ver *semver.Version) error { - maxVer = maxVersion(maxVer, ver) - return nil - }) - assert.NoError(t, err) - assert.Equal(t, tc.expect, maxVer) - }) - } -} - -func TestEtcdVersionFromFieldOptionsString(t *testing.T) { - tcs := []struct { - input string - expect *semver.Version - }{ - { - input: "65001:0", - }, - { - input: `65001:0 65004:"NodeID"`, - }, - { - input: `[versionpb.XXX]:"3.5"`, - }, - { - input: `[versionpb.etcd_version_msg]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `[versionpb.etcd_version_enum]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `[versionpb.etcd_version_field]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `[versionpb.etcd_version_enum_value]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `65001:0 [versionpb.etcd_version_msg]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `65004:"NodeID" 
[versionpb.etcd_version_msg]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `65004:"NodeID" [versionpb.etcd_version_enum]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `[versionpb.other_field]:"NodeID" [versionpb.etcd_version_msg]:"3.5"`, - expect: &version.V3_5, - }, - { - input: `[versionpb.etcd_version_msg]:"3.5" 65001:0`, - expect: &version.V3_5, - }, - { - input: `[versionpb.etcd_version_msg]:"3.5" 65004:"NodeID"`, - expect: &version.V3_5, - }, - { - input: `[versionpb.etcd_version_msg]:"3.5" [versionpb.other_field]:"NodeID"`, - expect: &version.V3_5, - }, - { - input: `[versionpb.other_field]:"NodeID" [versionpb.etcd_version_msg]:"3.5" [versionpb.another_field]:"NodeID"`, - expect: &version.V3_5, - }, - { - input: `65001:0 [versionpb.etcd_version_msg]:"3.5" 65001:0"`, - expect: &version.V3_5, - }, - } - for _, tc := range tcs { - t.Run(tc.input, func(t *testing.T) { - ver, err := etcdVersionFromOptionsString(tc.input) - assert.NoError(t, err) - assert.Equal(t, ver, tc.expect) - }) - } -} - -func TestMaxVersion(t *testing.T) { - tcs := []struct { - a, b, expect *semver.Version - }{ - { - a: nil, - b: nil, - expect: nil, - }, - { - a: &version.V3_5, - b: nil, - expect: &version.V3_5, - }, - { - a: nil, - b: &version.V3_5, - expect: &version.V3_5, - }, - { - a: &version.V3_6, - b: &version.V3_5, - expect: &version.V3_6, - }, - { - a: &version.V3_5, - b: &version.V3_6, - expect: &version.V3_6, - }, - } - for _, tc := range tcs { - t.Run(fmt.Sprintf("%v %v %v", tc.a, tc.b, tc.expect), func(t *testing.T) { - got := maxVersion(tc.a, tc.b) - assert.Equal(t, got, tc.expect) - }) - } -} diff --git a/server/storage/wal/wal.go b/server/storage/wal/wal.go deleted file mode 100644 index 7f8b25f5ddd..00000000000 --- a/server/storage/wal/wal.go +++ /dev/null @@ -1,1020 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "bytes" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3" - "go.etcd.io/raft/v3/raftpb" - - "go.uber.org/zap" -) - -const ( - MetadataType int64 = iota + 1 - EntryType - StateType - CrcType - SnapshotType - - // warnSyncDuration is the amount of time allotted to an fsync before - // logging a warning - warnSyncDuration = time.Second -) - -var ( - // SegmentSizeBytes is the preallocated size of each wal segment file. - // The actual size might be larger than this. In general, the default - // value should be used, but this is defined as an exported variable - // so that tests can set a different segment size. - SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB - - ErrMetadataConflict = errors.New("wal: conflicting metadata found") - ErrFileNotFound = errors.New("wal: file not found") - ErrCRCMismatch = walpb.ErrCRCMismatch - ErrSnapshotMismatch = errors.New("wal: snapshot mismatch") - ErrSnapshotNotFound = errors.New("wal: snapshot not found") - ErrSliceOutOfRange = errors.New("wal: slice bounds out of range") - ErrDecoderNotFound = errors.New("wal: decoder not found") - crcTable = crc32.MakeTable(crc32.Castagnoli) -) - -// WAL is a logical representation of the stable storage. -// WAL is either in read mode or append mode but not both. -// A newly created WAL is in append mode, and ready for appending records. 
-// A just opened WAL is in read mode, and ready for reading records. -// The WAL will be ready for appending after reading out all the previous records. -type WAL struct { - lg *zap.Logger - - dir string // the living directory of the underlay files - - // dirFile is a fd for the wal directory for syncing on Rename - dirFile *os.File - - metadata []byte // metadata recorded at the head of each WAL - state raftpb.HardState // hardstate recorded at the head of WAL - - start walpb.Snapshot // snapshot to start reading - decoder Decoder // decoder to Decode records - readClose func() error // closer for Decode reader - - unsafeNoSync bool // if set, do not fsync - - mu sync.Mutex - enti uint64 // index of the last entry saved to the wal - encoder *encoder // encoder to encode records - - locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing) - fp *filePipeline -} - -// Create creates a WAL ready for appending records. The given metadata is -// recorded at the head of each WAL file, and can be retrieved with ReadAll -// after the file is Open. 
-func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { - if Exist(dirpath) { - return nil, os.ErrExist - } - - if lg == nil { - lg = zap.NewNop() - } - - // keep temporary wal directory so WAL initialization appears atomic - tmpdirpath := filepath.Clean(dirpath) + ".tmp" - if fileutil.Exist(tmpdirpath) { - if err := os.RemoveAll(tmpdirpath); err != nil { - return nil, err - } - } - defer os.RemoveAll(tmpdirpath) - - if err := fileutil.CreateDirAll(lg, tmpdirpath); err != nil { - lg.Warn( - "failed to create a temporary WAL directory", - zap.String("tmp-dir-path", tmpdirpath), - zap.String("dir-path", dirpath), - zap.Error(err), - ) - return nil, err - } - - p := filepath.Join(tmpdirpath, walName(0, 0)) - f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode) - if err != nil { - lg.Warn( - "failed to flock an initial WAL file", - zap.String("path", p), - zap.Error(err), - ) - return nil, err - } - if _, err = f.Seek(0, io.SeekEnd); err != nil { - lg.Warn( - "failed to seek an initial WAL file", - zap.String("path", p), - zap.Error(err), - ) - return nil, err - } - if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { - lg.Warn( - "failed to preallocate an initial WAL file", - zap.String("path", p), - zap.Int64("segment-bytes", SegmentSizeBytes), - zap.Error(err), - ) - return nil, err - } - - w := &WAL{ - lg: lg, - dir: dirpath, - metadata: metadata, - } - w.encoder, err = newFileEncoder(f.File, 0) - if err != nil { - return nil, err - } - w.locks = append(w.locks, f) - if err = w.saveCrc(0); err != nil { - return nil, err - } - if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: metadata}); err != nil { - return nil, err - } - if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil { - return nil, err - } - - logDirPath := w.dir - if w, err = w.renameWAL(tmpdirpath); err != nil { - lg.Warn( - "failed to rename the temporary WAL directory", - zap.String("tmp-dir-path", tmpdirpath), - 
zap.String("dir-path", logDirPath), - zap.Error(err), - ) - return nil, err - } - - var perr error - defer func() { - if perr != nil { - w.cleanupWAL(lg) - } - }() - - // directory was renamed; sync parent dir to persist rename - pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir)) - if perr != nil { - lg.Warn( - "failed to open the parent data directory", - zap.String("parent-dir-path", filepath.Dir(w.dir)), - zap.String("dir-path", w.dir), - zap.Error(perr), - ) - return nil, perr - } - dirCloser := func() error { - if perr = pdir.Close(); perr != nil { - lg.Warn( - "failed to close the parent data directory file", - zap.String("parent-dir-path", filepath.Dir(w.dir)), - zap.String("dir-path", w.dir), - zap.Error(perr), - ) - return perr - } - return nil - } - start := time.Now() - if perr = fileutil.Fsync(pdir); perr != nil { - dirCloser() - lg.Warn( - "failed to fsync the parent data directory file", - zap.String("parent-dir-path", filepath.Dir(w.dir)), - zap.String("dir-path", w.dir), - zap.Error(perr), - ) - return nil, perr - } - walFsyncSec.Observe(time.Since(start).Seconds()) - if err = dirCloser(); err != nil { - return nil, err - } - - return w, nil -} - -func (w *WAL) Reopen(lg *zap.Logger, snap walpb.Snapshot) (*WAL, error) { - err := w.Close() - if err != nil { - lg.Panic("failed to close WAL during reopen", zap.Error(err)) - } - return Open(lg, w.dir, snap) -} - -func (w *WAL) SetUnsafeNoFsync() { - w.unsafeNoSync = true -} - -func (w *WAL) cleanupWAL(lg *zap.Logger) { - var err error - if err = w.Close(); err != nil { - lg.Panic("failed to close WAL during cleanup", zap.Error(err)) - } - brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999")) - if err = os.Rename(w.dir, brokenDirName); err != nil { - lg.Panic( - "failed to rename WAL during cleanup", - zap.Error(err), - zap.String("source-path", w.dir), - zap.String("rename-path", brokenDirName), - ) - } -} - -func (w *WAL) renameWAL(tmpdirpath string) (*WAL, 
error) { - if err := os.RemoveAll(w.dir); err != nil { - return nil, err - } - // On non-Windows platforms, hold the lock while renaming. Releasing - // the lock and trying to reacquire it quickly can be flaky because - // it's possible the process will fork to spawn a process while this is - // happening. The fds are set up as close-on-exec by the Go runtime, - // but there is a window between the fork and the exec where another - // process holds the lock. - if err := os.Rename(tmpdirpath, w.dir); err != nil { - if _, ok := err.(*os.LinkError); ok { - return w.renameWALUnlock(tmpdirpath) - } - return nil, err - } - w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes) - df, err := fileutil.OpenDir(w.dir) - w.dirFile = df - return w, err -} - -func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) { - // rename of directory with locked files doesn't work on windows/cifs; - // close the WAL to release the locks so the directory can be renamed. - w.lg.Info( - "closing WAL to release flock and retry directory renaming", - zap.String("from", tmpdirpath), - zap.String("to", w.dir), - ) - w.Close() - - if err := os.Rename(tmpdirpath, w.dir); err != nil { - return nil, err - } - - // reopen and relock - newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{}) - if oerr != nil { - return nil, oerr - } - if _, _, _, err := newWAL.ReadAll(); err != nil { - newWAL.Close() - return nil, err - } - return newWAL, nil -} - -// Open opens the WAL at the given snap. -// The snap SHOULD have been previously saved to the WAL, or the following -// ReadAll will fail. -// The returned WAL is ready to read and the first record will be the one after -// the given snap. The WAL cannot be appended to before reading out all of its -// previous records. 
-func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) { - w, err := openAtIndex(lg, dirpath, snap, true) - if err != nil { - return nil, fmt.Errorf("openAtIndex failed: %w", err) - } - if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil { - return nil, fmt.Errorf("fileutil.OpenDir failed: %w", err) - } - return w, nil -} - -// OpenForRead only opens the wal files for read. -// Write on a read only wal panics. -func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) { - return openAtIndex(lg, dirpath, snap, false) -} - -func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) { - if lg == nil { - lg = zap.NewNop() - } - names, nameIndex, err := selectWALFiles(lg, dirpath, snap) - if err != nil { - return nil, fmt.Errorf("[openAtIndex] selectWALFiles failed: %w", err) - } - - rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write) - if err != nil { - return nil, fmt.Errorf("[openAtIndex] openWALFiles failed: %w", err) - } - - // create a WAL ready for reading - w := &WAL{ - lg: lg, - dir: dirpath, - start: snap, - decoder: NewDecoder(rs...), - readClose: closer, - locks: ls, - } - - if write { - // write reuses the file descriptors from read; don't close so - // WAL can append without dropping the file lock - w.readClose = nil - if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil { - closer() - return nil, fmt.Errorf("[openAtIndex] parseWALName failed: %w", err) - } - w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes) - } - - return w, nil -} - -func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) { - names, err := readWALNames(lg, dirpath) - if err != nil { - return nil, -1, fmt.Errorf("readWALNames failed: %w", err) - } - - nameIndex, ok := searchIndex(lg, names, snap.Index) - if !ok { - return nil, -1, fmt.Errorf("wal: file not found which matches the snapshot index '%d'", snap.Index) - } 
- - if !isValidSeq(lg, names[nameIndex:]) { - return nil, -1, fmt.Errorf("wal: file sequence numbers (starting from %d) do not increase continuously", nameIndex) - } - - return names, nameIndex, nil -} - -func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]fileutil.FileReader, []*fileutil.LockedFile, func() error, error) { - rcs := make([]io.ReadCloser, 0) - rs := make([]fileutil.FileReader, 0) - ls := make([]*fileutil.LockedFile, 0) - for _, name := range names[nameIndex:] { - p := filepath.Join(dirpath, name) - var f *os.File - if write { - l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode) - if err != nil { - closeAll(lg, rcs...) - return nil, nil, nil, fmt.Errorf("[openWALFiles] fileutil.TryLockFile failed: %w", err) - } - ls = append(ls, l) - rcs = append(rcs, l) - f = l.File - } else { - rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode) - if err != nil { - closeAll(lg, rcs...) - return nil, nil, nil, fmt.Errorf("[openWALFiles] os.OpenFile failed (%q): %w", p, err) - } - ls = append(ls, nil) - rcs = append(rcs, rf) - f = rf - } - fileReader := fileutil.NewFileReader(f) - rs = append(rs, fileReader) - } - - closer := func() error { return closeAll(lg, rcs...) } - - return rs, ls, closer, nil -} - -// ReadAll reads out records of the current WAL. -// If opened in write mode, it must read out all records until EOF. Or an error -// will be returned. -// If opened in read mode, it will try to read all records if possible. -// If it cannot read out the expected snap, it will return ErrSnapshotNotFound. -// If loaded snap doesn't match with the expected one, it will return -// all the records and error ErrSnapshotMismatch. -// TODO: detect not-last-snap error. -// TODO: maybe loose the checking of match. -// After ReadAll, the WAL will be ready for appending new records. -// -// ReadAll suppresses WAL entries that got overridden (i.e. 
a newer entry with the same index -// exists in the log). Such a situation can happen in cases described in figure 7. of the -// RAFT paper (http://web.stanford.edu/~ouster/cgi-bin/papers/raft-atc14.pdf). -// -// ReadAll may return uncommitted yet entries, that are subject to be overriden. -// Do not apply entries that have index > state.commit, as they are subject to change. -func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) { - w.mu.Lock() - defer w.mu.Unlock() - - rec := &walpb.Record{} - - if w.decoder == nil { - return nil, state, nil, ErrDecoderNotFound - } - decoder := w.decoder - - var match bool - for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) { - switch rec.Type { - case EntryType: - e := MustUnmarshalEntry(rec.Data) - // 0 <= e.Index-w.start.Index - 1 < len(ents) - if e.Index > w.start.Index { - // prevent "panic: runtime error: slice bounds out of range [:13038096702221461992] with capacity 0" - up := e.Index - w.start.Index - 1 - if up > uint64(len(ents)) { - // return error before append call causes runtime panic - return nil, state, nil, ErrSliceOutOfRange - } - // The line below is potentially overriding some 'uncommitted' entries. - ents = append(ents[:up], e) - } - w.enti = e.Index - - case StateType: - state = MustUnmarshalState(rec.Data) - - case MetadataType: - if metadata != nil && !bytes.Equal(metadata, rec.Data) { - state.Reset() - return nil, state, nil, ErrMetadataConflict - } - metadata = rec.Data - - case CrcType: - crc := decoder.LastCRC() - // current crc of decoder must match the crc of the record. - // do no need to match 0 crc, since the decoder is a new one at this case. 
- if crc != 0 && rec.Validate(crc) != nil { - state.Reset() - return nil, state, nil, ErrCRCMismatch - } - decoder.UpdateCRC(rec.Crc) - - case SnapshotType: - var snap walpb.Snapshot - pbutil.MustUnmarshal(&snap, rec.Data) - if snap.Index == w.start.Index { - if snap.Term != w.start.Term { - state.Reset() - return nil, state, nil, ErrSnapshotMismatch - } - match = true - } - - default: - state.Reset() - return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type) - } - } - - switch w.tail() { - case nil: - // We do not have to read out all entries in read mode. - // The last record maybe a partial written one, so - // `io.ErrUnexpectedEOF` might be returned. - if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) { - state.Reset() - return nil, state, nil, err - } - default: - // We must read all the entries if WAL is opened in write mode. - if !errors.Is(err, io.EOF) { - state.Reset() - return nil, state, nil, err - } - // decodeRecord() will return io.EOF if it detects a zero record, - // but this zero record may be followed by non-zero records from - // a torn write. Overwriting some of these non-zero records, but - // not all, will cause CRC errors on WAL open. Since the records - // were never fully synced to disk in the first place, it's safe - // to zero them out to avoid any CRC errors from new writes. 
- if _, err = w.tail().Seek(w.decoder.LastOffset(), io.SeekStart); err != nil { - return nil, state, nil, err - } - if err = fileutil.ZeroToEnd(w.tail().File); err != nil { - return nil, state, nil, err - } - } - - err = nil - if !match { - err = ErrSnapshotNotFound - } - - // close decoder, disable reading - if w.readClose != nil { - w.readClose() - w.readClose = nil - } - w.start = walpb.Snapshot{} - - w.metadata = metadata - - if w.tail() != nil { - // create encoder (chain crc with the decoder), enable appending - w.encoder, err = newFileEncoder(w.tail().File, w.decoder.LastCRC()) - if err != nil { - return - } - } - w.decoder = nil - - return metadata, state, ents, err -} - -// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory. -// Snapshot entries are valid if their index is less than or equal to the most recent committed hardstate. -func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, error) { - var snaps []walpb.Snapshot - var state raftpb.HardState - var err error - - rec := &walpb.Record{} - names, err := readWALNames(lg, walDir) - if err != nil { - return nil, err - } - - // open wal files in read mode, so that there is no conflict - // when the same WAL is opened elsewhere in write mode - rs, _, closer, err := openWALFiles(lg, walDir, names, 0, false) - if err != nil { - return nil, err - } - defer func() { - if closer != nil { - closer() - } - }() - - // create a new decoder from the readers on the WAL files - decoder := NewDecoder(rs...) - - for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) { - switch rec.Type { - case SnapshotType: - var loadedSnap walpb.Snapshot - pbutil.MustUnmarshal(&loadedSnap, rec.Data) - snaps = append(snaps, loadedSnap) - case StateType: - state = MustUnmarshalState(rec.Data) - case CrcType: - crc := decoder.LastCRC() - // current crc of decoder must match the crc of the record. 
- // do no need to match 0 crc, since the decoder is a new one at this case. - if crc != 0 && rec.Validate(crc) != nil { - return nil, ErrCRCMismatch - } - decoder.UpdateCRC(rec.Crc) - } - } - // We do not have to read out all the WAL entries - // as the decoder is opened in read mode. - if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) { - return nil, err - } - - // filter out any snaps that are newer than the committed hardstate - n := 0 - for _, s := range snaps { - if s.Index <= state.Commit { - snaps[n] = s - n++ - } - } - snaps = snaps[:n:n] - return snaps, nil -} - -// Verify reads through the given WAL and verifies that it is not corrupted. -// It creates a new decoder to read through the records of the given WAL. -// It does not conflict with any open WAL, but it is recommended not to -// call this function after opening the WAL for writing. -// If it cannot read out the expected snap, it will return ErrSnapshotNotFound. -// If the loaded snap doesn't match with the expected one, it will -// return error ErrSnapshotMismatch. -func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardState, error) { - var metadata []byte - var err error - var match bool - var state raftpb.HardState - - rec := &walpb.Record{} - - if lg == nil { - lg = zap.NewNop() - } - names, nameIndex, err := selectWALFiles(lg, walDir, snap) - if err != nil { - return nil, err - } - - // open wal files in read mode, so that there is no conflict - // when the same WAL is opened elsewhere in write mode - rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false) - if err != nil { - return nil, err - } - defer func() { - if closer != nil { - closer() - } - }() - - // create a new decoder from the readers on the WAL files - decoder := NewDecoder(rs...) 
- - for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) { - switch rec.Type { - case MetadataType: - if metadata != nil && !bytes.Equal(metadata, rec.Data) { - return nil, ErrMetadataConflict - } - metadata = rec.Data - case CrcType: - crc := decoder.LastCRC() - // Current crc of decoder must match the crc of the record. - // We need not match 0 crc, since the decoder is a new one at this point. - if crc != 0 && rec.Validate(crc) != nil { - return nil, ErrCRCMismatch - } - decoder.UpdateCRC(rec.Crc) - case SnapshotType: - var loadedSnap walpb.Snapshot - pbutil.MustUnmarshal(&loadedSnap, rec.Data) - if loadedSnap.Index == snap.Index { - if loadedSnap.Term != snap.Term { - return nil, ErrSnapshotMismatch - } - match = true - } - // We ignore all entry and state type records as these - // are not necessary for validating the WAL contents - case EntryType: - case StateType: - pbutil.MustUnmarshal(&state, rec.Data) - default: - return nil, fmt.Errorf("unexpected block type %d", rec.Type) - } - } - - // We do not have to read out all the WAL entries - // as the decoder is opened in read mode. - if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) { - return nil, err - } - - if !match { - return nil, ErrSnapshotNotFound - } - - return &state, nil -} - -// cut closes current file written and creates a new one ready to append. -// cut first creates a temp wal file and writes necessary headers into it. -// Then cut atomically rename temp wal file to a wal file. 
-func (w *WAL) cut() error { - // close old wal file; truncate to avoid wasting space if an early cut - off, serr := w.tail().Seek(0, io.SeekCurrent) - if serr != nil { - return serr - } - - if err := w.tail().Truncate(off); err != nil { - return err - } - - if err := w.sync(); err != nil { - return err - } - - fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1)) - - // create a temp wal file with name sequence + 1, or truncate the existing one - newTail, err := w.fp.Open() - if err != nil { - return err - } - - // update writer and save the previous crc - w.locks = append(w.locks, newTail) - prevCrc := w.encoder.crc.Sum32() - w.encoder, err = newFileEncoder(w.tail().File, prevCrc) - if err != nil { - return err - } - - if err = w.saveCrc(prevCrc); err != nil { - return err - } - - if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: w.metadata}); err != nil { - return err - } - - if err = w.saveState(&w.state); err != nil { - return err - } - - // atomically move temp wal file to wal file - if err = w.sync(); err != nil { - return err - } - - off, err = w.tail().Seek(0, io.SeekCurrent) - if err != nil { - return err - } - - if err = os.Rename(newTail.Name(), fpath); err != nil { - return err - } - start := time.Now() - if err = fileutil.Fsync(w.dirFile); err != nil { - return err - } - walFsyncSec.Observe(time.Since(start).Seconds()) - - // reopen newTail with its new path so calls to Name() match the wal filename format - newTail.Close() - - if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil { - return err - } - if _, err = newTail.Seek(off, io.SeekStart); err != nil { - return err - } - - w.locks[len(w.locks)-1] = newTail - - prevCrc = w.encoder.crc.Sum32() - w.encoder, err = newFileEncoder(w.tail().File, prevCrc) - if err != nil { - return err - } - - w.lg.Info("created a new WAL segment", zap.String("path", fpath)) - return nil -} - -func (w *WAL) sync() error { - if w.encoder != nil { - if err := 
w.encoder.flush(); err != nil { - return err - } - } - - if w.unsafeNoSync { - return nil - } - - start := time.Now() - err := fileutil.Fdatasync(w.tail().File) - - took := time.Since(start) - if took > warnSyncDuration { - w.lg.Warn( - "slow fdatasync", - zap.Duration("took", took), - zap.Duration("expected-duration", warnSyncDuration), - ) - } - walFsyncSec.Observe(took.Seconds()) - - return err -} - -func (w *WAL) Sync() error { - return w.sync() -} - -// ReleaseLockTo releases the locks, which has smaller index than the given index -// except the largest one among them. -// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release -// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4. -func (w *WAL) ReleaseLockTo(index uint64) error { - w.mu.Lock() - defer w.mu.Unlock() - - if len(w.locks) == 0 { - return nil - } - - var smaller int - found := false - for i, l := range w.locks { - _, lockIndex, err := parseWALName(filepath.Base(l.Name())) - if err != nil { - return err - } - if lockIndex >= index { - smaller = i - 1 - found = true - break - } - } - - // if no lock index is greater than the release index, we can - // release lock up to the last one(excluding). - if !found { - smaller = len(w.locks) - 1 - } - - if smaller <= 0 { - return nil - } - - for i := 0; i < smaller; i++ { - if w.locks[i] == nil { - continue - } - w.locks[i].Close() - } - w.locks = w.locks[smaller:] - - return nil -} - -// Close closes the current WAL file and directory. 
-func (w *WAL) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.fp != nil { - w.fp.Close() - w.fp = nil - } - - if w.tail() != nil { - if err := w.sync(); err != nil { - return err - } - } - for _, l := range w.locks { - if l == nil { - continue - } - if err := l.Close(); err != nil { - w.lg.Error("failed to close WAL", zap.Error(err)) - } - } - - return w.dirFile.Close() -} - -func (w *WAL) saveEntry(e *raftpb.Entry) error { - // TODO: add MustMarshalTo to reduce one allocation. - b := pbutil.MustMarshal(e) - rec := &walpb.Record{Type: EntryType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - w.enti = e.Index - return nil -} - -func (w *WAL) saveState(s *raftpb.HardState) error { - if raft.IsEmptyHardState(*s) { - return nil - } - w.state = *s - b := pbutil.MustMarshal(s) - rec := &walpb.Record{Type: StateType, Data: b} - return w.encoder.encode(rec) -} - -func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { - w.mu.Lock() - defer w.mu.Unlock() - - // short cut, do not call sync - if raft.IsEmptyHardState(st) && len(ents) == 0 { - return nil - } - - mustSync := raft.MustSync(st, w.state, len(ents)) - - // TODO(xiangli): no more reference operator - for i := range ents { - if err := w.saveEntry(&ents[i]); err != nil { - return err - } - } - if err := w.saveState(&st); err != nil { - return err - } - - curOff, err := w.tail().Seek(0, io.SeekCurrent) - if err != nil { - return err - } - if curOff < SegmentSizeBytes { - if mustSync { - // gofail: var walBeforeSync struct{} - err = w.sync() - // gofail: var walAfterSync struct{} - return err - } - return nil - } - - return w.cut() -} - -func (w *WAL) SaveSnapshot(e walpb.Snapshot) error { - if err := walpb.ValidateSnapshotForWrite(&e); err != nil { - return err - } - - b := pbutil.MustMarshal(&e) - - w.mu.Lock() - defer w.mu.Unlock() - - rec := &walpb.Record{Type: SnapshotType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - // update 
enti only when snapshot is ahead of last index - if w.enti < e.Index { - w.enti = e.Index - } - return w.sync() -} - -func (w *WAL) saveCrc(prevCrc uint32) error { - return w.encoder.encode(&walpb.Record{Type: CrcType, Crc: prevCrc}) -} - -func (w *WAL) tail() *fileutil.LockedFile { - if len(w.locks) > 0 { - return w.locks[len(w.locks)-1] - } - return nil -} - -func (w *WAL) seq() uint64 { - t := w.tail() - if t == nil { - return 0 - } - seq, _, err := parseWALName(filepath.Base(t.Name())) - if err != nil { - w.lg.Fatal("failed to parse WAL name", zap.String("name", t.Name()), zap.Error(err)) - } - return seq -} - -func closeAll(lg *zap.Logger, rcs ...io.ReadCloser) error { - stringArr := make([]string, 0) - for _, f := range rcs { - if err := f.Close(); err != nil { - lg.Warn("failed to close: ", zap.Error(err)) - stringArr = append(stringArr, err.Error()) - } - } - if len(stringArr) == 0 { - return nil - } - return errors.New(strings.Join(stringArr, ", ")) -} diff --git a/server/storage/wal/wal_bench_test.go b/server/storage/wal/wal_bench_test.go deleted file mode 100644 index c8996dc1275..00000000000 --- a/server/storage/wal/wal_bench_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package wal - -import ( - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/raft/v3/raftpb" -) - -func BenchmarkWrite100EntryWithoutBatch(b *testing.B) { benchmarkWriteEntry(b, 100, 0) } -func BenchmarkWrite100EntryBatch10(b *testing.B) { benchmarkWriteEntry(b, 100, 10) } -func BenchmarkWrite100EntryBatch100(b *testing.B) { benchmarkWriteEntry(b, 100, 100) } -func BenchmarkWrite100EntryBatch500(b *testing.B) { benchmarkWriteEntry(b, 100, 500) } -func BenchmarkWrite100EntryBatch1000(b *testing.B) { benchmarkWriteEntry(b, 100, 1000) } - -func BenchmarkWrite1000EntryWithoutBatch(b *testing.B) { benchmarkWriteEntry(b, 1000, 0) } -func BenchmarkWrite1000EntryBatch10(b *testing.B) { benchmarkWriteEntry(b, 1000, 10) } -func BenchmarkWrite1000EntryBatch100(b *testing.B) { benchmarkWriteEntry(b, 1000, 100) } -func BenchmarkWrite1000EntryBatch500(b *testing.B) { benchmarkWriteEntry(b, 1000, 500) } -func BenchmarkWrite1000EntryBatch1000(b *testing.B) { benchmarkWriteEntry(b, 1000, 1000) } - -func benchmarkWriteEntry(b *testing.B, size int, batch int) { - p := b.TempDir() - - w, err := Create(zaptest.NewLogger(b), p, []byte("somedata")) - if err != nil { - b.Fatalf("err = %v, want nil", err) - } - data := make([]byte, size) - for i := 0; i < size; i++ { - data[i] = byte(i) - } - e := &raftpb.Entry{Data: data} - - b.ResetTimer() - n := 0 - b.SetBytes(int64(e.Size())) - for i := 0; i < b.N; i++ { - err := w.saveEntry(e) - if err != nil { - b.Fatal(err) - } - n++ - if n > batch { - w.sync() - n = 0 - } - } -} diff --git a/server/storage/wal/wal_test.go b/server/storage/wal/wal_test.go deleted file mode 100644 index 45bae828c18..00000000000 --- a/server/storage/wal/wal_test.go +++ /dev/null @@ -1,1147 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "bytes" - "errors" - "fmt" - "io" - "math" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" -) - -var ( - confState = raftpb.ConfState{ - Voters: []uint64{0x00ffca74}, - AutoLeave: false, - } -) - -func TestNew(t *testing.T) { - p := t.TempDir() - - w, err := Create(zaptest.NewLogger(t), p, []byte("somedata")) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - if g := filepath.Base(w.tail().Name()); g != walName(0, 0) { - t.Errorf("name = %+v, want %+v", g, walName(0, 0)) - } - defer w.Close() - - // file is preallocated to segment size; only read data written by wal - off, err := w.tail().Seek(0, io.SeekCurrent) - if err != nil { - t.Fatal(err) - } - gd := make([]byte, off) - f, err := os.Open(filepath.Join(p, filepath.Base(w.tail().Name()))) - if err != nil { - t.Fatal(err) - } - defer f.Close() - if _, err = io.ReadFull(f, gd); err != nil { - t.Fatalf("err = %v, want nil", err) - } - - var wb bytes.Buffer - e := newEncoder(&wb, 0, 0) - err = e.encode(&walpb.Record{Type: CrcType, Crc: 0}) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - err = e.encode(&walpb.Record{Type: MetadataType, Data: []byte("somedata")}) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - r := 
&walpb.Record{ - Type: SnapshotType, - Data: pbutil.MustMarshal(&walpb.Snapshot{}), - } - if err = e.encode(r); err != nil { - t.Fatalf("err = %v, want nil", err) - } - e.flush() - if !bytes.Equal(gd, wb.Bytes()) { - t.Errorf("data = %v, want %v", gd, wb.Bytes()) - } -} - -func TestCreateFailFromPollutedDir(t *testing.T) { - p := t.TempDir() - os.WriteFile(filepath.Join(p, "test.wal"), []byte("data"), os.ModeTemporary) - - _, err := Create(zaptest.NewLogger(t), p, []byte("data")) - if err != os.ErrExist { - t.Fatalf("expected %v, got %v", os.ErrExist, err) - } -} - -func TestWalCleanup(t *testing.T) { - testRoot := t.TempDir() - p, err := os.MkdirTemp(testRoot, "waltest") - if err != nil { - t.Fatal(err) - } - - logger := zaptest.NewLogger(t) - w, err := Create(logger, p, []byte("")) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - w.cleanupWAL(logger) - fnames, err := fileutil.ReadDir(testRoot) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - if len(fnames) != 1 { - t.Fatalf("expected 1 file under %v, got %v", testRoot, len(fnames)) - } - pattern := fmt.Sprintf(`%s.broken\.[\d]{8}\.[\d]{6}\.[\d]{1,6}?`, filepath.Base(p)) - match, _ := regexp.MatchString(pattern, fnames[0]) - if !match { - t.Errorf("match = false, expected true for %v with pattern %v", fnames[0], pattern) - } -} - -func TestCreateFailFromNoSpaceLeft(t *testing.T) { - p := t.TempDir() - - oldSegmentSizeBytes := SegmentSizeBytes - defer func() { - SegmentSizeBytes = oldSegmentSizeBytes - }() - SegmentSizeBytes = math.MaxInt64 - - _, err := Create(zaptest.NewLogger(t), p, []byte("data")) - if err == nil { // no space left on device - t.Fatalf("expected error 'no space left on device', got nil") - } -} - -func TestNewForInitedDir(t *testing.T) { - p := t.TempDir() - - os.Create(filepath.Join(p, walName(0, 0))) - if _, err := Create(zaptest.NewLogger(t), p, nil); err == nil || err != os.ErrExist { - t.Errorf("err = %v, want %v", err, os.ErrExist) - } -} - -func 
TestOpenAtIndex(t *testing.T) { - dir := t.TempDir() - - f, err := os.Create(filepath.Join(dir, walName(0, 0))) - if err != nil { - t.Fatal(err) - } - f.Close() - - w, err := Open(zaptest.NewLogger(t), dir, walpb.Snapshot{}) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - if g := filepath.Base(w.tail().Name()); g != walName(0, 0) { - t.Errorf("name = %+v, want %+v", g, walName(0, 0)) - } - if w.seq() != 0 { - t.Errorf("seq = %d, want %d", w.seq(), 0) - } - w.Close() - - wname := walName(2, 10) - f, err = os.Create(filepath.Join(dir, wname)) - if err != nil { - t.Fatal(err) - } - f.Close() - - w, err = Open(zaptest.NewLogger(t), dir, walpb.Snapshot{Index: 5}) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - if g := filepath.Base(w.tail().Name()); g != wname { - t.Errorf("name = %+v, want %+v", g, wname) - } - if w.seq() != 2 { - t.Errorf("seq = %d, want %d", w.seq(), 2) - } - w.Close() - - emptydir := t.TempDir() - if _, err = Open(zaptest.NewLogger(t), emptydir, walpb.Snapshot{}); !errors.Is(err, ErrFileNotFound) { - t.Errorf("err = %v, want %v", err, ErrFileNotFound) - } -} - -// TestVerify tests that Verify throws a non-nil error when the WAL is corrupted. -// The test creates a WAL directory and cuts out multiple WAL files. Then -// it corrupts one of the files by completely truncating it. 
-func TestVerify(t *testing.T) { - lg := zaptest.NewLogger(t) - walDir := t.TempDir() - - // create WAL - w, err := Create(lg, walDir, nil) - if err != nil { - t.Fatal(err) - } - defer w.Close() - - // make 5 separate files - for i := 0; i < 5; i++ { - es := []raftpb.Entry{{Index: uint64(i), Data: []byte(fmt.Sprintf("waldata%d", i+1))}} - if err = w.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - if err = w.cut(); err != nil { - t.Fatal(err) - } - } - - hs := raftpb.HardState{Term: 1, Vote: 3, Commit: 5} - assert.NoError(t, w.Save(hs, nil)) - - // to verify the WAL is not corrupted at this point - hardstate, err := Verify(lg, walDir, walpb.Snapshot{}) - if err != nil { - t.Errorf("expected a nil error, got %v", err) - } - assert.Equal(t, hs, *hardstate) - - walFiles, err := os.ReadDir(walDir) - if err != nil { - t.Fatal(err) - } - - // corrupt the WAL by truncating one of the WAL files completely - err = os.Truncate(path.Join(walDir, walFiles[2].Name()), 0) - if err != nil { - t.Fatal(err) - } - - _, err = Verify(lg, walDir, walpb.Snapshot{}) - if err == nil { - t.Error("expected a non-nil error, got nil") - } -} - -// TestCut tests cut -// TODO: split it into smaller tests for better readability -func TestCut(t *testing.T) { - p := t.TempDir() - - w, err := Create(zaptest.NewLogger(t), p, nil) - if err != nil { - t.Fatal(err) - } - defer w.Close() - - state := raftpb.HardState{Term: 1} - if err = w.Save(state, nil); err != nil { - t.Fatal(err) - } - if err = w.cut(); err != nil { - t.Fatal(err) - } - wname := walName(1, 1) - if g := filepath.Base(w.tail().Name()); g != wname { - t.Errorf("name = %s, want %s", g, wname) - } - - es := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte{1}}} - if err = w.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - if err = w.cut(); err != nil { - t.Fatal(err) - } - snap := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState} - if err = w.SaveSnapshot(snap); err != nil { - t.Fatal(err) - } - wname = 
walName(2, 2) - if g := filepath.Base(w.tail().Name()); g != wname { - t.Errorf("name = %s, want %s", g, wname) - } - - // check the state in the last WAL - // We do check before closing the WAL to ensure that Cut syncs the data - // into the disk. - f, err := os.Open(filepath.Join(p, wname)) - if err != nil { - t.Fatal(err) - } - defer f.Close() - nw := &WAL{ - decoder: NewDecoder(fileutil.NewFileReader(f)), - start: snap, - } - _, gst, _, err := nw.ReadAll() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gst, state) { - t.Errorf("state = %+v, want %+v", gst, state) - } -} - -func TestSaveWithCut(t *testing.T) { - p := t.TempDir() - - w, err := Create(zaptest.NewLogger(t), p, []byte("metadata")) - if err != nil { - t.Fatal(err) - } - - state := raftpb.HardState{Term: 1} - if err = w.Save(state, nil); err != nil { - t.Fatal(err) - } - bigData := make([]byte, 500) - strdata := "Hello World!!" - copy(bigData, strdata) - // set a lower value for SegmentSizeBytes, else the test takes too long to complete - restoreLater := SegmentSizeBytes - const EntrySize int = 500 - SegmentSizeBytes = 2 * 1024 - defer func() { SegmentSizeBytes = restoreLater }() - index := uint64(0) - for totalSize := 0; totalSize < int(SegmentSizeBytes); totalSize += EntrySize { - ents := []raftpb.Entry{{Index: index, Term: 1, Data: bigData}} - if err = w.Save(state, ents); err != nil { - t.Fatal(err) - } - index++ - } - - w.Close() - - neww, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - defer neww.Close() - wname := walName(1, index) - if g := filepath.Base(neww.tail().Name()); g != wname { - t.Errorf("name = %s, want %s", g, wname) - } - - _, newhardstate, entries, err := neww.ReadAll() - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(newhardstate, state) { - t.Errorf("Hard State = %+v, want %+v", newhardstate, state) - } - if len(entries) != int(SegmentSizeBytes/int64(EntrySize)) { - 
t.Errorf("Number of entries = %d, expected = %d", len(entries), int(SegmentSizeBytes/int64(EntrySize))) - } - for _, oneent := range entries { - if !bytes.Equal(oneent.Data, bigData) { - t.Errorf("the saved data does not match at Index %d : found: %s , want :%s", oneent.Index, oneent.Data, bigData) - } - } -} - -func TestRecover(t *testing.T) { - cases := []struct { - name string - size int - }{ - { - name: "10MB", - size: 10 * 1024 * 1024, - }, - { - name: "20MB", - size: 20 * 1024 * 1024, - }, - { - name: "40MB", - size: 40 * 1024 * 1024, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - p := t.TempDir() - - w, err := Create(zaptest.NewLogger(t), p, []byte("metadata")) - if err != nil { - t.Fatal(err) - } - if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil { - t.Fatal(err) - } - - data := make([]byte, tc.size) - n, err := rand.Read(data) - assert.Equal(t, tc.size, n) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - ents := []raftpb.Entry{{Index: 1, Term: 1, Data: data}, {Index: 2, Term: 2, Data: data}} - if err = w.Save(raftpb.HardState{}, ents); err != nil { - t.Fatal(err) - } - sts := []raftpb.HardState{{Term: 1, Vote: 1, Commit: 1}, {Term: 2, Vote: 2, Commit: 2}} - for _, s := range sts { - if err = w.Save(s, nil); err != nil { - t.Fatal(err) - } - } - w.Close() - - if w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}); err != nil { - t.Fatal(err) - } - metadata, state, entries, err := w.ReadAll() - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(metadata, []byte("metadata")) { - t.Errorf("metadata = %s, want %s", metadata, "metadata") - } - if !reflect.DeepEqual(entries, ents) { - t.Errorf("ents = %+v, want %+v", entries, ents) - } - // only the latest state is recorded - s := sts[len(sts)-1] - if !reflect.DeepEqual(state, s) { - t.Errorf("state = %+v, want %+v", state, s) - } - w.Close() - }) - } -} - -func TestSearchIndex(t *testing.T) { - tests := []struct { - names []string - index uint64 - 
widx int - wok bool - }{ - { - []string{ - "0000000000000000-0000000000000000.wal", - "0000000000000001-0000000000001000.wal", - "0000000000000002-0000000000002000.wal", - }, - 0x1000, 1, true, - }, - { - []string{ - "0000000000000001-0000000000004000.wal", - "0000000000000002-0000000000003000.wal", - "0000000000000003-0000000000005000.wal", - }, - 0x4000, 1, true, - }, - { - []string{ - "0000000000000001-0000000000002000.wal", - "0000000000000002-0000000000003000.wal", - "0000000000000003-0000000000005000.wal", - }, - 0x1000, -1, false, - }, - } - for i, tt := range tests { - idx, ok := searchIndex(zaptest.NewLogger(t), tt.names, tt.index) - if idx != tt.widx { - t.Errorf("#%d: idx = %d, want %d", i, idx, tt.widx) - } - if ok != tt.wok { - t.Errorf("#%d: ok = %v, want %v", i, ok, tt.wok) - } - } -} - -func TestScanWalName(t *testing.T) { - tests := []struct { - str string - wseq, windex uint64 - wok bool - }{ - {"0000000000000000-0000000000000000.wal", 0, 0, true}, - {"0000000000000000.wal", 0, 0, false}, - {"0000000000000000-0000000000000000.snap", 0, 0, false}, - } - for i, tt := range tests { - s, index, err := parseWALName(tt.str) - if g := err == nil; g != tt.wok { - t.Errorf("#%d: ok = %v, want %v", i, g, tt.wok) - } - if s != tt.wseq { - t.Errorf("#%d: seq = %d, want %d", i, s, tt.wseq) - } - if index != tt.windex { - t.Errorf("#%d: index = %d, want %d", i, index, tt.windex) - } - } -} - -func TestRecoverAfterCut(t *testing.T) { - p := t.TempDir() - - md, err := Create(zaptest.NewLogger(t), p, []byte("metadata")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 10; i++ { - if err = md.SaveSnapshot(walpb.Snapshot{Index: uint64(i), Term: 1, ConfState: &confState}); err != nil { - t.Fatal(err) - } - es := []raftpb.Entry{{Index: uint64(i)}} - if err = md.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - if err = md.cut(); err != nil { - t.Fatal(err) - } - } - md.Close() - - if err := os.Remove(filepath.Join(p, walName(4, 4))); err != nil { - 
t.Fatal(err) - } - - for i := 0; i < 10; i++ { - w, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{Index: uint64(i), Term: 1}) - if err != nil { - if i <= 4 { - if !strings.Contains(err.Error(), "do not increase continuously") { - t.Errorf("#%d: err = %v isn't expected, want: '* do not increase continuously'", i, err) - } - } else { - t.Errorf("#%d: err = %v, want nil", i, err) - } - continue - } - metadata, _, entries, err := w.ReadAll() - if err != nil { - t.Errorf("#%d: err = %v, want nil", i, err) - continue - } - if !bytes.Equal(metadata, []byte("metadata")) { - t.Errorf("#%d: metadata = %s, want %s", i, metadata, "metadata") - } - for j, e := range entries { - if e.Index != uint64(j+i+1) { - t.Errorf("#%d: ents[%d].Index = %+v, want %+v", i, j, e.Index, j+i+1) - } - } - w.Close() - } -} - -func TestOpenAtUncommittedIndex(t *testing.T) { - p := t.TempDir() - - w, err := Create(zaptest.NewLogger(t), p, nil) - if err != nil { - t.Fatal(err) - } - if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil { - t.Fatal(err) - } - if err = w.Save(raftpb.HardState{}, []raftpb.Entry{{Index: 0}}); err != nil { - t.Fatal(err) - } - w.Close() - - w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - // commit up to index 0, try to read index 1 - if _, _, _, err = w.ReadAll(); err != nil { - t.Errorf("err = %v, want nil", err) - } - w.Close() -} - -// TestOpenForRead tests that OpenForRead can load all files. -// The tests creates WAL directory, and cut out multiple WAL files. Then -// it releases the lock of part of data, and excepts that OpenForRead -// can read out all files even if some are locked for write. 
-func TestOpenForRead(t *testing.T) { - p := t.TempDir() - // create WAL - w, err := Create(zaptest.NewLogger(t), p, nil) - if err != nil { - t.Fatal(err) - } - defer w.Close() - // make 10 separate files - for i := 0; i < 10; i++ { - es := []raftpb.Entry{{Index: uint64(i)}} - if err = w.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - if err = w.cut(); err != nil { - t.Fatal(err) - } - } - // release the lock to 5 - unlockIndex := uint64(5) - w.ReleaseLockTo(unlockIndex) - - // All are available for read - w2, err := OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - defer w2.Close() - _, _, ents, err := w2.ReadAll() - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - if g := ents[len(ents)-1].Index; g != 9 { - t.Errorf("last index read = %d, want %d", g, 9) - } -} - -func TestOpenWithMaxIndex(t *testing.T) { - p := t.TempDir() - // create WAL - w1, err := Create(zaptest.NewLogger(t), p, nil) - if err != nil { - t.Fatal(err) - } - defer func() { - if w1 != nil { - w1.Close() - } - }() - - es := []raftpb.Entry{{Index: uint64(math.MaxInt64)}} - if err = w1.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - w1.Close() - w1 = nil - - w2, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - defer w2.Close() - - _, _, _, err = w2.ReadAll() - if err != ErrSliceOutOfRange { - t.Fatalf("err = %v, want ErrSliceOutOfRange", err) - } -} - -func TestSaveEmpty(t *testing.T) { - var buf bytes.Buffer - var est raftpb.HardState - w := WAL{ - encoder: newEncoder(&buf, 0, 0), - } - if err := w.saveState(&est); err != nil { - t.Errorf("err = %v, want nil", err) - } - if len(buf.Bytes()) != 0 { - t.Errorf("buf.Bytes = %d, want 0", len(buf.Bytes())) - } -} - -func TestReleaseLockTo(t *testing.T) { - p := t.TempDir() - // create WAL - w, err := Create(zaptest.NewLogger(t), p, nil) - defer func() { - if err = w.Close(); err != nil { - t.Fatal(err) - } - }() - if err 
!= nil { - t.Fatal(err) - } - - // release nothing if no files - err = w.ReleaseLockTo(10) - if err != nil { - t.Errorf("err = %v, want nil", err) - } - - // make 10 separate files - for i := 0; i < 10; i++ { - es := []raftpb.Entry{{Index: uint64(i)}} - if err = w.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - if err = w.cut(); err != nil { - t.Fatal(err) - } - } - // release the lock to 5 - unlockIndex := uint64(5) - w.ReleaseLockTo(unlockIndex) - - // expected remaining are 4,5,6,7,8,9,10 - if len(w.locks) != 7 { - t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 7) - } - for i, l := range w.locks { - var lockIndex uint64 - _, lockIndex, err = parseWALName(filepath.Base(l.Name())) - if err != nil { - t.Fatal(err) - } - - if lockIndex != uint64(i+4) { - t.Errorf("#%d: lockindex = %d, want %d", i, lockIndex, uint64(i+4)) - } - } - - // release the lock to 15 - unlockIndex = uint64(15) - w.ReleaseLockTo(unlockIndex) - - // expected remaining is 10 - if len(w.locks) != 1 { - t.Errorf("len(w.locks) = %d, want %d", len(w.locks), 1) - } - _, lockIndex, err := parseWALName(filepath.Base(w.locks[0].Name())) - if err != nil { - t.Fatal(err) - } - - if lockIndex != uint64(10) { - t.Errorf("lockindex = %d, want %d", lockIndex, 10) - } -} - -// TestTailWriteNoSlackSpace ensures that tail writes append if there's no preallocated space. 
-func TestTailWriteNoSlackSpace(t *testing.T) { - p := t.TempDir() - - // create initial WAL - w, err := Create(zaptest.NewLogger(t), p, []byte("metadata")) - if err != nil { - t.Fatal(err) - } - // write some entries - for i := 1; i <= 5; i++ { - es := []raftpb.Entry{{Index: uint64(i), Term: 1, Data: []byte{byte(i)}}} - if err = w.Save(raftpb.HardState{Term: 1}, es); err != nil { - t.Fatal(err) - } - } - // get rid of slack space by truncating file - off, serr := w.tail().Seek(0, io.SeekCurrent) - if serr != nil { - t.Fatal(serr) - } - if terr := w.tail().Truncate(off); terr != nil { - t.Fatal(terr) - } - w.Close() - - // open, write more - w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - _, _, ents, rerr := w.ReadAll() - if rerr != nil { - t.Fatal(rerr) - } - if len(ents) != 5 { - t.Fatalf("got entries %+v, expected 5 entries", ents) - } - // write more entries - for i := 6; i <= 10; i++ { - es := []raftpb.Entry{{Index: uint64(i), Term: 1, Data: []byte{byte(i)}}} - if err = w.Save(raftpb.HardState{Term: 1}, es); err != nil { - t.Fatal(err) - } - } - w.Close() - - // confirm all writes - w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - _, _, ents, rerr = w.ReadAll() - if rerr != nil { - t.Fatal(rerr) - } - if len(ents) != 10 { - t.Fatalf("got entries %+v, expected 10 entries", ents) - } - w.Close() -} - -// TestRestartCreateWal ensures that an interrupted WAL initialization is clobbered on restart -func TestRestartCreateWal(t *testing.T) { - p := t.TempDir() - var err error - - // make temporary directory so it looks like initialization is interrupted - tmpdir := filepath.Clean(p) + ".tmp" - if err = os.Mkdir(tmpdir, fileutil.PrivateDirMode); err != nil { - t.Fatal(err) - } - if _, err = os.OpenFile(filepath.Join(tmpdir, "test"), os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode); err != nil { - t.Fatal(err) - } - - w, werr := Create(zaptest.NewLogger(t), p, 
[]byte("abc")) - if werr != nil { - t.Fatal(werr) - } - w.Close() - if Exist(tmpdir) { - t.Fatalf("got %q exists, expected it to not exist", tmpdir) - } - - if w, err = OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{}); err != nil { - t.Fatal(err) - } - defer w.Close() - - if meta, _, _, rerr := w.ReadAll(); rerr != nil || string(meta) != "abc" { - t.Fatalf("got error %v and meta %q, expected nil and %q", rerr, meta, "abc") - } -} - -// TestOpenOnTornWrite ensures that entries past the torn write are truncated. -func TestOpenOnTornWrite(t *testing.T) { - maxEntries := 40 - clobberIdx := 20 - overwriteEntries := 5 - - p := t.TempDir() - w, err := Create(zaptest.NewLogger(t), p, nil) - defer func() { - if err = w.Close(); err != nil && err != os.ErrInvalid { - t.Fatal(err) - } - }() - if err != nil { - t.Fatal(err) - } - - // get offset of end of each saved entry - offsets := make([]int64, maxEntries) - for i := range offsets { - es := []raftpb.Entry{{Index: uint64(i)}} - if err = w.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - if offsets[i], err = w.tail().Seek(0, io.SeekCurrent); err != nil { - t.Fatal(err) - } - } - - fn := filepath.Join(p, filepath.Base(w.tail().Name())) - w.Close() - - // clobber some entry with 0's to simulate a torn write - f, ferr := os.OpenFile(fn, os.O_WRONLY, fileutil.PrivateFileMode) - if ferr != nil { - t.Fatal(ferr) - } - defer f.Close() - _, err = f.Seek(offsets[clobberIdx], io.SeekStart) - if err != nil { - t.Fatal(err) - } - zeros := make([]byte, offsets[clobberIdx+1]-offsets[clobberIdx]) - _, err = f.Write(zeros) - if err != nil { - t.Fatal(err) - } - f.Close() - - w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - // seek up to clobbered entry - _, _, _, err = w.ReadAll() - if err != nil { - t.Fatal(err) - } - - // write a few entries past the clobbered entry - for i := 0; i < overwriteEntries; i++ { - // Index is different from old, truncated entries - es := 
[]raftpb.Entry{{Index: uint64(i + clobberIdx), Data: []byte("new")}} - if err = w.Save(raftpb.HardState{}, es); err != nil { - t.Fatal(err) - } - } - w.Close() - - // read back the entries, confirm number of entries matches expectation - w, err = OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{}) - if err != nil { - t.Fatal(err) - } - - _, _, ents, rerr := w.ReadAll() - if rerr != nil { - // CRC error? the old entries were likely never truncated away - t.Fatal(rerr) - } - wEntries := (clobberIdx - 1) + overwriteEntries - if len(ents) != wEntries { - t.Fatalf("expected len(ents) = %d, got %d", wEntries, len(ents)) - } -} - -func TestRenameFail(t *testing.T) { - p := t.TempDir() - - oldSegmentSizeBytes := SegmentSizeBytes - defer func() { - SegmentSizeBytes = oldSegmentSizeBytes - }() - SegmentSizeBytes = math.MaxInt64 - - tp := t.TempDir() - os.RemoveAll(tp) - - w := &WAL{ - lg: zaptest.NewLogger(t), - dir: p, - } - w2, werr := w.renameWAL(tp) - if w2 != nil || werr == nil { // os.Rename should fail from 'no such file or directory' - t.Fatalf("expected error, got %v", werr) - } -} - -// TestReadAllFail ensure ReadAll error if used without opening the WAL -func TestReadAllFail(t *testing.T) { - dir := t.TempDir() - - // create initial WAL - f, err := Create(zaptest.NewLogger(t), dir, []byte("metadata")) - if err != nil { - t.Fatal(err) - } - f.Close() - // try to read without opening the WAL - _, _, _, err = f.ReadAll() - if err == nil || err != ErrDecoderNotFound { - t.Fatalf("err = %v, want ErrDecoderNotFound", err) - } -} - -// TestValidSnapshotEntries ensures ValidSnapshotEntries returns all valid wal snapshot entries, accounting -// for hardstate -func TestValidSnapshotEntries(t *testing.T) { - p := t.TempDir() - snap0 := walpb.Snapshot{} - snap1 := walpb.Snapshot{Index: 1, Term: 1, ConfState: &confState} - state1 := raftpb.HardState{Commit: 1, Term: 1} - snap2 := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState} - snap3 := walpb.Snapshot{Index: 3, 
Term: 2, ConfState: &confState} - state2 := raftpb.HardState{Commit: 3, Term: 2} - snap4 := walpb.Snapshot{Index: 4, Term: 2, ConfState: &confState} // will be orphaned since the last committed entry will be snap3 - func() { - w, err := Create(zaptest.NewLogger(t), p, nil) - if err != nil { - t.Fatal(err) - } - defer w.Close() - - // snap0 is implicitly created at index 0, term 0 - if err = w.SaveSnapshot(snap1); err != nil { - t.Fatal(err) - } - if err = w.Save(state1, nil); err != nil { - t.Fatal(err) - } - if err = w.SaveSnapshot(snap2); err != nil { - t.Fatal(err) - } - if err = w.SaveSnapshot(snap3); err != nil { - t.Fatal(err) - } - if err = w.Save(state2, nil); err != nil { - t.Fatal(err) - } - if err = w.SaveSnapshot(snap4); err != nil { - t.Fatal(err) - } - }() - walSnaps, err := ValidSnapshotEntries(zaptest.NewLogger(t), p) - if err != nil { - t.Fatal(err) - } - expected := []walpb.Snapshot{snap0, snap1, snap2, snap3} - if !reflect.DeepEqual(walSnaps, expected) { - t.Errorf("expected walSnaps %+v, got %+v", expected, walSnaps) - } -} - -// TestValidSnapshotEntriesAfterPurgeWal ensure that there are many wal files, and after cleaning the first wal file, -// it can work well. 
-func TestValidSnapshotEntriesAfterPurgeWal(t *testing.T) { - oldSegmentSizeBytes := SegmentSizeBytes - SegmentSizeBytes = 64 - defer func() { - SegmentSizeBytes = oldSegmentSizeBytes - }() - p := t.TempDir() - snap0 := walpb.Snapshot{} - snap1 := walpb.Snapshot{Index: 1, Term: 1, ConfState: &confState} - state1 := raftpb.HardState{Commit: 1, Term: 1} - snap2 := walpb.Snapshot{Index: 2, Term: 1, ConfState: &confState} - snap3 := walpb.Snapshot{Index: 3, Term: 2, ConfState: &confState} - state2 := raftpb.HardState{Commit: 3, Term: 2} - func() { - w, err := Create(zaptest.NewLogger(t), p, nil) - if err != nil { - t.Fatal(err) - } - defer w.Close() - - // snap0 is implicitly created at index 0, term 0 - if err = w.SaveSnapshot(snap1); err != nil { - t.Fatal(err) - } - if err = w.Save(state1, nil); err != nil { - t.Fatal(err) - } - if err = w.SaveSnapshot(snap2); err != nil { - t.Fatal(err) - } - if err = w.SaveSnapshot(snap3); err != nil { - t.Fatal(err) - } - for i := 0; i < 128; i++ { - if err = w.Save(state2, nil); err != nil { - t.Fatal(err) - } - } - - }() - files, _, err := selectWALFiles(nil, p, snap0) - if err != nil { - t.Fatal(err) - } - os.Remove(p + "/" + files[0]) - _, err = ValidSnapshotEntries(zaptest.NewLogger(t), p) - if err != nil { - t.Fatal(err) - } -} - -func TestLastRecordLengthExceedFileEnd(t *testing.T) { - /* The data below was generated by code something like below. The length - * of the last record was intentionally changed to 1000 in order to make - * sure it exceeds the end of the file. - * - * for i := 0; i < 3; i++ { - * es := []raftpb.Entry{{Index: uint64(i + 1), Data: []byte(fmt.Sprintf("waldata%d", i+1))}} - * if err = w.Save(raftpb.HardState{}, es); err != nil { - * t.Fatal(err) - * } - * } - * ...... 
- * var sb strings.Builder - * for _, ch := range buf { - * sb.WriteString(fmt.Sprintf("\\x%02x", ch)) - * } - */ - // Generate WAL file - t.Log("Generate a WAL file with the last record's length modified.") - data := []byte("\x04\x00\x00\x00\x00\x00\x00\x84\x08\x04\x10\x00\x00" + - "\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x84\x08\x01\x10\x00\x00" + - "\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x82\x08\x05\x10\xa0\xb3" + - "\x9b\x8f\x08\x1a\x04\x08\x00\x10\x00\x00\x00\x1a\x00\x00\x00\x00" + - "\x00\x00\x86\x08\x02\x10\xba\x8b\xdc\x85\x0f\x1a\x10\x08\x00\x10" + - "\x00\x18\x01\x22\x08\x77\x61\x6c\x64\x61\x74\x61\x31\x00\x00\x00" + - "\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x00\x86\x08\x02\x10\xa1\xe8" + - "\xff\x9c\x02\x1a\x10\x08\x00\x10\x00\x18\x02\x22\x08\x77\x61\x6c" + - "\x64\x61\x74\x61\x32\x00\x00\x00\x00\x00\x00\xe8\x03\x00\x00\x00" + - "\x00\x00\x86\x08\x02\x10\xa1\x9c\xa1\xaa\x04\x1a\x10\x08\x00\x10" + - "\x00\x18\x03\x22\x08\x77\x61\x6c\x64\x61\x74\x61\x33\x00\x00\x00" + - "\x00\x00\x00") - - buf := bytes.NewBuffer(data) - f, err := createFileWithData(t, buf) - fileName := f.Name() - require.NoError(t, err) - t.Logf("fileName: %v", fileName) - - // Verify low-level decoder directly - t.Log("Verify all records can be parsed correctly.") - rec := &walpb.Record{} - decoder := NewDecoder(fileutil.NewFileReader(f)) - for { - if err = decoder.Decode(rec); err != nil { - require.ErrorIs(t, err, io.ErrUnexpectedEOF) - break - } - if rec.Type == EntryType { - e := MustUnmarshalEntry(rec.Data) - t.Logf("Validating normal entry: %v", e) - recData := fmt.Sprintf("waldata%d", e.Index) - require.Equal(t, raftpb.EntryNormal, e.Type) - require.Equal(t, recData, string(e.Data)) - } - rec = &walpb.Record{} - } - require.NoError(t, f.Close()) - - // Verify w.ReadAll() returns io.ErrUnexpectedEOF in the error chain. 
- t.Log("Verify the w.ReadAll returns io.ErrUnexpectedEOF in the error chain") - newFileName := filepath.Join(filepath.Dir(fileName), "0000000000000000-0000000000000000.wal") - require.NoError(t, os.Rename(fileName, newFileName)) - - w, err := Open(zaptest.NewLogger(t), filepath.Dir(fileName), walpb.Snapshot{ - Index: 0, - Term: 0, - }) - require.NoError(t, err) - defer w.Close() - - _, _, _, err = w.ReadAll() - // Note: The wal file will be repaired automatically in production - // environment, but only once. - require.ErrorIs(t, err, io.ErrUnexpectedEOF) -} diff --git a/server/storage/wal/walpb/record.go b/server/storage/wal/walpb/record.go deleted file mode 100644 index 693deab113d..00000000000 --- a/server/storage/wal/walpb/record.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package walpb - -import ( - "errors" - "fmt" -) - -var ( - ErrCRCMismatch = errors.New("walpb: crc mismatch") -) - -func (rec *Record) Validate(crc uint32) error { - if rec.Crc == crc { - return nil - } - return fmt.Errorf("%w: expected: %x computed: %x", ErrCRCMismatch, rec.Crc, crc) -} - -// ValidateSnapshotForWrite ensures the Snapshot the newly written snapshot is valid. -// -// There might exist log-entries written by old etcd versions that does not conform -// to the requirements. 
-func ValidateSnapshotForWrite(e *Snapshot) error { - // Since etcd>=3.5.0 - if e.ConfState == nil && e.Index > 0 { - return errors.New("Saved (not-initial) snapshot is missing ConfState: " + e.String()) - } - return nil -} diff --git a/server/storage/wal/walpb/record.pb.go b/server/storage/wal/walpb/record.pb.go deleted file mode 100644 index d0eba734d42..00000000000 --- a/server/storage/wal/walpb/record.pb.go +++ /dev/null @@ -1,609 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: record.proto - -package walpb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - raftpb "go.etcd.io/raft/v3/raftpb" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Record struct { - Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` - Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Record) Reset() { *m = Record{} } -func (m *Record) String() string { return proto.CompactTextString(m) } -func (*Record) ProtoMessage() {} -func (*Record) Descriptor() ([]byte, []int) { - return fileDescriptor_bf94fd919e302a1d, []int{0} -} -func (m *Record) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Record.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Record) XXX_Merge(src proto.Message) { - xxx_messageInfo_Record.Merge(m, src) -} -func (m *Record) XXX_Size() int { - return m.Size() -} -func (m *Record) XXX_DiscardUnknown() { - xxx_messageInfo_Record.DiscardUnknown(m) -} - -var xxx_messageInfo_Record proto.InternalMessageInfo - -// Keep in sync with raftpb.SnapshotMetadata. -type Snapshot struct { - Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` - // Field populated since >=etcd-3.5.0. 
- ConfState *raftpb.ConfState `protobuf:"bytes,3,opt,name=conf_state,json=confState" json:"conf_state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_bf94fd919e302a1d, []int{1} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Record)(nil), "walpb.Record") - proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") -} - -func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) } - -var fileDescriptor_bf94fd919e302a1d = []byte{ - // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8e, 0x41, 0x4e, 0xc3, 0x30, - 0x10, 0x45, 0x63, 0xe2, 0x22, 0x18, 0xca, 0xa2, 0x56, 0x85, 0xa2, 0x2c, 0x4c, 0xd4, 0x55, 0x56, - 0x29, 0xe2, 0x08, 0x65, 0xcf, 0x22, 0x3d, 0x00, 0x72, 0x1d, 0xa7, 0x20, 0xd1, 0x8c, 0x35, 0xb5, - 0x04, 0xdc, 0x84, 0x23, 0x65, 0xc9, 0x09, 0x10, 0x84, 0x8b, 0xa0, 0x8c, 0x03, 0xab, 0xf9, 0x7a, - 0x5f, 0xff, 0xff, 0x81, 0x39, 0x39, 0x8b, 0xd4, 0x54, 0x9e, 0x30, 0xa0, 0x9a, 0xbd, 0x98, 0x67, - 0xbf, 0xcb, 0x97, 
0x7b, 0xdc, 0x23, 0x93, 0xf5, 0xa8, 0xa2, 0x99, 0x2f, 0xc8, 0xb4, 0xc1, 0xef, - 0xd6, 0xe3, 0x89, 0x68, 0x75, 0x0f, 0xa7, 0x35, 0xe7, 0x55, 0x06, 0x32, 0xbc, 0x79, 0x97, 0x89, - 0x42, 0x94, 0xe9, 0x46, 0xf6, 0x9f, 0xd7, 0x49, 0xcd, 0x44, 0x5d, 0x41, 0x6a, 0xc9, 0x66, 0x27, - 0x85, 0x28, 0x2f, 0x27, 0x63, 0x04, 0x4a, 0x81, 0x6c, 0x4c, 0x30, 0x59, 0x5a, 0x88, 0x72, 0x5e, - 0xb3, 0x5e, 0x11, 0x9c, 0x6d, 0x3b, 0xe3, 0x8f, 0x8f, 0x18, 0x54, 0x0e, 0xb3, 0xa7, 0xae, 0x71, - 0xaf, 0x5c, 0x29, 0xa7, 0x64, 0x44, 0xbc, 0xe6, 0xe8, 0xc0, 0xa5, 0xf2, 0x7f, 0xcd, 0xd1, 0x41, - 0xdd, 0x00, 0x58, 0xec, 0xda, 0x87, 0x63, 0x30, 0xc1, 0x71, 0xf7, 0xc5, 0xed, 0xa2, 0x8a, 0x9f, - 0x57, 0x77, 0xd8, 0xb5, 0xdb, 0xd1, 0xa8, 0xcf, 0xed, 0x9f, 0xdc, 0x2c, 0xfb, 0x6f, 0x9d, 0xf4, - 0x83, 0x16, 0x1f, 0x83, 0x16, 0x5f, 0x83, 0x16, 0xef, 0x3f, 0x3a, 0xf9, 0x0d, 0x00, 0x00, 0xff, - 0xff, 0x60, 0x0f, 0x3c, 0x36, 0x18, 0x01, 0x00, 0x00, -} - -func (m *Record) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Record) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Record) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintRecord(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x1a - } - i = encodeVarintRecord(dAtA, i, uint64(m.Crc)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRecord(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *Snapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } 
- return dAtA[:n], nil -} - -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ConfState != nil { - { - size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRecord(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i = encodeVarintRecord(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRecord(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func encodeVarintRecord(dAtA []byte, offset int, v uint64) int { - offset -= sovRecord(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Record) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovRecord(uint64(m.Type)) - n += 1 + sovRecord(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRecord(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovRecord(uint64(m.Index)) - n += 1 + sovRecord(uint64(m.Term)) - if m.ConfState != nil { - l = m.ConfState.Size() - n += 1 + l + sovRecord(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRecord(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRecord(x uint64) (n int) { - return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Record) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Record: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Crc |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRecord - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRecord - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRecord(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Snapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Term |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRecord - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRecord - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfState == nil { - m.ConfState = &raftpb.ConfState{} - } - if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRecord(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRecord(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - 
return 0, ErrInvalidLengthRecord - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupRecord - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthRecord - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupRecord = fmt.Errorf("proto: unexpected end of group") -) diff --git a/server/storage/wal/walpb/record_test.go b/server/storage/wal/walpb/record_test.go deleted file mode 100644 index cdacb3d03f2..00000000000 --- a/server/storage/wal/walpb/record_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package walpb - -import ( - "testing" - - "github.com/golang/protobuf/descriptor" - - "go.etcd.io/raft/v3/raftpb" -) - -func TestSnapshotMetadataCompatibility(t *testing.T) { - _, snapshotMetadataMd := descriptor.ForMessage(&raftpb.SnapshotMetadata{}) - _, snapshotMd := descriptor.ForMessage(&Snapshot{}) - if len(snapshotMetadataMd.GetField()) != len(snapshotMd.GetField()) { - t.Errorf("Different number of fields in raftpb.SnapshotMetadata vs. walpb.Snapshot. 
" + - "They are supposed to be in sync.") - } -} - -func TestValidateSnapshot(t *testing.T) { - tests := []struct { - name string - snap *Snapshot - wantErr bool - }{ - {name: "empty", snap: &Snapshot{}, wantErr: false}, - {name: "invalid", snap: &Snapshot{Index: 5, Term: 3}, wantErr: true}, - {name: "valid", snap: &Snapshot{Index: 5, Term: 3, ConfState: &raftpb.ConfState{Voters: []uint64{0x00cad1}}}, wantErr: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := ValidateSnapshotForWrite(tt.snap); (err != nil) != tt.wantErr { - t.Errorf("ValidateSnapshotForWrite() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/server/verify/verify.go b/server/verify/verify.go deleted file mode 100644 index 9783be10927..00000000000 --- a/server/verify/verify.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package verify - -import ( - "fmt" - - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/verify" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/datadir" - "go.etcd.io/etcd/server/v3/storage/schema" - wal2 "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" -) - -const ENV_VERIFY_VALUE_STORAGE_WAL verify.VerificationType = "storage_wal" - -type Config struct { - // DataDir is a root directory where the data being verified are stored. 
- DataDir string - - // ExactIndex requires consistent_index in backend exactly match the last committed WAL entry. - // Usually backend's consistent_index needs to be <= WAL.commit, but for backups the match - // is expected to be exact. - ExactIndex bool - - Logger *zap.Logger -} - -// Verify performs consistency checks of given etcd data-directory. -// The errors are reported as the returned error, but for some situations -// the function can also panic. -// The function is expected to work on not-in-use data model, i.e. -// no file-locks should be taken. Verify does not modified the data. -func Verify(cfg Config) error { - lg := cfg.Logger - if lg == nil { - lg = zap.NewNop() - } - - var err error - lg.Info("verification of persisted state", zap.String("data-dir", cfg.DataDir)) - defer func() { - if err != nil { - lg.Error("verification of persisted state failed", - zap.String("data-dir", cfg.DataDir), - zap.Error(err)) - } else if r := recover(); r != nil { - lg.Error("verification of persisted state failed", - zap.String("data-dir", cfg.DataDir)) - panic(r) - } else { - lg.Info("verification of persisted state successful", zap.String("data-dir", cfg.DataDir)) - } - }() - - be := backend.NewDefaultBackend(lg, datadir.ToBackendFileName(cfg.DataDir)) - defer be.Close() - - snapshot, hardstate, err := validateWal(cfg) - if err != nil { - return err - } - - // TODO: Perform validation of consistency of membership between - // backend/members & WAL confstate (and maybe storev2 if still exists). - - return validateConsistentIndex(cfg, hardstate, snapshot, be) -} - -// VerifyIfEnabled performs verification according to ETCD_VERIFY env settings. -// See Verify for more information. -func VerifyIfEnabled(cfg Config) error { - if verify.IsVerificationEnabled(ENV_VERIFY_VALUE_STORAGE_WAL) { - return Verify(cfg) - } - return nil -} - -// MustVerifyIfEnabled performs verification according to ETCD_VERIFY env settings -// and exits in case of found problems. 
-// See Verify for more information. -func MustVerifyIfEnabled(cfg Config) { - if err := VerifyIfEnabled(cfg); err != nil { - cfg.Logger.Fatal("Verification failed", - zap.String("data-dir", cfg.DataDir), - zap.Error(err)) - } -} - -func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error { - index, term := schema.ReadConsistentIndex(be.ReadTx()) - if cfg.ExactIndex && index != hardstate.Commit { - return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit) - } - if cfg.ExactIndex && term != hardstate.Term { - return fmt.Errorf("backend.Term (%v) expected == WAL.HardState.term, (%v)", term, hardstate.Term) - } - if index > hardstate.Commit { - return fmt.Errorf("backend.ConsistentIndex (%v) must be <= WAL.HardState.commit (%v)", index, hardstate.Commit) - } - if term > hardstate.Term { - return fmt.Errorf("backend.Term (%v) must be <= WAL.HardState.term, (%v)", term, hardstate.Term) - } - - if index < snapshot.Index { - return fmt.Errorf("backend.ConsistentIndex (%v) must be >= last snapshot index (%v)", index, snapshot.Index) - } - - cfg.Logger.Info("verification: consistentIndex OK", zap.Uint64("backend-consistent-index", index), zap.Uint64("hardstate-commit", hardstate.Commit)) - return nil -} - -func validateWal(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) { - walDir := datadir.ToWalDir(cfg.DataDir) - - walSnaps, err := wal2.ValidSnapshotEntries(cfg.Logger, walDir) - if err != nil { - return nil, nil, err - } - - snapshot := walSnaps[len(walSnaps)-1] - hardstate, err := wal2.Verify(cfg.Logger, walDir, snapshot) - if err != nil { - return nil, nil, err - } - return &snapshot, hardstate, nil -} diff --git a/tests/Dockerfile b/tests/Dockerfile deleted file mode 100644 index 091398fe356..00000000000 --- a/tests/Dockerfile +++ /dev/null @@ -1,50 +0,0 @@ -FROM ubuntu:21.10 - -RUN rm /bin/sh && ln -s /bin/bash /bin/sh -RUN echo 'debconf 
debconf/frontend select Noninteractive' | debconf-set-selections - -RUN apt-get -y update \ - && apt-get -y install \ - build-essential \ - gcc \ - apt-utils \ - pkg-config \ - software-properties-common \ - apt-transport-https \ - libssl-dev \ - sudo \ - bash \ - curl \ - wget \ - tar \ - git \ - netcat \ - libaspell-dev \ - libhunspell-dev \ - hunspell-en-us \ - aspell-en \ - shellcheck \ - && apt-get -y update \ - && apt-get -y upgrade \ - && apt-get -y autoremove \ - && apt-get -y autoclean - -ENV GOROOT /usr/local/go -ENV GOPATH /go -ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} -ENV GO_VERSION REPLACE_ME_GO_VERSION -ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang -RUN rm -rf ${GOROOT} \ - && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ - && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ - && go version - -RUN mkdir -p ${GOPATH}/src/go.etcd.io/etcd -WORKDIR ${GOPATH}/src/go.etcd.io/etcd - -ADD ./scripts/install-marker.sh /tmp/install-marker.sh - -RUN /tmp/install-marker.sh amd64 \ - && rm -f /tmp/install-marker.sh \ - && curl -s https://codecov.io/bash >/codecov \ - && chmod 700 /codecov diff --git a/tests/LICENSE b/tests/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/tests/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/tests/common/alarm_test.go b/tests/common/alarm_test.go deleted file mode 100644 index 8afaea2bd76..00000000000 --- a/tests/common/alarm_test.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "os" - "strings" - "testing" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestAlarm(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, - config.WithClusterSize(1), - config.WithQuotaBackendBytes(int64(13*os.Getpagesize())), - ) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - // test small put still works - smallbuf := strings.Repeat("a", 64) - if err := cc.Put(ctx, "1st_test", smallbuf, config.PutOptions{}); err != nil { - t.Fatalf("alarmTest: put kv error (%v)", err) - } - - // write some chunks to fill up the database - buf := strings.Repeat("b", os.Getpagesize()) - for { - if err := cc.Put(ctx, "2nd_test", buf, config.PutOptions{}); err != nil { - if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") { - t.Fatal(err) - } - break - } - } - - // quota alarm should now be on - alarmResp, err := cc.AlarmList(ctx) - if err != nil { - t.Fatalf("alarmTest: Alarm error (%v)", err) - } - - // check that Put is rejected when alarm is on - if err := cc.Put(ctx, "3rd_test", smallbuf, config.PutOptions{}); err != nil { - if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") { - t.Fatal(err) - } - } - - // get latest revision to compact - sresp, err := cc.Status(ctx) - if err != nil { - t.Fatalf("get endpoint status error: %v", err) - } - var rvs int64 - for _, resp := range sresp { - if resp != nil && resp.Header != nil { - rvs = resp.Header.Revision - break - } - } - - // make some space - _, err = cc.Compact(ctx, rvs, config.CompactOption{Physical: true, Timeout: 10 * time.Second}) - if err != nil { - t.Fatalf("alarmTest: Compact error (%v)", err) - } - - if err = cc.Defragment(ctx, 
config.DefragOption{Timeout: 10 * time.Second}); err != nil { - t.Fatalf("alarmTest: defrag error (%v)", err) - } - - // turn off alarm - for _, alarm := range alarmResp.Alarms { - alarmMember := &clientv3.AlarmMember{ - MemberID: alarm.MemberID, - Alarm: alarm.Alarm, - } - _, err = cc.AlarmDisarm(ctx, alarmMember) - if err != nil { - t.Fatalf("alarmTest: Alarm error (%v)", err) - } - } - - // put one more key below quota - if err := cc.Put(ctx, "4th_test", smallbuf, config.PutOptions{}); err != nil { - t.Fatal(err) - } - }) -} - -func TestAlarmlistOnMemberRestart(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, - config.WithClusterSize(1), - config.WithQuotaBackendBytes(int64(13*os.Getpagesize())), - config.WithSnapshotCount(5), - ) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - for i := 0; i < 6; i++ { - if _, err := cc.AlarmList(ctx); err != nil { - t.Fatalf("Unexpected error: %v", err) - } - } - - clus.Members()[0].Stop() - if err := clus.Members()[0].Start(ctx); err != nil { - t.Fatalf("failed to start etcdserver: %v", err) - } - }) -} diff --git a/tests/common/auth_test.go b/tests/common/auth_test.go deleted file mode 100644 index d1e748747a0..00000000000 --- a/tests/common/auth_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestAuthEnable(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") - }) -} - -func TestAuthDisable(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - require.NoError(t, cc.Put(ctx, "hoo", "a", config.PutOptions{})) - require.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth") - - rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword))) - testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword))) - - // test-user doesn't have the permission, it must fail - require.Error(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{})) - require.NoErrorf(t, rootAuthClient.AuthDisable(ctx), "failed to disable auth") - // now ErrAuthNotEnabled of Authenticate() is simply ignored - require.NoError(t, testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{})) - // now the key can be accessed - require.NoError(t, cc.Put(ctx, "hoo", "bar", 
config.PutOptions{})) - // confirm put succeeded - resp, err := cc.Get(ctx, "hoo", config.GetOptions{}) - require.NoError(t, err) - if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar" { - t.Fatalf("want key value pair 'hoo', 'bar' but got %+v", resp.Kvs) - } - }) -} - -func TestAuthGracefulDisable(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") - donec := make(chan struct{}) - rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword))) - - go func() { - defer close(donec) - // sleep a bit to let the watcher connects while auth is still enabled - time.Sleep(time.Second) - // now disable auth... 
- if err := rootAuthClient.AuthDisable(ctx); err != nil { - t.Errorf("failed to auth disable %v", err) - return - } - // ...and restart the node - clus.Members()[0].Stop() - if err := clus.Members()[0].Start(ctx); err != nil { - t.Errorf("failed to restart member %v", err) - return - } - // the watcher should still work after reconnecting - require.NoErrorf(t, rootAuthClient.Put(ctx, "key", "value", config.PutOptions{}), "failed to put key value") - }() - - wCtx, wCancel := context.WithCancel(ctx) - defer wCancel() - - watchCh := rootAuthClient.Watch(wCtx, "key", config.WatchOptions{Revision: 1}) - wantedLen := 1 - watchTimeout := 10 * time.Second - wanted := []testutils.KV{{Key: "key", Val: "value"}} - kvs, err := testutils.KeyValuesFromWatchChan(watchCh, wantedLen, watchTimeout) - require.NoErrorf(t, err, "failed to get key-values from watch channel %s", err) - require.Equal(t, wanted, kvs) - <-donec - }) -} - -func TestAuthStatus(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - resp, err := cc.AuthStatus(ctx) - require.NoError(t, err) - require.Falsef(t, resp.Enabled, "want auth not enabled but enabled") - - require.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") - rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword))) - resp, err = rootAuthClient.AuthStatus(ctx) - require.NoError(t, err) - require.Truef(t, resp.Enabled, "want enabled but got not enabled") - }) -} diff --git a/tests/common/auth_util.go b/tests/common/auth_util.go deleted file mode 100644 index c75f20cd312..00000000000 --- a/tests/common/auth_util.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// 
Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "context" - "fmt" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/interfaces" -) - -const ( - rootUserName = "root" - rootRoleName = "root" - rootPassword = "rootPassword" - testUserName = "test-user" - testRoleName = "test-role" - testPassword = "pass" -) - -var ( - rootUser = authUser{user: rootUserName, pass: rootPassword, role: rootRoleName} - testUser = authUser{user: testUserName, pass: testPassword, role: testRoleName} - - testRole = authRole{ - role: testRoleName, - permission: clientv3.PermissionType(clientv3.PermReadWrite), - key: "foo", - keyEnd: "", - } -) - -type authRole struct { - role string - permission clientv3.PermissionType - key string - keyEnd string -} - -type authUser struct { - user string - pass string - role string -} - -func createRoles(c interfaces.Client, roles []authRole) error { - for _, r := range roles { - // add role - if _, err := c.RoleAdd(context.TODO(), r.role); err != nil { - return fmt.Errorf("RoleAdd failed: %w", err) - } - - // grant permission to role - if _, err := c.RoleGrantPermission(context.TODO(), r.role, r.key, r.keyEnd, r.permission); err != nil { - return fmt.Errorf("RoleGrantPermission failed: %w", err) - } - } - - return nil -} - -func createUsers(c interfaces.Client, users []authUser) error { - for _, u := range users { - // add user - if _, err := 
c.UserAdd(context.TODO(), u.user, u.pass, config.UserAddOptions{}); err != nil { - return fmt.Errorf("UserAdd failed: %w", err) - } - - // grant role to user - if _, err := c.UserGrantRole(context.TODO(), u.user, u.role); err != nil { - return fmt.Errorf("UserGrantRole failed: %w", err) - } - } - - return nil -} - -func setupAuth(c interfaces.Client, roles []authRole, users []authUser) error { - // create roles - if err := createRoles(c, roles); err != nil { - return err - } - - if err := createUsers(c, users); err != nil { - return err - } - - // enable auth - if err := c.AuthEnable(context.TODO()); err != nil { - return err - } - - return nil -} diff --git a/tests/common/compact_test.go b/tests/common/compact_test.go deleted file mode 100644 index fe068a77a3a..00000000000 --- a/tests/common/compact_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestCompact(t *testing.T) { - - testRunner.BeforeTest(t) - tcs := []struct { - name string - options config.CompactOption - }{ - { - name: "NoPhysical", - options: config.CompactOption{Physical: false, Timeout: 10 * time.Second}, - }, - { - name: "Physical", - options: config.CompactOption{Physical: true, Timeout: 10 * time.Second}, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - var kvs = []testutils.KV{{Key: "key", Val: "val1"}, {Key: "key", Val: "val2"}, {Key: "key", Val: "val3"}} - for i := range kvs { - if err := cc.Put(ctx, kvs[i].Key, kvs[i].Val, config.PutOptions{}); err != nil { - t.Fatalf("compactTest #%d: put kv error (%v)", i, err) - } - } - get, err := cc.Get(ctx, "key", config.GetOptions{Revision: 3}) - if err != nil { - t.Fatalf("compactTest: Get kv by revision error (%v)", err) - } - - getkvs := testutils.KeyValuesFromGetResponse(get) - assert.Equal(t, kvs[1:2], getkvs) - - _, err = cc.Compact(ctx, 4, tc.options) - if err != nil { - t.Fatalf("compactTest: Compact error (%v)", err) - } - - get, err = cc.Get(ctx, "key", config.GetOptions{Revision: 3}) - if err != nil { - if !strings.Contains(err.Error(), "required revision has been compacted") { - t.Fatalf("compactTest: Get compact key error (%v)", err) - } - } else { - t.Fatalf("expected '...has been compacted' error, got ") - } - - _, err = cc.Compact(ctx, 2, tc.options) - if err != nil { - if !strings.Contains(err.Error(), "required revision has been compacted") { - t.Fatal(err) - } - } else { - t.Fatalf("expected 
'...has been compacted' error, got ") - } - }) - }) - } -} diff --git a/tests/common/defrag_test.go b/tests/common/defrag_test.go deleted file mode 100644 index 0197ced3756..00000000000 --- a/tests/common/defrag_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "context" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestDefragOnline(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - options := config.DefragOption{Timeout: 10 * time.Second} - clus := testRunner.NewCluster(ctx, t) - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - defer clus.Close() - var kvs = []testutils.KV{{Key: "key", Val: "val1"}, {Key: "key", Val: "val2"}, {Key: "key", Val: "val3"}} - for i := range kvs { - if err := cc.Put(ctx, kvs[i].Key, kvs[i].Val, config.PutOptions{}); err != nil { - t.Fatalf("compactTest #%d: put kv error (%v)", i, err) - } - } - _, err := cc.Compact(ctx, 4, config.CompactOption{Physical: true, Timeout: 10 * time.Second}) - if err != nil { - t.Fatalf("defrag_test: compact with revision error (%v)", err) - } - - if err = cc.Defragment(ctx, options); err != nil { - t.Fatalf("defrag_test: defrag error (%v)", err) - } - }) -} diff --git a/tests/common/e2e_test.go 
b/tests/common/e2e_test.go deleted file mode 100644 index fd82064aecc..00000000000 --- a/tests/common/e2e_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build e2e - -package common - -import ( - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/tests/v3/framework" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func init() { - testRunner = framework.E2eTestRunner - clusterTestCases = e2eClusterTestCases -} - -func e2eClusterTestCases() []testCase { - tcs := []testCase{ - { - name: "NoTLS", - config: config.ClusterConfig{ClusterSize: 1}, - }, - { - name: "PeerTLS", - config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.ManualTLS}, - }, - { - name: "PeerAutoTLS", - config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.AutoTLS}, - }, - { - name: "ClientTLS", - config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.ManualTLS}, - }, - { - name: "ClientAutoTLS", - config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.AutoTLS}, - }, - } - - if fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - tcs = append(tcs, testCase{ - name: "MinorityLastVersion", - config: config.ClusterConfig{ - ClusterSize: 3, - ClusterContext: &e2e.ClusterContext{ - Version: e2e.MinorityLastVersion, - }, - }, - }, testCase{ - name: "QuorumLastVersion", - config: config.ClusterConfig{ - ClusterSize: 3, - ClusterContext: 
&e2e.ClusterContext{ - Version: e2e.QuorumLastVersion, - }, - }, - }) - } - return tcs -} - -func WithAuth(userName, password string) config.ClientOption { - return e2e.WithAuth(userName, password) -} diff --git a/tests/common/endpoint_test.go b/tests/common/endpoint_test.go deleted file mode 100644 index ccfeef3d912..00000000000 --- a/tests/common/endpoint_test.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestEndpointStatus(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - _, err := cc.Status(ctx) - if err != nil { - t.Fatalf("get endpoint status error: %v", err) - } - }) -} - -func TestEndpointHashKV(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - t.Log("Add some entries") - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key-%d", i) - value := 
fmt.Sprintf("value-%d", i) - if err := cc.Put(ctx, key, value, config.PutOptions{}); err != nil { - t.Fatalf("count not put key %q, err: %s", key, err) - } - } - - t.Log("Check all members' Hash and HashRevision") - require.Eventually(t, func() bool { - resp, err := cc.HashKV(ctx, 0) - require.NoError(t, err, "failed to get endpoint hashkv: %v", err) - - require.Equal(t, 3, len(resp)) - if resp[0].HashRevision == resp[1].HashRevision && resp[0].HashRevision == resp[2].HashRevision { - require.Equal(t, resp[0].Hash, resp[1].Hash) - require.Equal(t, resp[0].Hash, resp[2].Hash) - return true - } - t.Logf("HashRevisions are not equal: [%d, %d, %d], retry...", resp[0].HashRevision, resp[1].HashRevision, resp[2].HashRevision) - return false - }, 5*time.Second, 200*time.Millisecond) -} - -func TestEndpointHealth(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - if err := cc.Health(ctx); err != nil { - t.Fatalf("get endpoint health error: %v", err) - } - }) -} diff --git a/tests/common/integration_test.go b/tests/common/integration_test.go deleted file mode 100644 index 9bd686b8943..00000000000 --- a/tests/common/integration_test.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build integration - -package common - -import ( - "go.etcd.io/etcd/tests/v3/framework" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func init() { - testRunner = framework.IntegrationTestRunner - clusterTestCases = integrationClusterTestCases -} - -func integrationClusterTestCases() []testCase { - return []testCase{ - { - name: "NoTLS", - config: config.ClusterConfig{ClusterSize: 1}, - }, - { - name: "PeerTLS", - config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.ManualTLS}, - }, - { - name: "PeerAutoTLS", - config: config.ClusterConfig{ClusterSize: 3, PeerTLS: config.AutoTLS}, - }, - { - name: "ClientTLS", - config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.ManualTLS}, - }, - { - name: "ClientAutoTLS", - config: config.ClusterConfig{ClusterSize: 1, ClientTLS: config.AutoTLS}, - }, - } -} - -func WithAuth(userName, password string) config.ClientOption { - return integration.WithAuth(userName, password) -} diff --git a/tests/common/kv_test.go b/tests/common/kv_test.go deleted file mode 100644 index e271110ff65..00000000000 --- a/tests/common/kv_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestKVPut(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - key, value := "foo", "bar" - - if err := cc.Put(ctx, key, value, config.PutOptions{}); err != nil { - t.Fatalf("count not put key %q, err: %s", key, err) - } - resp, err := cc.Get(ctx, key, config.GetOptions{}) - if err != nil { - t.Fatalf("count not get key %q, err: %s", key, err) - } - if len(resp.Kvs) != 1 { - t.Errorf("Unexpected lenth of response, got %d", len(resp.Kvs)) - } - if string(resp.Kvs[0].Key) != key { - t.Errorf("Unexpected key, want %q, got %q", key, resp.Kvs[0].Key) - } - if string(resp.Kvs[0].Value) != value { - t.Errorf("Unexpected value, want %q, got %q", value, resp.Kvs[0].Value) - } - }) - }) - } -} - -func TestKVGet(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - var ( - kvs = []string{"a", "b", "c", "c", "c", "foo", "foo/abc", "fop"} - wantKvs = []string{"a", "b", "c", "foo", "foo/abc", "fop"} - kvsByVersion = []string{"a", "b", "foo", "foo/abc", "fop", "c"} - reversedKvs = []string{"fop", "foo/abc", "foo", "c", "b", "a"} - ) - - for i := 
range kvs { - if err := cc.Put(ctx, kvs[i], "bar", config.PutOptions{}); err != nil { - t.Fatalf("count not put key %q, err: %s", kvs[i], err) - } - } - tests := []struct { - begin string - end string - options config.GetOptions - - wkv []string - }{ - {begin: "a", wkv: wantKvs[:1]}, - {begin: "a", options: config.GetOptions{Serializable: true}, wkv: wantKvs[:1]}, - {begin: "a", options: config.GetOptions{End: "c"}, wkv: wantKvs[:2]}, - {begin: "", options: config.GetOptions{Prefix: true}, wkv: wantKvs}, - {begin: "", options: config.GetOptions{FromKey: true}, wkv: wantKvs}, - {begin: "a", options: config.GetOptions{End: "x"}, wkv: wantKvs}, - {begin: "", options: config.GetOptions{Prefix: true, Revision: 4}, wkv: kvs[:3]}, - {begin: "a", options: config.GetOptions{CountOnly: true}, wkv: nil}, - {begin: "foo", options: config.GetOptions{Prefix: true}, wkv: []string{"foo", "foo/abc"}}, - {begin: "foo", options: config.GetOptions{FromKey: true}, wkv: []string{"foo", "foo/abc", "fop"}}, - {begin: "", options: config.GetOptions{Prefix: true, Limit: 2}, wkv: wantKvs[:2]}, - {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortAscend, SortBy: clientv3.SortByModRevision}, wkv: wantKvs}, - {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortAscend, SortBy: clientv3.SortByVersion}, wkv: kvsByVersion}, - {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortNone, SortBy: clientv3.SortByCreateRevision}, wkv: wantKvs}, - {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortDescend, SortBy: clientv3.SortByCreateRevision}, wkv: reversedKvs}, - {begin: "", options: config.GetOptions{Prefix: true, Order: clientv3.SortDescend, SortBy: clientv3.SortByKey}, wkv: reversedKvs}, - } - for _, tt := range tests { - resp, err := cc.Get(ctx, tt.begin, tt.options) - if err != nil { - t.Fatalf("count not get key %q, err: %s", tt.begin, err) - } - kvs := testutils.KeysFromGetResponse(resp) - assert.Equal(t, tt.wkv, 
kvs) - } - }) - }) - } -} - -func TestKVDelete(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - kvs := []string{"a", "b", "c", "c/abc", "d"} - tests := []struct { - deleteKey string - options config.DeleteOptions - - wantDeleted int - wantKeys []string - }{ - { // delete all keys - deleteKey: "", - options: config.DeleteOptions{Prefix: true}, - wantDeleted: 5, - }, - { // delete all keys - deleteKey: "", - options: config.DeleteOptions{FromKey: true}, - wantDeleted: 5, - }, - { - deleteKey: "a", - options: config.DeleteOptions{End: "c"}, - wantDeleted: 2, - wantKeys: []string{"c", "c/abc", "d"}, - }, - { - deleteKey: "c", - wantDeleted: 1, - wantKeys: []string{"a", "b", "c/abc", "d"}, - }, - { - deleteKey: "c", - options: config.DeleteOptions{Prefix: true}, - wantDeleted: 2, - wantKeys: []string{"a", "b", "d"}, - }, - { - deleteKey: "c", - options: config.DeleteOptions{FromKey: true}, - wantDeleted: 3, - wantKeys: []string{"a", "b"}, - }, - { - deleteKey: "e", - wantDeleted: 0, - wantKeys: kvs, - }, - } - for _, tt := range tests { - for i := range kvs { - if err := cc.Put(ctx, kvs[i], "bar", config.PutOptions{}); err != nil { - t.Fatalf("count not put key %q, err: %s", kvs[i], err) - } - } - del, err := cc.Delete(ctx, tt.deleteKey, tt.options) - if err != nil { - t.Fatalf("count not get key %q, err: %s", tt.deleteKey, err) - } - assert.Equal(t, tt.wantDeleted, int(del.Deleted)) - get, err := cc.Get(ctx, "", config.GetOptions{Prefix: true}) - if err != nil { - t.Fatalf("count not get key, err: %s", err) - } - kvs := testutils.KeysFromGetResponse(get) - assert.Equal(t, tt.wantKeys, kvs) - } - }) - }) - } -} - -func 
TestKVGetNoQuorum(t *testing.T) { - testRunner.BeforeTest(t) - tcs := []struct { - name string - options config.GetOptions - - wantError bool - }{ - { - name: "Serializable", - options: config.GetOptions{Serializable: true}, - }, - { - name: "Linearizable", - options: config.GetOptions{Serializable: false, Timeout: time.Second}, - wantError: true, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t) - defer clus.Close() - - clus.Members()[0].Stop() - clus.Members()[1].Stop() - - cc := clus.Members()[2].Client() - testutils.ExecuteUntil(ctx, t, func() { - key := "foo" - _, err := cc.Get(ctx, key, tc.options) - gotError := err != nil - if gotError != tc.wantError { - t.Fatalf("Unexpeted result, wantError: %v, gotErr: %v, err: %s", tc.wantError, gotError, err) - } - }) - }) - } -} diff --git a/tests/common/lease_test.go b/tests/common/lease_test.go deleted file mode 100644 index 572602805a0..00000000000 --- a/tests/common/lease_test.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestLeaseGrantTimeToLive(t *testing.T) { - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - ttl := int64(10) - leaseResp, err := cc.Grant(ctx, ttl) - require.NoError(t, err) - - ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{}) - require.NoError(t, err) - require.Equal(t, ttl, ttlResp.GrantedTTL) - }) - }) - } -} - -func TestLeaseGrantAndList(t *testing.T) { - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - nestedCases := []struct { - name string - leaseCount int - }{ - { - name: "no_leases", - leaseCount: 0, - }, - { - name: "one_lease", - leaseCount: 1, - }, - { - name: "many_leases", - leaseCount: 3, - }, - } - - for _, nc := range nestedCases { - t.Run(tc.name+"/"+nc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - t.Logf("Creating cluster...") - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - t.Logf("Created cluster and client") - testutils.ExecuteUntil(ctx, t, func() { - var createdLeases []clientv3.LeaseID - for i := 0; i < nc.leaseCount; i++ { - leaseResp, err := cc.Grant(ctx, 10) - t.Logf("Grant returned: resp:%s err:%v", leaseResp.String(), err) - require.NoError(t, err) - createdLeases = append(createdLeases, leaseResp.ID) - } - - // Because we're not guarunteed to talk 
to the same member, wait for - // listing to eventually return true, either by the result propagaing - // or by hitting an up to date member. - var leases []clientv3.LeaseStatus - require.Eventually(t, func() bool { - resp, err := cc.Leases(ctx) - if err != nil { - return false - } - leases = resp.Leases - // TODO: update this to use last Revision from leaseResp - // after https://github.com/etcd-io/etcd/issues/13989 is fixed - return len(leases) == len(createdLeases) - }, 2*time.Second, 10*time.Millisecond) - - returnedLeases := make([]clientv3.LeaseID, 0, nc.leaseCount) - for _, status := range leases { - returnedLeases = append(returnedLeases, status.ID) - } - - require.ElementsMatch(t, createdLeases, returnedLeases) - }) - }) - } - } -} - -func TestLeaseGrantTimeToLiveExpired(t *testing.T) { - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - leaseResp, err := cc.Grant(ctx, 2) - require.NoError(t, err) - - err = cc.Put(ctx, "foo", "bar", config.PutOptions{LeaseID: leaseResp.ID}) - require.NoError(t, err) - - getResp, err := cc.Get(ctx, "foo", config.GetOptions{}) - require.NoError(t, err) - require.Equal(t, int64(1), getResp.Count) - - time.Sleep(3 * time.Second) - - ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{}) - require.NoError(t, err) - require.Equal(t, int64(-1), ttlResp.TTL) - - getResp, err = cc.Get(ctx, "foo", config.GetOptions{}) - require.NoError(t, err) - // Value should expire with the lease - require.Equal(t, int64(0), getResp.Count) - }) - }) - } -} - -func TestLeaseGrantKeepAliveOnce(t *testing.T) { - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t 
*testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - leaseResp, err := cc.Grant(ctx, 2) - require.NoError(t, err) - - _, err = cc.KeepAliveOnce(ctx, leaseResp.ID) - require.NoError(t, err) - - time.Sleep(2 * time.Second) // Wait for the original lease to expire - - ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{}) - require.NoError(t, err) - // We still have a lease! - require.Greater(t, int64(2), ttlResp.TTL) - }) - }) - } -} - -func TestLeaseGrantRevoke(t *testing.T) { - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - leaseResp, err := cc.Grant(ctx, 20) - require.NoError(t, err) - - err = cc.Put(ctx, "foo", "bar", config.PutOptions{LeaseID: leaseResp.ID}) - require.NoError(t, err) - - getResp, err := cc.Get(ctx, "foo", config.GetOptions{}) - require.NoError(t, err) - require.Equal(t, int64(1), getResp.Count) - - _, err = cc.Revoke(ctx, leaseResp.ID) - require.NoError(t, err) - - ttlResp, err := cc.TimeToLive(ctx, leaseResp.ID, config.LeaseOption{}) - require.NoError(t, err) - require.Equal(t, int64(-1), ttlResp.TTL) - - getResp, err = cc.Get(ctx, "foo", config.GetOptions{}) - require.NoError(t, err) - // Value should expire with the lease - require.Equal(t, int64(0), getResp.Count) - }) - }) - } -} diff --git a/tests/common/main_test.go b/tests/common/main_test.go deleted file mode 100644 index be5e5a17d4d..00000000000 --- a/tests/common/main_test.go +++ /dev/null @@ -1,36 
+0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "testing" - - "go.etcd.io/etcd/tests/v3/framework/config" - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" -) - -var ( - testRunner intf.TestRunner - clusterTestCases func() []testCase -) - -func TestMain(m *testing.M) { - testRunner.TestMain(m) -} - -type testCase struct { - name string - config config.ClusterConfig -} diff --git a/tests/common/maintenance_auth_test.go b/tests/common/maintenance_auth_test.go deleted file mode 100644 index 30434e6a6f6..00000000000 --- a/tests/common/maintenance_auth_test.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -/* -Test Defragment -*/ -func TestDefragmentWithNoAuth(t *testing.T) { - testDefragmentWithAuth(t, false, true) -} - -func TestDefragmentWithInvalidAuth(t *testing.T) { - testDefragmentWithAuth(t, true, true, WithAuth("invalid", "invalid")) -} - -func TestDefragmentWithRootAuth(t *testing.T) { - testDefragmentWithAuth(t, false, false, WithAuth("root", "rootPass")) -} - -func TestDefragmentWithUserAuth(t *testing.T) { - testDefragmentWithAuth(t, false, true, WithAuth("user0", "user0Pass")) -} - -func testDefragmentWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) { - testMaintenanceOperationWithAuth(t, expectConnectionError, expectOperationError, func(ctx context.Context, cc intf.Client) error { - return cc.Defragment(ctx, config.DefragOption{Timeout: 10 * time.Second}) - }, opts...) 
-} - -/* -Test Downgrade -*/ -func TestDowngradeWithNoAuth(t *testing.T) { - testDowngradeWithAuth(t, false, true) -} - -func TestDowngradeWithInvalidAuth(t *testing.T) { - testDowngradeWithAuth(t, true, true, WithAuth("invalid", "invalid")) -} - -func TestDowngradeWithRootAuth(t *testing.T) { - testDowngradeWithAuth(t, false, false, WithAuth("root", "rootPass")) -} - -func TestDowngradeWithUserAuth(t *testing.T) { - testDowngradeWithAuth(t, false, true, WithAuth("user0", "user0Pass")) -} - -func testDowngradeWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) { - // TODO(ahrtr): finish this after we added interface methods `Downgrade` into `Client` - t.Skip() -} - -/* -Test HashKV -*/ -func TestHashKVWithNoAuth(t *testing.T) { - testHashKVWithAuth(t, false, true) -} - -func TestHashKVWithInvalidAuth(t *testing.T) { - testHashKVWithAuth(t, true, true, WithAuth("invalid", "invalid")) -} - -func TestHashKVWithRootAuth(t *testing.T) { - testHashKVWithAuth(t, false, false, WithAuth("root", "rootPass")) -} - -func TestHashKVWithUserAuth(t *testing.T) { - testHashKVWithAuth(t, false, true, WithAuth("user0", "user0Pass")) -} - -func testHashKVWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) { - testMaintenanceOperationWithAuth(t, expectConnectionError, expectOperationError, func(ctx context.Context, cc intf.Client) error { - _, err := cc.HashKV(ctx, 0) - return err - }, opts...) 
-} - -/* -Test MoveLeader -*/ -func TestMoveLeaderWithNoAuth(t *testing.T) { - testMoveLeaderWithAuth(t, false, true) -} - -func TestMoveLeaderWithInvalidAuth(t *testing.T) { - testMoveLeaderWithAuth(t, true, true, WithAuth("invalid", "invalid")) -} - -func TestMoveLeaderWithRootAuth(t *testing.T) { - testMoveLeaderWithAuth(t, false, false, WithAuth("root", "rootPass")) -} - -func TestMoveLeaderWithUserAuth(t *testing.T) { - testMoveLeaderWithAuth(t, false, true, WithAuth("user0", "user0Pass")) -} - -func testMoveLeaderWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) { - // TODO(ahrtr): finish this after we added interface methods `MoveLeader` into `Client` - t.Skip() -} - -/* -Test Snapshot -*/ -func TestSnapshotWithNoAuth(t *testing.T) { - testSnapshotWithAuth(t, false, true) -} - -func TestSnapshotWithInvalidAuth(t *testing.T) { - testSnapshotWithAuth(t, true, true, WithAuth("invalid", "invalid")) -} - -func TestSnapshotWithRootAuth(t *testing.T) { - testSnapshotWithAuth(t, false, false, WithAuth("root", "rootPass")) -} - -func TestSnapshotWithUserAuth(t *testing.T) { - testSnapshotWithAuth(t, false, true, WithAuth("user0", "user0Pass")) -} - -func testSnapshotWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, opts ...config.ClientOption) { - // TODO(ahrtr): finish this after we added interface methods `Snapshot` into `Client` - t.Skip() -} - -/* -Test Status -*/ -func TestStatusWithNoAuth(t *testing.T) { - testStatusWithAuth(t, false, true) -} - -func TestStatusWithInvalidAuth(t *testing.T) { - testStatusWithAuth(t, true, true, WithAuth("invalid", "invalid")) -} - -func TestStatusWithRootAuth(t *testing.T) { - testStatusWithAuth(t, false, false, WithAuth("root", "rootPass")) -} - -func TestStatusWithUserAuth(t *testing.T) { - testStatusWithAuth(t, false, true, WithAuth("user0", "user0Pass")) -} - -func testStatusWithAuth(t *testing.T, expectConnectionError, expectOperationError bool, 
opts ...config.ClientOption) { - testMaintenanceOperationWithAuth(t, expectConnectionError, expectOperationError, func(ctx context.Context, cc intf.Client) error { - _, err := cc.Status(ctx) - return err - }, opts...) -} - -func setupAuthForMaintenanceTest(c intf.Client) error { - roles := []authRole{ - { - role: "role0", - permission: clientv3.PermissionType(clientv3.PermReadWrite), - key: "foo", - }, - } - - users := []authUser{ - { - user: "root", - pass: "rootPass", - role: "root", - }, - { - user: "user0", - pass: "user0Pass", - role: "role0", - }, - } - - return setupAuth(c, roles, users) -} - -func testMaintenanceOperationWithAuth(t *testing.T, expectConnectError, expectOperationError bool, f func(context.Context, intf.Client) error, opts ...config.ClientOption) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - clus := testRunner.NewCluster(ctx, t) - defer clus.Close() - - cc := testutils.MustClient(clus.Client()) - err := setupAuthForMaintenanceTest(cc) - require.NoError(t, err) - - ccWithAuth, err := clus.Client(opts...) - if expectConnectError { - if err == nil { - t.Fatalf("%s: expected connection error, but got successful response", t.Name()) - } - t.Logf("%s: connection error: %v", t.Name(), err) - return - } - if err != nil { - t.Fatalf("%s: unexpected connection error (%v)", t.Name(), err) - return - } - - // sleep 1 second to wait for etcd cluster to finish the authentication process. - // TODO(ahrtr): find a better way to do it. 
- time.Sleep(1 * time.Second) - testutils.ExecuteUntil(ctx, t, func() { - err := f(ctx, ccWithAuth) - - if expectOperationError { - if err == nil { - t.Fatalf("%s: expected error, but got successful response", t.Name()) - } - t.Logf("%s: operation error: %v", t.Name(), err) - return - } - - if err != nil { - t.Fatalf("%s: unexpected operation error (%v)", t.Name(), err) - } - }) -} diff --git a/tests/common/member_test.go b/tests/common/member_test.go deleted file mode 100644 index ec2a15f6244..00000000000 --- a/tests/common/member_test.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/require" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/tests/v3/framework/config" - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestMemberList(t *testing.T) { - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - resp, err := cc.MemberList(ctx) - if err != nil { - t.Fatalf("could not get member list, err: %s", err) - } - expectNum := len(clus.Members()) - gotNum := len(resp.Members) - if expectNum != gotNum { - t.Fatalf("number of members not equal, expect: %d, got: %d", expectNum, gotNum) - } - for _, m := range resp.Members { - if len(m.ClientURLs) == 0 { - t.Fatalf("member is not started, memberId:%d, memberName:%s", m.ID, m.Name) - } - } - }) - }) - } -} - -func TestMemberAdd(t *testing.T) { - testRunner.BeforeTest(t) - - learnerTcs := []struct { - name string - learner bool - }{ - { - name: "NotLearner", - learner: false, - }, - { - name: "Learner", - learner: true, - }, - } - - quorumTcs := []struct { - name string - strictReconfigCheck bool - waitForQuorum bool - expectError bool - }{ - { - name: "StrictReconfigCheck/WaitForQuorum", - strictReconfigCheck: true, - waitForQuorum: true, - }, - { - name: "StrictReconfigCheck/NoWaitForQuorum", - strictReconfigCheck: true, - expectError: true, - }, - { - name: "DisableStrictReconfigCheck/WaitForQuorum", - waitForQuorum: true, - }, - { - name: "DisableStrictReconfigCheck/NoWaitForQuorum", - }, - } - - for _, learnerTc := range learnerTcs { - for _, quorumTc := range 
quorumTcs { - for _, clusterTc := range clusterTestCases() { - t.Run(learnerTc.name+"/"+quorumTc.name+"/"+clusterTc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - c := clusterTc.config - c.StrictReconfigCheck = quorumTc.strictReconfigCheck - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(c)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - var addResp *clientv3.MemberAddResponse - var err error - if quorumTc.waitForQuorum { - time.Sleep(etcdserver.HealthInterval) - } - if learnerTc.learner { - addResp, err = cc.MemberAddAsLearner(ctx, "newmember", []string{"http://localhost:123"}) - } else { - addResp, err = cc.MemberAdd(ctx, "newmember", []string{"http://localhost:123"}) - } - if quorumTc.expectError && c.ClusterSize > 1 { - // calling MemberAdd/MemberAddAsLearner on a single node will not fail, - // whether strictReconfigCheck or whether waitForQuorum - require.ErrorContains(t, err, "etcdserver: unhealthy cluster") - } else { - require.NoError(t, err, "MemberAdd failed") - if addResp.Member == nil { - t.Fatalf("MemberAdd failed, expected: member != nil, got: member == nil") - } - if addResp.Member.ID == 0 { - t.Fatalf("MemberAdd failed, expected: ID != 0, got: ID == 0") - } - if len(addResp.Member.PeerURLs) == 0 { - t.Fatalf("MemberAdd failed, expected: non-empty PeerURLs, got: empty PeerURLs") - } - } - }) - }) - } - } - } -} - -func TestMemberRemove(t *testing.T) { - testRunner.BeforeTest(t) - - tcs := []struct { - name string - strictReconfigCheck bool - waitForQuorum bool - expectSingleNodeError bool - expectClusterError bool - }{ - { - name: "StrictReconfigCheck/WaitForQuorum", - strictReconfigCheck: true, - waitForQuorum: true, - expectSingleNodeError: true, - }, - { - name: "StrictReconfigCheck/NoWaitForQuorum", - strictReconfigCheck: true, - expectSingleNodeError: true, - expectClusterError: true, - }, - { 
- name: "DisableStrictReconfigCheck/WaitForQuorum", - waitForQuorum: true, - }, - { - name: "DisableStrictReconfigCheck/NoWaitForQuorum", - }, - } - - for _, quorumTc := range tcs { - for _, clusterTc := range clusterTestCases() { - if !quorumTc.strictReconfigCheck && clusterTc.config.ClusterSize == 1 { - // skip these test cases - // when strictReconfigCheck is disabled, calling MemberRemove will cause the single node to panic - continue - } - t.Run(quorumTc.name+"/"+clusterTc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - c := clusterTc.config - c.StrictReconfigCheck = quorumTc.strictReconfigCheck - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(c)) - defer clus.Close() - // client connects to a specific member which won't be removed from cluster - cc := clus.Members()[0].Client() - - testutils.ExecuteUntil(ctx, t, func() { - if quorumTc.waitForQuorum { - time.Sleep(etcdserver.HealthInterval) - } - - memberId, clusterId := memberToRemove(ctx, t, cc, c.ClusterSize) - removeResp, err := cc.MemberRemove(ctx, memberId) - - if c.ClusterSize == 1 && quorumTc.expectSingleNodeError { - require.ErrorContains(t, err, "etcdserver: re-configuration failed due to not enough started members") - return - } - - if c.ClusterSize > 1 && quorumTc.expectClusterError { - require.ErrorContains(t, err, "etcdserver: unhealthy cluster") - return - } - - require.NoError(t, err, "MemberRemove failed") - t.Logf("removeResp.Members:%v", removeResp.Members) - if removeResp.Header.ClusterId != clusterId { - t.Fatalf("MemberRemove failed, expected ClusterId: %d, got: %d", clusterId, removeResp.Header.ClusterId) - } - if len(removeResp.Members) != c.ClusterSize-1 { - t.Fatalf("MemberRemove failed, expected length of members: %d, got: %d", c.ClusterSize-1, len(removeResp.Members)) - } - for _, m := range removeResp.Members { - if m.ID == memberId { - t.Fatalf("MemberRemove failed, member(id=%d) is still in 
cluster", memberId) - } - } - }) - }) - } - } -} - -// memberToRemove chooses a member to remove. -// If clusterSize == 1, return the only member. -// Otherwise, return a member that client has not connected to. -// It ensures that `MemberRemove` function does not return an "etcdserver: server stopped" error. -func memberToRemove(ctx context.Context, t *testing.T, client intf.Client, clusterSize int) (memberId uint64, clusterId uint64) { - listResp, err := client.MemberList(ctx) - if err != nil { - t.Fatal(err) - } - - clusterId = listResp.Header.ClusterId - if clusterSize == 1 { - memberId = listResp.Members[0].ID - } else { - // get status of the specific member that client has connected to - statusResp, err := client.Status(ctx) - if err != nil { - t.Fatal(err) - } - - // choose a member that client has not connected to - for _, m := range listResp.Members { - if m.ID != statusResp[0].Header.MemberId { - memberId = m.ID - break - } - } - if memberId == 0 { - t.Fatalf("memberToRemove failed. listResp:%v, statusResp:%v", listResp, statusResp) - } - } - return memberId, clusterId -} diff --git a/tests/common/role_test.go b/tests/common/role_test.go deleted file mode 100644 index 84e35d183fc..00000000000 --- a/tests/common/role_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestRoleAdd_Simple(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - _, err := cc.RoleAdd(ctx, "root") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - }) - }) - } -} - -func TestRoleAdd_Error(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - _, err := cc.RoleAdd(ctx, "test-role") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - _, err = cc.RoleAdd(ctx, "test-role") - if err == nil || !strings.Contains(err.Error(), rpctypes.ErrRoleAlreadyExist.Error()) { - t.Fatalf("want (%v) error, but got (%v)", rpctypes.ErrRoleAlreadyExist, err) - } - _, err = cc.RoleAdd(ctx, "") - if err == nil || !strings.Contains(err.Error(), rpctypes.ErrRoleEmpty.Error()) { - t.Fatalf("want (%v) error, but got (%v)", rpctypes.ErrRoleEmpty, err) - } - }) -} - -func TestRootRole(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - _, 
err := cc.RoleAdd(ctx, "root") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - resp, err := cc.RoleGet(ctx, "root") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - t.Logf("get role resp %+v", resp) - // granting to root should be refused by server and a no-op - _, err = cc.RoleGrantPermission(ctx, "root", "foo", "", clientv3.PermissionType(clientv3.PermReadWrite)) - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - resp2, err := cc.RoleGet(ctx, "root") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - t.Logf("get role resp %+v", resp2) - }) -} - -func TestRoleGrantRevokePermission(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - _, err := cc.RoleAdd(ctx, "role1") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - _, err = cc.RoleGrantPermission(ctx, "role1", "bar", "", clientv3.PermissionType(clientv3.PermRead)) - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - _, err = cc.RoleGrantPermission(ctx, "role1", "bar", "", clientv3.PermissionType(clientv3.PermWrite)) - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - _, err = cc.RoleGrantPermission(ctx, "role1", "bar", "foo", clientv3.PermissionType(clientv3.PermReadWrite)) - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - _, err = cc.RoleRevokePermission(ctx, "role1", "foo", "") - if err == nil || !strings.Contains(err.Error(), rpctypes.ErrPermissionNotGranted.Error()) { - t.Fatalf("want error (%v), but got (%v)", rpctypes.ErrPermissionNotGranted, err) - } - _, err = cc.RoleRevokePermission(ctx, "role1", "bar", "foo") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - }) -} - 
-func TestRoleDelete(t *testing.T) { - testRunner.BeforeTest(t) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterSize(1)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - _, err := cc.RoleAdd(ctx, "role1") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - _, err = cc.RoleDelete(ctx, "role1") - if err != nil { - t.Fatalf("want no error, but got (%v)", err) - } - }) -} diff --git a/tests/common/status_test.go b/tests/common/status_test.go deleted file mode 100644 index a6a9844391a..00000000000 --- a/tests/common/status_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestStatus(t *testing.T) { - - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - rs, err := cc.Status(ctx) - if err != nil { - t.Fatalf("could not get status, err: %s", err) - } - if len(rs) != tc.config.ClusterSize { - t.Fatalf("wrong number of status responses. expected:%d, got:%d ", tc.config.ClusterSize, len(rs)) - } - memberIds := make(map[uint64]struct{}) - for _, r := range rs { - if r == nil { - t.Fatalf("status response is nil") - } - memberIds[r.Header.MemberId] = struct{}{} - } - if len(rs) != len(memberIds) { - t.Fatalf("found duplicated members") - } - }) - }) - } -} diff --git a/tests/common/txn_test.go b/tests/common/txn_test.go deleted file mode 100644 index c906f6819b7..00000000000 --- a/tests/common/txn_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -type txnReq struct { - compare []string - ifSuccess []string - ifFail []string - results []string -} - -func TestTxnSucc(t *testing.T) { - testRunner.BeforeTest(t) - reqs := []txnReq{ - { - compare: []string{`value("key1") != "value2"`, `value("key2") != "value1"`}, - ifSuccess: []string{"get key1", "get key2"}, - results: []string{"SUCCESS", "key1", "value1", "key2", "value2"}, - }, - { - compare: []string{`version("key1") = "1"`, `version("key2") = "1"`}, - ifSuccess: []string{"get key1", "get key2", `put "key \"with\" space" "value \x23"`}, - ifFail: []string{`put key1 "fail"`, `put key2 "fail"`}, - results: []string{"SUCCESS", "key1", "value1", "key2", "value2", "OK"}, - }, - { - compare: []string{`version("key \"with\" space") = "1"`}, - ifSuccess: []string{`get "key \"with\" space"`}, - results: []string{"SUCCESS", `key "with" space`, "value \x23"}, - }, - } - for _, cfg := range clusterTestCases() { - t.Run(cfg.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(cfg.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - if err := cc.Put(ctx, "key1", "value1", config.PutOptions{}); err != nil { - t.Fatalf("could not create key:%s, value:%s", "key1", "value1") - } - if err := cc.Put(ctx, "key2", "value2", config.PutOptions{}); err != nil { - t.Fatalf("could not create key:%s, value:%s", "key2", "value2") - } - for _, req := range reqs { - resp, err := cc.Txn(ctx, req.compare, req.ifSuccess, req.ifFail, config.TxnOptions{ - Interactive: true, - }) - if err != nil { - 
t.Errorf("Txn returned error: %s", err) - } - assert.Equal(t, req.results, getRespValues(resp)) - } - }) - }) - } -} - -func TestTxnFail(t *testing.T) { - testRunner.BeforeTest(t) - reqs := []txnReq{ - { - compare: []string{`version("key") < "0"`}, - ifSuccess: []string{`put key "success"`}, - ifFail: []string{`put key "fail"`}, - results: []string{"FAILURE", "OK"}, - }, - { - compare: []string{`value("key1") != "value1"`}, - ifSuccess: []string{`put key1 "success"`}, - ifFail: []string{`put key1 "fail"`}, - results: []string{"FAILURE", "OK"}, - }, - } - for _, cfg := range clusterTestCases() { - t.Run(cfg.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(cfg.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - if err := cc.Put(ctx, "key1", "value1", config.PutOptions{}); err != nil { - t.Fatalf("could not create key:%s, value:%s", "key1", "value1") - } - for _, req := range reqs { - resp, err := cc.Txn(ctx, req.compare, req.ifSuccess, req.ifFail, config.TxnOptions{ - Interactive: true, - }) - if err != nil { - t.Errorf("Txn returned error: %s", err) - } - assert.Equal(t, req.results, getRespValues(resp)) - } - }) - }) - } -} - -func getRespValues(r *clientv3.TxnResponse) []string { - var ss []string - if r.Succeeded { - ss = append(ss, "SUCCESS") - } else { - ss = append(ss, "FAILURE") - } - for _, resp := range r.Responses { - switch v := resp.Response.(type) { - case *pb.ResponseOp_ResponseDeleteRange: - r := (clientv3.DeleteResponse)(*v.ResponseDeleteRange) - ss = append(ss, fmt.Sprintf("%d", r.Deleted)) - case *pb.ResponseOp_ResponsePut: - r := (clientv3.PutResponse)(*v.ResponsePut) - ss = append(ss, "OK") - if r.PrevKv != nil { - ss = append(ss, string(r.PrevKv.Key), string(r.PrevKv.Value)) - } - case *pb.ResponseOp_ResponseRange: - r := 
(clientv3.GetResponse)(*v.ResponseRange) - for _, kv := range r.Kvs { - ss = append(ss, string(kv.Key), string(kv.Value)) - } - default: - ss = append(ss, fmt.Sprintf("\"Unknown\" : %q\n", fmt.Sprintf("%+v", v))) - } - } - return ss -} diff --git a/tests/common/unit_test.go b/tests/common/unit_test.go deleted file mode 100644 index 3e6f9a9dc44..00000000000 --- a/tests/common/unit_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !(e2e || integration) - -package common - -import ( - "go.etcd.io/etcd/tests/v3/framework" - "go.etcd.io/etcd/tests/v3/framework/config" -) - -func init() { - testRunner = framework.UnitTestRunner - clusterTestCases = unitClusterTestCases -} - -func unitClusterTestCases() []testCase { - return nil -} - -// WithAuth is when a build tag (e.g. e2e or integration) isn't configured -// in IDE, then IDE may complain "Unresolved reference 'WithAuth'". -// So we need to define a default WithAuth to resolve such case. 
-func WithAuth(userName, password string) config.ClientOption { - return func(any) {} -} diff --git a/tests/common/user_test.go b/tests/common/user_test.go deleted file mode 100644 index f784fc2c402..00000000000 --- a/tests/common/user_test.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestUserAdd_Simple(t *testing.T) { - testRunner.BeforeTest(t) - tcs := []struct { - name string - username string - password string - noPassword bool - expectedError string - }{ - { - name: "empty_username_not_allowed", - username: "", - password: "foobar", - // Very Vague error expectation because the CLI and the API return very - // different error structures. 
- expectedError: "user name", - }, - { - // Can create a user with no password, restricted to CN auth - name: "no_password_with_noPassword_set", - username: "foo", - password: "", - noPassword: true, - }, - { - // Can create a user with no password, but not restricted to CN auth - name: "no_password_without_noPassword_set", - username: "foo", - password: "", - noPassword: false, - }, - { - name: "regular_user_with_password", - username: "foo", - password: "bar", - }, - } - for _, tc := range clusterTestCases() { - for _, nc := range tcs { - t.Run(tc.name+"/"+nc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - resp, err := cc.UserAdd(ctx, nc.username, nc.password, config.UserAddOptions{NoPassword: nc.noPassword}) - if nc.expectedError != "" { - if err != nil { - assert.Contains(t, err.Error(), nc.expectedError) - return - } - - t.Fatalf("expected user creation to fail") - } - - if err != nil { - t.Fatalf("expected no error, err: %v", err) - } - - if resp == nil { - t.Fatalf("unexpected nil response to successful user creation") - } - }) - }) - } - } -} - -func TestUserAdd_DuplicateUserNotAllowed(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - user := "barb" - password := "rhubarb" - - _, err := cc.UserAdd(ctx, user, password, config.UserAddOptions{}) - if err != nil { - t.Fatalf("first user creation should succeed, err: %v", err) - } - - _, err = cc.UserAdd(ctx, 
user, password, config.UserAddOptions{}) - if err == nil { - t.Fatalf("duplicate user creation should fail") - } - assert.Contains(t, err.Error(), "etcdserver: user name already exists") - }) - }) - } -} - -func TestUserList(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - // No Users Yet - resp, err := cc.UserList(ctx) - if err != nil { - t.Fatalf("user listing should succeed, err: %v", err) - } - if len(resp.Users) != 0 { - t.Fatalf("expected no pre-existing users, found: %q", resp.Users) - } - - user := "barb" - password := "rhubarb" - - _, err = cc.UserAdd(ctx, user, password, config.UserAddOptions{}) - if err != nil { - t.Fatalf("user creation should succeed, err: %v", err) - } - - // Users! 
- resp, err = cc.UserList(ctx) - if err != nil { - t.Fatalf("user listing should succeed, err: %v", err) - } - if len(resp.Users) != 1 { - t.Fatalf("expected one user, found: %q", resp.Users) - } - }) - }) - } -} - -func TestUserDelete(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - user := "barb" - password := "rhubarb" - - _, err := cc.UserAdd(ctx, user, password, config.UserAddOptions{}) - if err != nil { - t.Fatalf("user creation should succeed, err: %v", err) - } - - resp, err := cc.UserList(ctx) - if err != nil { - t.Fatalf("user listing should succeed, err: %v", err) - } - if len(resp.Users) != 1 { - t.Fatalf("expected one user, found: %q", resp.Users) - } - - // Delete barb, sorry barb! 
- _, err = cc.UserDelete(ctx, user) - if err != nil { - t.Fatalf("user deletion should succeed at first, err: %v", err) - } - - resp, err = cc.UserList(ctx) - if err != nil { - t.Fatalf("user listing should succeed, err: %v", err) - } - if len(resp.Users) != 0 { - t.Fatalf("expected no users after deletion, found: %q", resp.Users) - } - - // Try to delete barb again - _, err = cc.UserDelete(ctx, user) - if err == nil { - t.Fatalf("deleting a non-existent user should fail") - } - assert.Contains(t, err.Error(), "user name not found") - }) - }) - } -} - -func TestUserChangePassword(t *testing.T) { - testRunner.BeforeTest(t) - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - - testutils.ExecuteUntil(ctx, t, func() { - user := "barb" - password := "rhubarb" - newPassword := "potato" - - _, err := cc.UserAdd(ctx, user, password, config.UserAddOptions{}) - if err != nil { - t.Fatalf("user creation should succeed, err: %v", err) - } - - err = cc.UserChangePass(ctx, user, newPassword) - if err != nil { - t.Fatalf("user password change should succeed, err: %v", err) - } - - err = cc.UserChangePass(ctx, "non-existent-user", newPassword) - if err == nil { - t.Fatalf("user password change for non-existent user should fail") - } - assert.Contains(t, err.Error(), "user name not found") - }) - }) - } -} diff --git a/tests/common/wait_leader_test.go b/tests/common/wait_leader_test.go deleted file mode 100644 index faa1f716cd7..00000000000 --- a/tests/common/wait_leader_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "context" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/config" -) - -func TestWaitLeader(t *testing.T) { - testRunner.BeforeTest(t) - - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - - leader := clus.WaitLeader(t) - if leader < 0 || leader >= len(clus.Members()) { - t.Fatalf("WaitLeader failed for the leader index (%d) is out of range, cluster member count: %d", leader, len(clus.Members())) - } - }) - } -} - -func TestWaitLeader_MemberStop(t *testing.T) { - testRunner.BeforeTest(t) - tcs := []testCase{ - { - name: "PeerTLS", - config: config.NewClusterConfig(config.WithPeerTLS(config.ManualTLS)), - }, - { - name: "PeerAutoTLS", - config: config.NewClusterConfig(config.WithPeerTLS(config.AutoTLS)), - }, - } - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - defer clus.Close() - - lead1 := clus.WaitLeader(t) - if lead1 < 0 || lead1 >= len(clus.Members()) { - t.Fatalf("WaitLeader failed for the leader index (%d) is out of range, cluster member count: %d", lead1, len(clus.Members())) - } - - clus.Members()[lead1].Stop() - lead2 := clus.WaitLeader(t) - if lead2 < 0 || lead2 >= len(clus.Members()) { - 
t.Fatalf("WaitLeader failed for the leader index (%d) is out of range, cluster member count: %d", lead2, len(clus.Members())) - } - - if lead1 == lead2 { - t.Fatalf("WaitLeader failed for the leader(index=%d) did not change as expected after a member stopped", lead1) - } - }) - } -} diff --git a/tests/common/watch_test.go b/tests/common/watch_test.go deleted file mode 100644 index 103a4bb03db..00000000000 --- a/tests/common/watch_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package common - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestWatch(t *testing.T) { - testRunner.BeforeTest(t) - watchTimeout := 1 * time.Second - for _, tc := range clusterTestCases() { - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.config)) - - defer clus.Close() - cc := testutils.MustClient(clus.Client()) - testutils.ExecuteUntil(ctx, t, func() { - tests := []struct { - puts []testutils.KV - watchKey string - opts config.WatchOptions - wanted []testutils.KV - }{ - { // watch by revision - puts: []testutils.KV{{Key: "bar", Val: "revision_1"}, {Key: "bar", Val: "revision_2"}, {Key: "bar", Val: "revision_3"}}, - watchKey: "bar", - opts: config.WatchOptions{Revision: 3}, - wanted: []testutils.KV{{Key: "bar", Val: "revision_2"}, {Key: "bar", Val: "revision_3"}}, - }, - { // watch 1 key - puts: []testutils.KV{{Key: "sample", Val: "value"}}, - watchKey: "sample", - opts: config.WatchOptions{Revision: 1}, - wanted: []testutils.KV{{Key: "sample", Val: "value"}}, - }, - { // watch 3 keys by prefix - puts: []testutils.KV{{Key: "foo1", Val: "val1"}, {Key: "foo2", Val: "val2"}, {Key: "foo3", Val: "val3"}}, - watchKey: "foo", - opts: config.WatchOptions{Revision: 1, Prefix: true}, - wanted: []testutils.KV{{Key: "foo1", Val: "val1"}, {Key: "foo2", Val: "val2"}, {Key: "foo3", Val: "val3"}}, - }, - { // watch 3 keys by range - puts: []testutils.KV{{Key: "key1", Val: "val1"}, {Key: "key3", Val: "val3"}, {Key: "key2", Val: "val2"}}, - watchKey: "key", - opts: config.WatchOptions{Revision: 1, RangeEnd: "key3"}, - wanted: []testutils.KV{{Key: "key1", Val: "val1"}, {Key: "key2", Val: "val2"}}, - }, - } - - for _, tt := range tests { - wCtx, wCancel := context.WithCancel(ctx) - wch := 
cc.Watch(wCtx, tt.watchKey, tt.opts) - if wch == nil { - t.Fatalf("failed to watch %s", tt.watchKey) - } - - for j := range tt.puts { - if err := cc.Put(ctx, tt.puts[j].Key, tt.puts[j].Val, config.PutOptions{}); err != nil { - t.Fatalf("can't not put key %q, err: %s", tt.puts[j].Key, err) - } - } - - kvs, err := testutils.KeyValuesFromWatchChan(wch, len(tt.wanted), watchTimeout) - if err != nil { - wCancel() - t.Fatalf("failed to get key-values from watch channel %s", err) - } - - wCancel() - assert.Equal(t, tt.wanted, kvs) - } - }) - }) - } -} diff --git a/tests/e2e/cluster_downgrade_test.go b/tests/e2e/cluster_downgrade_test.go deleted file mode 100644 index e15ba4eb227..00000000000 --- a/tests/e2e/cluster_downgrade_test.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "testing" - "time" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestDowngradeUpgradeClusterOf1(t *testing.T) { - testDowngradeUpgrade(t, 1) -} - -func TestDowngradeUpgradeClusterOf3(t *testing.T) { - testDowngradeUpgrade(t, 3) -} - -func testDowngradeUpgrade(t *testing.T, clusterSize int) { - currentEtcdBinary := e2e.BinPath.Etcd - lastReleaseBinary := e2e.BinPath.EtcdLastRelease - if !fileutil.Exist(lastReleaseBinary) { - t.Skipf("%q does not exist", lastReleaseBinary) - } - - currentVersion, err := getVersionFromBinary(currentEtcdBinary) - require.NoError(t, err) - // wipe any pre-release suffix like -alpha.0 we see commonly in builds - currentVersion.PreRelease = "" - - lastVersion, err := getVersionFromBinary(lastReleaseBinary) - require.NoError(t, err) - - require.Equalf(t, lastVersion.Minor, currentVersion.Minor-1, "unexpected minor version difference") - currentVersionStr := currentVersion.String() - lastVersionStr := lastVersion.String() - - lastClusterVersion := semver.New(lastVersionStr) - lastClusterVersion.Patch = 0 - lastClusterVersionStr := lastClusterVersion.String() - - e2e.BeforeTest(t) - - t.Logf("Create cluster with version %s", currentVersionStr) - epc := newCluster(t, clusterSize) - for i := 0; i < len(epc.Procs); i++ { - validateVersion(t, epc.Cfg, epc.Procs[i], version.Versions{ - Cluster: currentVersionStr, - Server: version.Version, - Storage: currentVersionStr, - }) - } - t.Logf("Cluster created") - - t.Logf("etcdctl downgrade enable %s", lastVersionStr) - downgradeEnable(t, epc, lastVersion) - - t.Log("Downgrade enabled, validating if cluster is 
ready for downgrade") - for i := 0; i < len(epc.Procs); i++ { - validateVersion(t, epc.Cfg, epc.Procs[i], version.Versions{ - Cluster: lastClusterVersionStr, - Server: version.Version, - Storage: lastClusterVersionStr, - }) - e2e.AssertProcessLogs(t, epc.Procs[i], "The server is ready to downgrade") - } - - t.Log("Cluster is ready for downgrade") - t.Logf("Starting downgrade process to %q", lastVersionStr) - for i := 0; i < len(epc.Procs); i++ { - t.Logf("Downgrading member %d by running %s binary", i, lastReleaseBinary) - stopEtcd(t, epc.Procs[i]) - startEtcd(t, epc.Procs[i], lastReleaseBinary) - } - - t.Log("All members downgraded, validating downgrade") - e2e.AssertProcessLogs(t, leader(t, epc), "the cluster has been downgraded") - for i := 0; i < len(epc.Procs); i++ { - validateVersion(t, epc.Cfg, epc.Procs[i], version.Versions{ - Cluster: lastClusterVersionStr, - Server: lastVersionStr, - }) - } - - t.Log("Downgrade complete") - t.Logf("Starting upgrade process to %q", currentVersionStr) - for i := 0; i < len(epc.Procs); i++ { - t.Logf("Upgrading member %d", i) - stopEtcd(t, epc.Procs[i]) - startEtcd(t, epc.Procs[i], currentEtcdBinary) - // NOTE: The leader has monitor to the cluster version, which will - // update cluster version. We don't need to check the transient - // version just in case that it might be flaky. 
- } - - t.Log("All members upgraded, validating upgrade") - for i := 0; i < len(epc.Procs); i++ { - validateVersion(t, epc.Cfg, epc.Procs[i], version.Versions{ - Cluster: currentVersionStr, - Server: version.Version, - Storage: currentVersionStr, - }) - } - t.Log("Upgrade complete") -} - -func newCluster(t *testing.T, clusterSize int) *e2e.EtcdProcessCluster { - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(clusterSize), - e2e.WithKeepDataDir(true), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - t.Cleanup(func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }) - return epc -} - -func startEtcd(t *testing.T, ep e2e.EtcdProcess, execPath string) { - ep.Config().ExecPath = execPath - err := ep.Restart(context.TODO()) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } -} - -func downgradeEnable(t *testing.T, epc *e2e.EtcdProcessCluster, ver *semver.Version) { - c, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - assert.NoError(t, err) - testutils.ExecuteWithTimeout(t, 20*time.Second, func() { - err := c.DowngradeEnable(context.TODO(), ver.String()) - if err != nil { - t.Fatal(err) - } - }) -} - -func stopEtcd(t *testing.T, ep e2e.EtcdProcess) { - if err := ep.Stop(); err != nil { - t.Fatal(err) - } -} - -func validateVersion(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, expect version.Versions) { - testutils.ExecuteWithTimeout(t, 30*time.Second, func() { - for { - result, err := getMemberVersionByCurl(cfg, member) - if err != nil { - cfg.Logger.Warn("failed to get member version and retrying", zap.Error(err)) - time.Sleep(time.Second) - continue - } - - if err := compareMemberVersion(expect, result); err != nil { - cfg.Logger.Warn("failed to validate and retrying", zap.Error(err)) - time.Sleep(time.Second) - continue - } - break - } - }) -} - -func leader(t *testing.T, epc 
*e2e.EtcdProcessCluster) e2e.EtcdProcess { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - for i := 0; i < len(epc.Procs); i++ { - endpoints := epc.Procs[i].EndpointsV3() - cli, err := clientv3.New(clientv3.Config{ - Endpoints: endpoints, - DialTimeout: 3 * time.Second, - }) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - resp, err := cli.Status(ctx, endpoints[0]) - if err != nil { - t.Fatal(err) - } - if resp.Header.GetMemberId() == resp.Leader { - return epc.Procs[i] - } - } - t.Fatal("Leader not found") - return nil -} - -func compareMemberVersion(expect version.Versions, target version.Versions) error { - if expect.Server != "" && expect.Server != target.Server { - return fmt.Errorf("expect etcdserver version %v, but got %v", expect.Server, target.Server) - } - - if expect.Cluster != "" && expect.Cluster != target.Cluster { - return fmt.Errorf("expect etcdcluster version %v, but got %v", expect.Cluster, target.Cluster) - } - - if expect.Storage != "" && expect.Storage != target.Storage { - return fmt.Errorf("expect storage version %v, but got %v", expect.Storage, target.Storage) - } - return nil -} - -func getMemberVersionByCurl(cfg *e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess) (version.Versions, error) { - args := e2e.CURLPrefixArgs(cfg, member, "GET", e2e.CURLReq{Endpoint: "/version"}) - lines, err := e2e.RunUtilCompletion(args, nil) - if err != nil { - return version.Versions{}, err - } - - data := strings.Join(lines, "\n") - result := version.Versions{} - if err := json.Unmarshal([]byte(data), &result); err != nil { - return version.Versions{}, fmt.Errorf("failed to unmarshal (%v): %w", data, err) - } - return result, nil -} - -func getVersionFromBinary(binaryPath string) (*semver.Version, error) { - lines, err := e2e.RunUtilCompletion([]string{binaryPath, "--version"}, nil) - if err != nil { - return nil, fmt.Errorf("could not find binary version from %s, err: %w", binaryPath, err) - } - - 
for _, line := range lines { - if strings.HasPrefix(line, "etcd Version:") { - versionString := strings.TrimSpace(strings.SplitAfter(line, ":")[1]) - return semver.NewVersion(versionString) - } - } - - return nil, fmt.Errorf("could not find version in binary output of %s, lines outputted were %v", binaryPath, lines) -} diff --git a/tests/e2e/corrupt_test.go b/tests/e2e/corrupt_test.go deleted file mode 100644 index f4be8da4561..00000000000 --- a/tests/e2e/corrupt_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/storage/datadir" - "go.etcd.io/etcd/server/v3/storage/mvcc/testutil" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestEtcdCorruptHash(t *testing.T) { - // oldenv := os.Getenv("EXPECT_DEBUG") - // defer os.Setenv("EXPECT_DEBUG", oldenv) - // os.Setenv("EXPECT_DEBUG", "1") - - cfg := e2e.NewConfigNoTLS() - - // trigger snapshot so that restart member can load peers from disk - cfg.SnapshotCount = 3 - - testCtl(t, corruptTest, withQuorum(), - withCfg(*cfg), - withInitialCorruptCheck(), - withCorruptFunc(testutil.CorruptBBolt), - ) -} - -func corruptTest(cx ctlCtx) { - cx.t.Log("putting 10 keys...") - for i := 0; i < 10; i++ { - if err := ctlV3Put(cx, fmt.Sprintf("foo%05d", i), fmt.Sprintf("v%05d", i), ""); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Fatalf("putTest ctlV3Put error (%v)", err) - } - } - } - // enough time for all nodes sync on the same data - cx.t.Log("sleeping 3sec to let nodes sync...") - time.Sleep(3 * time.Second) - - cx.t.Log("connecting clientv3...") - eps := cx.epc.EndpointsV3() - cli1, err := clientv3.New(clientv3.Config{Endpoints: []string{eps[1]}, DialTimeout: 3 * time.Second}) - if err != nil { - cx.t.Fatal(err) - } - defer cli1.Close() - - sresp, err := cli1.Status(context.TODO(), eps[0]) - cx.t.Logf("checked status sresp:%v err:%v", sresp, err) - if err != nil { - cx.t.Fatal(err) - } - id0 := sresp.Header.GetMemberId() - - cx.t.Log("stopping etcd[0]...") - cx.epc.Procs[0].Stop() - - // corrupting first member by modifying backend offline. 
- fp := datadir.ToBackendFileName(cx.epc.Procs[0].Config().DataDirPath) - cx.t.Logf("corrupting backend: %v", fp) - if err = cx.corruptFunc(fp); err != nil { - cx.t.Fatal(err) - } - - cx.t.Log("restarting etcd[0]") - ep := cx.epc.Procs[0] - proc, err := e2e.SpawnCmd(append([]string{ep.Config().ExecPath}, ep.Config().Args...), cx.envMap) - if err != nil { - cx.t.Fatal(err) - } - defer proc.Stop() - - cx.t.Log("waiting for etcd[0] failure...") - // restarting corrupted member should fail - e2e.WaitReadyExpectProc(context.TODO(), proc, []string{fmt.Sprintf("etcdmain: %016x found data inconsistency with peers", id0)}) -} - -func TestPeriodicCheckDetectsCorruption(t *testing.T) { - checkTime := time.Second - e2e.BeforeTest(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - epc, err := e2e.NewEtcdProcessCluster(ctx, t, - e2e.WithKeepDataDir(true), - e2e.WithCorruptCheckTime(time.Second), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - t.Cleanup(func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }) - - cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - assert.NoError(t, err) - - for i := 0; i < 10; i++ { - err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{}) - assert.NoError(t, err, "error on put") - } - - members, err := cc.MemberList(ctx) - assert.NoError(t, err, "error on member list") - var memberID uint64 - for _, m := range members.Members { - if m.Name == epc.Procs[0].Config().Name { - memberID = m.ID - } - } - assert.NotZero(t, memberID, "member not found") - epc.Procs[0].Stop() - err = testutil.CorruptBBolt(datadir.ToBackendFileName(epc.Procs[0].Config().DataDirPath)) - assert.NoError(t, err) - - err = epc.Procs[0].Restart(context.TODO()) - assert.NoError(t, err) - time.Sleep(checkTime * 11 / 10) - alarmResponse, err := cc.AlarmList(ctx) - assert.NoError(t, err, "error on alarm list") - 
assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: memberID}}, alarmResponse.Alarms) -} - -func TestCompactHashCheckDetectCorruption(t *testing.T) { - checkTime := time.Second - e2e.BeforeTest(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - epc, err := e2e.NewEtcdProcessCluster(ctx, t, - e2e.WithKeepDataDir(true), - e2e.WithCompactHashCheckEnabled(true), - e2e.WithCompactHashCheckTime(checkTime), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - t.Cleanup(func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }) - - cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - assert.NoError(t, err) - - for i := 0; i < 10; i++ { - err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i), config.PutOptions{}) - assert.NoError(t, err, "error on put") - } - members, err := cc.MemberList(ctx) - assert.NoError(t, err, "error on member list") - var memberID uint64 - for _, m := range members.Members { - if m.Name == epc.Procs[0].Config().Name { - memberID = m.ID - } - } - - epc.Procs[0].Stop() - err = testutil.CorruptBBolt(datadir.ToBackendFileName(epc.Procs[0].Config().DataDirPath)) - assert.NoError(t, err) - - err = epc.Procs[0].Restart(ctx) - assert.NoError(t, err) - _, err = cc.Compact(ctx, 5, config.CompactOption{}) - assert.NoError(t, err) - time.Sleep(checkTime * 11 / 10) - alarmResponse, err := cc.AlarmList(ctx) - assert.NoError(t, err, "error on alarm list") - assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: memberID}}, alarmResponse.Alarms) -} diff --git a/tests/e2e/ctl_v3_auth_cluster_test.go b/tests/e2e/ctl_v3_auth_cluster_test.go deleted file mode 100644 index 81ee49974a7..00000000000 --- a/tests/e2e/ctl_v3_auth_cluster_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 
2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestAuthCluster(t *testing.T) { - e2e.BeforeTest(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - epc, err := e2e.NewEtcdProcessCluster(ctx, t, - e2e.WithClusterSize(1), - e2e.WithSnapshotCount(2), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer func() { - if err := epc.Close(); err != nil { - t.Fatalf("could not close test cluster (%v)", err) - } - }() - - epcClient := epc.Client() - createUsers(ctx, t, epcClient) - - if err := epcClient.AuthEnable(ctx); err != nil { - t.Fatalf("could not enable Auth: (%v)", err) - } - - testUserClientOpts := e2e.WithAuth("test", "testPassword") - rootUserClientOpts := e2e.WithAuth("root", "rootPassword") - - // write more than SnapshotCount keys to single leader to make sure snapshot is created - for i := 0; i <= 10; i++ { - if err := epc.Client(testUserClientOpts).Put(ctx, fmt.Sprintf("/test/%d", i), "test", config.PutOptions{}); err != nil { - t.Fatalf("failed to Put (%v)", err) - } - } - - // start second process - if err := epc.StartNewProc(ctx, nil, t, rootUserClientOpts); err != nil { - t.Fatalf("could not start second etcd process (%v)", err) - } - - // make sure writes to 
both endpoints are successful - endpoints := epc.EndpointsV3() - assert.Equal(t, len(endpoints), 2) - for _, endpoint := range epc.EndpointsV3() { - if err := epc.Client(testUserClientOpts, e2e.WithEndpoints([]string{endpoint})).Put(ctx, "/test/key", endpoint, config.PutOptions{}); err != nil { - t.Fatalf("failed to write to Put to %q (%v)", endpoint, err) - } - } - - // verify all nodes have exact same revision and hash - assert.Eventually(t, func() bool { - hashKvs, err := epc.Client(rootUserClientOpts).HashKV(ctx, 0) - if err != nil { - t.Logf("failed to get HashKV: %v", err) - return false - } - if len(hashKvs) != 2 { - t.Logf("not exactly 2 hashkv responses returned: %d", len(hashKvs)) - return false - } - if hashKvs[0].Header.Revision != hashKvs[1].Header.Revision { - t.Logf("The two members' revision (%d, %d) are not equal", hashKvs[0].Header.Revision, hashKvs[1].Header.Revision) - return false - } - assert.Equal(t, hashKvs[0].Hash, hashKvs[1].Hash) - return true - }, time.Second*5, time.Millisecond*100) - -} - -func applyTLSWithRootCommonName() func() { - var ( - oldCertPath = e2e.CertPath - oldPrivateKeyPath = e2e.PrivateKeyPath - oldCaPath = e2e.CaPath - - newCertPath = filepath.Join(e2e.FixturesDir, "CommonName-root.crt") - newPrivateKeyPath = filepath.Join(e2e.FixturesDir, "CommonName-root.key") - newCaPath = filepath.Join(e2e.FixturesDir, "CommonName-root.crt") - ) - - e2e.CertPath = newCertPath - e2e.PrivateKeyPath = newPrivateKeyPath - e2e.CaPath = newCaPath - - return func() { - e2e.CertPath = oldCertPath - e2e.PrivateKeyPath = oldPrivateKeyPath - e2e.CaPath = oldCaPath - } -} - -func createUsers(ctx context.Context, t *testing.T, client *e2e.EtcdctlV3) { - if _, err := client.UserAdd(ctx, "root", "rootPassword", config.UserAddOptions{}); err != nil { - t.Fatalf("could not add root user (%v)", err) - } - if _, err := client.RoleAdd(ctx, "root"); err != nil { - t.Fatalf("could not create 'root' role (%v)", err) - } - if _, err := 
client.UserGrantRole(ctx, "root", "root"); err != nil { - t.Fatalf("could not grant root role to root user (%v)", err) - } - - if _, err := client.RoleAdd(ctx, "test"); err != nil { - t.Fatalf("could not create 'test' role (%v)", err) - } - if _, err := client.RoleGrantPermission(ctx, "test", "/test/", "/test0", clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { - t.Fatalf("could not RoleGrantPermission (%v)", err) - } - if _, err := client.UserAdd(ctx, "test", "testPassword", config.UserAddOptions{}); err != nil { - t.Fatalf("could not add user test (%v)", err) - } - if _, err := client.UserGrantRole(ctx, "test", "test"); err != nil { - t.Fatalf("could not grant test role user (%v)", err) - } -} diff --git a/tests/e2e/ctl_v3_auth_no_proxy_test.go b/tests/e2e/ctl_v3_auth_no_proxy_test.go deleted file mode 100644 index 530f24f6c6f..00000000000 --- a/tests/e2e/ctl_v3_auth_no_proxy_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// These tests depend on certificate-based authentication that is NOT supported -// by gRPC proxy. 
-//go:build !cluster_proxy - -package e2e - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3AuthCertCN(t *testing.T) { - testCtl(t, authTestCertCN, withCfg(*e2e.NewConfigClientTLSCertAuth())) -} -func TestCtlV3AuthCertCNAndUsername(t *testing.T) { - testCtl(t, authTestCertCNAndUsername, withCfg(*e2e.NewConfigClientTLSCertAuth())) -} -func TestCtlV3AuthCertCNAndUsernameNoPassword(t *testing.T) { - testCtl(t, authTestCertCNAndUsernameNoPassword, withCfg(*e2e.NewConfigClientTLSCertAuth())) -} - -func TestCtlV3AuthCertCNWithWithConcurrentOperation(t *testing.T) { - e2e.BeforeTest(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // apply the certificate which has `root` CommonName, - // and reset the setting when the test case finishes. - // TODO(ahrtr): enhance the e2e test framework to support - // certificates with CommonName. - t.Log("Apply certificate with root CommonName") - resetCert := applyTLSWithRootCommonName() - defer resetCert() - - t.Log("Create etcd cluster") - epc, err := e2e.NewEtcdProcessCluster(ctx, t, - e2e.WithClusterSize(1), - e2e.WithClientConnType(e2e.ClientTLS), - e2e.WithClientCertAuthority(true), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer func() { - if err := epc.Close(); err != nil { - t.Fatalf("could not close test cluster (%v)", err) - } - }() - - epcClient := epc.Client() - t.Log("Create users") - createUsers(ctx, t, epcClient) - - t.Log("Enable auth") - if err := epcClient.AuthEnable(ctx); err != nil { - t.Fatalf("could not enable Auth: (%v)", err) - } - - // Create two goroutines, one goroutine keeps creating & deleting users, - // and the other goroutine keeps writing & deleting K/V entries. 
- var wg sync.WaitGroup - wg.Add(2) - errs := make(chan error, 2) - donec := make(chan struct{}) - - // Create the first goroutine to create & delete users - t.Log("Create the first goroutine to create & delete users") - go func() { - defer wg.Done() - for i := 0; i < 100; i++ { - user := fmt.Sprintf("testuser-%d", i) - pass := fmt.Sprintf("testpass-%d", i) - if _, err := epcClient.UserAdd(ctx, user, pass, config.UserAddOptions{}); err != nil { - errs <- fmt.Errorf("failed to create user %q: %w", user, err) - break - } - - if _, err := epcClient.UserDelete(ctx, user); err != nil { - errs <- fmt.Errorf("failed to delete user %q: %w", user, err) - break - } - } - t.Log("The first goroutine finished") - }() - - // Create the second goroutine to write & delete K/V entries - t.Log("Create the second goroutine to write & delete K/V entries") - go func() { - defer wg.Done() - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key-%d", i) - value := fmt.Sprintf("value-%d", i) - - if err := epcClient.Put(ctx, key, value, config.PutOptions{}); err != nil { - errs <- fmt.Errorf("failed to put key %q: %w", key, err) - break - } - - if _, err := epcClient.Delete(ctx, key, config.DeleteOptions{}); err != nil { - errs <- fmt.Errorf("failed to delete key %q: %w", key, err) - break - } - } - t.Log("The second goroutine finished") - }() - - t.Log("Waiting for the two goroutines to complete") - go func() { - wg.Wait() - close(donec) - }() - - t.Log("Waiting for test result") - select { - case err := <-errs: - t.Fatalf("Unexpected error: %v", err) - case <-donec: - t.Log("All done!") - case <-time.After(30 * time.Second): - t.Fatal("Test case timeout after 20 seconds") - } -} diff --git a/tests/e2e/ctl_v3_auth_test.go b/tests/e2e/ctl_v3_auth_test.go deleted file mode 100644 index 3ac32af8e4f..00000000000 --- a/tests/e2e/ctl_v3_auth_test.go +++ /dev/null @@ -1,1267 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you 
may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/stretchr/testify/require" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3AuthWriteKey(t *testing.T) { testCtl(t, authCredWriteKeyTest) } -func TestCtlV3AuthRoleUpdate(t *testing.T) { testCtl(t, authRoleUpdateTest) } -func TestCtlV3AuthUserDeleteDuringOps(t *testing.T) { testCtl(t, authUserDeleteDuringOpsTest) } -func TestCtlV3AuthRoleRevokeDuringOps(t *testing.T) { testCtl(t, authRoleRevokeDuringOpsTest) } -func TestCtlV3AuthTxn(t *testing.T) { testCtl(t, authTestTxn) } -func TestCtlV3AuthTxnJWT(t *testing.T) { testCtl(t, authTestTxn, withCfg(*e2e.NewConfigJWT())) } -func TestCtlV3AuthPrefixPerm(t *testing.T) { testCtl(t, authTestPrefixPerm) } -func TestCtlV3AuthMemberAdd(t *testing.T) { testCtl(t, authTestMemberAdd) } -func TestCtlV3AuthMemberRemove(t *testing.T) { - testCtl(t, authTestMemberRemove, withQuorum(), withDisableStrictReconfig()) -} -func TestCtlV3AuthMemberUpdate(t *testing.T) { testCtl(t, authTestMemberUpdate) } -func TestCtlV3AuthRevokeWithDelete(t *testing.T) { testCtl(t, authTestRevokeWithDelete) } -func TestCtlV3AuthInvalidMgmt(t *testing.T) { testCtl(t, authTestInvalidMgmt) } -func TestCtlV3AuthFromKeyPerm(t *testing.T) { testCtl(t, authTestFromKeyPerm) } -func TestCtlV3AuthAndWatch(t *testing.T) { testCtl(t, authTestWatch) } -func TestCtlV3AuthAndWatchJWT(t *testing.T) { testCtl(t, authTestWatch, 
withCfg(*e2e.NewConfigJWT())) } - -func TestCtlV3AuthLeaseTestKeepAlive(t *testing.T) { testCtl(t, authLeaseTestKeepAlive) } -func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) { - testCtl(t, authLeaseTestTimeToLiveExpired) -} -func TestCtlV3AuthLeaseGrantLeases(t *testing.T) { testCtl(t, authLeaseTestLeaseGrantLeases) } -func TestCtlV3AuthLeaseGrantLeasesJWT(t *testing.T) { - testCtl(t, authLeaseTestLeaseGrantLeases, withCfg(*e2e.NewConfigJWT())) -} -func TestCtlV3AuthLeaseRevoke(t *testing.T) { testCtl(t, authLeaseTestLeaseRevoke) } - -func TestCtlV3AuthRoleGet(t *testing.T) { testCtl(t, authTestRoleGet) } -func TestCtlV3AuthUserGet(t *testing.T) { testCtl(t, authTestUserGet) } -func TestCtlV3AuthRoleList(t *testing.T) { testCtl(t, authTestRoleList) } - -func TestCtlV3AuthDefrag(t *testing.T) { testCtl(t, authTestDefrag) } -func TestCtlV3AuthEndpointHealth(t *testing.T) { - testCtl(t, authTestEndpointHealth, withQuorum()) -} -func TestCtlV3AuthSnapshot(t *testing.T) { testCtl(t, authTestSnapshot) } -func TestCtlV3AuthSnapshotJWT(t *testing.T) { - testCtl(t, authTestSnapshot, withCfg(*e2e.NewConfigJWT())) -} -func TestCtlV3AuthJWTExpire(t *testing.T) { - testCtl(t, authTestJWTExpire, withCfg(*e2e.NewConfigJWT())) -} -func TestCtlV3AuthRevisionConsistency(t *testing.T) { testCtl(t, authTestRevisionConsistency) } -func TestCtlV3AuthTestCacheReload(t *testing.T) { testCtl(t, authTestCacheReload) } - -func authEnable(cx ctlCtx) error { - // create root user with root role - if err := ctlV3User(cx, []string{"add", "root", "--interactive=false"}, "User root created", []string{"root"}); err != nil { - return fmt.Errorf("failed to create root user %v", err) - } - if err := ctlV3User(cx, []string{"grant-role", "root", "root"}, "Role root is granted to user root", nil); err != nil { - return fmt.Errorf("failed to grant root user root role %v", err) - } - if err := ctlV3AuthEnable(cx); err != nil { - return fmt.Errorf("authEnableTest ctlV3AuthEnable error (%v)", err) 
- } - return nil -} - -func ctlV3AuthEnable(cx ctlCtx) error { - cmdArgs := append(cx.PrefixArgs(), "auth", "enable") - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Enabled") -} - -func authCredWriteKeyTest(cx ctlCtx) { - // baseline key to check for failed puts - if err := ctlV3Put(cx, "foo", "a", ""); err != nil { - cx.t.Fatal(err) - } - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // confirm root role can access to all keys - if err := ctlV3Put(cx, "foo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil { - cx.t.Fatal(err) - } - - // try invalid user - cx.user, cx.pass = "a", "b" - err := ctlV3PutFailAuth(cx, "foo", "bar") - require.ErrorContains(cx.t, err, "authentication failed") - - // confirm put failed - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil { - cx.t.Fatal(err) - } - - // try good user - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Put(cx, "foo", "bar2", ""); err != nil { - cx.t.Fatal(err) - } - // confirm put succeeded - if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar2"}}...); err != nil { - cx.t.Fatal(err) - } - - // try bad password - cx.user, cx.pass = "test-user", "badpass" - err = ctlV3PutFailAuth(cx, "foo", "baz") - require.ErrorContains(cx.t, err, "authentication failed") - - // confirm put failed - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar2"}}...); err != nil { - cx.t.Fatal(err) - } -} - -func authRoleUpdateTest(cx ctlCtx) { - if err := ctlV3Put(cx, "foo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // try put to not granted key - cx.user, cx.pass = "test-user", "pass" - err := 
ctlV3PutFailPerm(cx, "hoo", "bar") - require.ErrorContains(cx.t, err, "permission denied") - - // grant a new key - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "hoo", "", false}); err != nil { - cx.t.Fatal(err) - } - - // try a newly granted key - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - // confirm put succeeded - if err := ctlV3Get(cx, []string{"hoo"}, []kv{{"hoo", "bar"}}...); err != nil { - cx.t.Fatal(err) - } - - // revoke the newly granted key - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleRevokePermission(cx, "test-role", "hoo", "", false); err != nil { - cx.t.Fatal(err) - } - - // try put to the revoked key - cx.user, cx.pass = "test-user", "pass" - err = ctlV3PutFailPerm(cx, "hoo", "bar") - require.ErrorContains(cx.t, err, "permission denied") - - // confirm a key still granted can be accessed - if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil { - cx.t.Fatal(err) - } -} - -func authUserDeleteDuringOpsTest(cx ctlCtx) { - if err := ctlV3Put(cx, "foo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // create a key - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Put(cx, "foo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - // confirm put succeeded - if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil { - cx.t.Fatal(err) - } - - // delete the user - cx.user, cx.pass = "root", "root" - err := ctlV3User(cx, []string{"delete", "test-user"}, "User test-user deleted", []string{}) - if err != nil { - cx.t.Fatal(err) - } - - // check the user is deleted - cx.user, cx.pass = "test-user", "pass" - err = ctlV3PutFailAuth(cx, "foo", "baz") - require.ErrorContains(cx.t, err, "authentication failed") -} - -func 
authRoleRevokeDuringOpsTest(cx ctlCtx) { - if err := ctlV3Put(cx, "foo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // create a key - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Put(cx, "foo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - // confirm put succeeded - if err := ctlV3Get(cx, []string{"foo"}, []kv{{"foo", "bar"}}...); err != nil { - cx.t.Fatal(err) - } - - // create a new role - cx.user, cx.pass = "root", "root" - if err := ctlV3Role(cx, []string{"add", "test-role2"}, "Role test-role2 created"); err != nil { - cx.t.Fatal(err) - } - // grant a new key to the new role - if err := ctlV3RoleGrantPermission(cx, "test-role2", grantingPerm{true, true, "hoo", "", false}); err != nil { - cx.t.Fatal(err) - } - // grant the new role to the user - if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role2"}, "Role test-role2 is granted to user test-user", nil); err != nil { - cx.t.Fatal(err) - } - - // try a newly granted key - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - // confirm put succeeded - if err := ctlV3Get(cx, []string{"hoo"}, []kv{{"hoo", "bar"}}...); err != nil { - cx.t.Fatal(err) - } - - // revoke a role from the user - cx.user, cx.pass = "root", "root" - err := ctlV3User(cx, []string{"revoke-role", "test-user", "test-role"}, "Role test-role is revoked from user test-user", []string{}) - if err != nil { - cx.t.Fatal(err) - } - - // check the role is revoked and permission is lost from the user - cx.user, cx.pass = "test-user", "pass" - err = ctlV3PutFailPerm(cx, "foo", "baz") - require.ErrorContains(cx.t, err, "permission denied") - - // try a key that can be accessed from the remaining role - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Put(cx, "hoo", "bar2", ""); err != nil { - cx.t.Fatal(err) - } - // 
confirm put succeeded - if err := ctlV3Get(cx, []string{"hoo"}, []kv{{"hoo", "bar2"}}...); err != nil { - cx.t.Fatal(err) - } -} - -func ctlV3PutFailAuth(cx ctlCtx, key, val string) error { - return e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "authentication failed") -} - -func ctlV3PutFailPerm(cx ctlCtx, key, val string) error { - return e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "permission denied") -} - -func authSetupTestUser(cx ctlCtx) { - if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil { - cx.t.Fatal(err) - } - if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role"}, "Role test-role is granted to user test-user", nil); err != nil { - cx.t.Fatal(err) - } - cmd := append(cx.PrefixArgs(), "role", "grant-permission", "test-role", "readwrite", "foo") - if err := e2e.SpawnWithExpectWithEnv(cmd, cx.envMap, "Role test-role updated"); err != nil { - cx.t.Fatal(err) - } -} - -func authTestTxn(cx ctlCtx) { - // keys with 1 suffix aren't granted to test-user - // keys with 2 suffix are granted to test-user - - keys := []string{"c1", "s1", "f1"} - grantedKeys := []string{"c2", "s2", "f2"} - for _, key := range keys { - if err := ctlV3Put(cx, key, "v", ""); err != nil { - cx.t.Fatal(err) - } - } - - for _, key := range grantedKeys { - if err := ctlV3Put(cx, key, "v", ""); err != nil { - cx.t.Fatal(err) - } - } - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // grant keys to test-user - cx.user, cx.pass = "root", "root" - for _, key := range grantedKeys { - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, key, "", false}); err != 
nil { - cx.t.Fatal(err) - } - } - - // now test txn - cx.interactive = true - cx.user, cx.pass = "test-user", "pass" - - rqs := txnRequests{ - compare: []string{`version("c2") = "1"`}, - ifSuccess: []string{"get s2"}, - ifFail: []string{"get f2"}, - results: []string{"SUCCESS", "s2", "v"}, - } - if err := ctlV3Txn(cx, rqs, false); err != nil { - cx.t.Fatal(err) - } - - // a key of compare case isn't granted - rqs = txnRequests{ - compare: []string{`version("c1") = "1"`}, - ifSuccess: []string{"get s2"}, - ifFail: []string{"get f2"}, - results: []string{"Error: etcdserver: permission denied"}, - } - if err := ctlV3Txn(cx, rqs, true); err != nil { - cx.t.Fatal(err) - } - - // a key of success case isn't granted - rqs = txnRequests{ - compare: []string{`version("c2") = "1"`}, - ifSuccess: []string{"get s1"}, - ifFail: []string{"get f2"}, - results: []string{"Error: etcdserver: permission denied"}, - } - if err := ctlV3Txn(cx, rqs, true); err != nil { - cx.t.Fatal(err) - } - - // a key of failure case isn't granted - rqs = txnRequests{ - compare: []string{`version("c2") = "1"`}, - ifSuccess: []string{"get s2"}, - ifFail: []string{"get f1"}, - results: []string{"Error: etcdserver: permission denied"}, - } - if err := ctlV3Txn(cx, rqs, true); err != nil { - cx.t.Fatal(err) - } -} - -func authTestPrefixPerm(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - prefix := "/prefix/" // directory like prefix - // grant keys to test-user - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, prefix, "", true}); err != nil { - cx.t.Fatal(err) - } - - // try a prefix granted permission - cx.user, cx.pass = "test-user", "pass" - for i := 0; i < 10; i++ { - key := fmt.Sprintf("%s%d", prefix, i) - if err := ctlV3Put(cx, key, "val", ""); err != nil { - cx.t.Fatal(err) - } - } - - err := ctlV3PutFailPerm(cx, 
clientv3.GetPrefixRangeEnd(prefix), "baz") - require.ErrorContains(cx.t, err, "permission denied") - - // grant the entire keys to test-user - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "", "", true}); err != nil { - cx.t.Fatal(err) - } - - prefix2 := "/prefix2/" - cx.user, cx.pass = "test-user", "pass" - for i := 0; i < 10; i++ { - key := fmt.Sprintf("%s%d", prefix2, i) - if err := ctlV3Put(cx, key, "val", ""); err != nil { - cx.t.Fatal(err) - } - } -} - -func authTestMemberAdd(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11) - // ordinary user cannot add a new member - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3MemberAdd(cx, peerURL, false); err == nil { - cx.t.Fatalf("ordinary user must not be allowed to add a member") - } - - // root can add a new member - cx.user, cx.pass = "root", "root" - if err := ctlV3MemberAdd(cx, peerURL, false); err != nil { - cx.t.Fatal(err) - } -} - -func authTestMemberRemove(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - ep, memIDToRemove, clusterID := cx.memberToRemove() - - // ordinary user cannot remove a member - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3MemberRemove(cx, ep, memIDToRemove, clusterID); err == nil { - cx.t.Fatalf("ordinary user must not be allowed to remove a member") - } - - // root can remove a member - cx.user, cx.pass = "root", "root" - if err := ctlV3MemberRemove(cx, ep, memIDToRemove, clusterID); err != nil { - cx.t.Fatal(err) - } -} - -func authTestMemberUpdate(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - mr, err := getMemberList(cx) - if err != nil { - 
cx.t.Fatal(err) - } - - // ordinary user cannot update a member - cx.user, cx.pass = "test-user", "pass" - peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11) - memberID := fmt.Sprintf("%x", mr.Members[0].ID) - if err = ctlV3MemberUpdate(cx, memberID, peerURL); err == nil { - cx.t.Fatalf("ordinary user must not be allowed to update a member") - } - - // root can update a member - cx.user, cx.pass = "root", "root" - if err = ctlV3MemberUpdate(cx, memberID, peerURL); err != nil { - cx.t.Fatal(err) - } -} - -func authTestCertCN(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - if err := ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}); err != nil { - cx.t.Fatal(err) - } - if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role"}, "Role test-role is granted to user example.com", nil); err != nil { - cx.t.Fatal(err) - } - - // grant a new key - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "hoo", "", false}); err != nil { - cx.t.Fatal(err) - } - - // try a granted key - cx.user, cx.pass = "", "" - if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil { - cx.t.Error(err) - } - - // try a non-granted key - cx.user, cx.pass = "", "" - err := ctlV3PutFailPerm(cx, "baz", "bar") - require.ErrorContains(cx.t, err, "permission denied") -} - -func authTestRevokeWithDelete(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // create a new role - cx.user, cx.pass = "root", "root" - if err := ctlV3Role(cx, []string{"add", "test-role2"}, "Role test-role2 created"); err != nil { - cx.t.Fatal(err) - } - - // grant the new role to the user - 
if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role2"}, "Role test-role2 is granted to user test-user", nil); err != nil { - cx.t.Fatal(err) - } - - // check the result - if err := ctlV3User(cx, []string{"get", "test-user"}, "Roles: test-role test-role2", nil); err != nil { - cx.t.Fatal(err) - } - - // delete the role, test-role2 must be revoked from test-user - if err := ctlV3Role(cx, []string{"delete", "test-role2"}, "Role test-role2 deleted"); err != nil { - cx.t.Fatal(err) - } - - // check the result - if err := ctlV3User(cx, []string{"get", "test-user"}, "Roles: test-role", nil); err != nil { - cx.t.Fatal(err) - } -} - -func authTestInvalidMgmt(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - if err := ctlV3Role(cx, []string{"delete", "root"}, "Error: etcdserver: invalid auth management"); err == nil { - cx.t.Fatal("deleting the role root must not be allowed") - } - - if err := ctlV3User(cx, []string{"revoke-role", "root", "root"}, "Error: etcdserver: invalid auth management", []string{}); err == nil { - cx.t.Fatal("revoking the role root from the user root must not be allowed") - } -} - -func authTestFromKeyPerm(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // grant keys after z to test-user - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "z", "\x00", false}); err != nil { - cx.t.Fatal(err) - } - - // try the granted open ended permission - cx.user, cx.pass = "test-user", "pass" - for i := 0; i < 10; i++ { - key := fmt.Sprintf("z%d", i) - if err := ctlV3Put(cx, key, "val", ""); err != nil { - cx.t.Fatal(err) - } - } - largeKey := "" - for i := 0; i < 10; i++ { - largeKey += "\xff" - if err := ctlV3Put(cx, largeKey, "val", ""); err != nil { - cx.t.Fatal(err) - } - } - - // try a non granted key - err := ctlV3PutFailPerm(cx, "x", "baz") - 
require.ErrorContains(cx.t, err, "permission denied") - - // revoke the open ended permission - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleRevokePermission(cx, "test-role", "z", "", true); err != nil { - cx.t.Fatal(err) - } - - // try the revoked open ended permission - cx.user, cx.pass = "test-user", "pass" - for i := 0; i < 10; i++ { - key := fmt.Sprintf("z%d", i) - err := ctlV3PutFailPerm(cx, key, "val") - require.ErrorContains(cx.t, err, "permission denied") - } - - // grant the entire keys - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "", "\x00", false}); err != nil { - cx.t.Fatal(err) - } - - // try keys, of course it must be allowed because test-role has a permission of the entire keys - cx.user, cx.pass = "test-user", "pass" - for i := 0; i < 10; i++ { - key := fmt.Sprintf("z%d", i) - if err := ctlV3Put(cx, key, "val", ""); err != nil { - cx.t.Fatal(err) - } - } - - // revoke the entire keys - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleRevokePermission(cx, "test-role", "", "", true); err != nil { - cx.t.Fatal(err) - } - - // try the revoked entire key permission - cx.user, cx.pass = "test-user", "pass" - for i := 0; i < 10; i++ { - key := fmt.Sprintf("z%d", i) - err := ctlV3PutFailPerm(cx, key, "val") - require.ErrorContains(cx.t, err, "permission denied") - } -} - -func authLeaseTestKeepAlive(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - // put with TTL 10 seconds and keep-alive - leaseID, err := ctlV3LeaseGrant(cx, 10) - if err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseGrant error (%v)", err) - } - if err := ctlV3Put(cx, "key", "val", leaseID); err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3Put error (%v)", err) - } - if err := ctlV3LeaseKeepAlive(cx, leaseID); err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseKeepAlive error (%v)", err) - } - if 
err := ctlV3Get(cx, []string{"key"}, kv{"key", "val"}); err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3Get error (%v)", err) - } -} - -func authLeaseTestTimeToLiveExpired(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - ttl := 3 - err := leaseTestTimeToLiveExpire(cx, ttl) - require.NoError(cx.t, err) -} - -func leaseTestTimeToLiveExpire(cx ctlCtx, ttl int) error { - leaseID, err := ctlV3LeaseGrant(cx, ttl) - if err != nil { - return fmt.Errorf("ctlV3LeaseGrant error (%v)", err) - } - - if err = ctlV3Put(cx, "key", "val", leaseID); err != nil { - return fmt.Errorf("ctlV3Put error (%v)", err) - } - // eliminate false positive - time.Sleep(time.Duration(ttl+1) * time.Second) - cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", leaseID) - exp := fmt.Sprintf("lease %s already expired", leaseID) - if err = e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, exp); err != nil { - return fmt.Errorf("lease not properly expired: (%v)", err) - } - if err := ctlV3Get(cx, []string{"key"}); err != nil { - return fmt.Errorf("ctlV3Get error (%v)", err) - } - return nil -} - -func authLeaseTestLeaseGrantLeases(cx ctlCtx) { - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - if err := leaseTestGrantLeasesList(cx); err != nil { - cx.t.Fatalf("authLeaseTestLeaseGrantLeases: error (%v)", err) - } -} - -func leaseTestGrantLeasesList(cx ctlCtx) error { - id, err := ctlV3LeaseGrant(cx, 10) - if err != nil { - return fmt.Errorf("ctlV3LeaseGrant error (%v)", err) - } - - cmdArgs := append(cx.PrefixArgs(), "lease", "list") - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - return fmt.Errorf("lease list failed (%v)", err) - } - _, err = proc.Expect(id) - if err != nil { - return fmt.Errorf("lease id not in returned list (%v)", err) - } - return proc.Close() -} - -func authLeaseTestLeaseRevoke(cx ctlCtx) { - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - 
- // put with TTL 10 seconds and revoke - leaseID, err := ctlV3LeaseGrant(cx, 10) - if err != nil { - cx.t.Fatalf("ctlV3LeaseGrant error (%v)", err) - } - if err := ctlV3Put(cx, "key", "val", leaseID); err != nil { - cx.t.Fatalf("ctlV3Put error (%v)", err) - } - if err := ctlV3LeaseRevoke(cx, leaseID); err != nil { - cx.t.Fatalf("ctlV3LeaseRevoke error (%v)", err) - } - if err := ctlV3GetWithErr(cx, []string{"key"}, []string{"retrying of unary invoker failed"}); err != nil { // expect errors - cx.t.Fatalf("ctlV3GetWithErr error (%v)", err) - } -} - -func authTestWatch(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // grant a key range - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "key", "key4", false}); err != nil { - cx.t.Fatal(err) - } - - tests := []struct { - puts []kv - args []string - - wkv []kvExec - want bool - }{ - { // watch 1 key, should be successful - []kv{{"key", "value"}}, - []string{"key", "--rev", "1"}, - []kvExec{{key: "key", val: "value"}}, - true, - }, - { // watch 3 keys by range, should be successful - []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", "val2"}}, - []string{"key", "key3", "--rev", "1"}, - []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}}, - true, - }, - - { // watch 1 key, should not be successful - []kv{}, - []string{"key5", "--rev", "1"}, - []kvExec{}, - false, - }, - { // watch 3 keys by range, should not be successful - []kv{}, - []string{"key", "key6", "--rev", "1"}, - []kvExec{}, - false, - }, - } - - cx.user, cx.pass = "test-user", "pass" - for i, tt := range tests { - donec := make(chan struct{}) - go func(i int, puts []kv) { - defer close(donec) - for j := range puts { - if err := ctlV3Put(cx, puts[j].key, puts[j].val, ""); err != nil { - cx.t.Errorf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err) - } - } - }(i, tt.puts) - - var err error - if tt.want { - err = ctlV3Watch(cx, 
tt.args, tt.wkv...) - if err != nil && cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Errorf("watchTest #%d: ctlV3Watch error (%v)", i, err) - } - } else { - err = ctlV3WatchFailPerm(cx, tt.args) - // this will not have any meaningful error output, but the process fails due to the cancellation - require.ErrorContains(cx.t, err, "unexpected exit code") - } - - <-donec - } - -} - -func authTestRoleGet(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - expected := []string{ - "Role test-role", - "KV Read:", "foo", - "KV Write:", "foo", - } - if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil { - cx.t.Fatal(err) - } - - // test-user can get the information of test-role because it belongs to the role - cx.user, cx.pass = "test-user", "pass" - if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil { - cx.t.Fatal(err) - } - - // test-user cannot get the information of root because it doesn't belong to the role - expected = []string{ - "Error: etcdserver: permission denied", - } - err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), cx.envMap, expected...) 
- require.ErrorContains(cx.t, err, "permission denied") -} - -func authTestUserGet(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - expected := []string{ - "User: test-user", - "Roles: test-role", - } - - if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil { - cx.t.Fatal(err) - } - - // test-user can get the information of test-user itself - cx.user, cx.pass = "test-user", "pass" - if err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil { - cx.t.Fatal(err) - } - - // test-user cannot get the information of root - expected = []string{ - "Error: etcdserver: permission denied", - } - err := e2e.SpawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), cx.envMap, expected...) - require.ErrorContains(cx.t, err, "permission denied") -} - -func authTestRoleList(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "list"), cx.envMap, "test-role"); err != nil { - cx.t.Fatal(err) - } -} - -func authTestDefrag(cx ctlCtx) { - maintenanceInitKeys(cx) - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // ordinary user cannot defrag - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3OnlineDefrag(cx); err == nil { - cx.t.Fatal("ordinary user should not be able to issue a defrag request") - } - - // root can defrag - cx.user, cx.pass = "root", "root" - if err := ctlV3OnlineDefrag(cx); err != nil { - cx.t.Fatal(err) - } -} - -func authTestSnapshot(cx ctlCtx) { - maintenanceInitKeys(cx) - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - fpath 
:= "test-auth.snapshot" - defer os.RemoveAll(fpath) - - // ordinary user cannot save a snapshot - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3SnapshotSave(cx, fpath); err == nil { - cx.t.Fatal("ordinary user should not be able to save a snapshot") - } - - // root can save a snapshot - cx.user, cx.pass = "root", "root" - if err := ctlV3SnapshotSave(cx, fpath); err != nil { - cx.t.Fatalf("snapshotTest ctlV3SnapshotSave error (%v)", err) - } - - st, err := getSnapshotStatus(cx, fpath) - if err != nil { - cx.t.Fatalf("snapshotTest getSnapshotStatus error (%v)", err) - } - if st.Revision != 4 { - cx.t.Fatalf("expected 4, got %d", st.Revision) - } - if st.TotalKey < 3 { - cx.t.Fatalf("expected at least 3, got %d", st.TotalKey) - } -} - -func authTestEndpointHealth(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - if err := ctlV3EndpointHealth(cx); err != nil { - cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) - } - - // health checking with an ordinary user "succeeds" since permission denial goes through consensus - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3EndpointHealth(cx); err != nil { - cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) - } - - // succeed if permissions granted for ordinary user - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "health", "", false}); err != nil { - cx.t.Fatal(err) - } - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3EndpointHealth(cx); err != nil { - cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) - } -} - -func certCNAndUsername(cx ctlCtx, noPassword bool) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - if noPassword { - if err := ctlV3User(cx, []string{"add", "example.com", "--no-password"}, "User 
example.com created", []string{""}); err != nil { - cx.t.Fatal(err) - } - } else { - if err := ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}); err != nil { - cx.t.Fatal(err) - } - } - if err := e2e.SpawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), cx.envMap, "Role test-role-cn created"); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role-cn"}, "Role test-role-cn is granted to user example.com", nil); err != nil { - cx.t.Fatal(err) - } - - // grant a new key for CN based user - if err := ctlV3RoleGrantPermission(cx, "test-role-cn", grantingPerm{true, true, "hoo", "", false}); err != nil { - cx.t.Fatal(err) - } - - // grant a new key for username based user - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "bar", "", false}); err != nil { - cx.t.Fatal(err) - } - - // try a granted key for CN based user - cx.user, cx.pass = "", "" - if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil { - cx.t.Error(err) - } - - // try a granted key for username based user - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Put(cx, "bar", "bar", ""); err != nil { - cx.t.Error(err) - } - - // try a non-granted key for both of them - cx.user, cx.pass = "", "" - err := ctlV3PutFailPerm(cx, "baz", "bar") - require.ErrorContains(cx.t, err, "permission denied") - - cx.user, cx.pass = "test-user", "pass" - err = ctlV3PutFailPerm(cx, "baz", "bar") - require.ErrorContains(cx.t, err, "permission denied") -} - -func authTestCertCNAndUsername(cx ctlCtx) { - certCNAndUsername(cx, false) -} - -func authTestCertCNAndUsernameNoPassword(cx ctlCtx) { - certCNAndUsername(cx, true) -} - -func authTestJWTExpire(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // try a granted key - if err := ctlV3Put(cx, "hoo", "bar", ""); err != 
nil { - cx.t.Error(err) - } - - // wait an expiration of my JWT token - <-time.After(3 * time.Second) - - if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil { - cx.t.Error(err) - } -} - -func authTestRevisionConsistency(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - cx.user, cx.pass = "root", "root" - - // add user - if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil { - cx.t.Fatal(err) - } - // delete the same user - if err := ctlV3User(cx, []string{"delete", "test-user"}, "User test-user deleted", []string{}); err != nil { - cx.t.Fatal(err) - } - - // get node0 auth revision - node0 := cx.epc.Procs[0] - endpoint := node0.EndpointsV3()[0] - cli, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, Username: cx.user, Password: cx.pass, DialTimeout: 3 * time.Second}) - if err != nil { - cx.t.Fatal(err) - } - defer cli.Close() - - sresp, err := cli.AuthStatus(context.TODO()) - if err != nil { - cx.t.Fatal(err) - } - oldAuthRevision := sresp.AuthRevision - - // restart the node - if err := node0.Restart(context.TODO()); err != nil { - cx.t.Fatal(err) - } - - // get node0 auth revision again - sresp, err = cli.AuthStatus(context.TODO()) - if err != nil { - cx.t.Fatal(err) - } - newAuthRevision := sresp.AuthRevision - - // assert AuthRevision equal - if newAuthRevision != oldAuthRevision { - cx.t.Fatalf("auth revison shouldn't change when restarting etcd, expected: %d, got: %d", oldAuthRevision, newAuthRevision) - } -} - -func ctlV3EndpointHealth(cx ctlCtx) error { - cmdArgs := append(cx.PrefixArgs(), "endpoint", "health") - lines := make([]string, cx.epc.Cfg.ClusterSize) - for i := range lines { - lines[i] = "is healthy" - } - return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...) 
-} - -func ctlV3User(cx ctlCtx, args []string, expStr string, stdIn []string) error { - cmdArgs := append(cx.PrefixArgs(), "user") - cmdArgs = append(cmdArgs, args...) - - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - return err - } - defer proc.Close() - - // Send 'stdIn' strings as input. - for _, s := range stdIn { - if err = proc.Send(s + "\r"); err != nil { - return err - } - } - - _, err = proc.Expect(expStr) - return err -} - -// authTestCacheReload tests the permissions when a member restarts -func authTestCacheReload(cx ctlCtx) { - - authData := []struct { - user string - role string - pass string - }{ - { - user: "root", - role: "root", - pass: "123", - }, - { - user: "user0", - role: "role0", - pass: "123", - }, - } - - node0 := cx.epc.Procs[0] - endpoint := node0.EndpointsV3()[0] - - // create a client - c, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, DialTimeout: 3 * time.Second}) - if err != nil { - cx.t.Fatal(err) - } - defer c.Close() - - for _, authObj := range authData { - // add role - if _, err = c.RoleAdd(context.TODO(), authObj.role); err != nil { - cx.t.Fatal(err) - } - - // add user - if _, err = c.UserAdd(context.TODO(), authObj.user, authObj.pass); err != nil { - cx.t.Fatal(err) - } - - // grant role to user - if _, err = c.UserGrantRole(context.TODO(), authObj.user, authObj.role); err != nil { - cx.t.Fatal(err) - } - } - - // role grant permission to role0 - if _, err = c.RoleGrantPermission(context.TODO(), authData[1].role, "foo", "", clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { - cx.t.Fatal(err) - } - - // enable auth - if _, err = c.AuthEnable(context.TODO()); err != nil { - cx.t.Fatal(err) - } - - // create another client with ID:Password - c2, err := clientv3.New(clientv3.Config{Endpoints: []string{endpoint}, Username: authData[1].user, Password: authData[1].pass, DialTimeout: 3 * time.Second}) - if err != nil { - cx.t.Fatal(err) - } - defer c2.Close() - - // create foo 
since that is within the permission set - // expectation is to succeed - if _, err = c2.Put(context.TODO(), "foo", "bar"); err != nil { - cx.t.Fatal(err) - } - - // restart the node - if err := node0.Restart(context.TODO()); err != nil { - cx.t.Fatal(err) - } - - // nothing has changed, but it fails without refreshing cache after restart - if _, err = c2.Put(context.TODO(), "foo", "bar2"); err != nil { - cx.t.Fatal(err) - } -} diff --git a/tests/e2e/ctl_v3_completion_test.go b/tests/e2e/ctl_v3_completion_test.go deleted file mode 100644 index 3b4113f342b..00000000000 --- a/tests/e2e/ctl_v3_completion_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !cov - -package e2e - -import ( - "bytes" - "fmt" - "os" - "os/exec" - "testing" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3CompletionBash(t *testing.T) { - testShellCompletion(t, e2e.BinPath.Etcdctl, "bash") -} - -func TestUtlV3CompletionBash(t *testing.T) { - testShellCompletion(t, e2e.BinPath.Etcdutl, "bash") -} - -// testShellCompletion can only run in non-coverage mode. The etcdctl and etcdutl -// built with `-tags cov` mode will show go-test result after each execution, like -// -// PASS -// coverage: 0.0% of statements in ./... -// -// Since the PASS is not real command, the `source completion" fails with -// command-not-found error. 
-func testShellCompletion(t *testing.T, binPath, shellName string) { - e2e.BeforeTest(t) - - stdout := new(bytes.Buffer) - completionCmd := exec.Command(binPath, "completion", shellName) - completionCmd.Stdout = stdout - completionCmd.Stderr = os.Stderr - require.NoError(t, completionCmd.Run()) - - filename := fmt.Sprintf("etcdctl-%s.completion", shellName) - require.NoError(t, os.WriteFile(filename, stdout.Bytes(), 0644)) - - shellCmd := exec.Command(shellName, "-c", "source "+filename) - require.NoError(t, shellCmd.Run()) -} diff --git a/tests/e2e/ctl_v3_defrag_test.go b/tests/e2e/ctl_v3_defrag_test.go deleted file mode 100644 index 07f6bd44343..00000000000 --- a/tests/e2e/ctl_v3_defrag_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "testing" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3DefragOffline(t *testing.T) { - testCtlWithOffline(t, maintenanceInitKeys, defragOfflineTest) -} - -func maintenanceInitKeys(cx ctlCtx) { - var kvs = []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}} - for i := range kvs { - if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil { - cx.t.Fatal(err) - } - } -} - -func ctlV3OnlineDefrag(cx ctlCtx) error { - cmdArgs := append(cx.PrefixArgs(), "defrag") - lines := make([]string, cx.epc.Cfg.ClusterSize) - for i := range lines { - lines[i] = "Finished defragmenting etcd member" - } - return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...) -} - -func ctlV3OfflineDefrag(cx ctlCtx) error { - cmdArgs := append(cx.PrefixArgsUtl(), "defrag", "--data-dir", cx.dataDir) - lines := []string{"finished defragmenting directory"} - return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...) -} - -func defragOfflineTest(cx ctlCtx) { - if err := ctlV3OfflineDefrag(cx); err != nil { - cx.t.Fatalf("defragTest ctlV3Defrag error (%v)", err) - } -} diff --git a/tests/e2e/ctl_v3_elect_test.go b/tests/e2e/ctl_v3_elect_test.go deleted file mode 100644 index 386a5f7dbdb..00000000000 --- a/tests/e2e/ctl_v3_elect_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3Elect(t *testing.T) { - testCtl(t, testElect) -} - -func testElect(cx ctlCtx) { - name := "a" - - holder, ch, err := ctlV3Elect(cx, name, "p1", false) - if err != nil { - cx.t.Fatal(err) - } - - l1 := "" - select { - case <-time.After(2 * time.Second): - cx.t.Fatalf("timed out electing") - case l1 = <-ch: - if !strings.HasPrefix(l1, name) { - cx.t.Errorf("got %q, expected %q prefix", l1, name) - } - } - - // blocked process that won't win the election - blocked, ch, err := ctlV3Elect(cx, name, "p2", true) - if err != nil { - cx.t.Fatal(err) - } - select { - case <-time.After(100 * time.Millisecond): - case <-ch: - cx.t.Fatalf("should block") - } - - // overlap with a blocker that will win the election - blockAcquire, ch, err := ctlV3Elect(cx, name, "p2", false) - if err != nil { - cx.t.Fatal(err) - } - defer func(blockAcquire *expect.ExpectProcess) { - err = blockAcquire.Stop() - require.NoError(cx.t, err) - blockAcquire.Wait() - }(blockAcquire) - - select { - case <-time.After(100 * time.Millisecond): - case <-ch: - cx.t.Fatalf("should block") - } - - // kill blocked process with clean shutdown - if err = blocked.Signal(os.Interrupt); err != nil { - cx.t.Fatal(err) - } - err = e2e.CloseWithTimeout(blocked, time.Second) - if err != nil { - // due to being blocked, this can potentially get killed and thus exit non-zero sometimes - require.ErrorContains(cx.t, err, "unexpected exit code") - } - - // kill the holder with clean shutdown - if err = holder.Signal(os.Interrupt); err != nil { - cx.t.Fatal(err) - } - if err = e2e.CloseWithTimeout(holder, time.Second); err != nil { - cx.t.Fatal(err) - } - - // blockAcquire should win the election - select { - case <-time.After(time.Second): - cx.t.Fatalf("timed out from waiting to holding") - case l2 := <-ch: 
- if l1 == l2 || !strings.HasPrefix(l2, name) { - cx.t.Fatalf("expected different elect name, got l1=%q, l2=%q", l1, l2) - } - } -} - -// ctlV3Elect creates a elect process with a channel listening for when it wins the election. -func ctlV3Elect(cx ctlCtx, name, proposal string, expectFailure bool) (*expect.ExpectProcess, <-chan string, error) { - cmdArgs := append(cx.PrefixArgs(), "elect", name, proposal) - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - outc := make(chan string, 1) - if err != nil { - close(outc) - return proc, outc, err - } - go func() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - s, xerr := proc.ExpectFunc(ctx, func(string) bool { return true }) - if xerr != nil { - if !expectFailure { - cx.t.Errorf("expect failed (%v)", xerr) - } - } - outc <- s - }() - return proc, outc, err -} diff --git a/tests/e2e/ctl_v3_grpc_test.go b/tests/e2e/ctl_v3_grpc_test.go deleted file mode 100644 index a26b5d2a9e3..00000000000 --- a/tests/e2e/ctl_v3_grpc_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !cluster_proxy - -package e2e - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestAuthority(t *testing.T) { - tcs := []struct { - name string - useTLS bool - useInsecureTLS bool - clientURLPattern string - expectAuthorityPattern string - }{ - { - name: "http://domain[:port]", - clientURLPattern: "http://localhost:${MEMBER_PORT}", - expectAuthorityPattern: "localhost:${MEMBER_PORT}", - }, - { - name: "http://address[:port]", - clientURLPattern: "http://127.0.0.1:${MEMBER_PORT}", - expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}", - }, - { - name: "https://domain[:port] insecure", - useTLS: true, - useInsecureTLS: true, - clientURLPattern: "https://localhost:${MEMBER_PORT}", - expectAuthorityPattern: "localhost:${MEMBER_PORT}", - }, - { - name: "https://address[:port] insecure", - useTLS: true, - useInsecureTLS: true, - clientURLPattern: "https://127.0.0.1:${MEMBER_PORT}", - expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}", - }, - { - name: "https://domain[:port]", - useTLS: true, - clientURLPattern: "https://localhost:${MEMBER_PORT}", - expectAuthorityPattern: "localhost:${MEMBER_PORT}", - }, - { - name: "https://address[:port]", - useTLS: true, - clientURLPattern: "https://127.0.0.1:${MEMBER_PORT}", - expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}", - }, - } - for _, tc := range tcs { - for _, clusterSize := range []int{1, 3} { - t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) { - e2e.BeforeTest(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - cfg := e2e.NewConfigNoTLS() - cfg.ClusterSize = clusterSize - if tc.useTLS { - cfg.Client.ConnectionType = e2e.ClientTLS - } - cfg.Client.AutoTLS = tc.useInsecureTLS - // Enable debug mode to get logs with http2 headers (including 
authority) - cfg.EnvVars = map[string]string{"GODEBUG": "http2debug=2"} - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg)) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer epc.Close() - endpoints := templateEndpoints(t, tc.clientURLPattern, epc) - - client, err := e2e.NewEtcdctl(cfg.Client, endpoints) - assert.NoError(t, err) - err = client.Put(ctx, "foo", "bar", config.PutOptions{}) - if err != nil { - t.Fatal(err) - } - - testutils.ExecuteWithTimeout(t, 5*time.Second, func() { - assertAuthority(t, strings.ReplaceAll(tc.expectAuthorityPattern, "${MEMBER_PORT}", "20000"), epc) - }) - }) - - } - } -} - -func templateEndpoints(t *testing.T, pattern string, clus *e2e.EtcdProcessCluster) []string { - t.Helper() - var endpoints []string - for i := 0; i < clus.Cfg.ClusterSize; i++ { - ent := pattern - ent = strings.ReplaceAll(ent, "${MEMBER_PORT}", fmt.Sprintf("%d", e2e.EtcdProcessBasePort+i*5)) - endpoints = append(endpoints, ent) - } - return endpoints -} - -func assertAuthority(t *testing.T, expectAurhority string, clus *e2e.EtcdProcessCluster) { - var logs []e2e.LogsExpect - for _, proc := range clus.Procs { - logs = append(logs, proc.Logs()) - } - line := firstMatch(t, `http2: decoded hpack field header field ":authority"`, logs...) 
- line = strings.TrimSuffix(line, "\n") - line = strings.TrimSuffix(line, "\r") - expectLine := fmt.Sprintf(`http2: decoded hpack field header field ":authority" = %q`, expectAurhority) - assert.True(t, strings.HasSuffix(line, expectLine), fmt.Sprintf("Got %q expected suffix %q", line, expectLine)) -} - -func firstMatch(t *testing.T, expectLine string, logs ...e2e.LogsExpect) string { - t.Helper() - match := make(chan string, len(logs)) - for i := range logs { - go func(l e2e.LogsExpect) { - line, _ := l.ExpectWithContext(context.TODO(), expectLine) - match <- line - }(logs[i]) - } - return <-match -} diff --git a/tests/e2e/ctl_v3_kv_test.go b/tests/e2e/ctl_v3_kv_test.go deleted file mode 100644 index 9df307ba073..00000000000 --- a/tests/e2e/ctl_v3_kv_test.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3PutTimeout(t *testing.T) { testCtl(t, putTest, withDialTimeout(0)) } -func TestCtlV3PutClientTLSFlagByEnv(t *testing.T) { - testCtl(t, putTest, withCfg(*e2e.NewConfigClientTLS()), withFlagByEnv()) -} -func TestCtlV3PutIgnoreValue(t *testing.T) { testCtl(t, putTestIgnoreValue) } -func TestCtlV3PutIgnoreLease(t *testing.T) { testCtl(t, putTestIgnoreLease) } - -func TestCtlV3GetTimeout(t *testing.T) { testCtl(t, getTest, withDialTimeout(0)) } - -func TestCtlV3GetFormat(t *testing.T) { testCtl(t, getFormatTest) } -func TestCtlV3GetRev(t *testing.T) { testCtl(t, getRevTest) } -func TestCtlV3GetKeysOnly(t *testing.T) { testCtl(t, getKeysOnlyTest) } -func TestCtlV3GetCountOnly(t *testing.T) { testCtl(t, getCountOnlyTest) } - -func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDialTimeout(0)) } - -func TestCtlV3GetRevokedCRL(t *testing.T) { - cfg := e2e.NewConfig( - e2e.WithClusterSize(1), - e2e.WithClientConnType(e2e.ClientTLS), - e2e.WithClientRevokeCerts(true), - e2e.WithClientCertAuthority(true), - ) - testCtl(t, testGetRevokedCRL, withCfg(*cfg)) -} - -func testGetRevokedCRL(cx ctlCtx) { - // test reject - err := ctlV3Put(cx, "k", "v", "") - require.ErrorContains(cx.t, err, "context deadline exceeded") - - // test accept - cx.epc.Cfg.Client.RevokeCerts = false - if err := ctlV3Put(cx, "k", "v", ""); err != nil { - cx.t.Fatal(err) - } -} - -func putTest(cx ctlCtx) { - key, value := "foo", "bar" - - if err := ctlV3Put(cx, key, value, ""); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Fatalf("putTest ctlV3Put error (%v)", err) - } - } - if err := ctlV3Get(cx, []string{key}, kv{key, value}); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Fatalf("putTest ctlV3Get error (%v)", err) - } - } -} - -func putTestIgnoreValue(cx ctlCtx) { - if err := 
ctlV3Put(cx, "foo", "bar", ""); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar"}); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Put(cx, "foo", "", "", "--ignore-value"); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar"}); err != nil { - cx.t.Fatal(err) - } -} - -func putTestIgnoreLease(cx ctlCtx) { - leaseID, err := ctlV3LeaseGrant(cx, 10) - if err != nil { - cx.t.Fatalf("putTestIgnoreLease: ctlV3LeaseGrant error (%v)", err) - } - if err := ctlV3Put(cx, "foo", "bar", leaseID); err != nil { - cx.t.Fatalf("putTestIgnoreLease: ctlV3Put error (%v)", err) - } - if err := ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar"}); err != nil { - cx.t.Fatalf("putTestIgnoreLease: ctlV3Get error (%v)", err) - } - if err := ctlV3Put(cx, "foo", "bar1", "", "--ignore-lease"); err != nil { - cx.t.Fatalf("putTestIgnoreLease: ctlV3Put error (%v)", err) - } - if err := ctlV3Get(cx, []string{"foo"}, kv{"foo", "bar1"}); err != nil { - cx.t.Fatalf("putTestIgnoreLease: ctlV3Get error (%v)", err) - } - if err := ctlV3LeaseRevoke(cx, leaseID); err != nil { - cx.t.Fatalf("putTestIgnoreLease: ctlV3LeaseRevok error (%v)", err) - } - if err := ctlV3Get(cx, []string{"key"}); err != nil { // expect no output - cx.t.Fatalf("putTestIgnoreLease: ctlV3Get error (%v)", err) - } -} - -func getTest(cx ctlCtx) { - var ( - kvs = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} - revkvs = []kv{{"key3", "val3"}, {"key2", "val2"}, {"key1", "val1"}} - ) - for i := range kvs { - if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil { - cx.t.Fatalf("getTest #%d: ctlV3Put error (%v)", i, err) - } - } - - tests := []struct { - args []string - - wkv []kv - }{ - {[]string{"key1"}, []kv{{"key1", "val1"}}}, - {[]string{"", "--prefix"}, kvs}, - {[]string{"", "--from-key"}, kvs}, - {[]string{"key", "--prefix"}, kvs}, - {[]string{"key", "--prefix", "--limit=2"}, kvs[:2]}, - {[]string{"key", "--prefix", 
"--order=ASCEND", "--sort-by=MODIFY"}, kvs}, - {[]string{"key", "--prefix", "--order=ASCEND", "--sort-by=VERSION"}, kvs}, - {[]string{"key", "--prefix", "--sort-by=CREATE"}, kvs}, // ASCEND by default - {[]string{"key", "--prefix", "--order=DESCEND", "--sort-by=CREATE"}, revkvs}, - {[]string{"key", "--prefix", "--order=DESCEND", "--sort-by=KEY"}, revkvs}, - } - for i, tt := range tests { - if err := ctlV3Get(cx, tt.args, tt.wkv...); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Errorf("getTest #%d: ctlV3Get error (%v)", i, err) - } - } - } -} - -func getFormatTest(cx ctlCtx) { - if err := ctlV3Put(cx, "abc", "123", ""); err != nil { - cx.t.Fatal(err) - } - - tests := []struct { - format string - valueOnly bool - - wstr string - }{ - {"simple", false, "abc"}, - {"simple", true, "123"}, - {"json", false, `"kvs":[{"key":"YWJj"`}, - {"protobuf", false, "\x17\b\x93\xe7\xf6\x93\xd4ņ\xe14\x10\xed"}, - } - - for i, tt := range tests { - cmdArgs := append(cx.PrefixArgs(), "get") - cmdArgs = append(cmdArgs, "--write-out="+tt.format) - if tt.valueOnly { - cmdArgs = append(cmdArgs, "--print-value-only") - } - cmdArgs = append(cmdArgs, "abc") - if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, tt.wstr); err != nil { - cx.t.Errorf("#%d: error (%v), wanted %v", i, err, tt.wstr) - } - } -} - -func getRevTest(cx ctlCtx) { - var ( - kvs = []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}} - ) - for i := range kvs { - if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil { - cx.t.Fatalf("getRevTest #%d: ctlV3Put error (%v)", i, err) - } - } - - tests := []struct { - args []string - - wkv []kv - }{ - {[]string{"key", "--rev", "2"}, kvs[:1]}, - {[]string{"key", "--rev", "3"}, kvs[1:2]}, - {[]string{"key", "--rev", "4"}, kvs[2:]}, - } - - for i, tt := range tests { - if err := ctlV3Get(cx, tt.args, tt.wkv...); err != nil { - cx.t.Errorf("getTest #%d: ctlV3Get error (%v)", i, err) - } - } -} - -func getKeysOnlyTest(cx ctlCtx) { - if err := 
ctlV3Put(cx, "key", "val", ""); err != nil { - cx.t.Fatal(err) - } - cmdArgs := append(cx.PrefixArgs(), []string{"get", "--keys-only", "key"}...) - if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "key"); err != nil { - cx.t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - lines, err := e2e.SpawnWithExpectLines(ctx, cmdArgs, cx.envMap, "key") - require.NoError(cx.t, err) - require.NotContains(cx.t, lines, "val", "got value but passed --keys-only") -} - -func getCountOnlyTest(cx ctlCtx) { - cmdArgs := append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) - if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 0"); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Put(cx, "key", "val", ""); err != nil { - cx.t.Fatal(err) - } - cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) - if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 1"); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Put(cx, "key1", "val", ""); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Put(cx, "key1", "val", ""); err != nil { - cx.t.Fatal(err) - } - cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) - if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 2"); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Put(cx, "key2", "val", ""); err != nil { - cx.t.Fatal(err) - } - cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...) - if err := e2e.SpawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 3"); err != nil { - cx.t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key3", "--prefix", "--write-out=fields"}...) 
- lines, err := e2e.SpawnWithExpectLines(ctx, cmdArgs, cx.envMap, "\"Count\"") - require.NoError(cx.t, err) - require.NotContains(cx.t, lines, "\"Count\" : 3") -} - -func delTest(cx ctlCtx) { - tests := []struct { - puts []kv - args []string - - deletedNum int - }{ - { // delete all keys - []kv{{"foo1", "bar"}, {"foo2", "bar"}, {"foo3", "bar"}}, - []string{"", "--prefix"}, - 3, - }, - { // delete all keys - []kv{{"foo1", "bar"}, {"foo2", "bar"}, {"foo3", "bar"}}, - []string{"", "--from-key"}, - 3, - }, - { - []kv{{"this", "value"}}, - []string{"that"}, - 0, - }, - { - []kv{{"sample", "value"}}, - []string{"sample"}, - 1, - }, - { - []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}, - []string{"key", "--prefix"}, - 3, - }, - { - []kv{{"zoo1", "bar"}, {"zoo2", "bar2"}, {"zoo3", "bar3"}}, - []string{"zoo1", "--from-key"}, - 3, - }, - } - - for i, tt := range tests { - for j := range tt.puts { - if err := ctlV3Put(cx, tt.puts[j].key, tt.puts[j].val, ""); err != nil { - cx.t.Fatalf("delTest #%d-%d: ctlV3Put error (%v)", i, j, err) - } - } - if err := ctlV3Del(cx, tt.args, tt.deletedNum); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Fatalf("delTest #%d: ctlV3Del error (%v)", i, err) - } - } - } -} - -func ctlV3Put(cx ctlCtx, key, value, leaseID string, flags ...string) error { - skipValue := false - skipLease := false - for _, f := range flags { - if f == "--ignore-value" { - skipValue = true - } - if f == "--ignore-lease" { - skipLease = true - } - } - cmdArgs := append(cx.PrefixArgs(), "put", key) - if !skipValue { - cmdArgs = append(cmdArgs, value) - } - if leaseID != "" && !skipLease { - cmdArgs = append(cmdArgs, "--lease", leaseID) - } - if len(flags) != 0 { - cmdArgs = append(cmdArgs, flags...) 
- } - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK") -} - -type kv struct { - key, val string -} - -func ctlV3Get(cx ctlCtx, args []string, kvs ...kv) error { - cmdArgs := append(cx.PrefixArgs(), "get") - cmdArgs = append(cmdArgs, args...) - if !cx.quorum { - cmdArgs = append(cmdArgs, "--consistency", "s") - } - var lines []string - for _, elem := range kvs { - lines = append(lines, elem.key, elem.val) - } - return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...) -} - -// ctlV3GetWithErr runs "get" command expecting no output but error -func ctlV3GetWithErr(cx ctlCtx, args []string, errs []string) error { - cmdArgs := append(cx.PrefixArgs(), "get") - cmdArgs = append(cmdArgs, args...) - if !cx.quorum { - cmdArgs = append(cmdArgs, "--consistency", "s") - } - return e2e.SpawnWithExpects(cmdArgs, cx.envMap, errs...) -} - -func ctlV3Del(cx ctlCtx, args []string, num int) error { - cmdArgs := append(cx.PrefixArgs(), "del") - cmdArgs = append(cmdArgs, args...) - return e2e.SpawnWithExpects(cmdArgs, cx.envMap, fmt.Sprintf("%d", num)) -} diff --git a/tests/e2e/ctl_v3_lease_test.go b/tests/e2e/ctl_v3_lease_test.go deleted file mode 100644 index 6ac44c44f24..00000000000 --- a/tests/e2e/ctl_v3_lease_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3LeaseKeepAlive(t *testing.T) { testCtl(t, leaseTestKeepAlive) } -func TestCtlV3LeaseKeepAliveNoTLS(t *testing.T) { - testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigNoTLS())) -} -func TestCtlV3LeaseKeepAliveClientTLS(t *testing.T) { - testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientTLS())) -} -func TestCtlV3LeaseKeepAliveClientAutoTLS(t *testing.T) { - testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigClientAutoTLS())) -} -func TestCtlV3LeaseKeepAlivePeerTLS(t *testing.T) { - testCtl(t, leaseTestKeepAlive, withCfg(*e2e.NewConfigPeerTLS())) -} - -func leaseTestKeepAlive(cx ctlCtx) { - // put with TTL 10 seconds and keep-alive - leaseID, err := ctlV3LeaseGrant(cx, 10) - if err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseGrant error (%v)", err) - } - if err := ctlV3Put(cx, "key", "val", leaseID); err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3Put error (%v)", err) - } - if err := ctlV3LeaseKeepAlive(cx, leaseID); err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3LeaseKeepAlive error (%v)", err) - } - if err := ctlV3Get(cx, []string{"key"}, kv{"key", "val"}); err != nil { - cx.t.Fatalf("leaseTestKeepAlive: ctlV3Get error (%v)", err) - } -} - -func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) { - cmdArgs := append(cx.PrefixArgs(), "lease", "grant", strconv.Itoa(ttl)) - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - return "", err - } - - line, err := proc.Expect(" granted with TTL(") - if err != nil { - return "", err - } - if err = proc.Close(); err != nil { - return "", err - } - - // parse 'line LEASE_ID granted with TTL(5s)' to get lease ID - hs := strings.Split(line, " ") - if len(hs) < 2 { - return "", fmt.Errorf("lease grant failed with %q", line) - } - return hs[1], nil -} - -func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error { - cmdArgs := 
append(cx.PrefixArgs(), "lease", "keep-alive", leaseID) - - proc, err := e2e.SpawnCmd(cmdArgs, nil) - if err != nil { - return err - } - - if _, err = proc.Expect(fmt.Sprintf("lease %s keepalived with TTL(", leaseID)); err != nil { - return err - } - return proc.Stop() -} - -func ctlV3LeaseRevoke(cx ctlCtx, leaseID string) error { - cmdArgs := append(cx.PrefixArgs(), "lease", "revoke", leaseID) - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("lease %s revoked", leaseID)) -} diff --git a/tests/e2e/ctl_v3_lock_test.go b/tests/e2e/ctl_v3_lock_test.go deleted file mode 100644 index cc0822f2644..00000000000 --- a/tests/e2e/ctl_v3_lock_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3Lock(t *testing.T) { - testCtl(t, testLock) -} - -func TestCtlV3LockWithCmd(t *testing.T) { - testCtl(t, testLockWithCmd) -} - -func testLock(cx ctlCtx) { - name := "a" - - holder, ch, err := ctlV3Lock(cx, name) - if err != nil { - cx.t.Fatal(err) - } - - l1 := "" - select { - case <-time.After(2 * time.Second): - cx.t.Fatalf("timed out locking") - case l1 = <-ch: - if !strings.HasPrefix(l1, name) { - cx.t.Errorf("got %q, expected %q prefix", l1, name) - } - } - - // blocked process that won't acquire the lock - blocked, ch, err := ctlV3Lock(cx, name) - if err != nil { - cx.t.Fatal(err) - } - select { - case <-time.After(100 * time.Millisecond): - case <-ch: - cx.t.Fatalf("should block") - } - - // overlap with a blocker that will acquire the lock - blockAcquire, ch, err := ctlV3Lock(cx, name) - if err != nil { - cx.t.Fatal(err) - } - defer func(blockAcquire *expect.ExpectProcess) { - err = blockAcquire.Stop() - require.NoError(cx.t, err) - blockAcquire.Wait() - }(blockAcquire) - - select { - case <-time.After(100 * time.Millisecond): - case <-ch: - cx.t.Fatalf("should block") - } - - // kill blocked process with clean shutdown - if err = blocked.Signal(os.Interrupt); err != nil { - cx.t.Fatal(err) - } - err = e2e.CloseWithTimeout(blocked, time.Second) - if err != nil { - // due to being blocked, this can potentially get killed and thus exit non-zero sometimes - require.ErrorContains(cx.t, err, "unexpected exit code") - } - - // kill the holder with clean shutdown - if err = holder.Signal(os.Interrupt); err != nil { - cx.t.Fatal(err) - } - if err = e2e.CloseWithTimeout(holder, 200*time.Millisecond+time.Second); err != nil { - cx.t.Fatal(err) - } - - // blockAcquire should acquire the lock - select { - case <-time.After(time.Second): - 
cx.t.Fatalf("timed out from waiting to holding") - case l2 := <-ch: - if l1 == l2 || !strings.HasPrefix(l2, name) { - cx.t.Fatalf("expected different lock name, got l1=%q, l2=%q", l1, l2) - } - } -} - -func testLockWithCmd(cx ctlCtx) { - // exec command with zero exit code - echoCmd := []string{"echo"} - if err := ctlV3LockWithCmd(cx, echoCmd, ""); err != nil { - cx.t.Fatal(err) - } - - // exec command with non-zero exit code - code := 3 - awkCmd := []string{"awk", fmt.Sprintf("BEGIN{exit %d}", code)} - expect := fmt.Sprintf("Error: exit status %d", code) - err := ctlV3LockWithCmd(cx, awkCmd, expect) - require.ErrorContains(cx.t, err, expect) -} - -// ctlV3Lock creates a lock process with a channel listening for when it acquires the lock. -func ctlV3Lock(cx ctlCtx, name string) (*expect.ExpectProcess, <-chan string, error) { - cmdArgs := append(cx.PrefixArgs(), "lock", name) - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - outc := make(chan string, 1) - if err != nil { - close(outc) - return proc, outc, err - } - go func() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - s, xerr := proc.ExpectFunc(ctx, func(string) bool { return true }) - if xerr != nil { - require.ErrorContains(cx.t, xerr, "Error: context canceled") - } - outc <- s - }() - return proc, outc, err -} - -// ctlV3LockWithCmd creates a lock process to exec command. -func ctlV3LockWithCmd(cx ctlCtx, execCmd []string, as ...string) error { - // use command as lock name - cmdArgs := append(cx.PrefixArgs(), "lock", execCmd[0]) - cmdArgs = append(cmdArgs, execCmd...) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - return e2e.SpawnWithExpectsContext(ctx, cmdArgs, cx.envMap, as...) 
-} diff --git a/tests/e2e/ctl_v3_make_mirror_test.go b/tests/e2e/ctl_v3_make_mirror_test.go deleted file mode 100644 index 28491c29ae0..00000000000 --- a/tests/e2e/ctl_v3_make_mirror_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3MakeMirror(t *testing.T) { testCtl(t, makeMirrorTest) } -func TestCtlV3MakeMirrorModifyDestPrefix(t *testing.T) { testCtl(t, makeMirrorModifyDestPrefixTest) } -func TestCtlV3MakeMirrorNoDestPrefix(t *testing.T) { testCtl(t, makeMirrorNoDestPrefixTest) } -func TestCtlV3MakeMirrorWithWatchRev(t *testing.T) { testCtl(t, makeMirrorWithWatchRev) } - -func makeMirrorTest(cx ctlCtx) { - var ( - flags []string - kvs = []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} - kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}} - prefix = "key" - ) - testMirrorCommand(cx, flags, kvs, kvs2, prefix, prefix) -} - -func makeMirrorModifyDestPrefixTest(cx ctlCtx) { - var ( - flags = []string{"--prefix", "o_", "--dest-prefix", "d_"} - kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}} - kvs2 = []kvExec{{key: "d_key1", val: "val1"}, {key: "d_key2", val: "val2"}, {key: "d_key3", val: "val3"}} - srcprefix = "o_" - destprefix = "d_" - ) - testMirrorCommand(cx, flags, kvs, 
kvs2, srcprefix, destprefix) -} - -func makeMirrorNoDestPrefixTest(cx ctlCtx) { - var ( - flags = []string{"--prefix", "o_", "--no-dest-prefix"} - kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}} - kvs2 = []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}} - srcprefix = "o_" - destprefix = "key" - ) - - testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix) -} - -func makeMirrorWithWatchRev(cx ctlCtx) { - var ( - flags = []string{"--prefix", "o_", "--no-dest-prefix", "--rev", "4"} - kvs = []kv{{"o_key1", "val1"}, {"o_key2", "val2"}, {"o_key3", "val3"}, {"o_key4", "val4"}} - kvs2 = []kvExec{{key: "key3", val: "val3"}, {key: "key4", val: "val4"}} - srcprefix = "o_" - destprefix = "key" - ) - - testMirrorCommand(cx, flags, kvs, kvs2, srcprefix, destprefix) -} - -func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvExec, srcprefix, destprefix string) { - // set up another cluster to mirror with - mirrorcfg := e2e.NewConfigAutoTLS() - mirrorcfg.ClusterSize = 1 - mirrorcfg.BasePort = 10000 - mirrorctx := ctlCtx{ - t: cx.t, - cfg: *mirrorcfg, - dialTimeout: 7 * time.Second, - } - - mirrorepc, err := e2e.NewEtcdProcessCluster(context.TODO(), cx.t, e2e.WithConfig(&mirrorctx.cfg)) - if err != nil { - cx.t.Fatalf("could not start etcd process cluster (%v)", err) - } - mirrorctx.epc = mirrorepc - - defer func() { - if err = mirrorctx.epc.Close(); err != nil { - cx.t.Fatalf("error closing etcd processes (%v)", err) - } - }() - - cmdArgs := append(cx.PrefixArgs(), "make-mirror") - cmdArgs = append(cmdArgs, flags...) 
- cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.BasePort)) - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - cx.t.Fatal(err) - } - defer func() { - err = proc.Stop() - if err != nil { - cx.t.Fatal(err) - } - }() - - for i := range sourcekvs { - if err = ctlV3Put(cx, sourcekvs[i].key, sourcekvs[i].val, ""); err != nil { - cx.t.Fatal(err) - } - } - if err = ctlV3Get(cx, []string{srcprefix, "--prefix"}, sourcekvs...); err != nil { - cx.t.Fatal(err) - } - - if err = ctlV3Watch(mirrorctx, []string{destprefix, "--rev", "1", "--prefix"}, destkvs...); err != nil { - cx.t.Fatal(err) - } -} diff --git a/tests/e2e/ctl_v3_member_test.go b/tests/e2e/ctl_v3_member_test.go deleted file mode 100644 index 6ebd73597be..00000000000 --- a/tests/e2e/ctl_v3_member_test.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "encoding/json" - "fmt" - "io" - "reflect" - "strings" - "testing" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3MemberList(t *testing.T) { testCtl(t, memberListTest) } -func TestCtlV3MemberListWithHex(t *testing.T) { testCtl(t, memberListWithHexTest) } - -func TestCtlV3MemberAdd(t *testing.T) { testCtl(t, memberAddTest) } -func TestCtlV3MemberAddAsLearner(t *testing.T) { testCtl(t, memberAddAsLearnerTest) } - -func TestCtlV3MemberUpdate(t *testing.T) { testCtl(t, memberUpdateTest) } -func TestCtlV3MemberUpdateNoTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigNoTLS())) -} -func TestCtlV3MemberUpdateClientTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientTLS())) -} -func TestCtlV3MemberUpdateClientAutoTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigClientAutoTLS())) -} -func TestCtlV3MemberUpdatePeerTLS(t *testing.T) { - testCtl(t, memberUpdateTest, withCfg(*e2e.NewConfigPeerTLS())) -} - -func memberListTest(cx ctlCtx) { - if err := ctlV3MemberList(cx); err != nil { - cx.t.Fatalf("memberListTest ctlV3MemberList error (%v)", err) - } -} - -func ctlV3MemberList(cx ctlCtx) error { - cmdArgs := append(cx.PrefixArgs(), "member", "list") - lines := make([]string, cx.cfg.ClusterSize) - for i := range lines { - lines[i] = "started" - } - return e2e.SpawnWithExpects(cmdArgs, cx.envMap, lines...) 
-} - -func getMemberList(cx ctlCtx) (etcdserverpb.MemberListResponse, error) { - cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "member", "list") - - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - return etcdserverpb.MemberListResponse{}, err - } - var txt string - txt, err = proc.Expect("members") - if err != nil { - return etcdserverpb.MemberListResponse{}, err - } - if err = proc.Close(); err != nil { - return etcdserverpb.MemberListResponse{}, err - } - - resp := etcdserverpb.MemberListResponse{} - dec := json.NewDecoder(strings.NewReader(txt)) - if err := dec.Decode(&resp); err == io.EOF { - return etcdserverpb.MemberListResponse{}, err - } - return resp, nil -} - -func memberListWithHexTest(cx ctlCtx) { - resp, err := getMemberList(cx) - if err != nil { - cx.t.Fatalf("getMemberList error (%v)", err) - } - - cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "--hex", "member", "list") - - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - cx.t.Fatalf("memberListWithHexTest error (%v)", err) - } - var txt string - txt, err = proc.Expect("members") - if err != nil { - cx.t.Fatalf("memberListWithHexTest error (%v)", err) - } - if err = proc.Close(); err != nil { - cx.t.Fatalf("memberListWithHexTest error (%v)", err) - } - hexResp := etcdserverpb.MemberListResponse{} - dec := json.NewDecoder(strings.NewReader(txt)) - if err := dec.Decode(&hexResp); err == io.EOF { - cx.t.Fatalf("memberListWithHexTest error (%v)", err) - } - num := len(resp.Members) - hexNum := len(hexResp.Members) - if num != hexNum { - cx.t.Fatalf("member number,expected %d,got %d", num, hexNum) - } - if num == 0 { - cx.t.Fatal("member number is 0") - } - - if resp.Header.RaftTerm != hexResp.Header.RaftTerm { - cx.t.Fatalf("Unexpected raft_term, expected %d, got %d", resp.Header.RaftTerm, hexResp.Header.RaftTerm) - } - - for i := 0; i < num; i++ { - if resp.Members[i].Name != hexResp.Members[i].Name { - cx.t.Fatalf("Unexpected member 
name,expected %v, got %v", resp.Members[i].Name, hexResp.Members[i].Name) - } - if !reflect.DeepEqual(resp.Members[i].PeerURLs, hexResp.Members[i].PeerURLs) { - cx.t.Fatalf("Unexpected member peerURLs, expected %v, got %v", resp.Members[i].PeerURLs, hexResp.Members[i].PeerURLs) - } - if !reflect.DeepEqual(resp.Members[i].ClientURLs, hexResp.Members[i].ClientURLs) { - cx.t.Fatalf("Unexpected member clientURLs, expected %v, got %v", resp.Members[i].ClientURLs, hexResp.Members[i].ClientURLs) - } - } -} - -func ctlV3MemberRemove(cx ctlCtx, ep, memberID, clusterID string) error { - cmdArgs := append(cx.prefixArgs([]string{ep}), "member", "remove", memberID) - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID)) -} - -func memberAddTest(cx ctlCtx) { - peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11) - if err := ctlV3MemberAdd(cx, peerURL, false); err != nil { - cx.t.Fatal(err) - } -} - -func memberAddAsLearnerTest(cx ctlCtx) { - peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11) - if err := ctlV3MemberAdd(cx, peerURL, true); err != nil { - cx.t.Fatal(err) - } -} - -func ctlV3MemberAdd(cx ctlCtx, peerURL string, isLearner bool) error { - cmdArgs := append(cx.PrefixArgs(), "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)) - asLearner := " " - if isLearner { - cmdArgs = append(cmdArgs, "--learner") - asLearner = " as learner " - } - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf(" added%sto cluster ", asLearner)) -} - -func memberUpdateTest(cx ctlCtx) { - mr, err := getMemberList(cx) - if err != nil { - cx.t.Fatal(err) - } - - peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+11) - memberID := fmt.Sprintf("%x", mr.Members[0].ID) - if err = ctlV3MemberUpdate(cx, memberID, peerURL); err != nil { - cx.t.Fatal(err) - } -} - -func ctlV3MemberUpdate(cx ctlCtx, memberID, peerURL string) error { - cmdArgs := 
append(cx.PrefixArgs(), "member", "update", memberID, fmt.Sprintf("--peer-urls=%s", peerURL)) - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, " updated in cluster ") -} diff --git a/tests/e2e/ctl_v3_move_leader_test.go b/tests/e2e/ctl_v3_move_leader_test.go deleted file mode 100644 index b22a832cbb5..00000000000 --- a/tests/e2e/ctl_v3_move_leader_test.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "crypto/tls" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3MoveLeaderScenarios(t *testing.T) { - securityParent := map[string]struct { - cfg e2e.EtcdProcessClusterConfig - }{ - "Secure": {cfg: *e2e.NewConfigTLS()}, - "Insecure": {cfg: *e2e.NewConfigNoTLS()}, - } - - tests := map[string]struct { - env map[string]string - }{ - "happy path": {env: map[string]string{}}, - "with env": {env: map[string]string{"ETCDCTL_ENDPOINTS": "something-else-is-set"}}, - } - - for testName, tc := range securityParent { - for subTestName, tx := range tests { - t.Run(testName+" "+subTestName, func(t *testing.T) { - testCtlV3MoveLeader(t, tc.cfg, tx.env) - }) - } - } -} - -func testCtlV3MoveLeader(t *testing.T, cfg e2e.EtcdProcessClusterConfig, envVars 
map[string]string) { - epc := setupEtcdctlTest(t, &cfg, true) - defer func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }() - - var tcfg *tls.Config - if cfg.Client.ConnectionType == e2e.ClientTLS { - tinfo := transport.TLSInfo{ - CertFile: e2e.CertPath, - KeyFile: e2e.PrivateKeyPath, - TrustedCAFile: e2e.CaPath, - } - var err error - tcfg, err = tinfo.ClientConfig() - if err != nil { - t.Fatal(err) - } - } - - var leadIdx int - var leaderID uint64 - var transferee uint64 - for i, ep := range epc.EndpointsV3() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{ep}, - DialTimeout: 3 * time.Second, - TLS: tcfg, - }) - if err != nil { - t.Fatal(err) - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - resp, err := cli.Status(ctx, ep) - if err != nil { - t.Fatalf("failed to get status from endpoint %s: %v", ep, err) - } - cancel() - cli.Close() - - if resp.Header.GetMemberId() == resp.Leader { - leadIdx = i - leaderID = resp.Leader - } else { - transferee = resp.Header.GetMemberId() - } - } - - cx := ctlCtx{ - t: t, - cfg: *e2e.NewConfigNoTLS(), - dialTimeout: 7 * time.Second, - epc: epc, - envMap: envVars, - } - - tests := []struct { - eps []string - expect string - expectErr bool - }{ - { // request to non-leader - []string{cx.epc.EndpointsV3()[(leadIdx+1)%3]}, - "no leader endpoint given at ", - true, - }, - { // request to leader - []string{cx.epc.EndpointsV3()[leadIdx]}, - fmt.Sprintf("Leadership transferred from %s to %s", types.ID(leaderID), types.ID(transferee)), - false, - }, - { // request to all endpoints - cx.epc.EndpointsV3(), - "Leadership transferred", - false, - }, - } - for i, tc := range tests { - prefix := cx.prefixArgs(tc.eps) - cmdArgs := append(prefix, "move-leader", types.ID(transferee).String()) - err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, tc.expect) - if tc.expectErr { - require.ErrorContains(t, err, tc.expect) - } else { - 
require.Nilf(t, err, "#%d: %v", i, err) - } - } -} - -func setupEtcdctlTest(t *testing.T, cfg *e2e.EtcdProcessClusterConfig, quorum bool) *e2e.EtcdProcessCluster { - if !quorum { - cfg = e2e.ConfigStandalone(*cfg) - } - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg)) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - return epc -} diff --git a/tests/e2e/ctl_v3_role_test.go b/tests/e2e/ctl_v3_role_test.go deleted file mode 100644 index 7bdd7459804..00000000000 --- a/tests/e2e/ctl_v3_role_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "fmt" - "testing" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -// TestCtlV3RoleAddTimeout tests add role with 0 grpc dial timeout while it tolerates dial timeout error. -// This is unique in e2e test -func TestCtlV3RoleAddTimeout(t *testing.T) { testCtl(t, roleAddTest, withDialTimeout(0)) } - -func roleAddTest(cx ctlCtx) { - cmdSet := []struct { - args []string - expectedStr string - }{ - // Add a role. - { - args: []string{"add", "root"}, - expectedStr: "Role root created", - }, - // Try adding the same role. 
- { - args: []string{"add", "root"}, - expectedStr: "role name already exists", - }, - } - - for i, cmd := range cmdSet { - if err := ctlV3Role(cx, cmd.args, cmd.expectedStr); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Fatalf("roleAddTest #%d: ctlV3Role error (%v)", i, err) - } - } - } -} - -func ctlV3Role(cx ctlCtx, args []string, expStr string) error { - cmdArgs := append(cx.PrefixArgs(), "role") - cmdArgs = append(cmdArgs, args...) - - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, expStr) -} - -func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) error { - cmdArgs := append(cx.PrefixArgs(), "role", "grant-permission") - if perm.prefix { - cmdArgs = append(cmdArgs, "--prefix") - } else if len(perm.rangeEnd) == 1 && perm.rangeEnd[0] == '\x00' { - cmdArgs = append(cmdArgs, "--from-key") - } - - cmdArgs = append(cmdArgs, rolename) - cmdArgs = append(cmdArgs, grantingPermToArgs(perm)...) - - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - return err - } - defer proc.Close() - - expStr := fmt.Sprintf("Role %s updated", rolename) - _, err = proc.Expect(expStr) - return err -} - -func ctlV3RoleRevokePermission(cx ctlCtx, rolename string, key, rangeEnd string, fromKey bool) error { - cmdArgs := append(cx.PrefixArgs(), "role", "revoke-permission") - cmdArgs = append(cmdArgs, rolename) - cmdArgs = append(cmdArgs, key) - var expStr string - if len(rangeEnd) != 0 { - cmdArgs = append(cmdArgs, rangeEnd) - expStr = fmt.Sprintf("Permission of range [%s, %s) is revoked from role %s", key, rangeEnd, rolename) - } else if fromKey { - cmdArgs = append(cmdArgs, "--from-key") - expStr = fmt.Sprintf("Permission of range [%s, is revoked from role %s", key, rolename) - } else { - expStr = fmt.Sprintf("Permission of key %s is revoked from role %s", key, rolename) - } - - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - return err - } - defer proc.Close() - _, err = proc.Expect(expStr) - return 
err -} - -type grantingPerm struct { - read bool - write bool - key string - rangeEnd string - prefix bool -} - -func grantingPermToArgs(perm grantingPerm) []string { - permstr := "" - - if perm.read { - permstr += "read" - } - - if perm.write { - permstr += "write" - } - - if len(permstr) == 0 { - panic("invalid granting permission") - } - - if len(perm.rangeEnd) == 0 { - return []string{permstr, perm.key} - } - - if len(perm.rangeEnd) == 1 && perm.rangeEnd[0] == '\x00' { - return []string{permstr, perm.key} - } - - return []string{permstr, perm.key, perm.rangeEnd} -} diff --git a/tests/e2e/ctl_v3_snapshot_test.go b/tests/e2e/ctl_v3_snapshot_test.go deleted file mode 100644 index df0c2c33419..00000000000 --- a/tests/e2e/ctl_v3_snapshot_test.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/etcdutl/v3/snapshot" - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3Snapshot(t *testing.T) { testCtl(t, snapshotTest) } - -func snapshotTest(cx ctlCtx) { - maintenanceInitKeys(cx) - - leaseID, err := ctlV3LeaseGrant(cx, 100) - if err != nil { - cx.t.Fatalf("snapshot: ctlV3LeaseGrant error (%v)", err) - } - if err = ctlV3Put(cx, "withlease", "withlease", leaseID); err != nil { - cx.t.Fatalf("snapshot: ctlV3Put error (%v)", err) - } - - fpath := filepath.Join(cx.t.TempDir(), "snapshot") - defer os.RemoveAll(fpath) - - if err = ctlV3SnapshotSave(cx, fpath); err != nil { - cx.t.Fatalf("snapshotTest ctlV3SnapshotSave error (%v)", err) - } - - st, err := getSnapshotStatus(cx, fpath) - if err != nil { - cx.t.Fatalf("snapshotTest getSnapshotStatus error (%v)", err) - } - if st.Revision != 5 { - cx.t.Fatalf("expected 4, got %d", st.Revision) - } - if st.TotalKey < 4 { - cx.t.Fatalf("expected at least 4, got %d", st.TotalKey) - } -} - -func TestCtlV3SnapshotCorrupt(t *testing.T) { testCtl(t, snapshotCorruptTest) } - -func snapshotCorruptTest(cx ctlCtx) { - fpath := filepath.Join(cx.t.TempDir(), "snapshot") - defer os.RemoveAll(fpath) - - if err := ctlV3SnapshotSave(cx, fpath); err != nil { - cx.t.Fatalf("snapshotTest ctlV3SnapshotSave error (%v)", err) - } - - // corrupt file - f, oerr := os.OpenFile(fpath, os.O_WRONLY, 0) - if oerr != nil { - cx.t.Fatal(oerr) - } - if _, err := f.Write(make([]byte, 512)); err != nil { - cx.t.Fatal(err) - } - f.Close() - - datadir := cx.t.TempDir() - - serr := e2e.SpawnWithExpectWithEnv( - append(cx.PrefixArgsUtl(), "snapshot", "restore", - "--data-dir", datadir, - fpath), - cx.envMap, - "expected sha256") - require.ErrorContains(cx.t, serr, "Error: expected sha256") -} - -// 
TestCtlV3SnapshotStatusBeforeRestore ensures that the snapshot -// status does not modify the snapshot file -func TestCtlV3SnapshotStatusBeforeRestore(t *testing.T) { - testCtl(t, snapshotStatusBeforeRestoreTest) -} - -func snapshotStatusBeforeRestoreTest(cx ctlCtx) { - fpath := filepath.Join(cx.t.TempDir(), "snapshot") - defer os.RemoveAll(fpath) - - if err := ctlV3SnapshotSave(cx, fpath); err != nil { - cx.t.Fatalf("snapshotTest ctlV3SnapshotSave error (%v)", err) - } - - // snapshot status on the fresh snapshot file - _, err := getSnapshotStatus(cx, fpath) - if err != nil { - cx.t.Fatalf("snapshotTest getSnapshotStatus error (%v)", err) - } - - dataDir := cx.t.TempDir() - defer os.RemoveAll(dataDir) - serr := e2e.SpawnWithExpectWithEnv( - append(cx.PrefixArgsUtl(), "snapshot", "restore", - "--data-dir", dataDir, - fpath), - cx.envMap, - "added member") - if serr != nil { - cx.t.Fatal(serr) - } -} - -func ctlV3SnapshotSave(cx ctlCtx, fpath string) error { - cmdArgs := append(cx.PrefixArgs(), "snapshot", "save", fpath) - return e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("Snapshot saved at %s", fpath)) -} - -func getSnapshotStatus(cx ctlCtx, fpath string) (snapshot.Status, error) { - cmdArgs := append(cx.PrefixArgsUtl(), "--write-out", "json", "snapshot", "status", fpath) - - proc, err := e2e.SpawnCmd(cmdArgs, nil) - if err != nil { - return snapshot.Status{}, err - } - var txt string - txt, err = proc.Expect("totalKey") - if err != nil { - return snapshot.Status{}, err - } - if err = proc.Close(); err != nil { - return snapshot.Status{}, err - } - - resp := snapshot.Status{} - dec := json.NewDecoder(strings.NewReader(txt)) - if err := dec.Decode(&resp); err == io.EOF { - return snapshot.Status{}, err - } - return resp, nil -} - -func TestIssue6361(t *testing.T) { testIssue6361(t) } - -// TestIssue6361 ensures new member that starts with snapshot correctly -// syncs up with other members and serve correct data. 
-func testIssue6361(t *testing.T) { - { - // This tests is pretty flaky on semaphoreci as of 2021-01-10. - // TODO: Remove when the flakiness source is identified. - oldenv := os.Getenv("EXPECT_DEBUG") - defer os.Setenv("EXPECT_DEBUG", oldenv) - os.Setenv("EXPECT_DEBUG", "1") - } - - e2e.BeforeTest(t) - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(1), - e2e.WithKeepDataDir(true), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }() - - dialTimeout := 10 * time.Second - prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()} - - t.Log("Writing some keys...") - kvs := []kv{{"foo1", "val1"}, {"foo2", "val2"}, {"foo3", "val3"}} - for i := range kvs { - if err = e2e.SpawnWithExpect(append(prefixArgs, "put", kvs[i].key, kvs[i].val), "OK"); err != nil { - t.Fatal(err) - } - } - - fpath := filepath.Join(t.TempDir(), "test.snapshot") - - t.Log("etcdctl saving snapshot...") - if err = e2e.SpawnWithExpects(append(prefixArgs, "snapshot", "save", fpath), - nil, - fmt.Sprintf("Snapshot saved at %s", fpath), - ); err != nil { - t.Fatal(err) - } - - t.Log("Stopping the original server...") - if err = epc.Procs[0].Stop(); err != nil { - t.Fatal(err) - } - - newDataDir := filepath.Join(t.TempDir(), "test.data") - t.Log("etcdctl restoring the snapshot...") - err = e2e.SpawnWithExpect([]string{e2e.BinPath.Etcdutl, "snapshot", "restore", fpath, "--name", epc.Procs[0].Config().Name, "--initial-cluster", epc.Procs[0].Config().InitialCluster, "--initial-cluster-token", epc.Procs[0].Config().InitialToken, "--initial-advertise-peer-urls", epc.Procs[0].Config().PeerURL.String(), "--data-dir", newDataDir}, "added member") - if err != nil { - t.Fatal(err) - } - - t.Log("(Re)starting the etcd member using the restored 
snapshot...") - epc.Procs[0].Config().DataDirPath = newDataDir - for i := range epc.Procs[0].Config().Args { - if epc.Procs[0].Config().Args[i] == "--data-dir" { - epc.Procs[0].Config().Args[i+1] = newDataDir - } - } - if err = epc.Procs[0].Restart(context.TODO()); err != nil { - t.Fatal(err) - } - - t.Log("Ensuring the restored member has the correct data...") - for i := range kvs { - if err = e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil { - t.Fatal(err) - } - } - - t.Log("Adding new member into the cluster") - clientURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+30) - peerURL := fmt.Sprintf("http://localhost:%d", e2e.EtcdProcessBasePort+31) - err = e2e.SpawnWithExpect(append(prefixArgs, "member", "add", "newmember", fmt.Sprintf("--peer-urls=%s", peerURL)), " added to cluster ") - if err != nil { - t.Fatal(err) - } - - newDataDir2 := t.TempDir() - defer os.RemoveAll(newDataDir2) - - name2 := "infra2" - initialCluster2 := epc.Procs[0].Config().InitialCluster + fmt.Sprintf(",%s=%s", name2, peerURL) - - t.Log("Starting the new member") - // start the new member - var nepc *expect.ExpectProcess - nepc, err = e2e.SpawnCmd([]string{epc.Procs[0].Config().ExecPath, "--name", name2, - "--listen-client-urls", clientURL, "--advertise-client-urls", clientURL, - "--listen-peer-urls", peerURL, "--initial-advertise-peer-urls", peerURL, - "--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2}, nil) - if err != nil { - t.Fatal(err) - } - if _, err = nepc.Expect("ready to serve client requests"); err != nil { - t.Fatal(err) - } - - prefixArgs = []string{e2e.BinPath.Etcdctl, "--endpoints", clientURL, "--dial-timeout", dialTimeout.String()} - - t.Log("Ensuring added member has data from incoming snapshot...") - for i := range kvs { - if err = e2e.SpawnWithExpect(append(prefixArgs, "get", kvs[i].key), kvs[i].val); err != nil { - t.Fatal(err) - } - } - - t.Log("Stopping the second 
member") - if err = nepc.Stop(); err != nil { - t.Fatal(err) - } - t.Log("Test logic done") -} - -// TestCtlV3SnapshotVersion is for storageVersion to be stored, all fields -// expected 3.6 fields need to be set. This happens after first WAL snapshot. -// In this test we lower SnapshotCount to 1 to ensure WAL snapshot is triggered. -func TestCtlV3SnapshotVersion(t *testing.T) { - testCtl(t, snapshotVersionTest, withCfg(*e2e.NewConfig(e2e.WithSnapshotCount(1)))) -} - -func snapshotVersionTest(cx ctlCtx) { - maintenanceInitKeys(cx) - - fpath := filepath.Join(cx.t.TempDir(), "snapshot") - defer os.RemoveAll(fpath) - - if err := ctlV3SnapshotSave(cx, fpath); err != nil { - cx.t.Fatalf("snapshotVersionTest ctlV3SnapshotSave error (%v)", err) - } - - st, err := getSnapshotStatus(cx, fpath) - if err != nil { - cx.t.Fatalf("snapshotVersionTest getSnapshotStatus error (%v)", err) - } - if st.Version != "3.6.0" { - cx.t.Fatalf("expected %q, got %q", "3.6.0", st.Version) - } -} diff --git a/tests/e2e/ctl_v3_test.go b/tests/e2e/ctl_v3_test.go deleted file mode 100644 index 53a27309834..00000000000 --- a/tests/e2e/ctl_v3_test.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/pkg/v3/flags" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3Version(t *testing.T) { testCtl(t, versionTest) } - -func TestClusterVersion(t *testing.T) { - e2e.BeforeTest(t) - - tests := []struct { - name string - rollingStart bool - }{ - { - name: "When start servers at the same time", - rollingStart: false, - }, - { - name: "When start servers one by one", - rollingStart: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e2e.BeforeTest(t) - cfg := e2e.NewConfig( - e2e.WithSnapshotCount(3), - e2e.WithBaseScheme("unix"), // to avoid port conflict) - e2e.WithRollingStart(tt.rollingStart), - ) - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg)) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }() - - ctx := ctlCtx{ - t: t, - cfg: *cfg, - epc: epc, - } - cv := version.Cluster(version.Version) - clusterVersionTest(ctx, `"etcdcluster":"`+cv) - }) - } -} - -func versionTest(cx ctlCtx) { - if err := ctlV3Version(cx); err != nil { - cx.t.Fatalf("versionTest ctlV3Version error (%v)", err) - } -} - -func clusterVersionTest(cx ctlCtx, expected string) { - var err error - for i := 0; i < 35; i++ { - if err = e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: "/version", Expected: expected}); err != nil { - cx.t.Logf("#%d: v3 is not ready yet (%v)", i, err) - time.Sleep(200 * time.Millisecond) - continue - } - break - } - if err != nil { - cx.t.Fatalf("failed cluster version test expected %v got (%v)", expected, err) - } -} - -func ctlV3Version(cx ctlCtx) error { - cmdArgs := append(cx.PrefixArgs(), "version") - return 
e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, version.Version) -} - -// TestCtlV3DialWithHTTPScheme ensures that client handles Endpoints with HTTPS scheme. -func TestCtlV3DialWithHTTPScheme(t *testing.T) { - testCtl(t, dialWithSchemeTest, withCfg(*e2e.NewConfigClientTLS())) -} - -func dialWithSchemeTest(cx ctlCtx) { - cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsV3()), "put", "foo", "bar") - if err := e2e.SpawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK"); err != nil { - cx.t.Fatal(err) - } -} - -type ctlCtx struct { - t *testing.T - apiPrefix string - cfg e2e.EtcdProcessClusterConfig - - corruptFunc func(string) error - disableStrictReconfigCheck bool - - epc *e2e.EtcdProcessCluster - - envMap map[string]string - - dialTimeout time.Duration - testTimeout time.Duration - - quorum bool // if true, set up 3-node cluster and linearizable read - interactive bool - - user string - pass string - - initialCorruptCheck bool - - // dir that was used during the test - dataDir string -} - -type ctlOption func(*ctlCtx) - -func (cx *ctlCtx) applyOpts(opts []ctlOption) { - for _, opt := range opts { - opt(cx) - } - - cx.initialCorruptCheck = true -} - -func withCfg(cfg e2e.EtcdProcessClusterConfig) ctlOption { - return func(cx *ctlCtx) { cx.cfg = cfg } -} - -func withDialTimeout(timeout time.Duration) ctlOption { - return func(cx *ctlCtx) { cx.dialTimeout = timeout } -} - -func withTestTimeout(timeout time.Duration) ctlOption { - return func(cx *ctlCtx) { cx.testTimeout = timeout } -} - -func withQuorum() ctlOption { - return func(cx *ctlCtx) { cx.quorum = true } -} - -func withInteractive() ctlOption { - return func(cx *ctlCtx) { cx.interactive = true } -} - -func withInitialCorruptCheck() ctlOption { - return func(cx *ctlCtx) { cx.initialCorruptCheck = true } -} - -func withCorruptFunc(f func(string) error) ctlOption { - return func(cx *ctlCtx) { cx.corruptFunc = f } -} - -func withDisableStrictReconfig() ctlOption { - return func(cx *ctlCtx) { cx.disableStrictReconfigCheck 
= true } -} - -func withApiPrefix(p string) ctlOption { - return func(cx *ctlCtx) { cx.apiPrefix = p } -} - -func withFlagByEnv() ctlOption { - return func(cx *ctlCtx) { cx.envMap = make(map[string]string) } -} - -// This function must be called after the `withCfg`, otherwise its value -// may be overwritten by `withCfg`. -func withMaxConcurrentStreams(streams uint32) ctlOption { - return func(cx *ctlCtx) { - cx.cfg.MaxConcurrentStreams = streams - } -} - -func testCtl(t *testing.T, testFunc func(ctlCtx), opts ...ctlOption) { - testCtlWithOffline(t, testFunc, nil, opts...) -} - -func getDefaultCtlCtx(t *testing.T) ctlCtx { - return ctlCtx{ - t: t, - cfg: *e2e.NewConfigAutoTLS(), - dialTimeout: 7 * time.Second, - } -} - -func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc func(ctlCtx), opts ...ctlOption) { - e2e.BeforeTest(t) - - ret := getDefaultCtlCtx(t) - ret.applyOpts(opts) - - if !ret.quorum { - ret.cfg = *e2e.ConfigStandalone(ret.cfg) - } - ret.cfg.StrictReconfigCheck = !ret.disableStrictReconfigCheck - if ret.initialCorruptCheck { - ret.cfg.InitialCorruptCheck = ret.initialCorruptCheck - } - if testOfflineFunc != nil { - ret.cfg.KeepDataDir = true - } - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(&ret.cfg)) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - ret.epc = epc - ret.dataDir = epc.Procs[0].Config().DataDirPath - - runCtlTest(t, testFunc, testOfflineFunc, ret) -} - -func runCtlTest(t *testing.T, testFunc func(ctlCtx), testOfflineFunc func(ctlCtx), cx ctlCtx) { - defer func() { - if cx.envMap != nil { - for k := range cx.envMap { - os.Unsetenv(k) - } - cx.envMap = make(map[string]string) - } - if cx.epc != nil { - cx.epc.Stop() - cx.epc.Close() - } - }() - - donec := make(chan struct{}) - go func() { - defer close(donec) - testFunc(cx) - t.Log("---testFunc logic DONE") - }() - - timeout := cx.getTestTimeout() - - select { - case <-time.After(timeout): - 
testutil.FatalStack(t, fmt.Sprintf("test timed out after %v", timeout)) - case <-donec: - } - - t.Log("closing test cluster...") - assert.NoError(t, cx.epc.Stop()) - assert.NoError(t, cx.epc.Close()) - cx.epc = nil - t.Log("closed test cluster...") - - if testOfflineFunc != nil { - testOfflineFunc(cx) - } -} - -func (cx *ctlCtx) getTestTimeout() time.Duration { - timeout := cx.testTimeout - if timeout == 0 { - timeout = 2*cx.dialTimeout + time.Second - if cx.dialTimeout == 0 { - timeout = 30 * time.Second - } - } - return timeout -} - -func (cx *ctlCtx) prefixArgs(eps []string) []string { - fmap := make(map[string]string) - fmap["endpoints"] = strings.Join(eps, ",") - fmap["dial-timeout"] = cx.dialTimeout.String() - if cx.epc.Cfg.Client.ConnectionType == e2e.ClientTLS { - if cx.epc.Cfg.Client.AutoTLS { - fmap["insecure-transport"] = "false" - fmap["insecure-skip-tls-verify"] = "true" - } else if cx.epc.Cfg.Client.RevokeCerts { - fmap["cacert"] = e2e.CaPath - fmap["cert"] = e2e.RevokedCertPath - fmap["key"] = e2e.RevokedPrivateKeyPath - } else { - fmap["cacert"] = e2e.CaPath - fmap["cert"] = e2e.CertPath - fmap["key"] = e2e.PrivateKeyPath - } - } - if cx.user != "" { - fmap["user"] = cx.user + ":" + cx.pass - } - - useEnv := cx.envMap != nil - - cmdArgs := []string{e2e.BinPath.Etcdctl} - for k, v := range fmap { - if useEnv { - ek := flags.FlagToEnv("ETCDCTL", k) - cx.envMap[ek] = v - } else { - cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v)) - } - } - return cmdArgs -} - -// PrefixArgs prefixes etcdctl command. -// Make sure to unset environment variables after tests. -func (cx *ctlCtx) PrefixArgs() []string { - return cx.prefixArgs(cx.epc.EndpointsV3()) -} - -// PrefixArgsUtl returns prefix of the command that is etcdutl -// Please not thet 'utl' compatible commands does not consume --endpoints flag. 
-func (cx *ctlCtx) PrefixArgsUtl() []string { - return []string{e2e.BinPath.Etcdutl} -} - -func isGRPCTimedout(err error) bool { - return strings.Contains(err.Error(), "grpc: timed out trying to connect") -} - -func (cx *ctlCtx) memberToRemove() (ep string, memberID string, clusterID string) { - n1 := cx.cfg.ClusterSize - if n1 < 2 { - cx.t.Fatalf("%d-node is too small to test 'member remove'", n1) - } - - resp, err := getMemberList(*cx) - if err != nil { - cx.t.Fatal(err) - } - if n1 != len(resp.Members) { - cx.t.Fatalf("expected %d, got %d", n1, len(resp.Members)) - } - - ep = resp.Members[0].ClientURLs[0] - clusterID = fmt.Sprintf("%x", resp.Header.ClusterId) - memberID = fmt.Sprintf("%x", resp.Members[1].ID) - - return ep, memberID, clusterID -} diff --git a/tests/e2e/ctl_v3_txn_test.go b/tests/e2e/ctl_v3_txn_test.go deleted file mode 100644 index bc05cb05575..00000000000 --- a/tests/e2e/ctl_v3_txn_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -type txnRequests struct { - compare []string - ifSuccess []string - ifFail []string - results []string -} - -func ctlV3Txn(cx ctlCtx, rqs txnRequests, expectedExitErr bool) error { - // TODO: support non-interactive mode - cmdArgs := append(cx.PrefixArgs(), "txn") - if cx.interactive { - cmdArgs = append(cmdArgs, "--interactive") - } - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - if err != nil { - return err - } - _, err = proc.Expect("compares:") - if err != nil { - return err - } - for _, req := range rqs.compare { - if err = proc.Send(req + "\r"); err != nil { - return err - } - } - if err = proc.Send("\r"); err != nil { - return err - } - - _, err = proc.Expect("success requests (get, put, del):") - if err != nil { - return err - } - for _, req := range rqs.ifSuccess { - if err = proc.Send(req + "\r"); err != nil { - return err - } - } - if err = proc.Send("\r"); err != nil { - return err - } - - _, err = proc.Expect("failure requests (get, put, del):") - if err != nil { - return err - } - for _, req := range rqs.ifFail { - if err = proc.Send(req + "\r"); err != nil { - return err - } - } - if err = proc.Send("\r"); err != nil { - return err - } - - for _, line := range rqs.results { - _, err = proc.Expect(line) - if err != nil { - return err - } - } - - err = proc.Close() - if expectedExitErr { - return nil - } - - return err -} diff --git a/tests/e2e/ctl_v3_watch_cov_test.go b/tests/e2e/ctl_v3_watch_cov_test.go deleted file mode 100644 index 0e17b95669e..00000000000 --- a/tests/e2e/ctl_v3_watch_cov_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build cov - -package e2e - -import ( - "os" - "testing" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) } -func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) } -func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) } -func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) } -func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) } - -func TestCtlV3WatchInteractive(t *testing.T) { - testCtl(t, watchTest, withInteractive()) -} -func TestCtlV3WatchInteractiveNoTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigNoTLS())) -} -func TestCtlV3WatchInteractiveClientTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigClientTLS())) -} -func TestCtlV3WatchInteractivePeerTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigPeerTLS())) -} - -func watchTest(cx ctlCtx) { - tests := []struct { - puts []kv - envKey string - envRange string - args []string - - wkv []kvExec - }{ - { // watch 1 key with env - puts: []kv{{"sample", "value"}}, - envKey: "sample", - args: []string{"--rev", "1"}, - wkv: []kvExec{{key: "sample", val: "value"}}, - }, - // coverage tests get extra arguments: - // ./bin/etcdctl_test -test.coverprofile=e2e.1525392462795198897.coverprofile -test.outputdir=../.. 
- // do not test watch exec commands - { // watch 3 keys by prefix, with env - puts: []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}, - envKey: "key", - args: []string{"--rev", "1", "--prefix"}, - wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}, - }, - { // watch 3 keys by range, with env - puts: []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", "val2"}}, - envKey: "key", - envRange: "key3", - args: []string{"--rev", "1"}, - wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}}, - }, - } - - for i, tt := range tests { - donec := make(chan struct{}) - go func(i int, puts []kv) { - for j := range puts { - if err := ctlV3Put(cx, puts[j].key, puts[j].val, ""); err != nil { - cx.t.Fatalf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err) - } - } - close(donec) - }(i, tt.puts) - - unsetEnv := func() {} - if tt.envKey != "" || tt.envRange != "" { - if tt.envKey != "" { - os.Setenv("ETCDCTL_WATCH_KEY", tt.envKey) - unsetEnv = func() { os.Unsetenv("ETCDCTL_WATCH_KEY") } - } - if tt.envRange != "" { - os.Setenv("ETCDCTL_WATCH_RANGE_END", tt.envRange) - unsetEnv = func() { os.Unsetenv("ETCDCTL_WATCH_RANGE_END") } - } - if tt.envKey != "" && tt.envRange != "" { - unsetEnv = func() { - os.Unsetenv("ETCDCTL_WATCH_KEY") - os.Unsetenv("ETCDCTL_WATCH_RANGE_END") - } - } - } - if err := ctlV3Watch(cx, tt.args, tt.wkv...); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Errorf("watchTest #%d: ctlV3Watch error (%v)", i, err) - } - } - unsetEnv() - <-donec - } -} diff --git a/tests/e2e/ctl_v3_watch_no_cov_test.go b/tests/e2e/ctl_v3_watch_no_cov_test.go deleted file mode 100644 index 300a83cfa63..00000000000 --- a/tests/e2e/ctl_v3_watch_no_cov_test.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !cov - -package e2e - -import ( - "os" - "testing" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestCtlV3Watch(t *testing.T) { testCtl(t, watchTest) } -func TestCtlV3WatchNoTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigNoTLS())) } -func TestCtlV3WatchClientTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigClientTLS())) } -func TestCtlV3WatchPeerTLS(t *testing.T) { testCtl(t, watchTest, withCfg(*e2e.NewConfigPeerTLS())) } -func TestCtlV3WatchTimeout(t *testing.T) { testCtl(t, watchTest, withDialTimeout(0)) } - -func TestCtlV3WatchInteractive(t *testing.T) { - testCtl(t, watchTest, withInteractive()) -} -func TestCtlV3WatchInteractiveNoTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigNoTLS())) -} -func TestCtlV3WatchInteractiveClientTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigClientTLS())) -} -func TestCtlV3WatchInteractivePeerTLS(t *testing.T) { - testCtl(t, watchTest, withInteractive(), withCfg(*e2e.NewConfigPeerTLS())) -} - -func watchTest(cx ctlCtx) { - tests := []struct { - puts []kv - envKey string - envRange string - args []string - - wkv []kvExec - }{ - { // watch 1 key with env - puts: []kv{{"sample", "value"}}, - envKey: "sample", - args: []string{"--rev", "1"}, - wkv: []kvExec{{key: "sample", val: "value"}}, - }, - { // watch 1 key with ${ETCD_WATCH_VALUE} - puts: []kv{{"sample", "value"}}, - args: []string{"sample", "--rev", "1", "--", "env"}, - wkv: []kvExec{{key: "sample", val: "value", execOutput: 
`ETCD_WATCH_VALUE="value"`}}, - }, - { // watch 1 key with "echo watch event received", with env - puts: []kv{{"sample", "value"}}, - envKey: "sample", - args: []string{"--rev", "1", "--", "echo", "watch event received"}, - wkv: []kvExec{{key: "sample", val: "value", execOutput: "watch event received"}}, - }, - { // watch 1 key with "echo watch event received" - puts: []kv{{"sample", "value"}}, - args: []string{"--rev", "1", "sample", "--", "echo", "watch event received"}, - wkv: []kvExec{{key: "sample", val: "value", execOutput: "watch event received"}}, - }, - { // watch 1 key with "echo \"Hello World!\"" - puts: []kv{{"sample", "value"}}, - args: []string{"--rev", "1", "sample", "--", "echo", "\"Hello World!\""}, - wkv: []kvExec{{key: "sample", val: "value", execOutput: "Hello World!"}}, - }, - { // watch 1 key with "echo watch event received" - puts: []kv{{"sample", "value"}}, - args: []string{"sample", "samplx", "--rev", "1", "--", "echo", "watch event received"}, - wkv: []kvExec{{key: "sample", val: "value", execOutput: "watch event received"}}, - }, - { // watch 1 key with "echo watch event received" - puts: []kv{{"sample", "value"}}, - envKey: "sample", - envRange: "samplx", - args: []string{"--rev", "1", "--", "echo", "watch event received"}, - wkv: []kvExec{{key: "sample", val: "value", execOutput: "watch event received"}}, - }, - { // watch 1 key with "echo watch event received" - puts: []kv{{"sample", "value"}}, - args: []string{"sample", "--rev", "1", "samplx", "--", "echo", "watch event received"}, - wkv: []kvExec{{key: "sample", val: "value", execOutput: "watch event received"}}, - }, - { // watch 3 keys by prefix, with env - puts: []kv{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}}, - envKey: "key", - args: []string{"--rev", "1", "--prefix"}, - wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}, {key: "key3", val: "val3"}}, - }, - { // watch 3 keys by range, with env - puts: []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", 
"val2"}}, - envKey: "key", - envRange: "key3", - args: []string{"--rev", "1"}, - wkv: []kvExec{{key: "key1", val: "val1"}, {key: "key2", val: "val2"}}, - }, - } - - for i, tt := range tests { - donec := make(chan struct{}) - go func(i int, puts []kv) { - for j := range puts { - if err := ctlV3Put(cx, puts[j].key, puts[j].val, ""); err != nil { - cx.t.Errorf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err) - } - } - close(donec) - }(i, tt.puts) - - unsetEnv := func() {} - if tt.envKey != "" || tt.envRange != "" { - if tt.envKey != "" { - os.Setenv("ETCDCTL_WATCH_KEY", tt.envKey) - unsetEnv = func() { os.Unsetenv("ETCDCTL_WATCH_KEY") } - } - if tt.envRange != "" { - os.Setenv("ETCDCTL_WATCH_RANGE_END", tt.envRange) - unsetEnv = func() { os.Unsetenv("ETCDCTL_WATCH_RANGE_END") } - } - if tt.envKey != "" && tt.envRange != "" { - unsetEnv = func() { - os.Unsetenv("ETCDCTL_WATCH_KEY") - os.Unsetenv("ETCDCTL_WATCH_RANGE_END") - } - } - } - if err := ctlV3Watch(cx, tt.args, tt.wkv...); err != nil { - if cx.dialTimeout > 0 && !isGRPCTimedout(err) { - cx.t.Errorf("watchTest #%d: ctlV3Watch error (%v)", i, err) - } - } - unsetEnv() - <-donec - } -} diff --git a/tests/e2e/ctl_v3_watch_test.go b/tests/e2e/ctl_v3_watch_test.go deleted file mode 100644 index bec43224e4c..00000000000 --- a/tests/e2e/ctl_v3_watch_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "strings" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -type kvExec struct { - key, val string - execOutput string -} - -func setupWatchArgs(cx ctlCtx, args []string) []string { - cmdArgs := append(cx.PrefixArgs(), "watch") - if cx.interactive { - cmdArgs = append(cmdArgs, "--interactive") - } else { - cmdArgs = append(cmdArgs, args...) - } - - return cmdArgs -} - -func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error { - cmdArgs := setupWatchArgs(cx, args) - - proc, err := e2e.SpawnCmd(cmdArgs, nil) - if err != nil { - return err - } - - if cx.interactive { - wl := strings.Join(append([]string{"watch"}, args...), " ") + "\r" - if err = proc.Send(wl); err != nil { - return err - } - } - - for _, elem := range kvs { - if _, err = proc.Expect(elem.key); err != nil { - return err - } - if _, err = proc.Expect(elem.val); err != nil { - return err - } - if elem.execOutput != "" { - if _, err = proc.Expect(elem.execOutput); err != nil { - return err - } - } - } - return proc.Stop() -} - -func ctlV3WatchFailPerm(cx ctlCtx, args []string) error { - cmdArgs := setupWatchArgs(cx, args) - - proc, err := e2e.SpawnCmd(cmdArgs, nil) - if err != nil { - return err - } - - if cx.interactive { - wl := strings.Join(append([]string{"watch"}, args...), " ") + "\r" - if err = proc.Send(wl); err != nil { - return err - } - } - - // TODO(mitake): after printing accurate error message that includes - // "permission denied", the above string argument of proc.Expect() - // should be updated. 
- _, err = proc.Expect("watch is canceled by the server") - if err != nil { - return err - } - return proc.Close() -} diff --git a/tests/e2e/discovery_test.go b/tests/e2e/discovery_test.go deleted file mode 100644 index 7735282d2e4..00000000000 --- a/tests/e2e/discovery_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "net/http" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/v2" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1, false) } -func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3, false) } -func TestTLSClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3, true) } - -func testClusterUsingDiscovery(t *testing.T, size int, peerTLS bool) { - e2e.BeforeTest(t) - - if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease) - } - - dc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithBasePort(2000), - e2e.WithVersion(e2e.LastVersion), - e2e.WithClusterSize(1), - e2e.WithEnableV2(true), - ) - if err 
!= nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer dc.Close() - - dcc := MustNewHTTPClient(t, dc.EndpointsV2(), nil) - dkapi := client.NewKeysAPI(dcc) - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil { - t.Fatal(err) - } - cancel() - - c, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithBasePort(3000), - e2e.WithClusterSize(size), - e2e.WithIsPeerTLS(peerTLS), - e2e.WithDiscovery(dc.EndpointsV2()[0]+"/v2/keys"), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer c.Close() - - kubectl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(c.EndpointsV3(), ",")} - if err := e2e.SpawnWithExpect(append(kubectl, "put", "key", "value"), "OK"); err != nil { - t.Fatal(err) - } - if err := e2e.SpawnWithExpect(append(kubectl, "get", "key"), "value"); err != nil { - t.Fatal(err) - } -} - -func MustNewHTTPClient(t testutil.TB, eps []string, tls *transport.TLSInfo) client.Client { - cfgtls := transport.TLSInfo{} - if tls != nil { - cfgtls = *tls - } - cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps} - c, err := client.New(cfg) - if err != nil { - t.Fatal(err) - } - return c -} - -func mustNewTransport(t testutil.TB, tlsInfo transport.TLSInfo) *http.Transport { - // tick in integration test is short, so 1s dial timeout could play well. 
- tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout) - if err != nil { - t.Fatal(err) - } - return tr -} diff --git a/tests/e2e/discovery_v3_test.go b/tests/e2e/discovery_v3_test.go deleted file mode 100644 index 26eb809dcf6..00000000000 --- a/tests/e2e/discovery_v3_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "strconv" - "strings" - "testing" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestClusterOf1UsingV3Discovery_1endpoint(t *testing.T) { - testClusterUsingV3Discovery(t, 1, 1, e2e.ClientNonTLS, false) -} -func TestClusterOf3UsingV3Discovery_1endpoint(t *testing.T) { - testClusterUsingV3Discovery(t, 1, 3, e2e.ClientTLS, true) -} -func TestTLSClusterOf5UsingV3Discovery_1endpoint(t *testing.T) { - testClusterUsingV3Discovery(t, 1, 5, e2e.ClientTLS, false) -} - -func TestClusterOf1UsingV3Discovery_3endpoints(t *testing.T) { - testClusterUsingV3Discovery(t, 3, 1, e2e.ClientNonTLS, false) -} -func TestClusterOf3UsingV3Discovery_3endpoints(t *testing.T) { - testClusterUsingV3Discovery(t, 3, 3, e2e.ClientTLS, true) -} -func TestTLSClusterOf5UsingV3Discovery_3endpoints(t *testing.T) { - testClusterUsingV3Discovery(t, 3, 5, e2e.ClientTLS, false) -} - -func testClusterUsingV3Discovery(t *testing.T, discoveryClusterSize, targetClusterSize int, clientTlsType 
e2e.ClientConnType, isClientAutoTls bool) { - e2e.BeforeTest(t) - - // step 1: start the discovery service - ds, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithBasePort(2000), - e2e.WithClusterSize(discoveryClusterSize), - e2e.WithClientConnType(clientTlsType), - e2e.WithClientAutoTLS(isClientAutoTls), - ) - if err != nil { - t.Fatalf("could not start discovery etcd cluster (%v)", err) - } - defer ds.Close() - - // step 2: configure the cluster size - discoveryToken := "8A591FAB-1D72-41FA-BDF2-A27162FDA1E0" - configSizeKey := fmt.Sprintf("/_etcd/registry/%s/_config/size", discoveryToken) - configSizeValStr := strconv.Itoa(targetClusterSize) - if err := ctlV3Put(ctlCtx{epc: ds}, configSizeKey, configSizeValStr, ""); err != nil { - t.Errorf("failed to configure cluster size to discovery serivce, error: %v", err) - } - - // step 3: start the etcd cluster - epc, err := bootstrapEtcdClusterUsingV3Discovery(t, ds.EndpointsV3(), discoveryToken, targetClusterSize, clientTlsType, isClientAutoTls) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer epc.Close() - - // step 4: sanity test on the etcd cluster - etcdctl := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsV3(), ",")} - if err := e2e.SpawnWithExpect(append(etcdctl, "put", "key", "value"), "OK"); err != nil { - t.Fatal(err) - } - if err := e2e.SpawnWithExpect(append(etcdctl, "get", "key"), "value"); err != nil { - t.Fatal(err) - } -} - -func bootstrapEtcdClusterUsingV3Discovery(t *testing.T, discoveryEndpoints []string, discoveryToken string, clusterSize int, clientTlsType e2e.ClientConnType, isClientAutoTls bool) (*e2e.EtcdProcessCluster, error) { - // cluster configuration - cfg := e2e.NewConfig( - e2e.WithBasePort(3000), - e2e.WithClusterSize(clusterSize), - e2e.WithIsPeerTLS(true), - e2e.WithIsPeerAutoTLS(true), - e2e.WithDiscoveryToken(discoveryToken), - e2e.WithDiscoveryEndpoints(discoveryEndpoints), - ) - - // initialize the cluster 
- epc, err := e2e.InitEtcdProcessCluster(t, cfg) - if err != nil { - t.Fatalf("could not initialize etcd cluster (%v)", err) - return epc, err - } - - // populate discovery related security configuration - for _, ep := range epc.Procs { - epCfg := ep.Config() - - if clientTlsType == e2e.ClientTLS { - if isClientAutoTls { - epCfg.Args = append(epCfg.Args, "--discovery-insecure-transport=false") - epCfg.Args = append(epCfg.Args, "--discovery-insecure-skip-tls-verify=true") - } else { - epCfg.Args = append(epCfg.Args, "--discovery-cacert="+e2e.CaPath) - epCfg.Args = append(epCfg.Args, "--discovery-cert="+e2e.CertPath) - epCfg.Args = append(epCfg.Args, "--discovery-key="+e2e.PrivateKeyPath) - } - } - } - - // start the cluster - return e2e.StartEtcdProcessCluster(context.TODO(), epc, cfg) -} diff --git a/tests/e2e/doc.go b/tests/e2e/doc.go deleted file mode 100644 index a6887cfce19..00000000000 --- a/tests/e2e/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package e2e implements tests built upon etcd binaries, and focus on -end-to-end testing. - -Features/goals of the end-to-end tests: -1. test command-line parsing and arguments. -2. test user-facing command-line API. -3. launch full processes and check for expected outputs. 
-*/ -package e2e diff --git a/tests/e2e/etcd_config_test.go b/tests/e2e/etcd_config_test.go deleted file mode 100644 index 0f130a5f58a..00000000000 --- a/tests/e2e/etcd_config_test.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "golang.org/x/sync/errgroup" - - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -const exampleConfigFile = "../../etcd.conf.yml.sample" - -func TestEtcdExampleConfig(t *testing.T) { - e2e.SkipInShortMode(t) - - proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--config-file", exampleConfigFile}, nil) - if err != nil { - t.Fatal(err) - } - if err = e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines); err != nil { - t.Fatal(err) - } - if err = proc.Stop(); err != nil { - t.Fatal(err) - } -} - -func TestEtcdMultiPeer(t *testing.T) { - e2e.SkipInShortMode(t) - - peers, tmpdirs := make([]string, 3), make([]string, 3) - for i := range peers { - peers[i] = fmt.Sprintf("e%d=http://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i) - tmpdirs[i] = t.TempDir() - } - ic := strings.Join(peers, ",") - - procs := make([]*expect.ExpectProcess, len(peers)) - defer func() { - for i := range procs { - if procs[i] != 
nil { - procs[i].Stop() - procs[i].Close() - } - } - }() - for i := range procs { - args := []string{ - e2e.BinPath.Etcd, - "--name", fmt.Sprintf("e%d", i), - "--listen-client-urls", "http://0.0.0.0:0", - "--data-dir", tmpdirs[i], - "--advertise-client-urls", "http://0.0.0.0:0", - "--listen-peer-urls", fmt.Sprintf("http://127.0.0.1:%d,http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i), - "--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", e2e.EtcdProcessBasePort+i), - "--initial-cluster", ic, - } - p, err := e2e.SpawnCmd(args, nil) - if err != nil { - t.Fatal(err) - } - procs[i] = p - } - - for _, p := range procs { - if err := e2e.WaitReadyExpectProc(context.TODO(), p, e2e.EtcdServerReadyLines); err != nil { - t.Fatal(err) - } - } -} - -// TestEtcdUnixPeers checks that etcd will boot with unix socket peers. -func TestEtcdUnixPeers(t *testing.T) { - e2e.SkipInShortMode(t) - - d := t.TempDir() - proc, err := e2e.SpawnCmd( - []string{ - e2e.BinPath.Etcd, - "--data-dir", d, - "--name", "e1", - "--listen-peer-urls", "unix://etcd.unix:1", - "--initial-advertise-peer-urls", "unix://etcd.unix:1", - "--initial-cluster", "e1=unix://etcd.unix:1", - }, nil, - ) - defer os.Remove("etcd.unix:1") - if err != nil { - t.Fatal(err) - } - if err = e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines); err != nil { - t.Fatal(err) - } - if err = proc.Stop(); err != nil { - t.Fatal(err) - } -} - -// TestEtcdPeerCNAuth checks that the inter peer auth based on CN of cert is working correctly. 
-func TestEtcdPeerCNAuth(t *testing.T) { - e2e.SkipInShortMode(t) - - peers, tmpdirs := make([]string, 3), make([]string, 3) - for i := range peers { - peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i) - tmpdirs[i] = t.TempDir() - } - ic := strings.Join(peers, ",") - - procs := make([]*expect.ExpectProcess, len(peers)) - defer func() { - for i := range procs { - if procs[i] != nil { - procs[i].Stop() - procs[i].Close() - } - } - }() - - // node 0 and 1 have a cert with the correct CN, node 2 doesn't - for i := range procs { - commonArgs := []string{ - e2e.BinPath.Etcd, - "--name", fmt.Sprintf("e%d", i), - "--listen-client-urls", "http://0.0.0.0:0", - "--data-dir", tmpdirs[i], - "--advertise-client-urls", "http://0.0.0.0:0", - "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i), - "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i), - "--initial-cluster", ic, - } - - var args []string - if i <= 1 { - args = []string{ - "--peer-cert-file", e2e.CertPath, - "--peer-key-file", e2e.PrivateKeyPath, - "--peer-client-cert-file", e2e.CertPath, - "--peer-client-key-file", e2e.PrivateKeyPath, - "--peer-trusted-ca-file", e2e.CaPath, - "--peer-client-cert-auth", - "--peer-cert-allowed-cn", "example.com", - } - } else { - args = []string{ - "--peer-cert-file", e2e.CertPath2, - "--peer-key-file", e2e.PrivateKeyPath2, - "--peer-client-cert-file", e2e.CertPath2, - "--peer-client-key-file", e2e.PrivateKeyPath2, - "--peer-trusted-ca-file", e2e.CaPath, - "--peer-client-cert-auth", - "--peer-cert-allowed-cn", "example2.com", - } - } - - commonArgs = append(commonArgs, args...) 
- - p, err := e2e.SpawnCmd(commonArgs, nil) - if err != nil { - t.Fatal(err) - } - procs[i] = p - } - - for i, p := range procs { - var expect []string - if i <= 1 { - expect = e2e.EtcdServerReadyLines - } else { - expect = []string{"remote error: tls: bad certificate"} - } - if err := e2e.WaitReadyExpectProc(context.TODO(), p, expect); err != nil { - t.Fatal(err) - } - } -} - -// TestEtcdPeerNameAuth checks that the inter peer auth based on cert name validation is working correctly. -func TestEtcdPeerNameAuth(t *testing.T) { - e2e.SkipInShortMode(t) - - peers, tmpdirs := make([]string, 3), make([]string, 3) - for i := range peers { - peers[i] = fmt.Sprintf("e%d=https://127.0.0.1:%d", i, e2e.EtcdProcessBasePort+i) - tmpdirs[i] = t.TempDir() - } - ic := strings.Join(peers, ",") - - procs := make([]*expect.ExpectProcess, len(peers)) - defer func() { - for i := range procs { - if procs[i] != nil { - procs[i].Stop() - procs[i].Close() - } - os.RemoveAll(tmpdirs[i]) - } - }() - - // node 0 and 1 have a cert with the correct certificate name, node 2 doesn't - for i := range procs { - commonArgs := []string{ - e2e.BinPath.Etcd, - "--name", fmt.Sprintf("e%d", i), - "--listen-client-urls", "http://0.0.0.0:0", - "--data-dir", tmpdirs[i], - "--advertise-client-urls", "http://0.0.0.0:0", - "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d,https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i, e2e.EtcdProcessBasePort+len(peers)+i), - "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort+i), - "--initial-cluster", ic, - } - - var args []string - if i <= 1 { - args = []string{ - "--peer-cert-file", e2e.CertPath, - "--peer-key-file", e2e.PrivateKeyPath, - "--peer-trusted-ca-file", e2e.CaPath, - "--peer-client-cert-auth", - "--peer-cert-allowed-hostname", "localhost", - } - } else { - args = []string{ - "--peer-cert-file", e2e.CertPath2, - "--peer-key-file", e2e.PrivateKeyPath2, - "--peer-trusted-ca-file", e2e.CaPath, - 
"--peer-client-cert-auth", - "--peer-cert-allowed-hostname", "example2.com", - } - } - - commonArgs = append(commonArgs, args...) - - p, err := e2e.SpawnCmd(commonArgs, nil) - if err != nil { - t.Fatal(err) - } - procs[i] = p - } - - for i, p := range procs { - var expect []string - if i <= 1 { - expect = e2e.EtcdServerReadyLines - } else { - expect = []string{"client certificate authentication failed"} - } - if err := e2e.WaitReadyExpectProc(context.TODO(), p, expect); err != nil { - t.Fatal(err) - } - } -} - -func TestGrpcproxyAndCommonName(t *testing.T) { - e2e.SkipInShortMode(t) - - argsWithNonEmptyCN := []string{ - e2e.BinPath.Etcd, - "grpc-proxy", - "start", - "--cert", e2e.CertPath2, - "--key", e2e.PrivateKeyPath2, - "--cacert", e2e.CaPath, - } - - argsWithEmptyCN := []string{ - e2e.BinPath.Etcd, - "grpc-proxy", - "start", - "--cert", e2e.CertPath3, - "--key", e2e.PrivateKeyPath3, - "--cacert", e2e.CaPath, - } - - err := e2e.SpawnWithExpect(argsWithNonEmptyCN, "cert has non empty Common Name") - require.ErrorContains(t, err, "cert has non empty Common Name") - - p, err := e2e.SpawnCmd(argsWithEmptyCN, nil) - defer func() { - if p != nil { - p.Stop() - } - }() - - if err != nil { - t.Fatal(err) - } -} - -func TestGrpcproxyAndListenCipherSuite(t *testing.T) { - e2e.SkipInShortMode(t) - - cases := []struct { - name string - args []string - }{ - { - name: "ArgsWithCipherSuites", - args: []string{ - e2e.BinPath.Etcd, - "grpc-proxy", - "start", - "--listen-cipher-suites", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", - }, - }, - { - name: "ArgsWithoutCipherSuites", - args: []string{ - e2e.BinPath.Etcd, - "grpc-proxy", - "start", - "--listen-cipher-suites", "", - }, - }, - } - - for _, test := range cases { - t.Run(test.name, func(t *testing.T) { - pw, err := 
e2e.SpawnCmd(test.args, nil) - if err != nil { - t.Fatal(err) - } - if err = pw.Stop(); err != nil { - t.Fatal(err) - } - }) - } -} - -func TestBootstrapDefragFlag(t *testing.T) { - e2e.SkipInShortMode(t) - - proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil) - if err != nil { - t.Fatal(err) - } - if err = e2e.WaitReadyExpectProc(context.TODO(), proc, []string{"Skipping defragmentation"}); err != nil { - t.Fatal(err) - } - if err = proc.Stop(); err != nil { - t.Fatal(err) - } - - // wait for the process to exit, otherwise test will have leaked goroutine - if err := proc.Close(); err != nil { - t.Logf("etcd process closed with error %v", err) - } -} - -func TestSnapshotCatchupEntriesFlag(t *testing.T) { - e2e.SkipInShortMode(t) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--experimental-snapshot-catchup-entries", "1000"}, nil) - require.NoError(t, err) - require.NoError(t, e2e.WaitReadyExpectProc(ctx, proc, []string{"\"snapshot-catchup-entries\":1000"})) - require.NoError(t, e2e.WaitReadyExpectProc(ctx, proc, []string{"serving client traffic"})) - require.NoError(t, proc.Stop()) - - // wait for the process to exit, otherwise test will have leaked goroutine - if err := proc.Close(); err != nil { - t.Logf("etcd process closed with error %v", err) - } -} - -// TestEtcdHealthyWithTinySnapshotCatchupEntries ensures multi-node etcd cluster remains healthy with 1 snapshot catch up entry -func TestEtcdHealthyWithTinySnapshotCatchupEntries(t *testing.T) { - e2e.BeforeTest(t) - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(3), - e2e.WithSnapshotCount(1), - e2e.WithSnapshotCatchUpEntries(1), - ) - require.NoErrorf(t, err, "could not start etcd process cluster (%v)", err) - t.Cleanup(func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd 
processes (%v)", errC) - } - }) - - // simulate 10 clients keep writing to etcd in parallel with no error - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - g, ctx := errgroup.WithContext(ctx) - for i := 0; i < 10; i++ { - clientId := i - g.Go(func() error { - cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - if err != nil { - return err - } - for j := 0; j < 100; j++ { - if err = cc.Put(ctx, "foo", fmt.Sprintf("bar%d", clientId), config.PutOptions{}); err != nil { - return err - } - } - return nil - }) - } - require.NoError(t, g.Wait()) -} - -func TestEtcdTLSVersion(t *testing.T) { - e2e.SkipInShortMode(t) - - d := t.TempDir() - proc, err := e2e.SpawnCmd( - []string{ - e2e.BinPath.Etcd, - "--data-dir", d, - "--name", "e1", - "--listen-client-urls", "https://0.0.0.0:0", - "--advertise-client-urls", "https://0.0.0.0:0", - "--listen-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort), - "--initial-advertise-peer-urls", fmt.Sprintf("https://127.0.0.1:%d", e2e.EtcdProcessBasePort), - "--initial-cluster", fmt.Sprintf("e1=https://127.0.0.1:%d", e2e.EtcdProcessBasePort), - "--peer-cert-file", e2e.CertPath, - "--peer-key-file", e2e.PrivateKeyPath, - "--cert-file", e2e.CertPath2, - "--key-file", e2e.PrivateKeyPath2, - - "--tls-min-version", "TLS1.2", - "--tls-max-version", "TLS1.3", - }, nil, - ) - assert.NoError(t, err) - assert.NoError(t, e2e.WaitReadyExpectProc(context.TODO(), proc, e2e.EtcdServerReadyLines), "did not receive expected output from etcd process") - assert.NoError(t, proc.Stop()) - -} diff --git a/tests/e2e/etcd_grpcproxy_test.go b/tests/e2e/etcd_grpcproxy_test.go deleted file mode 100644 index db9ad7b4016..00000000000 --- a/tests/e2e/etcd_grpcproxy_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -func TestGrpcProxyAutoSync(t *testing.T) { - e2e.SkipInShortMode(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - epc, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithClusterSize(1)) - require.NoError(t, err) - defer func() { - assert.NoError(t, epc.Close()) - }() - - var ( - node1ClientURL = epc.Procs[0].Config().ClientURL - proxyClientURL = "127.0.0.1:32379" - ) - - // Run independent grpc-proxy instance - proxyProc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "grpc-proxy", "start", - "--advertise-client-url", proxyClientURL, "--listen-addr", proxyClientURL, - "--endpoints", node1ClientURL, - "--endpoints-auto-sync-interval", "1s", - }, nil) - require.NoError(t, err) - defer func() { - assert.NoError(t, proxyProc.Stop()) - }() - - proxyCtl, err := e2e.NewEtcdctl(e2e.ClientConfig{}, []string{proxyClientURL}) - require.NoError(t, err) - err = proxyCtl.Put(ctx, "k1", "v1", config.PutOptions{}) - require.NoError(t, err) - - // Add and start second member - err = epc.StartNewProc(ctx, nil, t) - require.NoError(t, err) - - // Wait for auto sync of endpoints - err = waitForEndpointInLog(ctx, proxyProc, 
epc.Procs[1].Config().ClientURL) - require.NoError(t, err) - - err = epc.CloseProc(ctx, func(proc e2e.EtcdProcess) bool { - return proc.Config().ClientURL == node1ClientURL - }) - require.NoError(t, err) - - var resp *clientv3.GetResponse - for i := 0; i < 10; i++ { - resp, err = proxyCtl.Get(ctx, "k1", config.GetOptions{}) - if err != nil && strings.Contains(err.Error(), rpctypes.ErrGRPCLeaderChanged.Error()) { - time.Sleep(500 * time.Millisecond) - continue - } - } - require.NoError(t, err) - - kvs := testutils.KeyValuesFromGetResponse(resp) - assert.Equal(t, []testutils.KV{{Key: "k1", Val: "v1"}}, kvs) -} - -func waitForEndpointInLog(ctx context.Context, proxyProc *expect.ExpectProcess, endpoint string) error { - endpoint = strings.Replace(endpoint, "http://", "", 1) - - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - _, err := proxyProc.ExpectFunc(ctx, func(s string) bool { - if strings.Contains(s, endpoint) { - return true - } - return false - }) - - return err -} diff --git a/tests/e2e/etcd_mix_versions_test.go b/tests/e2e/etcd_mix_versions_test.go deleted file mode 100644 index ae11db6b1c2..00000000000 --- a/tests/e2e/etcd_mix_versions_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -// TestMixVersionsSendSnapshot tests the mix version send snapshots -// TODO(ahrtr): add network partition scenario to trigger snapshots. -func TestMixVersionsSendSnapshot(t *testing.T) { - cases := []struct { - name string - clusterVersion e2e.ClusterVersion - newInstanceVersion e2e.ClusterVersion - }{ - // etcd doesn't support adding a new member of old version into - // a cluster with higher version. For example, etcd cluster - // version is 3.6.x, then a new member of 3.5.x can't join the - // cluster. Please refer to link below, - // https://github.com/etcd-io/etcd/blob/3e903d0b12e399519a4013c52d4635ec8bdd6863/server/etcdserver/cluster_util.go#L222-L230 - /*{ - name: "etcd instance with last version receives snapshot from the leader with current version", - clusterVersion: e2e.CurrentVersion, - newInstaceVersion: e2e.LastVersion, - },*/ - { - name: "etcd instance with current version receives snapshot from the leader with last version", - clusterVersion: e2e.LastVersion, - newInstanceVersion: e2e.CurrentVersion, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - mixVersionsSnapshotTest(t, tc.clusterVersion, tc.newInstanceVersion) - }) - } -} - -func mixVersionsSnapshotTest(t *testing.T, clusterVersion, newInstanceVersion e2e.ClusterVersion) { - e2e.BeforeTest(t) - - if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease) - } - - // Create an etcd cluster with 1 member - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(1), - e2e.WithSnapshotCount(10), - e2e.WithVersion(clusterVersion), - ) - if err != nil { - t.Fatalf("failed to start etcd cluster: %v", err) - } - defer func() { - if err := epc.Close(); 
err != nil { - t.Fatalf("failed to close etcd cluster: %v", err) - } - }() - - // Write more than SnapshotCount entries to trigger at least a snapshot. - t.Log("Writing 20 keys to the cluster") - for i := 0; i < 20; i++ { - key := fmt.Sprintf("key-%d", i) - value := fmt.Sprintf("value-%d", i) - if err := epc.Client().Put(context.TODO(), key, value, config.PutOptions{}); err != nil { - t.Fatalf("failed to put %q, error: %v", key, err) - } - } - - // start a new etcd instance, which will receive a snapshot from the leader. - newCfg := *epc.Cfg - newCfg.Version = newInstanceVersion - t.Log("Starting a new etcd instance") - if err := epc.StartNewProc(context.TODO(), &newCfg, t); err != nil { - t.Fatalf("failed to start the new etcd instance: %v", err) - } - defer epc.CloseProc(context.TODO(), nil) - - // verify all nodes have exact same revision and hash - t.Log("Verify all nodes have exact same revision and hash") - assert.Eventually(t, func() bool { - hashKvs, err := epc.Client().HashKV(context.TODO(), 0) - if err != nil { - t.Logf("failed to get HashKV: %v", err) - return false - } - if len(hashKvs) != 2 { - t.Logf("expected 2 hashkv responses, but got: %d", len(hashKvs)) - return false - } - - if hashKvs[0].Header.Revision != hashKvs[1].Header.Revision { - t.Logf("Got different revisions, [%d, %d]", hashKvs[0].Header.Revision, hashKvs[1].Header.Revision) - return false - } - - assert.Equal(t, hashKvs[0].Hash, hashKvs[1].Hash) - - return true - }, 10*time.Second, 500*time.Millisecond) -} diff --git a/tests/e2e/etcd_release_upgrade_test.go b/tests/e2e/etcd_release_upgrade_test.go deleted file mode 100644 index 3b91665e4f2..00000000000 --- a/tests/e2e/etcd_release_upgrade_test.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -// TestReleaseUpgrade ensures that changes to master branch does not affect -// upgrade from latest etcd releases. -func TestReleaseUpgrade(t *testing.T) { - if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease) - } - - e2e.BeforeTest(t) - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithVersion(e2e.LastVersion), - e2e.WithSnapshotCount(3), - e2e.WithBaseScheme("unix"), // to avoid port conflict - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }() - - cx := ctlCtx{ - t: t, - cfg: *e2e.NewConfigNoTLS(), - dialTimeout: 7 * time.Second, - quorum: true, - epc: epc, - } - var kvs []kv - for i := 0; i < 5; i++ { - kvs = append(kvs, kv{key: fmt.Sprintf("foo%d", i), val: "bar"}) - } - for i := range kvs { - if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil { - cx.t.Fatalf("#%d: ctlV3Put error (%v)", i, err) - } - } - - t.Log("Cluster of etcd in old version running") - - for i := range epc.Procs { - t.Logf("Stopping node: %v", i) - if err := epc.Procs[i].Stop(); err != nil { - t.Fatalf("#%d: error closing etcd process (%v)", i, err) - } - t.Logf("Stopped node: %v", i) - epc.Procs[i].Config().ExecPath = 
e2e.BinPath.Etcd - epc.Procs[i].Config().KeepDataDir = true - - t.Logf("Restarting node in the new version: %v", i) - if err := epc.Procs[i].Restart(context.TODO()); err != nil { - t.Fatalf("error restarting etcd process (%v)", err) - } - - t.Logf("Testing reads after node restarts: %v", i) - for j := range kvs { - if err := ctlV3Get(cx, []string{kvs[j].key}, []kv{kvs[j]}...); err != nil { - cx.t.Fatalf("#%d-%d: ctlV3Get error (%v)", i, j, err) - } - } - t.Logf("Tested reads after node restarts: %v", i) - } - - t.Log("Waiting for full upgrade...") - // TODO: update after release candidate - // expect upgraded cluster version - // new cluster version needs more time to upgrade - ver := version.Cluster(version.Version) - for i := 0; i < 7; i++ { - if err = e2e.CURLGet(epc, e2e.CURLReq{Endpoint: "/version", Expected: `"etcdcluster":"` + ver}); err != nil { - t.Logf("#%d: %v is not ready yet (%v)", i, ver, err) - time.Sleep(time.Second) - continue - } - break - } - if err != nil { - t.Fatalf("cluster version is not upgraded (%v)", err) - } - t.Log("TestReleaseUpgrade businessLogic DONE") -} - -func TestReleaseUpgradeWithRestart(t *testing.T) { - if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease) - } - - e2e.BeforeTest(t) - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithVersion(e2e.LastVersion), - e2e.WithSnapshotCount(10), - e2e.WithBaseScheme("unix"), - ) - - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }() - - cx := ctlCtx{ - t: t, - cfg: *e2e.NewConfigNoTLS(), - dialTimeout: 7 * time.Second, - quorum: true, - epc: epc, - } - var kvs []kv - for i := 0; i < 50; i++ { - kvs = append(kvs, kv{key: fmt.Sprintf("foo%d", i), val: "bar"}) - } - for i := range kvs { - if err := ctlV3Put(cx, kvs[i].key, kvs[i].val, ""); err != nil { - 
cx.t.Fatalf("#%d: ctlV3Put error (%v)", i, err) - } - } - - for i := range epc.Procs { - if err := epc.Procs[i].Stop(); err != nil { - t.Fatalf("#%d: error closing etcd process (%v)", i, err) - } - } - - var wg sync.WaitGroup - wg.Add(len(epc.Procs)) - for i := range epc.Procs { - go func(i int) { - epc.Procs[i].Config().ExecPath = e2e.BinPath.Etcd - epc.Procs[i].Config().KeepDataDir = true - if err := epc.Procs[i].Restart(context.TODO()); err != nil { - t.Errorf("error restarting etcd process (%v)", err) - } - wg.Done() - }(i) - } - wg.Wait() - - if err := ctlV3Get(cx, []string{kvs[0].key}, []kv{kvs[0]}...); err != nil { - t.Fatal(err) - } -} diff --git a/tests/e2e/gateway_test.go b/tests/e2e/gateway_test.go deleted file mode 100644 index d6d1bc759ff..00000000000 --- a/tests/e2e/gateway_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "strings" - "testing" - - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -var ( - defaultGatewayEndpoint = "127.0.0.1:23790" -) - -func TestGateway(t *testing.T) { - ec, err := e2e.NewEtcdProcessCluster(context.TODO(), t) - if err != nil { - t.Fatal(err) - } - defer ec.Stop() - - eps := strings.Join(ec.EndpointsV3(), ",") - - p := startGateway(t, eps) - defer func() { - p.Stop() - p.Close() - }() - - err = e2e.SpawnWithExpect([]string{e2e.BinPath.Etcdctl, "--endpoints=" + defaultGatewayEndpoint, "put", "foo", "bar"}, "OK\r\n") - if err != nil { - t.Errorf("failed to finish put request through gateway: %v", err) - } -} - -func startGateway(t *testing.T, endpoints string) *expect.ExpectProcess { - p, err := expect.NewExpect(e2e.BinPath.Etcd, "gateway", "--endpoints="+endpoints, "start") - if err != nil { - t.Fatal(err) - } - _, err = p.Expect("ready to proxy client requests") - if err != nil { - t.Fatal(err) - } - return p -} diff --git a/tests/e2e/main_test.go b/tests/e2e/main_test.go deleted file mode 100644 index 58d7efb95da..00000000000 --- a/tests/e2e/main_test.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package e2e - -import ( - "os" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestMain(m *testing.M) { - e2e.InitFlags() - v := m.Run() - if v == 0 && testutil.CheckLeakedGoroutine() { - os.Exit(1) - } - os.Exit(v) -} diff --git a/tests/e2e/metrics_test.go b/tests/e2e/metrics_test.go deleted file mode 100644 index 68bef7b2d3f..00000000000 --- a/tests/e2e/metrics_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "fmt" - "testing" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestV3MetricsSecure(t *testing.T) { - cfg := e2e.NewConfigTLS() - cfg.ClusterSize = 1 - cfg.MetricsURLScheme = "https" - testCtl(t, metricsTest) -} - -func TestV3MetricsInsecure(t *testing.T) { - cfg := e2e.NewConfigTLS() - cfg.ClusterSize = 1 - cfg.MetricsURLScheme = "http" - testCtl(t, metricsTest) -} - -func metricsTest(cx ctlCtx) { - if err := ctlV3Put(cx, "k", "v", ""); err != nil { - cx.t.Fatal(err) - } - - i := 0 - for _, test := range []struct { - endpoint, expected string - }{ - {"/metrics", "etcd_mvcc_put_total 2"}, - {"/metrics", "etcd_debugging_mvcc_keys_total 1"}, - {"/metrics", "etcd_mvcc_delete_total 3"}, - {"/metrics", fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version)}, - {"/metrics", fmt.Sprintf(`etcd_cluster_version{cluster_version="%s"} 1`, version.Cluster(version.Version))}, - {"/metrics", `grpc_server_handled_total{grpc_code="Canceled",grpc_method="Watch",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"} 6`}, - {"/health", `{"health":"true","reason":""}`}, - } { - i++ - if err := ctlV3Put(cx, fmt.Sprintf("%d", i), "v", ""); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Del(cx, []string{fmt.Sprintf("%d", i)}, 1); err != nil { - cx.t.Fatal(err) - } - if err := ctlV3Watch(cx, []string{"k", "--rev", "1"}, []kvExec{{key: "k", val: "v"}}...); err != nil { - cx.t.Fatal(err) - } - if err := e2e.CURLGet(cx.epc, e2e.CURLReq{Endpoint: test.endpoint, Expected: 
test.expected, MetricsURLScheme: cx.cfg.MetricsURLScheme}); err != nil { - cx.t.Fatalf("failed get with curl (%v)", err) - } - } -} diff --git a/tests/e2e/no_quorum_ready_test.go b/tests/e2e/no_quorum_ready_test.go deleted file mode 100644 index ff1b32abf35..00000000000 --- a/tests/e2e/no_quorum_ready_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "testing" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestInitDaemonNotifyWithoutQuorum(t *testing.T) { - // Initialize a cluster with 3 members - epc, err := e2e.InitEtcdProcessCluster(t, e2e.NewConfigAutoTLS()) - if err != nil { - t.Fatalf("Failed to initilize the etcd cluster: %v", err) - } - - defer epc.Close() - - // Remove two members, so that only one etcd will get started - epc.Procs = epc.Procs[:1] - - // Start the etcd cluster with only one member - if err := epc.Start(context.TODO()); err != nil { - t.Fatalf("Failed to start the etcd cluster: %v", err) - } - - // Expect log message indicating time out waiting for quorum hit - e2e.AssertProcessLogs(t, epc.Procs[0], "startEtcd: timed out waiting for the ready notification") - // Expect log message indicating systemd notify message has been sent - e2e.AssertProcessLogs(t, epc.Procs[0], "notifying init daemon") -} diff --git a/tests/e2e/promote_experimental_flag_test.go b/tests/e2e/promote_experimental_flag_test.go deleted file 
mode 100644 index 00a2e55d748..00000000000 --- a/tests/e2e/promote_experimental_flag_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestWarningApplyDuration(t *testing.T) { - e2e.BeforeTest(t) - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(1), - e2e.WithWarningUnaryRequestDuration(time.Microsecond), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - t.Cleanup(func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }) - - cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - require.NoError(t, err) - err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{}) - assert.NoError(t, err, "error on put") - - // verify warning - e2e.AssertProcessLogs(t, epc.Procs[0], "request stats") -} - -// TestExperimentalWarningApplyDuration tests the experimental warning apply duration -// TODO: this test is a duplicate of TestWarningApplyDuration except it uses --experimental-warning-unary-request-duration -// Remove this test after --experimental-warning-unary-request-duration flag is removed. 
-func TestExperimentalWarningApplyDuration(t *testing.T) { - e2e.BeforeTest(t) - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(1), - e2e.WithExperimentalWarningUnaryRequestDuration(time.Microsecond), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - t.Cleanup(func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }) - - cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - require.NoError(t, err) - err = cc.Put(context.TODO(), "foo", "bar", config.PutOptions{}) - assert.NoError(t, err, "error on put") - - // verify warning - e2e.AssertProcessLogs(t, epc.Procs[0], "request stats") -} - -func TestBothWarningApplyDurationFlagsFail(t *testing.T) { - e2e.BeforeTest(t) - - _, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(1), - e2e.WithWarningUnaryRequestDuration(time.Second), - e2e.WithExperimentalWarningUnaryRequestDuration(time.Second), - ) - if err == nil { - t.Fatal("Expected process to fail") - } -} diff --git a/tests/e2e/utl_migrate_test.go b/tests/e2e/utl_migrate_test.go deleted file mode 100644 index 60f6f46a331..00000000000 --- a/tests/e2e/utl_migrate_test.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "fmt" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestEtctlutlMigrate(t *testing.T) { - lastReleaseBinary := e2e.BinPath.EtcdLastRelease - - tcs := []struct { - name string - targetVersion string - clusterVersion e2e.ClusterVersion - force bool - - expectLogsSubString string - expectStorageVersion *semver.Version - }{ - { - name: "Invalid target version string", - targetVersion: "abc", - expectLogsSubString: `Error: wrong target version format, expected "X.Y", got "abc"`, - expectStorageVersion: &version.V3_6, - }, - { - name: "Invalid target version", - targetVersion: "3.a", - expectLogsSubString: `Error: failed to parse target version: strconv.ParseInt: parsing "a": invalid syntax`, - expectStorageVersion: &version.V3_6, - }, - { - name: "Target with only major version is invalid", - targetVersion: "3", - expectLogsSubString: `Error: wrong target version format, expected "X.Y", got "3"`, - expectStorageVersion: &version.V3_6, - }, - { - name: "Target with patch version is invalid", - targetVersion: "3.6.0", - expectLogsSubString: `Error: wrong target version format, expected "X.Y", got "3.6.0"`, - expectStorageVersion: &version.V3_6, - }, - { - name: "Migrate v3.5 to v3.5 is no-op", - clusterVersion: e2e.LastVersion, - targetVersion: "3.5", - expectLogsSubString: "storage version up-to-date\t" + `{"storage-version": "3.5"}`, - }, - { - name: "Upgrade v3.5 to v3.6 should work", - clusterVersion: e2e.LastVersion, - targetVersion: "3.6", - expectStorageVersion: &version.V3_6, - }, - { - name: "Migrate v3.6 to v3.6 is no-op", - targetVersion: "3.6", - 
expectLogsSubString: "storage version up-to-date\t" + `{"storage-version": "3.6"}`, - expectStorageVersion: &version.V3_6, - }, - { - name: "Downgrade v3.6 to v3.5 should fail until it's implemented", - targetVersion: "3.5", - expectLogsSubString: "cannot downgrade storage, WAL contains newer entries", - expectStorageVersion: &version.V3_6, - }, - { - name: "Downgrade v3.6 to v3.5 with force should work", - targetVersion: "3.5", - force: true, - expectLogsSubString: "forcefully cleared storage version", - }, - { - name: "Upgrade v3.6 to v3.7 with force should work", - targetVersion: "3.7", - force: true, - expectLogsSubString: "forcefully set storage version\t" + `{"storage-version": "3.7"}`, - expectStorageVersion: &semver.Version{Major: 3, Minor: 7}, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - e2e.BeforeTest(t) - lg := zaptest.NewLogger(t) - if tc.clusterVersion != e2e.CurrentVersion && !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", lastReleaseBinary) - } - dataDirPath := t.TempDir() - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithVersion(tc.clusterVersion), - e2e.WithDataDirPath(dataDirPath), - e2e.WithClusterSize(1), - e2e.WithKeepDataDir(true), - // Set low SnapshotCount to ensure wal snapshot is done - e2e.WithSnapshotCount(1), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - defer func() { - if errC := epc.Close(); errC != nil { - t.Fatalf("error closing etcd processes (%v)", errC) - } - }() - - dialTimeout := 10 * time.Second - prefixArgs := []string{e2e.BinPath.Etcdctl, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()} - - t.Log("Write keys to ensure wal snapshot is created and all v3.5 fields are set...") - for i := 0; i < 10; i++ { - if err = e2e.SpawnWithExpect(append(prefixArgs, "put", fmt.Sprintf("%d", i), "value"), "OK"); err != nil { - t.Fatal(err) - } - } - - t.Log("Stopping the 
server...") - if err = epc.Procs[0].Stop(); err != nil { - t.Fatal(err) - } - - t.Log("etcdutl migrate...") - memberDataDir := epc.Procs[0].Config().DataDirPath - args := []string{e2e.BinPath.Etcdutl, "migrate", "--data-dir", memberDataDir, "--target-version", tc.targetVersion} - if tc.force { - args = append(args, "--force") - } - err = e2e.SpawnWithExpect(args, tc.expectLogsSubString) - if err != nil { - if tc.expectLogsSubString != "" { - require.ErrorContains(t, err, tc.expectLogsSubString) - } else { - t.Fatal(err) - } - } - - t.Log("etcdutl migrate...") - be := backend.NewDefaultBackend(lg, filepath.Join(memberDataDir, "member/snap/db")) - defer be.Close() - - ver := schema.ReadStorageVersion(be.ReadTx()) - assert.Equal(t, tc.expectStorageVersion, ver) - }) - } -} diff --git a/tests/e2e/v2store_deprecation_test.go b/tests/e2e/v2store_deprecation_test.go deleted file mode 100644 index 500e46149bc..00000000000 --- a/tests/e2e/v2store_deprecation_test.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "bytes" - "context" - "fmt" - "sort" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func createV2store(t testing.TB, dataDirPath string) string { - t.Log("Creating not-yet v2-deprecated etcd") - - cfg := e2e.ConfigStandalone(*e2e.NewConfig( - e2e.WithVersion(e2e.LastVersion), - e2e.WithEnableV2(true), - e2e.WithDataDirPath(dataDirPath), - e2e.WithSnapshotCount(5), - )) - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg)) - assert.NoError(t, err) - memberDataDir := epc.Procs[0].Config().DataDirPath - - defer func() { - assert.NoError(t, epc.Stop()) - }() - - // We need to exceed 'SnapshotCount' such that v2 snapshot is dumped. 
- for i := 0; i < 10; i++ { - if err := e2e.CURLPut(epc, e2e.CURLReq{ - Endpoint: "/v2/keys/foo", Value: "bar" + fmt.Sprint(i), - Expected: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)}); err != nil { - t.Fatalf("failed put with curl (%v)", err) - } - } - return memberDataDir -} - -func assertVerifyCannotStartV2deprecationWriteOnly(t testing.TB, dataDirPath string) { - t.Log("Verify its infeasible to start etcd with --v2-deprecation=write-only mode") - proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--v2-deprecation=write-only", "--data-dir=" + dataDirPath}, nil) - assert.NoError(t, err) - - _, err = proc.Expect("detected disallowed custom content in v2store for stage --v2-deprecation=write-only") - assert.NoError(t, err) -} - -func assertVerifyCannotStartV2deprecationNotYet(t testing.TB, dataDirPath string) { - t.Log("Verify its infeasible to start etcd with --v2-deprecation=not-yet mode") - proc, err := e2e.SpawnCmd([]string{e2e.BinPath.Etcd, "--v2-deprecation=not-yet", "--data-dir=" + dataDirPath}, nil) - assert.NoError(t, err) - - _, err = proc.Expect(`invalid value "not-yet" for flag -v2-deprecation: invalid value "not-yet"`) - assert.NoError(t, err) -} - -func TestV2DeprecationFlags(t *testing.T) { - e2e.BeforeTest(t) - dataDirPath := t.TempDir() - - if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease) - } - - var memberDataDir string - t.Run("create-storev2-data", func(t *testing.T) { - memberDataDir = createV2store(t, dataDirPath) - }) - - t.Run("--v2-deprecation=not-yet fails", func(t *testing.T) { - assertVerifyCannotStartV2deprecationNotYet(t, memberDataDir) - }) - - t.Run("--v2-deprecation=write-only fails", func(t *testing.T) { - assertVerifyCannotStartV2deprecationWriteOnly(t, memberDataDir) - }) - -} - -func TestV2DeprecationSnapshotMatches(t *testing.T) { - e2e.BeforeTest(t) - lastReleaseData := t.TempDir() - currentReleaseData := t.TempDir() - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - - if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease) - } - snapshotCount := 10 - epc := runEtcdAndCreateSnapshot(t, e2e.LastVersion, lastReleaseData, snapshotCount) - oldMemberDataDir := epc.Procs[0].Config().DataDirPath - cc1, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - assert.NoError(t, err) - members1 := addAndRemoveKeysAndMembers(ctx, t, cc1, snapshotCount) - assert.NoError(t, epc.Close()) - epc = runEtcdAndCreateSnapshot(t, e2e.CurrentVersion, currentReleaseData, snapshotCount) - newMemberDataDir := epc.Procs[0].Config().DataDirPath - cc2, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - assert.NoError(t, err) - members2 := addAndRemoveKeysAndMembers(ctx, t, cc2, snapshotCount) - assert.NoError(t, epc.Close()) - - assertSnapshotsMatch(t, oldMemberDataDir, newMemberDataDir, func(data []byte) []byte { - // Patch cluster version - data = bytes.Replace(data, []byte("3.5.0"), []byte("X.X.X"), -1) - data = bytes.Replace(data, []byte("3.6.0"), []byte("X.X.X"), -1) - // Patch members ids - for i, mid := range members1 { - data = bytes.Replace(data, []byte(fmt.Sprintf("%x", mid)), []byte(fmt.Sprintf("member%d", i+1)), -1) - } - for i, mid := range members2 { - data = bytes.Replace(data, []byte(fmt.Sprintf("%x", mid)), []byte(fmt.Sprintf("member%d", i+1)), -1) - } - return data - }) -} - -func TestV2DeprecationSnapshotRecover(t *testing.T) { - e2e.BeforeTest(t) - dataDir := t.TempDir() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) { - t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease) - } - epc := runEtcdAndCreateSnapshot(t, e2e.LastVersion, dataDir, 10) - - cc, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - assert.NoError(t, err) - - lastReleaseGetResponse, err := cc.Get(ctx, "", config.GetOptions{Prefix: true}) - 
assert.NoError(t, err) - - lastReleaseMemberListResponse, err := cc.MemberList(ctx) - assert.NoError(t, err) - - assert.NoError(t, epc.Close()) - cfg := e2e.ConfigStandalone(*e2e.NewConfig( - e2e.WithVersion(e2e.CurrentVersion), - e2e.WithDataDirPath(dataDir), - )) - epc, err = e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg)) - assert.NoError(t, err) - - cc, err = e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3()) - assert.NoError(t, err) - currentReleaseGetResponse, err := cc.Get(ctx, "", config.GetOptions{Prefix: true}) - assert.NoError(t, err) - - currentReleaseMemberListResponse, err := cc.MemberList(ctx) - assert.NoError(t, err) - - assert.Equal(t, lastReleaseGetResponse.Kvs, currentReleaseGetResponse.Kvs) - assert.Equal(t, lastReleaseMemberListResponse.Members, currentReleaseMemberListResponse.Members) - assert.NoError(t, epc.Close()) -} - -func runEtcdAndCreateSnapshot(t testing.TB, serverVersion e2e.ClusterVersion, dataDir string, snapshotCount int) *e2e.EtcdProcessCluster { - cfg := e2e.ConfigStandalone(*e2e.NewConfig( - e2e.WithVersion(serverVersion), - e2e.WithDataDirPath(dataDir), - e2e.WithSnapshotCount(snapshotCount), - e2e.WithKeepDataDir(true), - )) - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg)) - assert.NoError(t, err) - return epc -} - -func addAndRemoveKeysAndMembers(ctx context.Context, t testing.TB, cc *e2e.EtcdctlV3, snapshotCount int) (members []uint64) { - // Execute some non-trivial key&member operation - for i := 0; i < snapshotCount*3; i++ { - err := cc.Put(ctx, fmt.Sprintf("%d", i), "1", config.PutOptions{}) - assert.NoError(t, err) - } - member1, err := cc.MemberAddAsLearner(ctx, "member1", []string{"http://127.0.0.1:2000"}) - assert.NoError(t, err) - members = append(members, member1.Member.ID) - - for i := 0; i < snapshotCount*2; i++ { - _, err = cc.Delete(ctx, fmt.Sprintf("%d", i), config.DeleteOptions{}) - assert.NoError(t, err) - } - _, err = cc.MemberRemove(ctx, member1.Member.ID) - 
assert.NoError(t, err) - - for i := 0; i < snapshotCount; i++ { - err = cc.Put(ctx, fmt.Sprintf("%d", i), "2", config.PutOptions{}) - assert.NoError(t, err) - } - member2, err := cc.MemberAddAsLearner(ctx, "member2", []string{"http://127.0.0.1:2001"}) - assert.NoError(t, err) - members = append(members, member2.Member.ID) - - for i := 0; i < snapshotCount/2; i++ { - err = cc.Put(ctx, fmt.Sprintf("%d", i), "3", config.PutOptions{}) - assert.NoError(t, err) - } - return members -} - -func filterSnapshotFiles(path string) bool { - return strings.HasSuffix(path, ".snap") -} - -func assertSnapshotsMatch(t testing.TB, firstDataDir, secondDataDir string, patch func([]byte) []byte) { - lg := zaptest.NewLogger(t) - firstFiles, err := fileutil.ListFiles(firstDataDir, filterSnapshotFiles) - if err != nil { - t.Fatal(err) - } - secondFiles, err := fileutil.ListFiles(secondDataDir, filterSnapshotFiles) - if err != nil { - t.Fatal(err) - } - assert.NotEmpty(t, firstFiles) - assert.NotEmpty(t, secondFiles) - assert.Equal(t, len(firstFiles), len(secondFiles)) - sort.Strings(firstFiles) - sort.Strings(secondFiles) - for i := 0; i < len(firstFiles); i++ { - firstSnapshot, err := snap.Read(lg, firstFiles[i]) - if err != nil { - t.Fatal(err) - } - secondSnapshot, err := snap.Read(lg, secondFiles[i]) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, openSnap(patch(firstSnapshot.Data)), openSnap(patch(secondSnapshot.Data))) - } -} - -func openSnap(data []byte) v2store.Store { - st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix) - st.Recovery(data) - return st -} diff --git a/tests/e2e/v3_cipher_suite_test.go b/tests/e2e/v3_cipher_suite_test.go deleted file mode 100644 index 539a41618be..00000000000 --- a/tests/e2e/v3_cipher_suite_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !cov && !cluster_proxy - -package e2e - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestV3CurlCipherSuitesValid(t *testing.T) { testV3CurlCipherSuites(t, true) } -func TestV3CurlCipherSuitesMismatch(t *testing.T) { testV3CurlCipherSuites(t, false) } -func testV3CurlCipherSuites(t *testing.T, valid bool) { - cc := e2e.NewConfigClientTLS() - cc.ClusterSize = 1 - cc.CipherSuites = []string{ - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - } - testFunc := cipherSuiteTestValid - if !valid { - testFunc = cipherSuiteTestMismatch - } - testCtl(t, testFunc, withCfg(*cc)) -} - -func cipherSuiteTestValid(cx ctlCtx) { - if err := e2e.CURLGet(cx.epc, e2e.CURLReq{ - Endpoint: "/metrics", - Expected: fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version), - MetricsURLScheme: cx.cfg.MetricsURLScheme, - Ciphers: "ECDHE-RSA-AES128-GCM-SHA256", // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - }); err != nil { - require.ErrorContains(cx.t, err, fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version)) - } -} - -func cipherSuiteTestMismatch(cx ctlCtx) { - err := e2e.CURLGet(cx.epc, e2e.CURLReq{ - Endpoint: "/metrics", - Expected: "failed setting cipher list", - MetricsURLScheme: 
cx.cfg.MetricsURLScheme, - Ciphers: "ECDHE-RSA-DES-CBC3-SHA", // TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA - }) - require.ErrorContains(cx.t, err, "curl: (59) failed setting cipher list") -} diff --git a/tests/e2e/v3_curl_lease_test.go b/tests/e2e/v3_curl_lease_test.go deleted file mode 100644 index 82c297d00b0..00000000000 --- a/tests/e2e/v3_curl_lease_test.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "fmt" - "testing" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestV3CurlLeaseGrantNoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseGrant, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} -func TestV3CurlLeaseRevokeNoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseRevoke, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} -func TestV3CurlLeaseLeasesNoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseLeases, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} -func TestV3CurlLeaseKeepAliveNoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlLeaseKeepAlive, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} - -type v3cURLTest struct { - endpoint string - value string - expected string -} - -func testV3CurlLeaseGrant(cx ctlCtx) { - leaseID := e2e.RandomLeaseID() - - tests 
:= []v3cURLTest{ - { - endpoint: "/lease/grant", - value: gwLeaseGrant(cx, leaseID, 0), - expected: gwLeaseIDExpected(leaseID), - }, - { - endpoint: "/lease/grant", - value: gwLeaseGrant(cx, 0, 20), - expected: `"TTL":"20"`, - }, - { - endpoint: "/kv/put", - value: gwKVPutLease(cx, "foo", "bar", leaseID), - expected: `"revision":"`, - }, - { - endpoint: "/lease/timetolive", - value: gwLeaseTTLWithKeys(cx, leaseID), - expected: `"grantedTTL"`, - }, - } - if err := CURLWithExpected(cx, tests); err != nil { - cx.t.Fatalf("testV3CurlLeaseGrant: %v", err) - } -} - -func testV3CurlLeaseRevoke(cx ctlCtx) { - leaseID := e2e.RandomLeaseID() - - tests := []v3cURLTest{ - { - endpoint: "/lease/grant", - value: gwLeaseGrant(cx, leaseID, 0), - expected: gwLeaseIDExpected(leaseID), - }, - { - endpoint: "/lease/revoke", - value: gwLeaseRevoke(cx, leaseID), - expected: `"revision":"`, - }, - } - if err := CURLWithExpected(cx, tests); err != nil { - cx.t.Fatalf("testV3CurlLeaseRevoke: %v", err) - } -} - -func testV3CurlLeaseLeases(cx ctlCtx) { - leaseID := e2e.RandomLeaseID() - - tests := []v3cURLTest{ - { - endpoint: "/lease/grant", - value: gwLeaseGrant(cx, leaseID, 0), - expected: gwLeaseIDExpected(leaseID), - }, - { - endpoint: "/lease/leases", - value: "{}", - expected: gwLeaseIDExpected(leaseID), - }, - } - if err := CURLWithExpected(cx, tests); err != nil { - cx.t.Fatalf("testV3CurlLeaseGrant: %v", err) - } -} - -func testV3CurlLeaseKeepAlive(cx ctlCtx) { - leaseID := e2e.RandomLeaseID() - - tests := []v3cURLTest{ - { - endpoint: "/lease/grant", - value: gwLeaseGrant(cx, leaseID, 0), - expected: gwLeaseIDExpected(leaseID), - }, - { - endpoint: "/lease/keepalive", - value: gwLeaseKeepAlive(cx, leaseID), - expected: gwLeaseIDExpected(leaseID), - }, - } - if err := CURLWithExpected(cx, tests); err != nil { - cx.t.Fatalf("testV3CurlLeaseGrant: %v", err) - } -} - -func gwLeaseIDExpected(leaseID int64) string { - return fmt.Sprintf(`"ID":"%d"`, leaseID) -} - -func 
gwLeaseTTLWithKeys(cx ctlCtx, leaseID int64) string { - d := &pb.LeaseTimeToLiveRequest{ID: leaseID, Keys: true} - s, err := e2e.DataMarshal(d) - if err != nil { - cx.t.Fatalf("gwLeaseTTLWithKeys: error (%v)", err) - } - return s -} - -func gwLeaseKeepAlive(cx ctlCtx, leaseID int64) string { - d := &pb.LeaseKeepAliveRequest{ID: leaseID} - s, err := e2e.DataMarshal(d) - if err != nil { - cx.t.Fatalf("gwLeaseKeepAlive: Marshal error (%v)", err) - } - return s -} - -func gwLeaseGrant(cx ctlCtx, leaseID int64, ttl int64) string { - d := &pb.LeaseGrantRequest{ID: leaseID, TTL: ttl} - s, err := e2e.DataMarshal(d) - if err != nil { - cx.t.Fatalf("gwLeaseGrant: Marshal error (%v)", err) - } - return s -} - -func gwLeaseRevoke(cx ctlCtx, leaseID int64) string { - d := &pb.LeaseRevokeRequest{ID: leaseID} - s, err := e2e.DataMarshal(d) - if err != nil { - cx.t.Fatalf("gwLeaseRevoke: Marshal error (%v)", err) - } - return s -} - -func gwKVPutLease(cx ctlCtx, k string, v string, leaseID int64) string { - d := pb.PutRequest{Key: []byte(k), Value: []byte(v), Lease: leaseID} - s, err := e2e.DataMarshal(d) - if err != nil { - cx.t.Fatalf("gwKVPutLease: Marshal error (%v)", err) - } - return s -} diff --git a/tests/e2e/v3_curl_maxstream_test.go b/tests/e2e/v3_curl_maxstream_test.go deleted file mode 100644 index 11a95a9016b..00000000000 --- a/tests/e2e/v3_curl_maxstream_test.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "encoding/json" - "fmt" - "math/rand" - "sync" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -// TestV3Curl_MaxStreams_BelowLimit_NoTLS_Small tests no TLS -func TestV3Curl_MaxStreams_BelowLimit_NoTLS_Small(t *testing.T) { - testV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(3)) -} - -func TestV3Curl_MaxStreams_BelowLimit_NoTLS_Medium(t *testing.T) { - testV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second)) -} - -func TestV3Curl_MaxStreamsNoTLS_BelowLimit_Large(t *testing.T) { - f, err := setRLimit(10240) - if err != nil { - t.Fatal(err) - } - defer f() - testV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(1000), withTestTimeout(200*time.Second)) -} - -func TestV3Curl_MaxStreams_ReachLimit_NoTLS_Small(t *testing.T) { - testV3CurlMaxStream(t, true, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(3)) -} - -func TestV3Curl_MaxStreams_ReachLimit_NoTLS_Medium(t *testing.T) { - testV3CurlMaxStream(t, true, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second)) -} - -// TestV3Curl_MaxStreams_BelowLimit_TLS_Small tests with TLS -func TestV3Curl_MaxStreams_BelowLimit_TLS_Small(t *testing.T) { - testV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(3)) -} - -func TestV3Curl_MaxStreams_BelowLimit_TLS_Medium(t *testing.T) { - testV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second)) -} - -func 
TestV3Curl_MaxStreams_ReachLimit_TLS_Small(t *testing.T) { - testV3CurlMaxStream(t, true, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(3)) -} - -func TestV3Curl_MaxStreams_ReachLimit_TLS_Medium(t *testing.T) { - testV3CurlMaxStream(t, true, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second)) -} - -func testV3CurlMaxStream(t *testing.T, reachLimit bool, opts ...ctlOption) { - e2e.BeforeTest(t) - - // Step 1: generate configuration for creating cluster - t.Log("Generating configuration for creating cluster.") - cx := getDefaultCtlCtx(t) - cx.applyOpts(opts) - // We must set the `ClusterSize` to 1, otherwise different streams may - // connect to different members, accordingly it's difficult to test the - // behavior. - cx.cfg.ClusterSize = 1 - - // Step 2: create the cluster - t.Log("Creating an etcd cluster") - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(&cx.cfg)) - if err != nil { - t.Fatalf("Failed to start etcd cluster: %v", err) - } - cx.epc = epc - cx.dataDir = epc.Procs[0].Config().DataDirPath - - // Step 3: run test - // (a) generate ${concurrentNumber} concurrent watch streams; - // (b) submit a range request. 
- var wg sync.WaitGroup - concurrentNumber := cx.cfg.MaxConcurrentStreams - 1 - expectedResponse := `"revision":"` - if reachLimit { - concurrentNumber = cx.cfg.MaxConcurrentStreams - expectedResponse = "Operation timed out" - } - wg.Add(int(concurrentNumber)) - t.Logf("Running the test, MaxConcurrentStreams: %d, concurrentNumber: %d, expected range's response: %s\n", - cx.cfg.MaxConcurrentStreams, concurrentNumber, expectedResponse) - - closeServerCh := make(chan struct{}) - submitConcurrentWatch(cx, int(concurrentNumber), &wg, closeServerCh) - submitRangeAfterConcurrentWatch(cx, expectedResponse) - - // Step 4: Close the cluster - t.Log("Closing test cluster...") - close(closeServerCh) - assert.NoError(t, epc.Close()) - t.Log("Closed test cluster") - - // Step 5: Waiting all watch goroutines to exit. - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - wg.Wait() - }() - - timeout := cx.getTestTimeout() - t.Logf("Waiting test case to finish, timeout: %s", timeout) - select { - case <-time.After(timeout): - testutil.FatalStack(t, fmt.Sprintf("test timed out after %v", timeout)) - case <-doneCh: - t.Log("All watch goroutines exited.") - } - - t.Log("testV3CurlMaxStream done!") -} - -func submitConcurrentWatch(cx ctlCtx, number int, wgDone *sync.WaitGroup, closeCh chan struct{}) { - watchData, err := json.Marshal(&pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), - }, - }) - if err != nil { - cx.t.Fatal(err) - } - - var wgSchedule sync.WaitGroup - - createWatchConnection := func() error { - cluster := cx.epc - member := cluster.Procs[rand.Intn(cluster.Cfg.ClusterSize)] - curlReq := e2e.CURLReq{Endpoint: "/v3/watch", Value: string(watchData)} - - args := e2e.CURLPrefixArgs(cluster.Cfg, member, "POST", curlReq) - proc, err := e2e.SpawnCmd(args, nil) - if err != nil { - return fmt.Errorf("failed to spawn: %w", err) - } - defer proc.Stop() - - // make sure that watch request has been created - expectedLine := 
`"created":true}}` - _, lerr := proc.ExpectWithContext(context.TODO(), expectedLine) - if lerr != nil { - return fmt.Errorf("%v %v (expected %q). Try EXPECT_DEBUG=TRUE", args, lerr, expectedLine) - } - - wgSchedule.Done() - - // hold the connection and wait for server shutdown - perr := proc.Close() - - // curl process will return - select { - case <-closeCh: - default: - // perr could be nil. - return fmt.Errorf("unexpected connection close before server closes: %v", perr) - } - return nil - } - - testutils.ExecuteWithTimeout(cx.t, cx.getTestTimeout(), func() { - wgSchedule.Add(number) - - for i := 0; i < number; i++ { - go func(i int) { - defer wgDone.Done() - - if err := createWatchConnection(); err != nil { - cx.t.Fatalf("testV3CurlMaxStream watch failed: %d, error: %v", i, err) - } - }(i) - } - - // make sure all goroutines have already been scheduled. - wgSchedule.Wait() - }) -} - -func submitRangeAfterConcurrentWatch(cx ctlCtx, expectedValue string) { - rangeData, err := json.Marshal(&pb.RangeRequest{ - Key: []byte("foo"), - }) - if err != nil { - cx.t.Fatal(err) - } - - cx.t.Log("Submitting range request...") - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: "/v3/kv/range", Value: string(rangeData), Expected: expectedValue, Timeout: 5}); err != nil { - require.ErrorContains(cx.t, err, expectedValue) - } - cx.t.Log("range request done") -} - -// setRLimit sets the open file limitation, and return a function which -// is used to reset the limitation. 
-func setRLimit(nofile uint64) (func() error, error) { - var rLimit syscall.Rlimit - if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit); err != nil { - return nil, fmt.Errorf("failed to get open file limit, error: %v", err) - } - - var wLimit syscall.Rlimit - wLimit.Max = nofile - wLimit.Cur = nofile - if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &wLimit); err != nil { - return nil, fmt.Errorf("failed to set max open file limit, %v", err) - } - - return func() error { - if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit); err != nil { - return fmt.Errorf("failed reset max open file limit, %v", err) - } - return nil - }, nil -} diff --git a/tests/e2e/v3_curl_test.go b/tests/e2e/v3_curl_test.go deleted file mode 100644 index 0753e26e2ad..00000000000 --- a/tests/e2e/v3_curl_test.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "math/rand" - "path" - "strconv" - "testing" - - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/api/v3/authpb" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/testutil" - epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" - "go.etcd.io/etcd/tests/v3/framework/e2e" - - "github.com/grpc-ecosystem/grpc-gateway/runtime" -) - -var apiPrefix = []string{"/v3"} - -func TestV3CurlPutGetNoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} -func TestV3CurlPutGetAutoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigAutoTLS())) - } -} -func TestV3CurlPutGetAllTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigTLS())) - } -} -func TestV3CurlPutGetPeerTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigPeerTLS())) - } -} -func TestV3CurlPutGetClientTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlPutGet, withApiPrefix(p), withCfg(*e2e.NewConfigClientTLS())) - } -} -func TestV3CurlWatch(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlWatch, withApiPrefix(p)) - } -} -func TestV3CurlTxn(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlTxn, withApiPrefix(p)) - } -} -func TestV3CurlAuth(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlAuth, withApiPrefix(p)) - } -} -func TestV3CurlAuthClientTLSCertAuth(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlAuth, withApiPrefix(p), withCfg(*e2e.NewConfigClientTLSCertAuthWithNoCN())) - } -} - -func testV3CurlPutGet(cx ctlCtx) { - var ( - key = 
[]byte("foo") - value = []byte("bar") // this will be automatically base64-encoded by Go - - expectPut = `"revision":"` - expectGet = `"value":"` - ) - putData, err := json.Marshal(&pb.PutRequest{ - Key: key, - Value: value, - }) - if err != nil { - cx.t.Fatal(err) - } - rangeData, err := json.Marshal(&pb.RangeRequest{ - Key: key, - }) - if err != nil { - cx.t.Fatal(err) - } - - p := cx.apiPrefix - - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putData), Expected: expectPut}); err != nil { - cx.t.Fatalf("failed testV3CurlPutGet put with curl using prefix (%s) (%v)", p, err) - } - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/range"), Value: string(rangeData), Expected: expectGet}); err != nil { - cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err) - } - if cx.cfg.Client.ConnectionType == e2e.ClientTLSAndNonTLS { - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/range"), Value: string(rangeData), Expected: expectGet, IsTLS: true}); err != nil { - cx.t.Fatalf("failed testV3CurlPutGet get with curl using prefix (%s) (%v)", p, err) - } - } -} - -func testV3CurlWatch(cx ctlCtx) { - // store "bar" into "foo" - putreq, err := json.Marshal(&pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) - if err != nil { - cx.t.Fatal(err) - } - // watch for first update to "foo" - wcr := &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1} - wreq, err := json.Marshal(wcr) - if err != nil { - cx.t.Fatal(err) - } - // marshaling the grpc to json gives: - // "{"RequestUnion":{"CreateRequest":{"key":"Zm9v","start_revision":1}}}" - // but the gprc-gateway expects a different format.. 
- wstr := `{"create_request" : ` + string(wreq) + "}" - p := cx.apiPrefix - - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Expected: "revision"}); err != nil { - cx.t.Fatalf("failed testV3CurlWatch put with curl using prefix (%s) (%v)", p, err) - } - // expects "bar", timeout after 2 seconds since stream waits forever - err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/watch"), Value: wstr, Expected: `"YmFy"`, Timeout: 2}) - require.ErrorContains(cx.t, err, "unexpected exit code") -} - -func testV3CurlTxn(cx ctlCtx) { - txn := &pb.TxnRequest{ - Compare: []*pb.Compare{ - { - Key: []byte("foo"), - Result: pb.Compare_EQUAL, - Target: pb.Compare_CREATE, - TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 0}, - }, - }, - Success: []*pb.RequestOp{ - { - Request: &pb.RequestOp_RequestPut{ - RequestPut: &pb.PutRequest{ - Key: []byte("foo"), - Value: []byte("bar"), - }, - }, - }, - }, - } - m := &runtime.JSONPb{} - jsonDat, jerr := m.Marshal(txn) - if jerr != nil { - cx.t.Fatal(jerr) - } - expected := `"succeeded":true,"responses":[{"response_put":{"header":{"revision":"2"}}}]` - p := cx.apiPrefix - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/txn"), Value: string(jsonDat), Expected: expected}); err != nil { - cx.t.Fatalf("failed testV3CurlTxn txn with curl using prefix (%s) (%v)", p, err) - } - - // was crashing etcd server - malformed := `{"compare":[{"result":0,"target":1,"key":"Zm9v","TargetUnion":null}],"success":[{"Request":{"RequestPut":{"key":"Zm9v","value":"YmFy"}}}]}` - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/txn"), Value: malformed, Expected: "error"}); err != nil { - cx.t.Fatalf("failed testV3CurlTxn put with curl using prefix (%s) (%v)", p, err) - } - -} - -func testV3CurlAuth(cx ctlCtx) { - p := cx.apiPrefix - usernames := []string{"root", "nonroot", "nooption"} - pwds := []string{"toor", "pass", "pass"} - options := 
[]*authpb.UserAddOptions{{NoPassword: false}, {NoPassword: false}, nil} - - // create users - for i := 0; i < len(usernames); i++ { - user, err := json.Marshal(&pb.AuthUserAddRequest{Name: usernames[i], Password: pwds[i], Options: options[i]}) - testutil.AssertNil(cx.t, err) - - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/user/add"), Value: string(user), Expected: "revision"}); err != nil { - cx.t.Fatalf("failed testV3CurlAuth add user %v with curl (%v)", usernames[i], err) - } - } - - // create root role - rolereq, err := json.Marshal(&pb.AuthRoleAddRequest{Name: "root"}) - testutil.AssertNil(cx.t, err) - - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/role/add"), Value: string(rolereq), Expected: "revision"}); err != nil { - cx.t.Fatalf("failed testV3CurlAuth create role with curl using prefix (%s) (%v)", p, err) - } - - //grant root role - for i := 0; i < len(usernames); i++ { - grantroleroot, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"}) - testutil.AssertNil(cx.t, err) - - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/user/grant"), Value: string(grantroleroot), Expected: "revision"}); err != nil { - cx.t.Fatalf("failed testV3CurlAuth grant role with curl using prefix (%s) (%v)", p, err) - } - } - - // enable auth - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/auth/enable"), Value: "{}", Expected: "revision"}); err != nil { - cx.t.Fatalf("failed testV3CurlAuth enable auth with curl using prefix (%s) (%v)", p, err) - } - - for i := 0; i < len(usernames); i++ { - // put "bar[i]" into "foo[i]" - putreq, err := json.Marshal(&pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte(fmt.Sprintf("bar%d", i))}) - testutil.AssertNil(cx.t, err) - - // fail put no auth - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Expected: "error"}); err != nil { - cx.t.Fatalf("failed 
testV3CurlAuth no auth put with curl using prefix (%s) (%v)", p, err) - } - - // auth request - authreq, err := json.Marshal(&pb.AuthenticateRequest{Name: usernames[i], Password: pwds[i]}) - testutil.AssertNil(cx.t, err) - - var ( - authHeader string - cmdArgs []string - lineFunc = func(txt string) bool { return true } - ) - - cmdArgs = e2e.CURLPrefixArgs(cx.epc.Cfg, cx.epc.Procs[rand.Intn(cx.epc.Cfg.ClusterSize)], "POST", e2e.CURLReq{Endpoint: path.Join(p, "/auth/authenticate"), Value: string(authreq)}) - proc, err := e2e.SpawnCmd(cmdArgs, cx.envMap) - testutil.AssertNil(cx.t, err) - defer proc.Close() - - cURLRes, err := proc.ExpectFunc(context.Background(), lineFunc) - testutil.AssertNil(cx.t, err) - - authRes := make(map[string]interface{}) - testutil.AssertNil(cx.t, json.Unmarshal([]byte(cURLRes), &authRes)) - - token, ok := authRes[rpctypes.TokenFieldNameGRPC].(string) - if !ok { - cx.t.Fatalf("failed invalid token in authenticate response with curl using user (%v)", usernames[i]) - } - - authHeader = "Authorization: " + token - - // put with auth - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, "/kv/put"), Value: string(putreq), Header: authHeader, Expected: "revision"}); err != nil { - cx.t.Fatalf("failed testV3CurlAuth auth put with curl using prefix (%s) and user (%v) (%v)", p, usernames[i], err) - } - } -} - -func TestV3CurlCampaignNoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlCampaign, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} - -func testV3CurlCampaign(cx ctlCtx) { - cdata, err := json.Marshal(&epb.CampaignRequest{ - Name: []byte("/election-prefix"), - Value: []byte("v1"), - }) - if err != nil { - cx.t.Fatal(err) - } - cargs := e2e.CURLPrefixArgs(cx.epc.Cfg, cx.epc.Procs[rand.Intn(cx.epc.Cfg.ClusterSize)], "POST", e2e.CURLReq{ - Endpoint: path.Join(cx.apiPrefix, "/election/campaign"), - Value: string(cdata), - }) - lines, err := e2e.SpawnWithExpectLines(context.TODO(), cargs, cx.envMap, 
`"leader":{"name":"`) - if err != nil { - cx.t.Fatalf("failed post campaign request (%s) (%v)", cx.apiPrefix, err) - } - if len(lines) != 1 { - cx.t.Fatalf("len(lines) expected 1, got %+v", lines) - } - - var cresp campaignResponse - if err = json.Unmarshal([]byte(lines[0]), &cresp); err != nil { - cx.t.Fatalf("failed to unmarshal campaign response %v", err) - } - ndata, err := base64.StdEncoding.DecodeString(cresp.Leader.Name) - if err != nil { - cx.t.Fatalf("failed to decode leader key %v", err) - } - kdata, err := base64.StdEncoding.DecodeString(cresp.Leader.Key) - if err != nil { - cx.t.Fatalf("failed to decode leader key %v", err) - } - - rev, _ := strconv.ParseInt(cresp.Leader.Rev, 10, 64) - lease, _ := strconv.ParseInt(cresp.Leader.Lease, 10, 64) - pdata, err := json.Marshal(&epb.ProclaimRequest{ - Leader: &epb.LeaderKey{ - Name: ndata, - Key: kdata, - Rev: rev, - Lease: lease, - }, - Value: []byte("v2"), - }) - if err != nil { - cx.t.Fatal(err) - } - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{ - Endpoint: path.Join(cx.apiPrefix, "/election/proclaim"), - Value: string(pdata), - Expected: `"revision":`, - }); err != nil { - cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err) - } -} - -func TestV3CurlProclaimMissiongLeaderKeyNoTLS(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlProclaimMissiongLeaderKey, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} - -func testV3CurlProclaimMissiongLeaderKey(cx ctlCtx) { - pdata, err := json.Marshal(&epb.ProclaimRequest{Value: []byte("v2")}) - if err != nil { - cx.t.Fatal(err) - } - if err = e2e.CURLPost(cx.epc, e2e.CURLReq{ - Endpoint: path.Join(cx.apiPrefix, "/election/proclaim"), - Value: string(pdata), - Expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`, - }); err != nil { - cx.t.Fatalf("failed post proclaim request (%s) (%v)", cx.apiPrefix, err) - } -} - -func TestV3CurlResignMissiongLeaderKeyNoTLS(t 
*testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlResignMissiongLeaderKey, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} - -func testV3CurlResignMissiongLeaderKey(cx ctlCtx) { - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{ - Endpoint: path.Join(cx.apiPrefix, "/election/resign"), - Value: `{}`, - Expected: `{"error":"\"leader\" field must be provided","code":2,"message":"\"leader\" field must be provided"}`, - }); err != nil { - cx.t.Fatalf("failed post resign request (%s) (%v)", cx.apiPrefix, err) - } -} - -func TestV3CurlMaintenanceAlarmMissiongAlarm(t *testing.T) { - for _, p := range apiPrefix { - testCtl(t, testV3CurlMaintenanceAlarmMissiongAlarm, withApiPrefix(p), withCfg(*e2e.NewConfigNoTLS())) - } -} - -func testV3CurlMaintenanceAlarmMissiongAlarm(cx ctlCtx) { - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{ - Endpoint: path.Join(cx.apiPrefix, "/maintenance/alarm"), - Value: `{"action": "ACTIVATE"}`, - }); err != nil { - cx.t.Fatalf("failed post maintenance alarm (%s) (%v)", cx.apiPrefix, err) - } -} - -// to manually decode; JSON marshals integer fields with -// string types, so can't unmarshal with epb.CampaignResponse -type campaignResponse struct { - Leader struct { - Name string `json:"name,omitempty"` - Key string `json:"key,omitempty"` - Rev string `json:"rev,omitempty"` - Lease string `json:"lease,omitempty"` - } `json:"leader,omitempty"` -} - -func CURLWithExpected(cx ctlCtx, tests []v3cURLTest) error { - p := cx.apiPrefix - for _, t := range tests { - value := fmt.Sprintf("%v", t.value) - if err := e2e.CURLPost(cx.epc, e2e.CURLReq{Endpoint: path.Join(p, t.endpoint), Value: value, Expected: t.expected}); err != nil { - return fmt.Errorf("prefix (%s) endpoint (%s): error (%v), wanted %v", p, t.endpoint, err, t.expected) - } - } - return nil -} diff --git a/tests/e2e/zap_logging_test.go b/tests/e2e/zap_logging_test.go deleted file mode 100644 index 4b7399c753d..00000000000 --- a/tests/e2e/zap_logging_test.go +++ /dev/null @@ 
-1,77 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !cov - -package e2e - -import ( - "context" - "encoding/json" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -func TestServerJsonLogging(t *testing.T) { - e2e.BeforeTest(t) - - epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, - e2e.WithClusterSize(1), - e2e.WithLogLevel("debug"), - ) - if err != nil { - t.Fatalf("could not start etcd process cluster (%v)", err) - } - logs := epc.Procs[0].Logs() - time.Sleep(time.Second) - if err = epc.Close(); err != nil { - t.Fatalf("error closing etcd processes (%v)", err) - } - var entry logEntry - lines := logs.Lines() - if len(lines) == 0 { - t.Errorf("Expected at least one log line") - } - for _, line := range lines { - err := json.Unmarshal([]byte(line), &entry) - if err != nil { - t.Errorf("Failed to parse log line as json, err: %q, line: %s", err, line) - continue - } - if entry.Level == "" { - t.Errorf(`Missing "level" key, line: %s`, line) - } - if entry.Timestamp == "" { - t.Errorf(`Missing "ts" key, line: %s`, line) - } - if _, err := time.Parse("2006-01-02T15:04:05.000Z0700", entry.Timestamp); entry.Timestamp != "" && err != nil { - t.Errorf(`Unexpected "ts" key format, err: %s`, err) - } - if entry.Caller == "" { - t.Errorf(`Missing "caller" key, line: %s`, line) - } - if entry.Message == "" { - t.Errorf(`Missing "message" key, line: %s`, line) - } - } -} - -type 
logEntry struct { - Level string `json:"level"` - Timestamp string `json:"ts"` - Caller string `json:"caller"` - Message string `json:"msg"` -} diff --git a/tests/fixtures/CommonName-root.crt b/tests/fixtures/CommonName-root.crt deleted file mode 100644 index d786c80d668..00000000000 --- a/tests/fixtures/CommonName-root.crt +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIE5zCCA8+gAwIBAgIJAKooGDZuR2mMMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAkNOMRAwDgYDVQQIDAdCZWlqaW5nMRAwDgYDVQQHDAdCZWlqaW5nMQ0wCwYD -VQQKDAREZW1vMQ0wCwYDVQQLDAREZW1vMQ0wCwYDVQQDDARyb290MR8wHQYJKoZI -hvcNAQkBFhB0ZXN0QGV4YW1wbGUuY29tMB4XDTIyMTExNjA2NTI1M1oXDTMyMTEx -MzA2NTI1M1owfzELMAkGA1UEBhMCQ04xEDAOBgNVBAgMB0JlaWppbmcxEDAOBgNV -BAcMB0JlaWppbmcxDTALBgNVBAoMBERlbW8xDTALBgNVBAsMBERlbW8xDTALBgNV -BAMMBHJvb3QxHzAdBgkqhkiG9w0BCQEWEHRlc3RAZXhhbXBsZS5jb20wggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDEAKcjzhtOG3hWbAUCbudE1gPOeteT -0INk2ngN2uCMYjYSZmaGhW/GZk3EvV7wKVuhdTyrh36E5Iajng9d2t1iOU/8jROU -+uAyrS3C/S5P/urq8VBUrt3VG/44bhwTEdafNnAWQ6ojYfmK0tRqoQn1Ftm30l8I -nWof5Jm3loNA2WdNdvAp/D+6OpjUdqGdMkFd0NhkuQODMnycBMw6btUTj5SnmrMk -q7V1aasx4BqN5C4DciZF0pyyR/TT8MoQ5Vcit8rHvQUyz42Lj8+28RkDoi4prJ1i -tLaCt2egDp58vXlYQZTd50inMhnBIapKNdGpg3flW/8AFul1tCTqd8NfAgMBAAGj -ggFkMIIBYDAdBgNVHQ4EFgQUpwwvEqXjA/ArJu1Jnpw7+/sttOAwgbMGA1UdIwSB -qzCBqIAUpwwvEqXjA/ArJu1Jnpw7+/sttOChgYSkgYEwfzELMAkGA1UEBhMCQ04x -EDAOBgNVBAgMB0JlaWppbmcxEDAOBgNVBAcMB0JlaWppbmcxDTALBgNVBAoMBERl -bW8xDTALBgNVBAsMBERlbW8xDTALBgNVBAMMBHJvb3QxHzAdBgkqhkiG9w0BCQEW -EHRlc3RAZXhhbXBsZS5jb22CCQCqKBg2bkdpjDAMBgNVHRMEBTADAQH/MAsGA1Ud -DwQEAwIC/DA2BgNVHREELzAtggtleGFtcGxlLmNvbYINKi5leGFtcGxlLmNvbYIJ -bG9jYWxob3N0hwR/AAABMDYGA1UdEgQvMC2CC2V4YW1wbGUuY29tgg0qLmV4YW1w -bGUuY29tgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAGi48ntm -8cn08FrsCDWapsck7a56/dyFyzLg10c0blu396tzC3ZDCAwQYzHjeXVdeWHyGO+f -KSFlmR6IA0jq6pFhUyJtgaAUJ91jW6s68GTVhlLoFhtYjy6EvhQ0lo+7GWh4qB2s -LI0mJPjaLZY1teAC4TswzwMDVD8QsB06/aFBlA65VjgZiVH+aMwWJ88gKfVGp0Pv 
-AApsy5MvwQn8WZ2L6foSY04OzXtmAg2gCl0PyDNgieqFDcM1g7mklHNgWl2Gvtte -G6+TiB3gGUUlTsdy0+LS2psL71RS5Jv7g/7XGmSKBPqRmYyQ2t7m2kLPwWKtL5tE -63c0FPtpV0FzKdU= ------END CERTIFICATE----- diff --git a/tests/fixtures/CommonName-root.key b/tests/fixtures/CommonName-root.key deleted file mode 100644 index 046b4a58fbd..00000000000 --- a/tests/fixtures/CommonName-root.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAxACnI84bTht4VmwFAm7nRNYDznrXk9CDZNp4DdrgjGI2EmZm -hoVvxmZNxL1e8ClboXU8q4d+hOSGo54PXdrdYjlP/I0TlPrgMq0twv0uT/7q6vFQ -VK7d1Rv+OG4cExHWnzZwFkOqI2H5itLUaqEJ9RbZt9JfCJ1qH+SZt5aDQNlnTXbw -Kfw/ujqY1HahnTJBXdDYZLkDgzJ8nATMOm7VE4+Up5qzJKu1dWmrMeAajeQuA3Im -RdKcskf00/DKEOVXIrfKx70FMs+Ni4/PtvEZA6IuKaydYrS2grdnoA6efL15WEGU -3edIpzIZwSGqSjXRqYN35Vv/ABbpdbQk6nfDXwIDAQABAoIBAA5AMebTjH6wVp6J -+g9EOwJxQROZMOVparRBgisXt+3dEitiUKAFQaw+MfdVAXsatrPVj1S1ZEiLSRLK -YjmjuSb0HdGx/DN/zh9BIiukNuLQGQp+AyY1FKHzCBfYQahNSrqGvb2Qq+UosXkb -fSBHly6/u5K28/vvXhD1kQudIOvtAc9tOg8LZnM6N3J4E0GtLqWimRZ4jNK4APu1 -YsLIg87Eam+7x25+phz9xc22tZ1H4WY9FnOGprPnievqiV7mgcNGAklTB93C6yX1 -EI+QxQnPg0P732C4EJZFDPqhVRA4E7BUb5uTIXCJBA/FFuRIx9ppyLZKt9vjTchM -8YWIEsECgYEA/5DRR9FkIWJZb0Pv3SCc53PMPT/xpYB6lH2lGtG+u+L71dJNDiPt -da3dPXSBy+aF7BbmRDawRvyOLGArlWiSsoEUVlES8BYzQ1MmfDf+MJooJoBE6/g6 -2OyyNnPde1GqyxsxgNTITvJCTjYH64lxKVRYfMgMAASK49SjYiEgGn8CgYEAxFXs -Oe0sUcc3P1cQ9pJfSVKpSczZq/OGAxqlniqRHvoWgFfKOWB6F9PN0rd8G2aMlfGS -BjyiPe770gtpX8Z4G4lrtkJD8NvGoVC8yX78HbrXL2RA4lPjQfrveUnwXIRbRKWa -6D/GAYPOuNvJmwF4hY/orWyIqvpNczIjTjs1JyECgYEAvhuNAn6JnKfbXYBM+tIa -xbWHFXzula2IAdOhMN0bpApKSZmBxmYFa0elTuTO9M2Li77RFacU5AlU/T+gzCiZ -D34jkb4Hd18cTRWaiEbiqGbUPSennVzu8ZTJUOZJuEVc5m9ZGLuwMcHWfvWEWLrJ -2fOrS09IVe8LHkV8MC/yAKMCgYBmDUdhgK9Fvqgv60Cs+b4/rZDDBJCsOUOSP3qQ -sQ2HrXSet4MsucIcuoJEog0HbRFsKwm85i1qxdrs/fOCzfXGUnLDZMRN4N7pIL9Q -eQnxJhoNzy2Otw3sUNPDFrSyUjXig7X2PJfeV7XPDqdHQ8dynS/TXRPY04wIcao6 -Uro5IQKBgFUz2GjAxI6uc7ihmRv/GYTuXYOlO0IN7MFwQDd0pVnWHkLNZscO9L9/ 
-ALV4g1p/75CewlQfyC8ynOJJWcDeHHFNsSMsOzAxUOVtVenaF/dgwk95wpXj6Rx6 -4kvQqnJg97fRBbyzvQcdL36kL8+pbmHNoqHPwxbuigYShB74d6/h ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/ca-csr.json b/tests/fixtures/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/fixtures/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/fixtures/ca.crt b/tests/fixtures/ca.crt deleted file mode 100644 index 8e3737207a3..00000000000 --- a/tests/fixtures/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDrjCCApagAwIBAgIUNkN+TZ3hgHno+H9j56nWkmb4dBEwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDZwQPFZB+Kt6RIzYvTgbNlRIX/cLVknIy4ZqhLYDQNOdosJn04jjkCfS3k -F5JZuabkUs6d6JcLTbLWV5hCrwZVlCFf3PDn6DvK12GZpybhuqMPZ2T8P2U17AFP -mUj/Rm+25t8Er5r+8ijZmqVi1X1Ef041CFGESr3KjaMjec2kYf38cfEOp2Yq1JWO -0wpVfLElnyDQY9XILdnBepCRZYPq1eW1OSkRk+dZQnJP6BO95IoyREDuBUeTrteR -7dHHTF9AAgR5tnyZ+eLuVUZ2kskcWLxH3y9RyjvVJ+1uCzbdydVPf0H1pBoqWcuA -PYjYkLKMOKBWfYJhSzykhf+QMC7xAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQpJiv07dkY9WB0zgB6wOb/HMi8oDAN -BgkqhkiG9w0BAQsFAAOCAQEA0TQ8rRmLt4wYjz0BKh+jElMIg6LBPsCPpfmGDLmK -fdj4Jp7QFlLmXlQSmm8zKz3ftKoOFPYGQYHUkIirIrQB/tdHoXJLgxCzI0SrdCiM -m/DPVjfOTa9Mm5rPcUR79rGDLj2BgzDB+NTETVDXo8mAL5MjFdUyh6jOGBctkCG/ -TWdUaN33ZLwUl488NLaw98fIZ/F4d/dsyCJvHEaoo++dgjduoQxmH9Scr2Frmd8G 
-zYxOoZHG3ARBDp2mpr+I3UCR1/KTITF/NXL6gDcNY3wyZzoaGua7Bd/ysMSi1w3j -CyvClSvRPJRLQemGUP7B/Y8FUkbJ2i/7tz6ozn8sLi3V2Q== ------END CERTIFICATE----- diff --git a/tests/fixtures/client-ca-csr-nocn.json b/tests/fixtures/client-ca-csr-nocn.json deleted file mode 100644 index 34425513934..00000000000 --- a/tests/fixtures/client-ca-csr-nocn.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "", - "hosts": [ - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/fixtures/client-clientusage.crt b/tests/fixtures/client-clientusage.crt deleted file mode 100644 index 71b305fbc01..00000000000 --- a/tests/fixtures/client-clientusage.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIECDCCAvCgAwIBAgIULbzkAv8zbkJzZIRDPnBwXl0/BH0wDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDWBNo9tYRoQKv76xabz0EPXGJKHIrUjf0NbXz3d9jbP2sH -3hutXr/A221pULfZYIZdaUtmEuEr1905nYwJ2gnO9Y/iSc6fQ/4EjoT+VZLdINQw -I1dG2rtv2ZuYL5oYfgCjLkV1LzYuyfY/zJ93WoJW0YA0t50MEQNGEqD7pYlhsPej -iGyjagSi7zsoAkAagNprULH6RyAqDG7db+MfJOUzHUv4PWGBXPb0PHY3xA+WayFB -nP5AZO16oDh/UnzvfEAJULXeIOLs4eOmtzKMwZwrWzgCB+jBeVlc1FOwXQcmBamN -eYUs75GoO9aSSLROvnQiw2P0z0xVNmDokDXGsSRxAgMBAAGjgZIwgY8wDgYDVR0P -AQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD -VR0OBBYEFCB4ysDF81d6lkKIvebj08BcRWNoMB8GA1UdIwQYMBaAFCkmK/Tt2Rj1 -YHTOAHrA5v8cyLygMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG -9w0BAQsFAAOCAQEAo2B+piCBTjdpCLFj/kc+A0alZTbNdr0+BTsN+5aBE9k4JlZS 
-smkIQL0vyzjKw/W/o2EyPVcVKJX52/GQsC3bQrBb2lH1jRYgt5pRo24kKHy4Nlc3 -IaYg++ssfT2ZdpYiL3lzLyOHEumcynz3nI5M81e5CCIdEennxaM8FuiYN5OXDOR3 -j+bCYHLYPaWYZopfiSrnq+Z4gRUS2sMI1yqtiPSUdIJLnTfyEEdexvs/KUtFWvFO -4AcecKvT6HA8oNDiWfE6e854uDLTkbXW1rK+FWPU9pv5NR50+GBCvxvmDGtGXxQu -yu+kOsx2gfgNc4idIv1pjZF/1YzrrKGAhChN2A== ------END CERTIFICATE----- diff --git a/tests/fixtures/client-clientusage.key.insecure b/tests/fixtures/client-clientusage.key.insecure deleted file mode 100644 index ea139257cbc..00000000000 --- a/tests/fixtures/client-clientusage.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA1gTaPbWEaECr++sWm89BD1xiShyK1I39DW1893fY2z9rB94b -rV6/wNttaVC32WCGXWlLZhLhK9fdOZ2MCdoJzvWP4knOn0P+BI6E/lWS3SDUMCNX -Rtq7b9mbmC+aGH4Aoy5FdS82Lsn2P8yfd1qCVtGANLedDBEDRhKg+6WJYbD3o4hs -o2oEou87KAJAGoDaa1Cx+kcgKgxu3W/jHyTlMx1L+D1hgVz29Dx2N8QPlmshQZz+ -QGTteqA4f1J873xACVC13iDi7OHjprcyjMGcK1s4AgfowXlZXNRTsF0HJgWpjXmF -LO+RqDvWkki0Tr50IsNj9M9MVTZg6JA1xrEkcQIDAQABAoIBAAGBZTub5EOLeOo7 -vBv6eD2wa6yTyNI38Xi/tWpUOH1KU+lpQY6VpQmpQXrFK5Xm3OsZS4N7TIQvb4nx -NsP2+aywA4QW+tIZ+1Zy3jKfzXmqunNgPEPuU/U0dai7ZP0ZHc4IDEsHuvzXRNks -Ck8fnt0XeixkwkEMeZZrmSBMCMxcHAWxiv+oXF+olN3vTD2aDC8T6YwahMyQUQfW -IA9fuO8Dzzmk2I7mDHa29cbB+PW4E5tkJmHVZqEu8jPgMjCJGc2IR1YpLAXF8YBB -vgh6ZgI6JOg1OiNETuQekamAMOblFVOdPUjPSxuyJzEE8VpIdD3Z9UMNq+FDQh/F -j1lEEEECgYEA9nYwUh+e0H9c9IRBLNYAbq2PV4SpFKvFrHOTQpylMPisUTgdHKLT -CvO1wbNprElBAulOWobCyKshWGd5ECFsCvsWS6xmGi442q3ov5xtAMmvSmtW8s+8 -tUeVRQGS/Yn5Uxj2msUPe6vJEniLgsxmbFbDYqvr65COrAsCDEY3DkkCgYEA3k09 -EGhiO1joDtJPI21vUzzecBuep32oKiwip3OgS/mct04/QR+6lp1x4sPMYlyxbyk9 -jPdkzU07d8r+mES9RweE5lc1aCaF5eA8y6qtL9vBgsXRiEXlpYLxb0TOQaYNU0qM -aYumYPWjsjwYDvRKaVzThFUkYwapKFqtMV98BOkCgYAkIOkucLIwMCtpMKX5M5m2 -n7yegLTkcdW1VO/mWN4iUqG3+jjSRNAZD+a58VnxRn/ANIEm5hBRqDxoICrwAWY8 -Kdh32VrSRapR7CJtTDnyXp5Sk2+YgnlQPaEVD4kDn6Er3EHyKCb/4wvDqGYTE3GE -OifEJB2eV3+Cms5/DB/v+QKBgFzV8r9saEGSkm7GI2iPJiOj0t0Mm8gksNrTzbES 
-l4nC91CR69adkoWdwNbLoAof3bWnil3ZXw5hx4jyjDo40rbcDANJvjL9i4OBjsIb -R/Ipmvmq9SMs1Ye2VG98U4qU9xGmm1bkjBoH21HuyLlOCdlQe8DS8bwtJu2EWLm6 -v4cpAoGAP3pqi6iIZAbJVqw6p5Xd/BLzn3iS+lwQjwF/IWp5UnFCGovnVSJG2wqP -kxL9jy4asMDDuMKzEzO3+UT6WBRI+idV8PgDNEYkXcnVAA5lZ+2kCJwRICsC6MYH -1nIHJtPngUrwT3TUhMp/WfpYUjTdiOC3aJmKq/NGZxE8/Sb3G6U= ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/client-nocn.crt b/tests/fixtures/client-nocn.crt deleted file mode 100644 index a75a701913c..00000000000 --- a/tests/fixtures/client-nocn.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID/DCCAuSgAwIBAgIUCzIuVb3586z5C2rQ65jeo4wfbfowDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKmOrIfZ9mH9 -O3wLgGinUXDAG+XAP6P6NG9VkWaCUfOkY8x8RKSeuOri31EgYGmFYmQXCtS/WlHD -GCLrUhTnIrC1/WqvuPJIoMMTw7JLh59IuIWdlxds7FWjyuLmi4oUHvCG6aXiT/Z3 -ylp4r/HBL+R6KKqQpRjFfwhb1bIWpxZe5ghUtx4AuAW7ayQgpC7FJ3aVW/SS5p0m -IxyKqGvl45IsZuZY59Sa/X2AWSRpr+qe0tM4n1R+1bDhjcV6EuhyfubdSkZHfUJp -PaoUdynHT/VuI5xMF4OXbiwXP36XvHiHd9LIrPOyubrRYvn8dKweBJkvNCnlQo09 -zVH5zb9p0DsCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI -KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFG5evtY/ -UIPMBcah3B/1BWDI14nUMB8GA1UdIwQYMBaAFCkmK/Tt2Rj1YHTOAHrA5v8cyLyg -MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA -VBjy5UtSe/f66d7dKgZVVfKDiOeSb1knATSy7/JyubxVgq64yTN6fqIYRQg4gyVW -IPf8W4BbhEXeA7VumVuTTKjILoufGecjrjA1Skb4lWGfV21A51Fs9TcMLPiQYZ1b -e2J2Trtd0CsteQj4BDrbgiSxahJBaj+4PfXM1tef51DJs+gEg16DGxdzFBtlY+ih -SwOX6YcUyxYzYX2szafPpVRuQqU0B63FkvBbsNMX1KamtAsLtvf/JxYpPY9eg5t/ -b5L6pXQkp6bK3q8Gv1WApjD8tcwqBkcJrbjgJ6gfW9h3zEbLmxkAv46sJodVLInL -SYrHgrQ7TRd29DybB6cPAQ== 
------END CERTIFICATE----- diff --git a/tests/fixtures/client-nocn.key.insecure b/tests/fixtures/client-nocn.key.insecure deleted file mode 100644 index 87f20c64689..00000000000 --- a/tests/fixtures/client-nocn.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAqY6sh9n2Yf07fAuAaKdRcMAb5cA/o/o0b1WRZoJR86RjzHxE -pJ646uLfUSBgaYViZBcK1L9aUcMYIutSFOcisLX9aq+48kigwxPDskuHn0i4hZ2X -F2zsVaPK4uaLihQe8IbppeJP9nfKWniv8cEv5HooqpClGMV/CFvVshanFl7mCFS3 -HgC4BbtrJCCkLsUndpVb9JLmnSYjHIqoa+Xjkixm5ljn1Jr9fYBZJGmv6p7S0zif -VH7VsOGNxXoS6HJ+5t1KRkd9Qmk9qhR3KcdP9W4jnEwXg5duLBc/fpe8eId30sis -87K5utFi+fx0rB4EmS80KeVCjT3NUfnNv2nQOwIDAQABAoIBAECPnM4VhiUFgTLY -RkqS+wWNgJHYw+KyEGkcEcMQeBfnTkC8SH7OGOcG/7UqOMu1CCPISk17lu5u9K/H -HnfrEmBqy1VmF2vZj6z3x5oJ/FgAHpJx0OgQh2SMe2IuGo+23ZkEJc8N/xh/wEL2 -lTfeMVgz02wuq05lVNtf7FxlF7YCSaxxxDtQQTDR3BSq6l12tB81TQvAD+yh35Gs -1jGhPeKHWc1jny309vczpJq4eIK2xhE+MT8YZAiuHCLGOHUlBBpleo5knyMueVE/ -/Ezbz6eFiIFYpoHA3d3pv3Dy+5WVnhD0YDQPe+jCQrzxyFGDiN488JQ2tVeRM85b -q0naaZECgYEA1T8XWPqRkhjMy0vJxTVux+wdD3u9DIvgBfHxjBUS2xlZdOiLLmBD -CDVLKe+Twn0KiTb0eU+zNn4g1qnxLXmAH7xYWPLtqoI4mM0O89SWxr06ExplamHp -w5k5O3eJr0veKyCUqVbZRZsOQLi1zqEbaOqpA7TrsQOOT5io+0vVoV0CgYEAy407 -JRaGBTBNOPayBVFY+7PRsSRPtcjzbOHriCe4rDn8aIPPmzHyWEIL0pXk5I1eW978 -veC/2oZMsxO2vaKta1bSSOrNA8UJQ+t5Ipp6Fj6yAI5dMDcgOIctE8ctxDUfccQM -kS5DDw0W3zYMI7ixyOe6ydX4OAlcpZgqFpNIJncCgYEAuB1pAyIUXZeb+krNQsAH -jgWGcb/cUeDS408pxlDLnvAcFJxSzw+90HBzHRoE8X8UgbQ5ECSIDxyHLdA8s46b -2Mq9XM8h9H3Kb+NcbZm3NJBce/Hmbhtrwb2hdH6ZGgjfIU1YDX02yqo9fBP+pRDk -oYk5tEGY3ZS8YmzkOVQYduECgYACgnNAOc7dMYNCOIhpWF9oewcS0AfLjfayWPa2 -bwbv2KcsArQEjdEXFXlf10lDKBsJtu4WyTaUUyOO8adHH0JUGHXvQDXW3g8HL1gG -/TCUJaG8MAUmGwfiqof7vnDqAl2o4WnmQFPDU738coYjypsmhvTemCy/RB5ITF/4 -d0hkcQKBgAWpzCnPAh4tPWw1OGE2QSsbRR15hR+67BltiZ+nxJnDcXXS2i08QBkA -3VR0ywWsos+Sox6jm8LpH8RiKqZ5laUjHHUUuX1Tgfxn4EmHo6bBffw7k9vkY7xr -w5Nw/gMRevkRrDQ4Z66z2HspyCHfmdPzWX9zsaSc4nzNs7fw2/uf ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/gencert.json 
b/tests/fixtures/gencert.json deleted file mode 100644 index 3a88f68d9a9..00000000000 --- a/tests/fixtures/gencert.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - }, - "profiles": { - "client-only": { - "usages": [ - "signing", - "key encipherment", - "client auth" - ], - "expiry": "87600h" - }, - "server-only": { - "usages": [ - "signing", - "key encipherment", - "server auth" - ], - "expiry": "87600h" - } - } - } -} diff --git a/tests/fixtures/gencerts.sh b/tests/fixtures/gencerts.sh deleted file mode 100755 index e4226fca012..00000000000 --- a/tests/fixtures/gencerts.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -set -e - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! which cfssl; then - echo "cfssl is not installed" - echo "use: go install -mod mod github.com/cloudflare/cfssl/cmd/cfssl github.com/cloudflare/cfssl/cmd/cfssljson" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt - -if which openssl >/dev/null; then - openssl x509 -in ca.crt -noout -text -fi - -# gencert [config_file.json] [cert-name] -function gencert { - cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - $1 | cfssljson --bare ./$2 - mv $2.pem $2.crt - mv $2-key.pem $2.key.insecure -} - -# generate DNS: localhost, IP: 127.0.0.1, CN: example.com certificates, with dual usage -gencert ./server-ca-csr.json server - -#generates certificate that only has the 'server auth' usage -gencert "--profile=server-only ./server-ca-csr.json" server-serverusage - -#generates certificate that only has the 'client auth' usage -gencert "--profile=client-only ./server-ca-csr.json" client-clientusage - -#generates certificate that does not contain CN, to be used for proxy -> server connections. 
-gencert ./client-ca-csr-nocn.json client-nocn - -# generate DNS: localhost, IP: 127.0.0.1, CN: example.com certificates (ECDSA) -gencert ./server-ca-csr-ecdsa.json server-ecdsa - -# generate IP: 127.0.0.1, CN: example.com certificates -gencert ./server-ca-csr-ip.json server-ip - -# generate IPv6: [::1], CN: example.com certificates -gencert ./server-ca-csr-ipv6.json server-ipv6 - -# generate DNS: localhost, IP: 127.0.0.1, CN: example2.com certificates -gencert ./server-ca-csr2.json server2 - -# generate DNS: localhost, IP: 127.0.0.1, CN: "" certificates -gencert ./server-ca-csr3.json server3 - -# generate wildcard certificates DNS: *.etcd.local -gencert ./server-ca-csr-wildcard.json server-wildcard - -# generate revoked certificates and crl -cfssl gencert --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json 2>revoked.stderr | cfssljson --bare ./server-revoked -mv server-revoked.pem server-revoked.crt -mv server-revoked-key.pem server-revoked.key.insecure -grep serial revoked.stderr | awk ' { print $9 } ' >revoke.txt -cfssl gencrl revoke.txt ca.crt ca-key.pem | base64 --decode >revoke.crl - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/fixtures/revoke.crl b/tests/fixtures/revoke.crl deleted file mode 100644 index 61862f41b91..00000000000 Binary files a/tests/fixtures/revoke.crl and /dev/null differ diff --git a/tests/fixtures/server-ca-csr-ecdsa.json b/tests/fixtures/server-ca-csr-ecdsa.json deleted file mode 100644 index c9c71f00abd..00000000000 --- a/tests/fixtures/server-ca-csr-ecdsa.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "key": { - "algo": "ecdsa", - "size": 256 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example.com", - "hosts": [ - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/fixtures/server-ca-csr-ip.json b/tests/fixtures/server-ca-csr-ip.json deleted file mode 100644 index 2b2c4350ba1..00000000000 --- 
a/tests/fixtures/server-ca-csr-ip.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example.com", - "hosts": [ - "127.0.0.1" - ] -} diff --git a/tests/fixtures/server-ca-csr-ipv6.json b/tests/fixtures/server-ca-csr-ipv6.json deleted file mode 100644 index be8aac42fa5..00000000000 --- a/tests/fixtures/server-ca-csr-ipv6.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example.com", - "hosts": [ - "::1" - ] -} diff --git a/tests/fixtures/server-ca-csr-wildcard.json b/tests/fixtures/server-ca-csr-wildcard.json deleted file mode 100644 index 43e35ae6e4f..00000000000 --- a/tests/fixtures/server-ca-csr-wildcard.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example.com", - "hosts": [ - "*.etcd.local", - "etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/fixtures/server-ca-csr.json b/tests/fixtures/server-ca-csr.json deleted file mode 100644 index 272cf841d2d..00000000000 --- a/tests/fixtures/server-ca-csr.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example.com", - "hosts": [ - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/fixtures/server-ca-csr2.json b/tests/fixtures/server-ca-csr2.json deleted file mode 100644 index 0907ad23c9b..00000000000 --- a/tests/fixtures/server-ca-csr2.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - 
"names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example2.com", - "hosts": [ - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/fixtures/server-ca-csr3.json b/tests/fixtures/server-ca-csr3.json deleted file mode 100644 index 34425513934..00000000000 --- a/tests/fixtures/server-ca-csr3.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "", - "hosts": [ - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/fixtures/server-ecdsa.crt b/tests/fixtures/server-ecdsa.crt deleted file mode 100644 index 8900d7a91b5..00000000000 --- a/tests/fixtures/server-ecdsa.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDRzCCAi+gAwIBAgIUVE0fLzH6W4M2gJVJhmQdkQdKpO0wDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjO -PQMBBwNCAAREhCklwbvzFozNPkr3Y5PGrQr1ygfL5Q+XhvPOTTEjEN/zwjw9L0Qa -jfhE8Md89qED0j8xHAKeQRrulgv/FWXXo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAd -BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNV -HQ4EFgQUXJTZKpg0EYo+wtiYmacwd1OSKfgwHwYDVR0jBBgwFoAUKSYr9O3ZGPVg -dM4AesDm/xzIvKAwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3 -DQEBCwUAA4IBAQBd3RuqNsDxYS/RRc9Df8gLaXn/QQhATx6s3+pKHYplIH9sGPCh -ybI4MpwLnuoqxew8dxy7oi/BBXPWSUuVznRV/vLKAIuULoKg2Eb06d17OmqOaakl -asGnJ7z9e6mxHPVDzjkORNlJShY4YOG0tUg4hC5/9Qxh6EGNUKtRC3x4Tm8Jl6me -uGLUjsQV7YhQNRDFECUQmKwolEbwXbAi2SN3I37CBFDFwDT/0BxtfGSn0ZiXRHze 
-k1dmg9V3r9UPcucb3Djoad/N5YClfFtX/ANC8bufkkdfQLQwIBCUwcPlGxrBAVoD -BoqpmQdpQ/yINKesAD/r5dF2SmUEhZhn6GSK ------END CERTIFICATE----- diff --git a/tests/fixtures/server-ecdsa.key.insecure b/tests/fixtures/server-ecdsa.key.insecure deleted file mode 100644 index 9f6b0f8a56b..00000000000 --- a/tests/fixtures/server-ecdsa.key.insecure +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIEmvbcwNyqDHWXBG2IHZffLme5Ti8oHYzaapBvwkRSWWoAoGCCqGSM49 -AwEHoUQDQgAERIQpJcG78xaMzT5K92OTxq0K9coHy+UPl4bzzk0xIxDf88I8PS9E -Go34RPDHfPahA9I/MRwCnkEa7pYL/xVl1w== ------END EC PRIVATE KEY----- diff --git a/tests/fixtures/server-ip.crt b/tests/fixtures/server-ip.crt deleted file mode 100644 index 84acc9c2b60..00000000000 --- a/tests/fixtures/server-ip.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEBzCCAu+gAwIBAgIUIqRH3sc1siaGkZVpWKSoDAIEMucwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDUTSITIyfyfyE+fdQGM8sali6fk4pFCpuODbljr/ywVI81 -/MrLGhX/zySgRHKKG5f23CWbImgtIsPbScKxNNQcvDRXmbsLtlH/o7Eoun/e0aGp -rzr0p0QNGGeKFfUBTnaB+Z7+V92oxjNAuyeMZstqJxjOWDGpCS+yFVvP/ElRsL2H -JVZHWOykwKdLznRUjlw//PcvJrNsO9DzYluZ6tDqlN5nyB6aW0h9ZkcCskGDo+1V -94tjh5rGTscREVIWxVxHgLMFlvaEJlz64pVgc8VWD6famaiqP5nC0WOx5BJtSrmF -3WH97DkfVcXWpqUbEAey6G4sLU0a/08iKoWbJmbvAgMBAAGjgZEwgY4wDgYDVR0P -AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB -Af8EAjAAMB0GA1UdDgQWBBQiwt5ZVBlE2nrYnp5z4R6ADUnCwTAfBgNVHSMEGDAW -gBQpJiv07dkY9WB0zgB6wOb/HMi8oDAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3 -DQEBCwUAA4IBAQAh5Jxw4TbDnQJMzj53KxEgNxbd++p7LMhZkN5X8jtuDe81rzeQ 
-CyvJlrLEVPKbXiQF6cFV3TOvrZY/PM8UoAcXv0noEtrlRrrjbk9e4My3Zu4O1IHB -MvfXOuF8JN5L3kCrcUcjhMrx8XTLyathSTQxG6TCh7X4+/vXufbZkkzg8nhtSSWB -t7hWYo3KA25TgjP4E68BpnddNe9ad2sMIpIM1ovhhW6v8Uzux+eKkBkeb2Cdmrhd -CEzK33WrDPCLsxa8hOByCWFj76qQ+Q5kgJvK3F7kBLWijhRKLhJBQzCJDCvx4yE1 -w/e+aG74m1tAID1XjTuCZIcTZTEHZi31ogVM ------END CERTIFICATE----- diff --git a/tests/fixtures/server-ip.key.insecure b/tests/fixtures/server-ip.key.insecure deleted file mode 100644 index 94bf1595f8a..00000000000 --- a/tests/fixtures/server-ip.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA1E0iEyMn8n8hPn3UBjPLGpYun5OKRQqbjg25Y6/8sFSPNfzK -yxoV/88koERyihuX9twlmyJoLSLD20nCsTTUHLw0V5m7C7ZR/6OxKLp/3tGhqa86 -9KdEDRhnihX1AU52gfme/lfdqMYzQLsnjGbLaicYzlgxqQkvshVbz/xJUbC9hyVW -R1jspMCnS850VI5cP/z3LyazbDvQ82JbmerQ6pTeZ8gemltIfWZHArJBg6PtVfeL -Y4eaxk7HERFSFsVcR4CzBZb2hCZc+uKVYHPFVg+n2pmoqj+ZwtFjseQSbUq5hd1h -/ew5H1XF1qalGxAHsuhuLC1NGv9PIiqFmyZm7wIDAQABAoIBAQDP8BSV5fM0guxO -xvOqd4RRMBPOXLYrVW5yvmJ8j1zSYKA8YrNGJvCxM3ROPXxqZQh807dJsXOT8d8f -o6k74+B1nKkvu/UGTbcWyn+0wqaH2Y+cIXN/OW1f3i1bhJIKi41rVNEzkWAb9LUy -i5z62ZwXBuA3Cw7o34SFyoG4vwQZK1efygUXGIKSHdwAW7mwPD3MLIuIJgDrj2P4 -2aLb1jcRyUKCY06QD7tsOH6pn59qkjVnoYMJl7B8DiCRFXjtt1yRZ5RU9sMT8PxV -Px29qIouHvKcLt5cNX/Z1VjoopTHCOvhKMmQzP0ubPp7/Ytu2tPQcc/8DoZ/3+aw -Zr+27SSRAoGBANva1d0wSR17QTjfcKLDup9+ERztyW2fxi7hGgKIqzPCY4E+vGTX -KC5eToNpyo89COa/Z3rHKzLlSYpDaQiB/kWqEm3HPEW2Yq1YHaI7nZsuBPUkMtH+ -xOBFyZUYG2aaqQiBuvvSJRxP5puAXGlIWAQp9qOLwtbQJ0gGRMhq4yutAoGBAPc0 -Y0xzPNpTkjcRGDN7srcZw12tqy5bpi/TW+Ynlfxg2OnO5e4W9TEgFxcsNeqAB1IB -QBd1QhVnpHFANIThX4XNQ59FJ1jOYuRwKWpjBNOol3YWhLlBwPrRLxJNxYXbZha4 -zafhrvv3VMatU9Tc+a4gnZ+ooSM1m/rTAQqfunCLAoGAIAHC4tm1u0IHY8U7u6Zt -E+0hhqmjin8ZNhf1VmsZKYbiP52nhbLBGccG/SC4qZPEKPuyj/BQ/K7evu9Dakaq -gu/YkPzRbIC56uyKG+U786yGcj3b3DCP7uqaB0ekLZLUivWACEs2teF3/Cl6yqUK -k0icrICbU/Sn01d+SgMtoV0CgYBzUx9YFRK4j/BQfEscCYMwZHZ+B30qnVsESMhA -sQsJuGy5dupRjqhIiL3884UbpyrDGQ47Y1q2/aj7pIZbz4BuvXnknbBjf7Um+SR5 
-G0SvMaGnV44HlyNeX6RkF6AkeFxCEWjv/xtRNOt53HaVgZmBoHmoeFTkRihEdZew -yx+BTQKBgHMs/wLEaC0zLeYZV90s8cR+LqrRl1IkjEtpHg09ESDLX5IyobivSmbB -wOkkFI0N9KzcDjwj3qmPeyXiFIAOX5ToXAWM1tljUOfpniwxv69bzUkSZqwopI/0 -OK6gMIYt2GakcSIQrqHBuvGEKEM8I7Aa4QdbO55J4T6qYMqO4xyV ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/server-ipv6.crt b/tests/fixtures/server-ipv6.crt deleted file mode 100644 index 444800cd833..00000000000 --- a/tests/fixtures/server-ipv6.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEEzCCAvugAwIBAgIUPVE8WTSDgzGco/kjCilmiddHMvwwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDmszDivhUchvYxLdlHvRonIlsQ9VRVO5C4iQwrTzVTjb9y -q0oXGx1uQGNZyjMMStsZ4Vgi7UqBaEe9z3DIQxe4rzEn8B9NqWS6VgGBI3N9mjiI -BBNzUdJOoaelFqFbIyzL6cxCndovL6hURTxJTLI80dM5pdhfddPckfXmjD6qrZ4k -9bhIyQX+TZViHs6il5HWMJi9XdEW+VCBZ+Zaqjb0vMbBh5mEZIpYCdz7WeoowRUl -kcP7AbFg/PzP6Tg5xe5sNmrSWZSB4QuGfTV9EMTFVA5WqI2Z+T388IOuh5DEw6NL -swHME4eMsCwbZYees7xZh0tERDZUeOJFAifNrd0tAgMBAAGjgZ0wgZowDgYDVR0P -AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB -Af8EAjAAMB0GA1UdDgQWBBRhyokLlyprk+9tLhouzyinfDZuHDAfBgNVHSMEGDAW -gBQpJiv07dkY9WB0zgB6wOb/HMi8oDAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAA -AAABMA0GCSqGSIb3DQEBCwUAA4IBAQDBHcGP2z7UVPCh9Tj0M79mLPB76E9BtTNB -5cadSemk/itnGol4K5x+BILqRvQpKbUm7Yif4XVKtBiPnZothEg5mxcTGO5n3EVi -Y7KmeVZxUkPESQQVnM36ymG+jzSf00KeGhras71ddbAKZBVm+nsL3j1pLz+MGksV -m8xzIW/ilM/zL8ivlSy5XBu4JqJET9O5vs4RBYmzNNC9D2WxxNMm2bAxCd+1Kg82 -6TLAFGGla0e8fG39TMfLeQYqHf8FdmGqkhhVStjvgRPnVvEK6Uv6ZESZZBgOb97O -m4BnW5gp9abS3mTYdDMo+TzgcKSlTKbcRV3SHA/9Cjih8IkOdIKW ------END CERTIFICATE----- diff --git 
a/tests/fixtures/server-ipv6.key.insecure b/tests/fixtures/server-ipv6.key.insecure deleted file mode 100644 index 1181468fcf0..00000000000 --- a/tests/fixtures/server-ipv6.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA5rMw4r4VHIb2MS3ZR70aJyJbEPVUVTuQuIkMK081U42/cqtK -FxsdbkBjWcozDErbGeFYIu1KgWhHvc9wyEMXuK8xJ/AfTalkulYBgSNzfZo4iAQT -c1HSTqGnpRahWyMsy+nMQp3aLy+oVEU8SUyyPNHTOaXYX3XT3JH15ow+qq2eJPW4 -SMkF/k2VYh7OopeR1jCYvV3RFvlQgWfmWqo29LzGwYeZhGSKWAnc+1nqKMEVJZHD -+wGxYPz8z+k4OcXubDZq0lmUgeELhn01fRDExVQOVqiNmfk9/PCDroeQxMOjS7MB -zBOHjLAsG2WHnrO8WYdLREQ2VHjiRQInza3dLQIDAQABAoIBABAXMXKvJVPPCf7W -HtCFHPzbxZRCODaVp/tm+6VNqf+A5Hh///PqnTviW8uYccUKt4tvjzEoccji2BYi -ENC29UGZXolVkylchj0E4Kf8LAL3rbe26RBjBZMcbU/zax+rLWWvkeKXle8ymL// -8DuAkPHzBJOBwLyvwC4jNA53e6t1vKp/j5bpR0VZHFvsGSvGGx3P0l34DwHYjdL7 -+Q6jwaoIYDA8rcZHXzT/UCHa2dsk/m1OzFKUdyJVkZ0JIqRBhBmG7Z16T9xfoEZP -Ycv8TudQeH4sbiedwCYSmkfU+CtPzGWhZ4jjBegujJwrQJwF5TbRsqwHv3JOPE6K -hhJTs0ECgYEA8v2JMqFTwKhmh6EASI0YfLvrP1LNR6K6Hhdoe3/RYP2Qg12Cm7rv -7Eq1kpptu9eSnFRPdS16bTyRzTa1/eEPPjvTxCalnEOVdPSbkw6zr6QFwQh6HMrh -fbLQJy1jY/Gj5JkVLMK9l+iLLY9vJ5ZZPfJYdmlZ1USwbcsbF0u37hECgYEA8w0z -ZE34FsKWXdcMNG97OWYMqXWhOlLyxKtsWGUUEoTdK+PjDTDVQVe7KjjhOFehazD1 -mfDS1VGwBPHQgjxBDqreBc0HvA4o1B+Js0SiQVbhxkvyDHHnxI5MeUROPCeGljzn -lPM9BnrX1KurSNS+j9YwtUiM4TLcMMdBXdv5UV0CgYEAgudHRDlZD08pfSOlLXCl -onzyLPkEkfT+YzulE/M17xRrB/oWZKL+ocNVshbzyuBFoWZiL/RCIhshSPaScKUQ -Oyyr1t4jFd3q5Ejqjvy6nIK2ftl8P4qkk70DGjf/dVY2Pu6hU63NycqDQBYngaIj -jZXDRndW5+fLTDrA63nlKqECgYBxTj4fDJoTQjOHG7F84Fu5rnFIrqWy4uh59tBT -hQuOdpIE3AAFLja8d4GxdULJWVDO/8v/L92ZxLMiGvjxPdW2WMGYQrTQXml6Ohmf -kOdzPmWSY+U7F/7MCuprvgQa1vJPJ6VuMtbIJoxngIAhO8x6kYeze1bxxRwRQVKf -xuS7oQKBgQDrsuXe9dKZU4lLTO+mPCHZPgSwY+3rFvpAFjoM9YT2/H0EwBrBubtn -NYyN3ih9SyaJdO6RKSEn822sz1Vc8A0xqpjyhUWDxyRCtc+GyVKr2WIC0DraGUjn -flBpRAox7c3DiPQLQXV9WxFVscM1WaNOVEuuHwmUmaM6UXJRXT7jOQ== ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/server-revoked.crt 
b/tests/fixtures/server-revoked.crt deleted file mode 100644 index 899453e7ec1..00000000000 --- a/tests/fixtures/server-revoked.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEEjCCAvqgAwIBAgIUSjz5+dzCGgG0oO+u91Rqruj70ZQwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDs90tJyp+StNGHa6spxT4Eeuok40ghqxDwukfOqCe7rIJj -4pOYD2Tk6kFIlCJr+ug1fihWtTVVo0ZKUb4pwhXVDX82HiCyLd7OV7cfEKtnec3q -dUoJ/SR2iyjnVvRubL5szw9hx+XpRC1jo7HweBEFtCBPHPYdM67k9L6yfOoSkqCF -0/aqwbmJop0QSZa4RPEhkvyDLPaNtPnqBcoEF6SRxMSlJl1lVORIsGR5Q4/eZ62k -2ZMy8IwSqFeDtAWSSl9Gqi6cD7WWZsSqYWV06g/8mRj2zRubJuuaZx2a6QMv+Cpz -e32EpX2uZjpT2emCpRFDsy2RBWwLDJrk2+W7Piw5AgMBAAGjgZwwgZkwDgYDVR0P -AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB -Af8EAjAAMB0GA1UdDgQWBBTEZsOjdDCzMXxW7aVXJsWd8Wii/DAfBgNVHSMEGDAW -gBQpJiv07dkY9WB0zgB6wOb/HMi8oDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A -AAEwDQYJKoZIhvcNAQELBQADggEBAGKuQ05hpgwhR2Rdd82mHFc9ItnlW/G88V/p -49TuPpDWJwSidOTvEkzUdwVPtIWoZp7GNB8/ZkHIf6t69UxEiFqltkDCIo/VJimk -2Zk5cgPKgdBaZaufwAzSZWfpnX9k7IIi+gVjGBhYqw8a159AdP75kNjj4oYjcYAE -8FS0K6srsZVf2ER625psFsJG8ZVOjJOqs7fk32aAlCXSwCnOovf9qVlJA40nWi1u -pPIA3vW8JZ1UaYB3GKFkzdDaIGm9R7STEMx4I1gba3UewzN+a/h2Y7gQAWHs9VuR -/zTcENPQuLafN/+DNDVO/DmCgNzO9H1cdvlyUoh5VaKhmpRrw2M= ------END CERTIFICATE----- diff --git a/tests/fixtures/server-revoked.key.insecure b/tests/fixtures/server-revoked.key.insecure deleted file mode 100644 index 5060bf605ce..00000000000 --- a/tests/fixtures/server-revoked.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA7PdLScqfkrTRh2urKcU+BHrqJONIIasQ8LpHzqgnu6yCY+KT 
-mA9k5OpBSJQia/roNX4oVrU1VaNGSlG+KcIV1Q1/Nh4gsi3ezle3HxCrZ3nN6nVK -Cf0kdoso51b0bmy+bM8PYcfl6UQtY6Ox8HgRBbQgTxz2HTOu5PS+snzqEpKghdP2 -qsG5iaKdEEmWuETxIZL8gyz2jbT56gXKBBekkcTEpSZdZVTkSLBkeUOP3metpNmT -MvCMEqhXg7QFkkpfRqounA+1lmbEqmFldOoP/JkY9s0bmybrmmcdmukDL/gqc3t9 -hKV9rmY6U9npgqURQ7MtkQVsCwya5Nvluz4sOQIDAQABAoIBAHcRIxFm8Jtko8up -vA13AFx77l6unTXdoNt0nlQmhiB04+eQl5zWT1n+ouL3G/ypzDfkthwrXSs0qUL6 -o9SToyi0aXEl3kPpbIS96lN/qsCJoX/ng1ZVjhbKgbkMJjG+DkjaGd6F9O4qxavF -OsmbauI0ye82nCu8JmsA1zkULwE5GENavHsAnHx7xKLZROWlhO1RH22S6TK78cjJ -Hu0wBoUZQFDAErN1GqM6o3VnL/GGCS1hAqvs4BR8YJPxukIjSL3zG93QH8fy2jXG -suRNgczwfW0f5qxn9CDBA68vIPWsFWYpGgWIyEqNf3LllQ7bSApjXNX+PFGAxFjW -nQdzW2kCgYEA8ZsIBbRraUYrydvfPVPIJzbBWGim408MWY2bsW7Cy5o19VXr4UPA -llFJDAkXmJMDjHKkYu3rHcBrqu3/CvDe0YpqImdCz8CjP5dzy2oJMaHRRa6qNzjm -fbE5GR29ZEofxraaTSZCBicXXu/vpG7W3zqqlDc+sq6+zM+hrcX0p18CgYEA+xV/ -5YlSGp0sxq8HWzBh5syXy9LcGNRM81nrcXQ80/OoNFdba4uCoCa459aZCo2YYanJ -eH+8BzYDetgEcacNVzbJQum38StGB5R/y64NjZgZowIA/68V7qDbRFf1F59zZWpE -vgHa+KE+dCXK1PTUt/wqFIcrIajSyhMXAMr6S2cCgYBR+WPza4+2HFTnHG7WBAM5 -Kt7W/EsDfOKXz/Avd4EoS55bK1fpCm/hkJrUNpGG9vqRQKR93HOVmJ/vUujh8W/o -cKoqGhcVHitFfEGRltyftmOm3Ohr7CZoJyVUXD7SNEQry/D2lDB6nfDUCVyp0eGd -w+30c/oV7ixWmWwl5bBoyQKBgQDlGsERGTQpxLFOufbkZklu59C60zSyE0YD51DG -vWGjpPkeiXeJsksHB05Bfbc3wewBcYO8yBEyIz8ZoHKtodiyc/NBczG8hdfoor/Z -goAra1Y5P2LZ61D/5RcuTXP+kighqc3/8oFzzO3H3ZQurRhMqXNcN9pLZFiyuqiK -uKuakQKBgCXr164GYD3iq8LUNRZ/wHWnDEGvL9u+0ZKviC3SZA1wRdcn5m/GpmAP -fzV6NnPwoUkXA2ees5G2aYGuWxU2laqFFflqzuECs8A4XQUwGqoSs29lt5fFm1Y7 -sNxQwwyEU7u4n2pAGzFjILAIUmUGrw6dpiUtHFDEhxzuHIA1uHxR ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/server-serverusage.crt b/tests/fixtures/server-serverusage.crt deleted file mode 100644 index b7aca3e5b86..00000000000 --- a/tests/fixtures/server-serverusage.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIECDCCAvCgAwIBAgIUDVfELNb4NV52PJjENU2DNOIFx6IwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH 
-Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDvx5mE6ZwV61si7QK18X+K6lJSWr8rE+0l4YbvbLh/bCdP -bnPzTsiK+FN4FA9fur73L/RvQDxO9XG99pKIhKPGxiit6DSJ9rL24RpK7tJpgnnK -Kdry6E+6/ZvVXBP3LtdJyX3kpdw+TSivlwi50CeEQA5argOXnyDNGIOWC8iMpeg7 -z3pE0DfpDEgLMjGE/I/9YHRCOiK/8kQchXqVfWHpALFakf9+QPNpNrEIPjV7IPja -GEUeDG9MI0PYbBl45NkKIi0nelV/Nay9kyPSPKfvngJU2dTqsGSVXW+DlZo7OJBh -RLEih+CWGfotGzFzXWdQRvD/VleaxYYUDKkyHVx1AgMBAAGjgZIwgY8wDgYDVR0P -AQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYD -VR0OBBYEFKdzgCE8K0tYt9LaF5N2aQrrHru0MB8GA1UdIwQYMBaAFCkmK/Tt2Rj1 -YHTOAHrA5v8cyLygMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG -9w0BAQsFAAOCAQEAYcD+1ebiJna7dfydgw/yox+b6/KO16evaU4c5Spu5O+VtcZG -rKMi8MT8V0U7kL9Xo9TszbzwpPWr1It0wcmM1cZrwykkT/baVJADaLtfSFUtlCDX -HNB04C2UUBPPosFr1d5YtwyN55qxgyMg+IDeMubYZ4qwDWCYBTIiz9yHoQ6LuuV8 -Tkkpa6X5n4+fO2iUgA6SZUkwZGdbQLOz9VMa1qgyOz3ejuDeMc4sa08iADs4wG/X -ohRGg0Df5THeXhR+Pn0HBf3T0eTAeZzLL5xtlIn9o6o9CEU573uEYQI1BG1kcDeQ -Rs9J/2iuLqr8GAjr7k5aUW3FFYRtqC3YR0G5Eg== ------END CERTIFICATE----- diff --git a/tests/fixtures/server-serverusage.key.insecure b/tests/fixtures/server-serverusage.key.insecure deleted file mode 100644 index a0ac946aa7e..00000000000 --- a/tests/fixtures/server-serverusage.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA78eZhOmcFetbIu0CtfF/iupSUlq/KxPtJeGG72y4f2wnT25z -807IivhTeBQPX7q+9y/0b0A8TvVxvfaSiISjxsYoreg0ifay9uEaSu7SaYJ5yina -8uhPuv2b1VwT9y7XScl95KXcPk0or5cIudAnhEAOWq4Dl58gzRiDlgvIjKXoO896 -RNA36QxICzIxhPyP/WB0Qjoiv/JEHIV6lX1h6QCxWpH/fkDzaTaxCD41eyD42hhF -HgxvTCND2GwZeOTZCiItJ3pVfzWsvZMj0jyn754CVNnU6rBklV1vg5WaOziQYUSx 
-Iofglhn6LRsxc11nUEbw/1ZXmsWGFAypMh1cdQIDAQABAoIBAQCnpgkisyuc78fy -7YAdslKY0Cjqx+QtvGrtN3he4sdE4FvD39hWX9k7wVCq/muZZTqsHe1r85+3HUl/ -pmzh4suX6Wj73wUNCV4r20vE5KJdfwqkXQtnFyLX/QX98blL9IY2YxkQyx7ouI4f -5xwEvxNCFn9yy4RbeLk4bVFjka2RF/x6qEUCHq5Q74vWvyC1i3kGKgYruM39RQw3 -D5fG8xdUexBc32nfzynP+0NcFAiy+yUQWOLcE4i8XaegFvg+QvWOx1iwjqU3FDeC -JzKrtw9SLBWf7AGraxA59K4WJ63xqGqFugWcFaYh923X8zES/s0wrtV2T14Lgj3Q -aWJ0DfQBAoGBAPNd1Aph0Z9PwZ1zivJprj8XDnFP2XIqlODYuiVddjbvIV13d6F/ -PE/ViW0MVduF0ejkPs9+hSxYOH58EWIt44Li/Nre1U42ny+fJrY1P5Mq5nriM4L4 -lx2YFaWzAoxzpMbbQ14kEMcQSicziDbBx62aaQYu4UwrvqXYdSYp+D+BAoGBAPw6 -Gtv6hytg19GtH6sQG9/4K4cLGX4lJE3pTL3eUicfoEI+hviZdG8FS77Uv05ga6fQ -OlyqvpmmXp6fgTrSlHBeKO75A3sT7fP1v1foq1y+CdMGytOnJENUc80bN0L1dFI1 -zwYm7eLDP0KdUYpf+Rpgcap4StQbotpc6oy705b1AoGBAO9z26VXd+ybifKE9Cru -Zp727aP6IAaf9RqCxCztl9oXUanoWVISoeIfRfeA0p2LPu06Xr7ESv5F01hIdMY4 -RonLE2W7KP+q6NfvbSSMogAIjvxLwslUFUPuFyaRSqmtQ2zR4qgnLkbfNUb7AkR2 -SCT9L+cAi3bp98ywfRvO4c6BAoGANkAJJudry1i5EtA5z4FXfYTTV+h7QzaZ6GgV -qYD4CpIy1gy82xumf3qUICeCPkle3mlbJDNVa5btIxELqqtAYiregwfsR7yxoZdp -4G6a7Qey9UCwv3Vjx1eS0LrZ1/0TV9ta++fDotJ7+Mf9kdWyromv6QqWjaikDnON -v1dm20ECgYEA6i+uvBuomUzqJYjUTNgMtmfYjuXv8D58wLkY10si7T2ayAlFKBG2 -Dno/dojOcixO1dtaq7dA+KNXtESekjT1Y4VleGHWpEglRScE629iow4ASrluP/Ec -F2DvTRW4daFDWQV4je1u0+wDj5B8KZjO/e759BztiRyRqTCzpxTa8Ms= ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/server-wildcard.crt b/tests/fixtures/server-wildcard.crt deleted file mode 100644 index 156fd073202..00000000000 --- a/tests/fixtures/server-wildcard.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIELDCCAxSgAwIBAgIUYcN3lj3pyfwV4xtQPKH6l61/GHgwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT 
-ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDpVKzMA1J6nfusMHTkdLzTh25BPsP+LI9qVhGVBnT3Xik3 -yZ5QkbpH3kRRj7krRvydO09FccZ4807rX+pP2V7NNz9k9wuOatZl62/1uQFgtpC6 -IEj0Y22w4BI6RbmWsoXG8k0/vr3r3J+X/RdRI4Zj9qjDD8YE0SDapnmpTZ/s8GKY -fTkv+vadEYvRS2ZD1UivNlMqHjL+YUw24slTYY8vlzgBc9sB8nC4bnXKBbBDCvUd -iHBHqM5SFYzHEdViDRUT6SwBE2QmODsasYJRGUPhqabNFaxpP0xa6D/uQoQZ4UTi -4Ljw0N+j5wpbogJ/Pf8rfewTKobQSO0jnZi5nmwHAgMBAAGjgbYwgbMwDgYDVR0P -AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB -Af8EAjAAMB0GA1UdDgQWBBQxIm4m73svrwC7gqsRhW8wVL5CyjAfBgNVHSMEGDAW -gBQpJiv07dkY9WB0zgB6wOb/HMi8oDA0BgNVHREELTArggwqLmV0Y2QubG9jYWyC -CmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA -yDzgXQZHgP5uFuCCsoGGCSfOiUxEcFpr19al08mlL88Hw69QExEJK2wNmGoVY/v2 -fv58gnUvDzJ1+V2MTJt7NkT28d05pJ62ud6auH1I3SwREN1mizXAx23P9sftSGln -gdTFnyS/9U15BkRJSYe6w1Bm08GRsosVM5poKbYOol+Cx5WyVQHedf+GFoTSu5lB -16mKjYscvezeIgWCW/X8MGju8HbCOOV/SKPJ8MCK4fBxdESTuTsmM5rZj3n9ROY9 -mvAMcLEDa8xhRHkdls7pd/ZsypN/blbVMBskCldiLLRb6FwI6sarBzihofW4nwef -S8e6/xuD4RxpUMOcMBuGSg== ------END CERTIFICATE----- diff --git a/tests/fixtures/server-wildcard.key.insecure b/tests/fixtures/server-wildcard.key.insecure deleted file mode 100644 index 72e455365e4..00000000000 --- a/tests/fixtures/server-wildcard.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA6VSszANSep37rDB05HS804duQT7D/iyPalYRlQZ0914pN8me -UJG6R95EUY+5K0b8nTtPRXHGePNO61/qT9lezTc/ZPcLjmrWZetv9bkBYLaQuiBI -9GNtsOASOkW5lrKFxvJNP76969yfl/0XUSOGY/aoww/GBNEg2qZ5qU2f7PBimH05 -L/r2nRGL0UtmQ9VIrzZTKh4y/mFMNuLJU2GPL5c4AXPbAfJwuG51ygWwQwr1HYhw -R6jOUhWMxxHVYg0VE+ksARNkJjg7GrGCURlD4ammzRWsaT9MWug/7kKEGeFE4uC4 -8NDfo+cKW6ICfz3/K33sEyqG0EjtI52YuZ5sBwIDAQABAoIBAC3SKRTvWhUmTTQl -V+89VY+cuvQpJUgW7BsPx+giGnoxjZqdB2//Djvq1DPIK67qA9XEve5/R2CdN1RV -w6fmog1e2h4zvZs8M9pT/+qbaD/b2lQS3wDPPc1MU4gKBUYozMii8LSh+p4E93pb -g2a1uUCMQdv8jwCHKRKHOsEas1tOAaZheeauni0e/RIczsgfvGuzgBRO8c5PLmA5 
-vh8MGxoKCvXQRrG+l3m77Ni0e6jj+p/3rA41Y1iNhj+jAgcsi5N1l6GsBKr5rFMp -w5pmfkLWnaN1a7Z25KWrVEsvSnxQpoIRXJt9mrnc5IztEtL0Yr49VQRWGSCzrXBr -xZrnyXECgYEA+6ClMRZCubvk4xZewVYGL4we3L3VpwbHkYgIA/wBttZYm2oBl09a -eERj87gC2Qzkpa8HcS20//KFxVNYzW1ExBqicndZfd4I8M06eMsjLnsDP1wLnxQZ -dyroxoqiEkIVWxautq/VrIO2dgAgVDHi2AJsdc0n7qnx9U026vqxx0UCgYEA7WKj -J7lvk/fyVZAe84VAG/kLDgmkcSD+kGf1vIie24dABiMjPxUb8cNrP1e5KAWpe40r -lUhPWroW0xXPrIhS+6PrcTA4/2ebJ4Qf0aAtDL3gHnB2E/BTuDb12PnkrZqg2jBK -YctXIfNIcNXYVP1y6Lq10WvwviBG2nniAPUKZNsCgYAJsdXLf10QxOF7slfyQPs6 -B78EqDe8GLHFtKUCakoyni2Jx1rKVp9YtOHY+QT7EdkZXRX/UVCA7/ohcSWhvI0C -tTf/CwQiqlRT2sRe9Qyk9M5aOZSlC2QzyC5xv9OgunUSLlyK41lrLSPxhe248LcZ -tXYyT7YzJs8QsWnlQcVptQKBgQDPwJ+hyHyKN1ly4Kr13Qx6br7qDi5Ig+PGZfV+ -huLgpcG2nVHfh43pTGm0CgYVrL7jTm1yPNKWSH5pRpF2IejeKluHt/hqLjZvowZl -45UJrbNTcIEmehILCq6msi0cclOMIO84H0mmgNBJUB4AY8AJRj6RhbIv8vePhVPy -GoJ6OQKBgFWdHMY6qm8avVslWekWH5zA4acqg1jUR4t/8HCqEFMrBhxCzWcJJhsH -kqzUxxZ0dceqaYlieG1CayImbONpqhlVtUpjZ1HhCQ3cO1qK8fX2raxdow2BWAaX -QxnME2lB3Uxn673IAF2HyjLKpKwupRpg5QUD7Zw5s7P7FdpeHLlE ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/server.crt b/tests/fixtures/server.crt deleted file mode 100644 index 04c3d7b6f49..00000000000 --- a/tests/fixtures/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEEjCCAvqgAwIBAgIUPMH88Ekdi6JPq+5703+qGFH7VmMwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDD3vhO7LT77scWKnsozEg1DiQQsAbgGFfAoQJOvgrRv7V7 -I5+9n7hlpqKEIYkOuX0LSqpLBJ+9ORxXPBNZFKsytryOc3ZWTAoozUkufUOKfUFa -1QpExA5u/FpwtNXGRGC35wus/JVTtcIiifeml2PIdyoxdXfev6y4yJvhO38Osqru 
-GoySDORGsPrmLdpoUieofhmHWEgONpoY3fVsqAwiP1NMDNuqbVHvDjMykxj9AmQa -WBdspsRXcgcl5Dp7mf1KVPRbvnCOjLuDDiBCwTTgpU3sDFyhHm/Bq29lPwlHWi7Z -WKQYGIwqQOfOjfjhH009/+z11Z/1ovj+FWLbcXP9AgMBAAGjgZwwgZkwDgYDVR0P -AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB -Af8EAjAAMB0GA1UdDgQWBBT0sUW5xBildDtaySEQYE9vX+8SZDAfBgNVHSMEGDAW -gBQpJiv07dkY9WB0zgB6wOb/HMi8oDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A -AAEwDQYJKoZIhvcNAQELBQADggEBAATMlUra5N3Z4KB++xmGM6h9OhYmbKqn6IEV -o4mqrTimyzgtvVsHh/v3XvBzxAdma6QjkZygfg+EIHSLbJVmZzxzr3YeENu4EyDc -l+FfNPCyFHX9CH2Rk1ZThkQbmqVrzInXmG47G/PbTC2l8+kAZwvp37QfIJNCYIku -XTp9R72sEkfNXxZxsZjwM7Z++LaB+cVEuLNJG0OpMhouTuxoN6pemzBmmFBP2mOr -SeZnuEVtvDIbklJDdgcB/mPd7FE2xCVfa+p5Ol9Fcw5aPxTXAAQ+aVDtzh7jcFWk -SV4K/ZYFqYN4E4H1UXlkHKj/qCmryvPxe8DVzu6Pd0ZyA0H4Lxk= ------END CERTIFICATE----- diff --git a/tests/fixtures/server.key.insecure b/tests/fixtures/server.key.insecure deleted file mode 100644 index b52bdc79d0c..00000000000 --- a/tests/fixtures/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAw974Tuy0++7HFip7KMxINQ4kELAG4BhXwKECTr4K0b+1eyOf -vZ+4ZaaihCGJDrl9C0qqSwSfvTkcVzwTWRSrMra8jnN2VkwKKM1JLn1Din1BWtUK -RMQObvxacLTVxkRgt+cLrPyVU7XCIon3ppdjyHcqMXV33r+suMib4Tt/DrKq7hqM -kgzkRrD65i3aaFInqH4Zh1hIDjaaGN31bKgMIj9TTAzbqm1R7w4zMpMY/QJkGlgX -bKbEV3IHJeQ6e5n9SlT0W75wjoy7gw4gQsE04KVN7AxcoR5vwatvZT8JR1ou2Vik -GBiMKkDnzo344R9NPf/s9dWf9aL4/hVi23Fz/QIDAQABAoIBAQCKvee5UCYqxkoz -Q0gV8A29txSI1YcpOVT/V41g5XCYfmk4nlVKZlahelVnrrF8wpr2Yp8ZoF7eFBQl -HqK92Mwjkhkh9lt+aUJRAIiz63rqICspAfrSFuX6a7pMV2uNk2XHHlvA3vGPaBHp -kTzgvh+qIe67NfAA0liwUzlHY3NunpXW6UQm2OWtabOfc1zZ78E58It0VTaAWKZq -KDOxQGdwS4BGIUbsCvktPgncDDzi8wIZY/6YDTXkYKUJWqVEWf3SregXbX6/cjQa -x8v2v95LiMk0vp9E5GG9QKdTEC+fZyQsq983G9t4O9VPw2JBR2TUxIRrPH0gkvhQ -F1n3yYABAoGBAM1qHX1wQoHiOpVsgaXrAX2QWrBeNpbGAgVh67MsD+FlUH/ZikcK -dsz/pTg+TmUuURKadXWJx43E/IVb6Bw8io0uF30aXeWRSIDK99FAmx2GxxTBge5f -MtAQSTctr//yYLaNZWSdTpmaQYRtenK8zN6OTQ251M0sWyWsv3UcpBABAoGBAPQb 
-NFqaw8C8JgJtHa7wrYCU9ShNxzQzo/jgu5ZuO6CxKNYyFGR8ZOpWQx8vTrNkjDXN -iq9oZ3gIcm7c4TQOg3ydISNyoAEYrhRgQC6+rDkl4Zgby6ssvDHMqCclMp5Ztn3Y -bfXG2gk3ULLH8TkQ2KWRJrpPT3iSdvLVXmNHHaP9AoGADAxhVm4zOHMQhJssr5Kt -L7Q73YRpJ0bN74rizEuVUt8ibZ1Q4wHWHggQpM/iwUSKNNEiepZuQf5/4UKWxrE2 -XzmI3ymgwEpZOlStXHSxpHW3T5xaBqVG0bVi1f20CQsqaQq6G8CuT4wgs6fIOtqg -GZ23H0r7FF25qugLAs9/QAECgYBsi6ROHb+p9oAYWBj474DXSmVxVJSd+9CQHK6N -h9rv65czF/XFcSMWqOET/t9KGg3W5t0ifpRz5Z2s+n8RvNpvERfpQVEw656M5Pfl -UVgX2WZlUwbPyQauRkkHjxzhGRdzAkhzH8dYjcZOmWYEcB9GEDNeaWH3RXmrJYHh -N4BQqQKBgQCtdULs1ju0Y5zwSdkxhBWJHxCpLjuCpxfpWmnVFlp/Fj9S7rBfzWrt -hYjsNyfMUm13jg4+6CJNdf3pQbdczXWTt/E0t/uoo1m8Azcd1qFPKV8wVOSiZBXv -mMWHW8fw55ygoFyBlQpzwI6M+Zpwv3Smli/H9gQClsS2RmdIjUhqtg== ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/server2.crt b/tests/fixtures/server2.crt deleted file mode 100644 index f9602ede541..00000000000 --- a/tests/fixtures/server2.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEEzCCAvugAwIBAgIUJFakdIrS8b6xWLAXMmEpn+awNIgwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMHkxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEVMBMGA1UEAxMMZXhhbXBsZTIuY29tMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEA5QaGXSEZtAlH/JFbe6lZWl9WrbVJ2A1ieZroszfBF4u5 -+LidfM/E7t9GrTq35pb5Ie7V1kvoo547r0yNubQPgU/DNGDIV1CXe7tSrkMLGNuF -hVHr2IAnRJL9DZUFDqlkiimBz7m/rcHtwX+5v54YBh8L2A9p0Cd7EfgPEWBQBXBL -XbwOY3ZpdIn6jAtyAr3uME9RkH1veDomI/qlgD6t5/IrucFkhLSqzNYysMYhlUxG -JABPLWJI2qalY6f25o7JtbErjrDphosQdSLw187KvgDaNi++KnOk1HxRBWbR+3va -o3tr5YT8h0yK9ZlsAQwxwGELHtFBSmjfneoF54YuYwIDAQABo4GcMIGZMA4GA1Ud -DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T -AQH/BAIwADAdBgNVHQ4EFgQUzW7RhcPbhjbvEryk5QOQNSBdTPswHwYDVR0jBBgw 
-FoAUKSYr9O3ZGPVgdM4AesDm/xzIvKAwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/ -AAABMA0GCSqGSIb3DQEBCwUAA4IBAQARWyOreuBLCuo5W4bFFkgcUA7xFwSGqtgN -9LxYpLT1ADipdDvH9/XGA51plGwpNT8/99ipgMjhpGZTS1iK7pHwaBAdAqRRqT1k -MZ5X6KFeQOp0uKkWMJiyYaQmJR26tYDeWzwWxYD/VXI3fb5ZOmNRjDpx2uiqDTPX -kKY3U0vHGzbOWSRKEqTGUJ84yErSdPRK5gD48oqtUWczRbnFl6KV5pFb0Fq1Dhka -GwzBOWLuQTSxBU12LYAcKCLIweE21i6GJkxLDqtzI1RS7BRDsaE2cRWJV/6VqqFg -bs7FtNqLm0rdxdp7jnq9GCAXq5tFghTP56I9YkJLaYkdW7vjXkc9 ------END CERTIFICATE----- diff --git a/tests/fixtures/server2.key.insecure b/tests/fixtures/server2.key.insecure deleted file mode 100644 index e6e486b7cac..00000000000 --- a/tests/fixtures/server2.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA5QaGXSEZtAlH/JFbe6lZWl9WrbVJ2A1ieZroszfBF4u5+Lid -fM/E7t9GrTq35pb5Ie7V1kvoo547r0yNubQPgU/DNGDIV1CXe7tSrkMLGNuFhVHr -2IAnRJL9DZUFDqlkiimBz7m/rcHtwX+5v54YBh8L2A9p0Cd7EfgPEWBQBXBLXbwO -Y3ZpdIn6jAtyAr3uME9RkH1veDomI/qlgD6t5/IrucFkhLSqzNYysMYhlUxGJABP -LWJI2qalY6f25o7JtbErjrDphosQdSLw187KvgDaNi++KnOk1HxRBWbR+3vao3tr -5YT8h0yK9ZlsAQwxwGELHtFBSmjfneoF54YuYwIDAQABAoIBAQCPO6xutBPaJ+/Q -gqv/Q+NxBK02CGo9Z+mNehdMdnMZobZWWkeMVniomBUgo9d9rC/1S+SKmIDPS1ey -g6MjX/xOeC7yJBFHokyLApVsDNv02N3BioGArm1gkrkWdHtsNv589gaMfnPlXKKw -YIwvzdTihyomH0Wi+/4ZN9VcnaqOKwTmTFzuWZ8JdjRQZnXfdyL7opaWahG9P9UI -X2nhre73fxeF1OIO0lOeDtxM21/cZnzlclD5mj3OncXC+dqqqmxyMnynaZ2Wzlsb -CPZA9JaNPCMjWwJsd1xvKH2DcI2hGgSwvsho/mgsMWruHBHiSkIxwOgYYTRDrfvi -pUL1f+55AoGBAPBVR9A/mzRkVIcMIw4efP/DWJHkz68L40+MNsH5qwqGdcru4vpg -HEdi7LyHDn3j1ij9UgJTg9b2+6BX/KlQwXw9cG9Xao8Aqge2kAmjlUIwvsqhU9DN -ZrAIj+B2s4yksCXRqb3IqO3cFjoMPzw9FHV3NMojRMrmwOtiSUQtI9TtAoGBAPP0 -imIJdS2pUzoIo0ceByqn9xQPCLwGK/b5+QCOVDnZAbcPoilJnLn6sEQKoJs7P+ds -pvPRBf3CsUHJ0jrJvf5GOUjpSxTsJkOaan8pHULpSwcSToWUM/5eRxAwrKGMlmSM -iwNjoGh35gfc1eYZbElxVVCyCfz57jKs4jW3MXaPAoGAVNfGYl4SDIzeyk4ekf1x -Y1kzC04bg1BPDuYQ7qmVGEIfk2SB/KGxWgIyUNvc4dRs5kuHiAqzoE/QxOpK5/r6 -U0HdT3EszQ8O92obr0twhc1vjVkmna/lcH+VS0icWipJhRBfPAB6on3v2s44BKwL 
-bOyIVlPdFUQhFve7pbXJ0IECgYBHeVEF8iFrtF1W9mroDispWzavoMv9Uo2U+Z3z -hL+2hxbSjHkFQbTyZDk6Ziax9ET/x7yOWKI5u831KW03nh3VHrvv2bIOujVnvxkO -knwpO3Ko6rsotcgZ8YM+ghRB7I+ve+HKp2i60s4JZbEhjjdEuTi2wMLeZFdeb3qD -JF4QjwKBgQDiS7vic8gwLq3Txhcos8FJ7SfyjqAT0yHXk2Mmw3YedsjsyGTEqRTQ -LAvp/B4f2jJzc5OurFg0qJSomkQTru0EbVhFAnD5G8HjpcuvHVunamZLT7mlAsg/ -A1DZFvlU5GyiL4DdtRj4NemjiBRVVKRZlgGCmZxQabZHfuk9qVzTeQ== ------END RSA PRIVATE KEY----- diff --git a/tests/fixtures/server3.crt b/tests/fixtures/server3.crt deleted file mode 100644 index f7c080ccc8f..00000000000 --- a/tests/fixtures/server3.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID/DCCAuSgAwIBAgIUQZUvcjm8Dc/5ehERIxy496qys2kwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0yMTAyMjgxMDQ4MDBaFw0zMTAyMjYxMDQ4 -MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ4GPkb+yxbC -eKNxmv9OT+mpdDW55yfvPTQiVo4iU5pWl6/m91rbcpg0NGfOtZhFLpKnpEs/tjMY -0BRTGsL/szApZ6O/vdsS4D6kzSySuLP8a6eNEQcj5eL1g79yQZQUbs6iAnJXXV0Z -9v3YJ3UW1hU9VahcYb2/myIjIvP8El2ciXehwHENt+YZGMkP7DRYimCQBUYdQ7Of -sY3ojwarrqvjvd05VXgWcfloLujxCJVwwrUHXvvifdByVsU+DVCkZGuhIwO8FKa0 -YaJbp2b/PXHt9AnLNsphyy20Tu7SL2QF8lO59w9044mRvdQ9YT+E5jgUk6CCwwpf -yXVcbRUltbUCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI -KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFHLNtdcN -WCzXJr5GFWZRQOX0rCMQMB8GA1UdIwQYMBaAFCkmK/Tt2Rj1YHTOAHrA5v8cyLyg -MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA -FCfeTDh1DzpNlcxbublMCbar8ZoWHXd7k5BmWJBqA6qmRPGa6FBZ+8J1jUs+7ZFo -h7Cd4cOfn77+VxGo9Rt5bZ4t/luZrMNGVDO52CD8eSR9+iZ1OMTtYod9cjuX24aQ -oXTGvuwVnSXBxqcettCBfNOCPT8oWIdDY6gTUzl3GjiFlX1Jl9nntCAqxO67rcDZ -KzX33NUewsOHuA6B7Plzhkw+WNUTHvrlrJBolWWYZWArwR9PrmmsmynXB7IXU9vA 
-HvFpOZ5vkqq2lV0+Sk4Rwg0EQX0Wf9LyEEDBcCNTOqVv+Dbj0+i+cNPz/T05vkA+ -gxtYmjZz+g7NIVBSMyqSQg== ------END CERTIFICATE----- diff --git a/tests/fixtures/server3.key.insecure b/tests/fixtures/server3.key.insecure deleted file mode 100644 index 8c2b2e52ff3..00000000000 --- a/tests/fixtures/server3.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEoQIBAAKCAQEAngY+Rv7LFsJ4o3Ga/05P6al0NbnnJ+89NCJWjiJTmlaXr+b3 -WttymDQ0Z861mEUukqekSz+2MxjQFFMawv+zMClno7+92xLgPqTNLJK4s/xrp40R -ByPl4vWDv3JBlBRuzqICclddXRn2/dgndRbWFT1VqFxhvb+bIiMi8/wSXZyJd6HA -cQ235hkYyQ/sNFiKYJAFRh1Ds5+xjeiPBquuq+O93TlVeBZx+Wgu6PEIlXDCtQde -++J90HJWxT4NUKRka6EjA7wUprRholunZv89ce30Ccs2ymHLLbRO7tIvZAXyU7n3 -D3TjiZG91D1hP4TmOBSToILDCl/JdVxtFSW1tQIDAQABAoIBAByHlAbNSW06fv1D -LXCaeuL8rPZmMc2L68jVyjqvB9j9eTVQxaeppu7DvhJfx3lORDJGAetz/TkMacTB -nDtIXtl7IDL4ExbSOZoVttUtSBt2nxkI5uIbIQ3wtXCC+EP7zGWR6k8qZrjAT09V -DwqcrNn40NYsl5jiVue64DycbdRofeE/GGyp55LkrIavI8ROPDHmXIDleHr/fnS+ -/3cwmRl0YWJjdFEuDsP8SIfH87UC5dlEq0q+6XV+Rm/sYWKnCMDERXLzIUfr6OcE -G/dC8OnjhkLfAykM9runu+QQJUtylqwVZbM71Gr0GxsjU0b+cWsR2H2gd0wChoJN -Au89+wECgYEAw3GZ7VwOn5d4bmYt4t7tVg3XXw1EAurG4mQeuLBxP+bThLCBydEL -e6Zg5xfJlJNqTAGEsTqqFB/36ecLNuf+nm0lSwssTchn0xU3/0kDAw+QOS/6b/zq -xxabtTa0GDOGMTu1aVbM6uKIziEgiPqOTYyJKaAA2ITSCmwiZ/5OxZkCgYEAzvyT -eNIiI/kzzfhadDqL6FVvd4D8A6x18YQySt2VTHQcn9f4p8QSD2DPRlgnYLACxFQV -XD34c6IeFVLM2/f0+ZRFW+Hf03AZ/ytoe2MSYTmYDtWMGjsHNF5xO3nkE3oQla1O -bKXBmJ0oTr4NIL5vZ3rV8FTIFYY05YfaMBi5Sn0CgYEAsQyzPZPcZ3SHE7Oas9/x -LrihNylESEQ44RODxRmJrjLDwHtJR/MIrP3+4Lnq0Z5td+cUNp0HP+3p3sl/nkCx -pwEG/KFlhB0c+NpK/Qc+JEKwCy5Md7CtWqc/bPzeTuI2GVmWsJOCVPHcrqbR22Tn -DpdWFhAtU/eWcvyceoqk/1kCgYBv9L3vg/lja89RgRur8l7qdAuun92wPwAsekyZ -ofC3QbaZ3r9oPu1l0/9JFTV3XrygZLqJAhv4r5+F+RtFf4DJ3iEF6c6fFut40YnZ -82RlojlVDLyTE4p6EPs+KFftEQEXdH4O1jk4ywiaTsHbDCZF2nMNY042FjlWTXz+ -tuDCIQJ/InGW7eqaTUGxOC7XmvtSyuhP93pzEXY5hm+WQSWnkQIKfM5c64fsbnps -+rJgOmykj+QyvEiVgoPdlbd3FDAgK9YR/3vYMsLzQ4OIZLx3b6/cvt1bn9VQg6Sl 
-rFTa9FlOGBzNF0MazAEGygml8AcGdAkz3ztrWr5qXhJXaSMDvg== ------END RSA PRIVATE KEY----- diff --git a/tests/framework/config/client.go b/tests/framework/config/client.go deleted file mode 100644 index e570c9c119f..00000000000 --- a/tests/framework/config/client.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "time" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -// ClientOption configures the client with additional parameter. -// For example, if Auth is enabled, the common test cases just need to -// use `WithAuth` to return a ClientOption. Note that the common `WithAuth` -// function calls `e2e.WithAuth` or `integration.WithAuth`, depending on the -// build tag (either "e2e" or "integration"). 
-type ClientOption func(any) - -type GetOptions struct { - Revision int - End string - CountOnly bool - Serializable bool - Prefix bool - FromKey bool - Limit int - Order clientv3.SortOrder - SortBy clientv3.SortTarget - Timeout time.Duration -} - -type PutOptions struct { - LeaseID clientv3.LeaseID -} - -type DeleteOptions struct { - Prefix bool - FromKey bool - End string -} - -type TxnOptions struct { - Interactive bool -} - -type CompactOption struct { - Physical bool - Timeout time.Duration -} - -type DefragOption struct { - Timeout time.Duration -} - -type LeaseOption struct { - WithAttachedKeys bool -} - -type UserAddOptions struct { - NoPassword bool -} - -type WatchOptions struct { - Prefix bool - Revision int64 - RangeEnd string -} diff --git a/tests/framework/config/cluster.go b/tests/framework/config/cluster.go deleted file mode 100644 index a8c23d7082c..00000000000 --- a/tests/framework/config/cluster.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "time" -) - -type TLSConfig string - -const ( - NoTLS TLSConfig = "" - AutoTLS TLSConfig = "auto-tls" - ManualTLS TLSConfig = "manual-tls" - - TickDuration = 10 * time.Millisecond -) - -type ClusterConfig struct { - ClusterSize int - PeerTLS TLSConfig - ClientTLS TLSConfig - QuotaBackendBytes int64 - StrictReconfigCheck bool - AuthToken string - SnapshotCount int - - // ClusterContext is used by "e2e" or "integration" to extend the - // ClusterConfig. The common test cases shouldn't care about what - // data is encoded or included; instead "e2e" or "integration" - // framework should decode or parse it separately. - ClusterContext any -} - -func DefaultClusterConfig() ClusterConfig { - return ClusterConfig{ - ClusterSize: 3, - StrictReconfigCheck: true, - } -} - -func NewClusterConfig(opts ...ClusterOption) ClusterConfig { - c := DefaultClusterConfig() - for _, opt := range opts { - opt(&c) - } - return c -} - -type ClusterOption func(*ClusterConfig) - -func WithClusterConfig(cfg ClusterConfig) ClusterOption { - return func(c *ClusterConfig) { *c = cfg } -} - -func WithClusterSize(size int) ClusterOption { - return func(c *ClusterConfig) { c.ClusterSize = size } -} - -func WithPeerTLS(tls TLSConfig) ClusterOption { - return func(c *ClusterConfig) { c.PeerTLS = tls } -} - -func WithClientTLS(tls TLSConfig) ClusterOption { - return func(c *ClusterConfig) { c.ClientTLS = tls } -} - -func WithQuotaBackendBytes(bytes int64) ClusterOption { - return func(c *ClusterConfig) { c.QuotaBackendBytes = bytes } -} - -func WithSnapshotCount(count int) ClusterOption { - return func(c *ClusterConfig) { c.SnapshotCount = count } -} - -func WithStrictReconfigCheck(strict bool) ClusterOption { - return func(c *ClusterConfig) { c.StrictReconfigCheck = strict } -} diff --git a/tests/framework/e2e/cluster.go b/tests/framework/e2e/cluster.go deleted file mode 100644 index c383ca0120d..00000000000 --- a/tests/framework/e2e/cluster.go +++ /dev/null @@ -1,947 
+0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "errors" - "fmt" - "net/url" - "path" - "path/filepath" - "regexp" - "strings" - "testing" - "time" - - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/pkg/v3/proxy" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/tests/v3/framework/config" -) - -const EtcdProcessBasePort = 20000 - -type ClientConnType int - -const ( - ClientNonTLS ClientConnType = iota - ClientTLS - ClientTLSAndNonTLS -) - -type ClientConfig struct { - ConnectionType ClientConnType - CertAuthority bool - AutoTLS bool - RevokeCerts bool -} - -// allow alphanumerics, underscores and dashes -var testNameCleanRegex = regexp.MustCompile(`[^a-zA-Z0-9 \-_]+`) - -func NewConfigNoTLS() *EtcdProcessClusterConfig { - return DefaultConfig() -} - -func NewConfigAutoTLS() *EtcdProcessClusterConfig { - return NewConfig( - WithIsPeerTLS(true), - WithIsPeerAutoTLS(true), - ) -} - -func NewConfigTLS() *EtcdProcessClusterConfig { - return NewConfig( - WithClientConnType(ClientTLS), - WithIsPeerTLS(true), - ) -} - -func NewConfigClientTLS() *EtcdProcessClusterConfig { - return NewConfig(WithClientConnType(ClientTLS)) -} - -func NewConfigClientAutoTLS() *EtcdProcessClusterConfig { - return NewConfig( - WithClusterSize(1), - WithClientAutoTLS(true), - WithClientConnType(ClientTLS), - ) -} - -func 
NewConfigPeerTLS() *EtcdProcessClusterConfig { - return NewConfig( - WithIsPeerTLS(true), - ) -} - -func NewConfigClientTLSCertAuth() *EtcdProcessClusterConfig { - return NewConfig( - WithClusterSize(1), - WithClientConnType(ClientTLS), - WithClientCertAuthority(true), - ) -} - -func NewConfigClientTLSCertAuthWithNoCN() *EtcdProcessClusterConfig { - return NewConfig( - WithClusterSize(1), - WithClientConnType(ClientTLS), - WithClientCertAuthority(true), - WithCN(false), - ) -} - -func NewConfigJWT() *EtcdProcessClusterConfig { - return NewConfig( - WithClusterSize(1), - WithAuthTokenOpts("jwt,pub-key="+path.Join(FixturesDir, "server.crt")+ - ",priv-key="+path.Join(FixturesDir, "server.key.insecure")+",sign-method=RS256,ttl=1s"), - ) -} - -func ConfigStandalone(cfg EtcdProcessClusterConfig) *EtcdProcessClusterConfig { - ret := cfg - ret.ClusterSize = 1 - return &ret -} - -type EtcdProcessCluster struct { - lg *zap.Logger - Cfg *EtcdProcessClusterConfig - Procs []EtcdProcess - nextSeq int // sequence number of the next etcd process (if it will be required) -} - -type EtcdProcessClusterConfig struct { - Logger *zap.Logger - Version ClusterVersion - // DataDirPath specifies the data-dir for the members. If test cases - // do not specify `DataDirPath`, then e2e framework creates a - // temporary directory for each member; otherwise, it creates a - // subdirectory (e.g. member-0, member-1 and member-2) under the given - // `DataDirPath` for each member. 
- DataDirPath string - KeepDataDir bool - EnvVars map[string]string - - ClusterSize int - - BaseScheme string - BasePort int - - MetricsURLScheme string - - SnapshotCount int // default is 100000 - SnapshotCatchUpEntries int // default is 5000 - - Client ClientConfig - IsPeerTLS bool - IsPeerAutoTLS bool - CN bool - - CipherSuites []string - - ForceNewCluster bool - InitialToken string - QuotaBackendBytes int64 - StrictReconfigCheck bool - EnableV2 bool - InitialCorruptCheck bool - AuthTokenOpts string - V2deprecation string - - RollingStart bool - - Discovery string // v2 discovery - - DiscoveryEndpoints []string // v3 discovery - DiscoveryToken string - LogLevel string - - MaxConcurrentStreams uint32 // default is math.MaxUint32 - CorruptCheckTime time.Duration - CompactHashCheckEnabled bool - CompactHashCheckTime time.Duration - GoFailEnabled bool - CompactionBatchLimit int - - WarningUnaryRequestDuration time.Duration - ExperimentalWarningUnaryRequestDuration time.Duration - PeerProxy bool -} - -func DefaultConfig() *EtcdProcessClusterConfig { - return &EtcdProcessClusterConfig{ - ClusterSize: 3, - InitialToken: "new", - StrictReconfigCheck: true, - CN: true, - } -} - -func NewConfig(opts ...EPClusterOption) *EtcdProcessClusterConfig { - c := DefaultConfig() - for _, opt := range opts { - opt(c) - } - return c -} - -type EPClusterOption func(*EtcdProcessClusterConfig) - -func WithConfig(cfg *EtcdProcessClusterConfig) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { *c = *cfg } -} - -func WithVersion(version ClusterVersion) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.Version = version } -} - -func WithDataDirPath(path string) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.DataDirPath = path } -} - -func WithKeepDataDir(keep bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.KeepDataDir = keep } -} - -func WithSnapshotCount(count int) EPClusterOption { - return func(c 
*EtcdProcessClusterConfig) { c.SnapshotCount = count } -} - -func WithSnapshotCatchUpEntries(count int) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.SnapshotCatchUpEntries = count } -} - -func WithClusterSize(size int) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.ClusterSize = size } -} - -func WithBaseScheme(scheme string) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.BaseScheme = scheme } -} - -func WithBasePort(port int) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.BasePort = port } -} - -func WithClientConnType(clientConnType ClientConnType) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.Client.ConnectionType = clientConnType } -} - -func WithClientCertAuthority(enabled bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.Client.CertAuthority = enabled } -} - -func WithIsPeerTLS(isPeerTLS bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.IsPeerTLS = isPeerTLS } -} - -func WithIsPeerAutoTLS(isPeerAutoTLS bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.IsPeerAutoTLS = isPeerAutoTLS } -} - -func WithClientAutoTLS(isClientAutoTLS bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.Client.AutoTLS = isClientAutoTLS } -} - -func WithClientRevokeCerts(isClientCRL bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.Client.RevokeCerts = isClientCRL } -} - -func WithCN(cn bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.CN = cn } -} - -func WithQuotaBackendBytes(bytes int64) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.QuotaBackendBytes = bytes } -} - -func WithStrictReconfigCheck(strict bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.StrictReconfigCheck = strict } -} - -func WithEnableV2(enable bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.EnableV2 = enable } -} - -func 
WithAuthTokenOpts(token string) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.AuthTokenOpts = token } -} - -func WithRollingStart(rolling bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.RollingStart = rolling } -} - -func WithDiscovery(discovery string) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.Discovery = discovery } -} - -func WithDiscoveryEndpoints(endpoints []string) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.DiscoveryEndpoints = endpoints } -} - -func WithDiscoveryToken(token string) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.DiscoveryToken = token } -} - -func WithLogLevel(level string) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.LogLevel = level } -} - -func WithCorruptCheckTime(time time.Duration) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.CorruptCheckTime = time } -} - -func WithCompactHashCheckEnabled(enabled bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.CompactHashCheckEnabled = enabled } -} - -func WithCompactHashCheckTime(time time.Duration) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.CompactHashCheckTime = time } -} - -func WithGoFailEnabled(enabled bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.GoFailEnabled = enabled } -} - -func WithWarningUnaryRequestDuration(time time.Duration) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.WarningUnaryRequestDuration = time } -} - -// WithExperimentalWarningUnaryRequestDuration sets a value for `-experimental-warning-unary-request-duration`. -// TODO(ahrtr): remove this function when the corresponding experimental flag is decommissioned. 
-func WithExperimentalWarningUnaryRequestDuration(time time.Duration) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.ExperimentalWarningUnaryRequestDuration = time } -} - -func WithCompactionBatchLimit(limit int) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.CompactionBatchLimit = limit } -} - -func WithPeerProxy(enabled bool) EPClusterOption { - return func(c *EtcdProcessClusterConfig) { c.PeerProxy = enabled } -} - -// NewEtcdProcessCluster launches a new cluster from etcd processes, returning -// a new EtcdProcessCluster once all nodes are ready to accept client requests. -func NewEtcdProcessCluster(ctx context.Context, t testing.TB, opts ...EPClusterOption) (*EtcdProcessCluster, error) { - cfg := NewConfig(opts...) - epc, err := InitEtcdProcessCluster(t, cfg) - if err != nil { - return nil, err - } - - return StartEtcdProcessCluster(ctx, epc, cfg) -} - -// InitEtcdProcessCluster initializes a new cluster based on the given config. -// It doesn't start the cluster. -func InitEtcdProcessCluster(t testing.TB, cfg *EtcdProcessClusterConfig) (*EtcdProcessCluster, error) { - SkipInShortMode(t) - - if cfg.Logger == nil { - cfg.Logger = zaptest.NewLogger(t) - } - if cfg.BasePort == 0 { - cfg.BasePort = EtcdProcessBasePort - } - if cfg.SnapshotCount == 0 { - cfg.SnapshotCount = etcdserver.DefaultSnapshotCount - } - - etcdCfgs := cfg.EtcdAllServerProcessConfigs(t) - epc := &EtcdProcessCluster{ - Cfg: cfg, - lg: zaptest.NewLogger(t), - Procs: make([]EtcdProcess, cfg.ClusterSize), - nextSeq: cfg.ClusterSize, - } - - // launch etcd processes - for i := range etcdCfgs { - proc, err := NewEtcdProcess(etcdCfgs[i]) - if err != nil { - epc.Close() - return nil, fmt.Errorf("cannot configure: %v", err) - } - epc.Procs[i] = proc - } - - return epc, nil -} - -// StartEtcdProcessCluster launches a new cluster from etcd processes. 
-func StartEtcdProcessCluster(ctx context.Context, epc *EtcdProcessCluster, cfg *EtcdProcessClusterConfig) (*EtcdProcessCluster, error) { - if cfg.RollingStart { - if err := epc.RollingStart(ctx); err != nil { - return nil, fmt.Errorf("cannot rolling-start: %v", err) - } - } else { - if err := epc.Start(ctx); err != nil { - return nil, fmt.Errorf("cannot start: %v", err) - } - } - - return epc, nil -} - -func (cfg *EtcdProcessClusterConfig) ClientScheme() string { - if cfg.Client.ConnectionType == ClientTLS { - return "https" - } - return "http" -} - -func (cfg *EtcdProcessClusterConfig) PeerScheme() string { - peerScheme := cfg.BaseScheme - if peerScheme == "" { - peerScheme = "http" - } - if cfg.IsPeerTLS { - peerScheme += "s" - } - return peerScheme -} - -func (cfg *EtcdProcessClusterConfig) EtcdAllServerProcessConfigs(tb testing.TB) []*EtcdServerProcessConfig { - etcdCfgs := make([]*EtcdServerProcessConfig, cfg.ClusterSize) - initialCluster := make([]string, cfg.ClusterSize) - - for i := 0; i < cfg.ClusterSize; i++ { - etcdCfgs[i] = cfg.EtcdServerProcessConfig(tb, i) - initialCluster[i] = fmt.Sprintf("%s=%s", etcdCfgs[i].Name, etcdCfgs[i].PeerURL.String()) - } - - for i := range etcdCfgs { - cfg.SetInitialOrDiscovery(etcdCfgs[i], initialCluster, "new") - } - - return etcdCfgs -} - -func (cfg *EtcdProcessClusterConfig) SetInitialOrDiscovery(serverCfg *EtcdServerProcessConfig, initialCluster []string, initialClusterState string) { - if cfg.Discovery == "" && len(cfg.DiscoveryEndpoints) == 0 { - serverCfg.InitialCluster = strings.Join(initialCluster, ",") - serverCfg.Args = append(serverCfg.Args, "--initial-cluster", serverCfg.InitialCluster) - serverCfg.Args = append(serverCfg.Args, "--initial-cluster-state", initialClusterState) - } - - if len(cfg.DiscoveryEndpoints) > 0 { - serverCfg.Args = append(serverCfg.Args, fmt.Sprintf("--discovery-token=%s", cfg.DiscoveryToken)) - serverCfg.Args = append(serverCfg.Args, fmt.Sprintf("--discovery-endpoints=%s", 
strings.Join(cfg.DiscoveryEndpoints, ","))) - } -} - -func (cfg *EtcdProcessClusterConfig) EtcdServerProcessConfig(tb testing.TB, i int) *EtcdServerProcessConfig { - var curls []string - var curl, curltls string - port := cfg.BasePort + 5*i - clientPort := port - peerPort := port + 1 - metricsPort := port + 2 - peer2Port := port + 3 - - curlHost := fmt.Sprintf("localhost:%d", clientPort) - switch cfg.Client.ConnectionType { - case ClientNonTLS, ClientTLS: - curl = (&url.URL{Scheme: cfg.ClientScheme(), Host: curlHost}).String() - curls = []string{curl} - case ClientTLSAndNonTLS: - curl = (&url.URL{Scheme: "http", Host: curlHost}).String() - curltls = (&url.URL{Scheme: "https", Host: curlHost}).String() - curls = []string{curl, curltls} - } - - peerListenUrl := url.URL{Scheme: cfg.PeerScheme(), Host: fmt.Sprintf("localhost:%d", peerPort)} - peerAdvertiseUrl := url.URL{Scheme: cfg.PeerScheme(), Host: fmt.Sprintf("localhost:%d", peerPort)} - var proxyCfg *proxy.ServerConfig - if cfg.PeerProxy { - peerAdvertiseUrl.Host = fmt.Sprintf("localhost:%d", peer2Port) - proxyCfg = &proxy.ServerConfig{ - Logger: zap.NewNop(), - To: peerListenUrl, - From: peerAdvertiseUrl, - } - } - - name := fmt.Sprintf("%s-test-%d", testNameCleanRegex.ReplaceAllString(tb.Name(), ""), i) - - dataDirPath := cfg.DataDirPath - if cfg.DataDirPath == "" { - dataDirPath = tb.TempDir() - } else { - // When test cases specify the DataDirPath and there are more than - // one member in the cluster, we need to create a subdirectory for - // each member to avoid conflict. - // We also create a subdirectory for one-member cluster, because we - // support dynamically adding new member. 
- dataDirPath = filepath.Join(cfg.DataDirPath, fmt.Sprintf("member-%d", i)) - } - - args := []string{ - "--name", name, - "--listen-client-urls", strings.Join(curls, ","), - "--advertise-client-urls", strings.Join(curls, ","), - "--listen-peer-urls", peerListenUrl.String(), - "--initial-advertise-peer-urls", peerAdvertiseUrl.String(), - "--initial-cluster-token", cfg.InitialToken, - "--data-dir", dataDirPath, - "--snapshot-count", fmt.Sprintf("%d", cfg.SnapshotCount), - } - - if cfg.ForceNewCluster { - args = append(args, "--force-new-cluster") - } - if cfg.QuotaBackendBytes > 0 { - args = append(args, - "--quota-backend-bytes", fmt.Sprintf("%d", cfg.QuotaBackendBytes), - ) - } - if !cfg.StrictReconfigCheck { - args = append(args, "--strict-reconfig-check=false") - } - if cfg.EnableV2 { - args = append(args, "--enable-v2") - } - if cfg.InitialCorruptCheck { - args = append(args, "--experimental-initial-corrupt-check") - } - var murl string - if cfg.MetricsURLScheme != "" { - murl = (&url.URL{ - Scheme: cfg.MetricsURLScheme, - Host: fmt.Sprintf("localhost:%d", metricsPort), - }).String() - args = append(args, "--listen-metrics-urls", murl) - } - - args = append(args, cfg.TlsArgs()...) 
- - if cfg.AuthTokenOpts != "" { - args = append(args, "--auth-token", cfg.AuthTokenOpts) - } - - if cfg.V2deprecation != "" { - args = append(args, "--v2-deprecation", cfg.V2deprecation) - } - - if cfg.Discovery != "" { - args = append(args, "--discovery", cfg.Discovery) - } - - if cfg.LogLevel != "" { - args = append(args, "--log-level", cfg.LogLevel) - } - - if cfg.MaxConcurrentStreams != 0 { - args = append(args, "--max-concurrent-streams", fmt.Sprintf("%d", cfg.MaxConcurrentStreams)) - } - - if cfg.CorruptCheckTime != 0 { - args = append(args, "--experimental-corrupt-check-time", fmt.Sprintf("%s", cfg.CorruptCheckTime)) - } - if cfg.CompactHashCheckEnabled { - args = append(args, "--experimental-compact-hash-check-enabled") - } - if cfg.CompactHashCheckTime != 0 { - args = append(args, "--experimental-compact-hash-check-time", cfg.CompactHashCheckTime.String()) - } - if cfg.CompactionBatchLimit != 0 { - args = append(args, "--experimental-compaction-batch-limit", fmt.Sprintf("%d", cfg.CompactionBatchLimit)) - } - if cfg.WarningUnaryRequestDuration != 0 { - args = append(args, "--warning-unary-request-duration", cfg.WarningUnaryRequestDuration.String()) - } - if cfg.ExperimentalWarningUnaryRequestDuration != 0 { - args = append(args, "--experimental-warning-unary-request-duration", cfg.ExperimentalWarningUnaryRequestDuration.String()) - } - if cfg.SnapshotCatchUpEntries > 0 { - args = append(args, "--experimental-snapshot-catchup-entries", fmt.Sprintf("%d", cfg.SnapshotCatchUpEntries)) - } - envVars := map[string]string{} - for key, value := range cfg.EnvVars { - envVars[key] = value - } - var gofailPort int - if cfg.GoFailEnabled { - gofailPort = (i+1)*10000 + 2381 - envVars["GOFAIL_HTTP"] = fmt.Sprintf("127.0.0.1:%d", gofailPort) - } - - var execPath string - switch cfg.Version { - case CurrentVersion: - execPath = BinPath.Etcd - case MinorityLastVersion: - if i <= cfg.ClusterSize/2 { - execPath = BinPath.Etcd - } else { - execPath = BinPath.EtcdLastRelease - 
} - case QuorumLastVersion: - if i <= cfg.ClusterSize/2 { - execPath = BinPath.EtcdLastRelease - } else { - execPath = BinPath.Etcd - } - case LastVersion: - execPath = BinPath.EtcdLastRelease - default: - panic(fmt.Sprintf("Unknown cluster version %v", cfg.Version)) - } - - return &EtcdServerProcessConfig{ - lg: cfg.Logger, - ExecPath: execPath, - Args: args, - EnvVars: envVars, - TlsArgs: cfg.TlsArgs(), - Client: cfg.Client, - DataDirPath: dataDirPath, - KeepDataDir: cfg.KeepDataDir, - Name: name, - PeerURL: peerAdvertiseUrl, - ClientURL: curl, - MetricsURL: murl, - InitialToken: cfg.InitialToken, - GoFailPort: gofailPort, - Proxy: proxyCfg, - } -} - -func (cfg *EtcdProcessClusterConfig) TlsArgs() (args []string) { - if cfg.Client.ConnectionType != ClientNonTLS { - if cfg.Client.AutoTLS { - args = append(args, "--auto-tls") - } else { - tlsClientArgs := []string{ - "--cert-file", CertPath, - "--key-file", PrivateKeyPath, - "--trusted-ca-file", CaPath, - } - args = append(args, tlsClientArgs...) - - if cfg.Client.CertAuthority { - args = append(args, "--client-cert-auth") - } - } - } - - if cfg.IsPeerTLS { - if cfg.IsPeerAutoTLS { - args = append(args, "--peer-auto-tls") - } else { - tlsPeerArgs := []string{ - "--peer-cert-file", CertPath, - "--peer-key-file", PrivateKeyPath, - "--peer-trusted-ca-file", CaPath, - } - args = append(args, tlsPeerArgs...) 
- } - } - - if cfg.Client.RevokeCerts { - args = append(args, "--client-crl-file", CrlPath, "--client-cert-auth") - } - - if len(cfg.CipherSuites) > 0 { - args = append(args, "--cipher-suites", strings.Join(cfg.CipherSuites, ",")) - } - - return args -} - -func (epc *EtcdProcessCluster) EndpointsV2() []string { - return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV2() }) -} - -func (epc *EtcdProcessCluster) EndpointsV3() []string { - return epc.Endpoints(func(ep EtcdProcess) []string { return ep.EndpointsV3() }) -} - -func (epc *EtcdProcessCluster) Endpoints(f func(ep EtcdProcess) []string) (ret []string) { - for _, p := range epc.Procs { - ret = append(ret, f(p)...) - } - return ret -} - -func (epc *EtcdProcessCluster) CloseProc(ctx context.Context, finder func(EtcdProcess) bool, opts ...config.ClientOption) error { - procIndex := -1 - if finder != nil { - for i := range epc.Procs { - if finder(epc.Procs[i]) { - procIndex = i - break - } - } - } else { - procIndex = len(epc.Procs) - 1 - } - - if procIndex == -1 { - return fmt.Errorf("no process found to stop") - } - - proc := epc.Procs[procIndex] - epc.Procs = append(epc.Procs[:procIndex], epc.Procs[procIndex+1:]...) - - if proc == nil { - return nil - } - - // First remove member from the cluster - - memberCtl := epc.Client(opts...) 
- memberList, err := memberCtl.MemberList(ctx) - if err != nil { - return fmt.Errorf("failed to get member list: %w", err) - } - - memberID, err := findMemberIDByEndpoint(memberList.Members, proc.Config().ClientURL) - if err != nil { - return fmt.Errorf("failed to find member ID: %w", err) - } - - memberRemoved := false - for i := 0; i < 10; i++ { - _, err := memberCtl.MemberRemove(ctx, memberID) - if err != nil && strings.Contains(err.Error(), "member not found") { - memberRemoved = true - break - } - - time.Sleep(500 * time.Millisecond) - } - - if !memberRemoved { - return errors.New("failed to remove member after 10 tries") - } - - epc.lg.Info("successfully removed member", zap.String("acurl", proc.Config().ClientURL)) - - // Then stop process - return proc.Close() -} - -func (epc *EtcdProcessCluster) StartNewProc(ctx context.Context, cfg *EtcdProcessClusterConfig, tb testing.TB, opts ...config.ClientOption) error { - var serverCfg *EtcdServerProcessConfig - if cfg != nil { - serverCfg = cfg.EtcdServerProcessConfig(tb, epc.nextSeq) - } else { - serverCfg = epc.Cfg.EtcdServerProcessConfig(tb, epc.nextSeq) - } - - epc.nextSeq++ - - initialCluster := []string{ - fmt.Sprintf("%s=%s", serverCfg.Name, serverCfg.PeerURL.String()), - } - for _, p := range epc.Procs { - initialCluster = append(initialCluster, fmt.Sprintf("%s=%s", p.Config().Name, p.Config().PeerURL.String())) - } - - epc.Cfg.SetInitialOrDiscovery(serverCfg, initialCluster, "existing") - - // First add new member to cluster - memberCtl := epc.Client(opts...) 
- _, err := memberCtl.MemberAdd(ctx, serverCfg.Name, []string{serverCfg.PeerURL.String()}) - if err != nil { - return fmt.Errorf("failed to add new member: %w", err) - } - - // Then start process - proc, err := NewEtcdProcess(serverCfg) - if err != nil { - epc.Close() - return fmt.Errorf("cannot configure: %v", err) - } - - epc.Procs = append(epc.Procs, proc) - - return proc.Start(ctx) -} - -func (epc *EtcdProcessCluster) Start(ctx context.Context) error { - return epc.start(func(ep EtcdProcess) error { return ep.Start(ctx) }) -} - -func (epc *EtcdProcessCluster) RollingStart(ctx context.Context) error { - return epc.rollingStart(func(ep EtcdProcess) error { return ep.Start(ctx) }) -} - -func (epc *EtcdProcessCluster) Restart(ctx context.Context) error { - return epc.start(func(ep EtcdProcess) error { return ep.Restart(ctx) }) -} - -func (epc *EtcdProcessCluster) start(f func(ep EtcdProcess) error) error { - readyC := make(chan error, len(epc.Procs)) - for i := range epc.Procs { - go func(n int) { readyC <- f(epc.Procs[n]) }(i) - } - for range epc.Procs { - if err := <-readyC; err != nil { - epc.Close() - return err - } - } - return nil -} - -func (epc *EtcdProcessCluster) rollingStart(f func(ep EtcdProcess) error) error { - readyC := make(chan error, len(epc.Procs)) - for i := range epc.Procs { - go func(n int) { readyC <- f(epc.Procs[n]) }(i) - // make sure the servers do not start at the same time - time.Sleep(time.Second) - } - for range epc.Procs { - if err := <-readyC; err != nil { - epc.Close() - return err - } - } - return nil -} - -func (epc *EtcdProcessCluster) Stop() (err error) { - errCh := make(chan error, len(epc.Procs)) - for i := range epc.Procs { - if epc.Procs[i] == nil { - errCh <- nil - continue - } - go func(n int) { errCh <- epc.Procs[n].Stop() }(i) - } - - for range epc.Procs { - if curErr := <-errCh; curErr != nil { - if err != nil { - err = fmt.Errorf("%v; %v", err, curErr) - } else { - err = curErr - } - } - } - close(errCh) - return err 
-} - -func (epc *EtcdProcessCluster) Client(opts ...config.ClientOption) *EtcdctlV3 { - etcdctl, err := NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3(), opts...) - if err != nil { - panic(err) - } - return etcdctl -} - -func (epc *EtcdProcessCluster) Close() error { - epc.lg.Info("closing test cluster...") - err := epc.Stop() - for _, p := range epc.Procs { - // p is nil when NewEtcdProcess fails in the middle - // Close still gets called to clean up test data - if p == nil { - continue - } - if cerr := p.Close(); cerr != nil { - err = cerr - } - } - epc.lg.Info("closed test cluster.") - return err -} - -func findMemberIDByEndpoint(members []*etcdserverpb.Member, endpoint string) (uint64, error) { - for _, m := range members { - if m.ClientURLs[0] == endpoint { - return m.ID, nil - } - } - - return 0, fmt.Errorf("member not found") -} - -// WaitLeader returns index of the member in c.Members() that is leader -// or fails the test (if not established in 30s). -func (epc *EtcdProcessCluster) WaitLeader(t testing.TB) int { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - return epc.WaitMembersForLeader(ctx, t, epc.Procs) -} - -// WaitMembersForLeader waits until given members agree on the same leader, -// and returns its 'index' in the 'membs' list -func (epc *EtcdProcessCluster) WaitMembersForLeader(ctx context.Context, t testing.TB, membs []EtcdProcess) int { - cc := epc.Client() - - // ensure leader is up via linearizable get - for { - select { - case <-ctx.Done(): - t.Fatal("WaitMembersForLeader timeout") - default: - } - _, err := cc.Get(ctx, "0", config.GetOptions{Timeout: 10*config.TickDuration + time.Second}) - if err == nil || strings.Contains(err.Error(), "Key not found") { - break - } - } - - leaders := make(map[uint64]struct{}) - members := make(map[uint64]int) - for { - select { - case <-ctx.Done(): - t.Fatal("WaitMembersForLeader timeout") - default: - } - for i := range membs { - resp, err := 
membs[i].Client().Status(ctx) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - // if member[i] has stopped - continue - } else { - t.Fatal(err) - } - } - members[resp[0].Header.MemberId] = i - leaders[resp[0].Leader] = struct{}{} - } - // members agree on the same leader - if len(leaders) == 1 { - break - } - leaders = make(map[uint64]struct{}) - members = make(map[uint64]int) - time.Sleep(10 * config.TickDuration) - } - for l := range leaders { - if index, ok := members[l]; ok { - t.Logf("members agree on a leader, members:%v , leader:%v", members, l) - return index - } - t.Fatalf("members agree on a leader which is not one of members, members:%v , leader:%v", members, l) - } - t.Fatal("impossible path of execution") - return -1 -} diff --git a/tests/framework/e2e/cluster_direct.go b/tests/framework/e2e/cluster_direct.go deleted file mode 100644 index ac659bd6bcc..00000000000 --- a/tests/framework/e2e/cluster_direct.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !cluster_proxy - -package e2e - -func NewEtcdProcess(cfg *EtcdServerProcessConfig) (EtcdProcess, error) { - return NewEtcdServerProcess(cfg) -} diff --git a/tests/framework/e2e/cluster_proxy.go b/tests/framework/e2e/cluster_proxy.go deleted file mode 100644 index bc6c4593cdf..00000000000 --- a/tests/framework/e2e/cluster_proxy.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build cluster_proxy - -package e2e - -import ( - "context" - "fmt" - "net" - "net/url" - "path" - "strconv" - "strings" - - "go.uber.org/zap" - - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/pkg/v3/proxy" - "go.etcd.io/etcd/tests/v3/framework/config" -) - -type proxyEtcdProcess struct { - etcdProc EtcdProcess - // TODO(ahrtr): We need to remove `proxyV2` and v2discovery when the v2client is removed. 
- proxyV2 *proxyV2Proc - proxyV3 *proxyV3Proc -} - -func NewEtcdProcess(cfg *EtcdServerProcessConfig) (EtcdProcess, error) { - return NewProxyEtcdProcess(cfg) -} - -func NewProxyEtcdProcess(cfg *EtcdServerProcessConfig) (*proxyEtcdProcess, error) { - ep, err := NewEtcdServerProcess(cfg) - if err != nil { - return nil, err - } - pep := &proxyEtcdProcess{ - etcdProc: ep, - proxyV2: newProxyV2Proc(cfg), - proxyV3: newProxyV3Proc(cfg), - } - return pep, nil -} - -func (p *proxyEtcdProcess) Config() *EtcdServerProcessConfig { return p.etcdProc.Config() } - -func (p *proxyEtcdProcess) EndpointsV2() []string { return p.proxyV2.endpoints() } -func (p *proxyEtcdProcess) EndpointsV3() []string { return p.proxyV3.endpoints() } -func (p *proxyEtcdProcess) EndpointsMetrics() []string { - panic("not implemented; proxy doesn't provide health information") -} - -func (p *proxyEtcdProcess) Start(ctx context.Context) error { - if err := p.etcdProc.Start(ctx); err != nil { - return err - } - return p.proxyV3.Start(ctx) -} - -func (p *proxyEtcdProcess) Restart(ctx context.Context) error { - if err := p.etcdProc.Restart(ctx); err != nil { - return err - } - return p.proxyV3.Restart(ctx) -} - -func (p *proxyEtcdProcess) Stop() error { - err := p.proxyV3.Stop() - if eerr := p.etcdProc.Stop(); eerr != nil && err == nil { - // fails on go-grpc issue #1384 - if !strings.Contains(eerr.Error(), "exit status 2") { - err = eerr - } - } - return err -} - -func (p *proxyEtcdProcess) Close() error { - err := p.proxyV3.Close() - if eerr := p.etcdProc.Close(); eerr != nil && err == nil { - // fails on go-grpc issue #1384 - if !strings.Contains(eerr.Error(), "exit status 2") { - err = eerr - } - } - return err -} - -func (p *proxyEtcdProcess) Client(opts ...config.ClientOption) *EtcdctlV3 { - etcdctl, err := NewEtcdctl(p.etcdProc.Config().Client, p.etcdProc.EndpointsV3(), opts...) 
- if err != nil { - panic(err) - } - return etcdctl -} - -func (p *proxyEtcdProcess) Logs() LogsExpect { - return p.etcdProc.Logs() -} - -func (p *proxyEtcdProcess) Kill() error { - return p.etcdProc.Kill() -} - -func (p *proxyEtcdProcess) IsRunning() bool { - return p.etcdProc.IsRunning() -} - -func (p *proxyEtcdProcess) Wait(ctx context.Context) error { - return p.etcdProc.Wait(ctx) -} - -func (p *proxyEtcdProcess) PeerProxy() proxy.Server { - return nil -} - -func (p *proxyEtcdProcess) Failpoints() *BinaryFailpoints { - return p.etcdProc.Failpoints() -} - -type proxyProc struct { - lg *zap.Logger - name string - execPath string - args []string - ep string - murl string - donec chan struct{} - - proc *expect.ExpectProcess -} - -func (pp *proxyProc) endpoints() []string { return []string{pp.ep} } - -func (pp *proxyProc) start() error { - if pp.proc != nil { - panic("already started") - } - proc, err := SpawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...), nil, pp.name) - if err != nil { - return err - } - pp.proc = proc - return nil -} - -func (pp *proxyProc) waitReady(ctx context.Context, readyStr string) error { - defer close(pp.donec) - return WaitReadyExpectProc(ctx, pp.proc, []string{readyStr}) -} - -func (pp *proxyProc) Stop() error { - if pp.proc == nil { - return nil - } - err := pp.proc.Stop() - if err != nil { - return err - } - - err = pp.proc.Close() - if err != nil { - // proxy received SIGTERM signal - if !(strings.Contains(err.Error(), "unexpected exit code") || - // v2proxy exits with status 1 on auto tls; not sure why - strings.Contains(err.Error(), "exit status 1")) { - - return err - } - } - pp.proc = nil - <-pp.donec - pp.donec = make(chan struct{}) - return nil -} - -func (pp *proxyProc) Close() error { return pp.Stop() } - -type proxyV2Proc struct { - proxyProc - dataDir string -} - -func proxyListenURL(cfg *EtcdServerProcessConfig, portOffset int) string { - u, err := url.Parse(cfg.ClientURL) - if err != nil { - panic(err) - } 
- host, port, _ := net.SplitHostPort(u.Host) - p, _ := strconv.ParseInt(port, 10, 16) - u.Host = fmt.Sprintf("%s:%d", host, int(p)+portOffset) - return u.String() -} - -func newProxyV2Proc(cfg *EtcdServerProcessConfig) *proxyV2Proc { - listenAddr := proxyListenURL(cfg, 2) - name := fmt.Sprintf("testname-proxy-%p", cfg) - dataDir := path.Join(cfg.DataDirPath, name+".etcd") - args := []string{ - "--name", name, - "--proxy", "on", - "--listen-client-urls", listenAddr, - "--initial-cluster", cfg.Name + "=" + cfg.PeerURL.String(), - "--data-dir", dataDir, - } - return &proxyV2Proc{ - proxyProc: proxyProc{ - name: cfg.Name, - lg: cfg.lg, - execPath: cfg.ExecPath, - args: append(args, cfg.TlsArgs...), - ep: listenAddr, - donec: make(chan struct{}), - }, - dataDir: dataDir, - } -} - -type proxyV3Proc struct { - proxyProc -} - -func newProxyV3Proc(cfg *EtcdServerProcessConfig) *proxyV3Proc { - listenAddr := proxyListenURL(cfg, 3) - args := []string{ - "grpc-proxy", - "start", - "--listen-addr", strings.Split(listenAddr, "/")[2], - "--endpoints", cfg.ClientURL, - // pass-through member RPCs - "--advertise-client-url", "", - "--data-dir", cfg.DataDirPath, - } - murl := "" - if cfg.MetricsURL != "" { - murl = proxyListenURL(cfg, 4) - args = append(args, "--metrics-addr", murl) - } - tlsArgs := []string{} - for i := 0; i < len(cfg.TlsArgs); i++ { - switch cfg.TlsArgs[i] { - case "--cert-file": - tlsArgs = append(tlsArgs, "--cert-file", cfg.TlsArgs[i+1]) - i++ - case "--key-file": - tlsArgs = append(tlsArgs, "--key-file", cfg.TlsArgs[i+1]) - i++ - case "--trusted-ca-file": - tlsArgs = append(tlsArgs, "--trusted-ca-file", cfg.TlsArgs[i+1]) - i++ - case "--auto-tls": - tlsArgs = append(tlsArgs, "--auto-tls", "--insecure-skip-tls-verify") - case "--peer-trusted-ca-file", "--peer-cert-file", "--peer-key-file": - i++ // skip arg - case "--client-cert-auth", "--peer-auto-tls": - default: - tlsArgs = append(tlsArgs, cfg.TlsArgs[i]) - } - } - if len(cfg.TlsArgs) > 0 { - // Configure 
certificates for connection proxy ---> server. - // This certificate must NOT have CN set. - tlsArgs = append(tlsArgs, - "--cert", path.Join(FixturesDir, "client-nocn.crt"), - "--key", path.Join(FixturesDir, "client-nocn.key.insecure"), - "--cacert", path.Join(FixturesDir, "ca.crt"), - "--client-crl-file", path.Join(FixturesDir, "revoke.crl")) - } - - return &proxyV3Proc{ - proxyProc{ - name: cfg.Name, - lg: cfg.lg, - execPath: cfg.ExecPath, - args: append(args, tlsArgs...), - ep: listenAddr, - murl: murl, - donec: make(chan struct{}), - }, - } -} - -func (v3p *proxyV3Proc) Restart(ctx context.Context) error { - if err := v3p.Stop(); err != nil { - return err - } - return v3p.Start(ctx) -} - -func (v3p *proxyV3Proc) Start(ctx context.Context) error { - if err := v3p.start(); err != nil { - return err - } - return v3p.waitReady(ctx, "started gRPC proxy") -} diff --git a/tests/framework/e2e/config.go b/tests/framework/e2e/config.go deleted file mode 100644 index acc1d82e048..00000000000 --- a/tests/framework/e2e/config.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -type ClusterVersion string - -const ( - CurrentVersion ClusterVersion = "" - MinorityLastVersion ClusterVersion = "minority-last-version" - QuorumLastVersion ClusterVersion = "quorum-last-version" - LastVersion ClusterVersion = "last-version" -) - -type ClusterContext struct { - Version ClusterVersion -} diff --git a/tests/framework/e2e/curl.go b/tests/framework/e2e/curl.go deleted file mode 100644 index 20bf111eebf..00000000000 --- a/tests/framework/e2e/curl.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "context" - "fmt" - "math/rand" - "strings" - "time" -) - -type CURLReq struct { - Username string - Password string - - IsTLS bool - Timeout int - - Endpoint string - - Value string - Expected string - Header string - - MetricsURLScheme string - - Ciphers string -} - -func (r CURLReq) timeoutDuration() time.Duration { - if r.Timeout != 0 { - return time.Duration(r.Timeout) * time.Second - } - - // assume a sane default to finish a curl request - return 5 * time.Second -} - -// CURLPrefixArgs builds the beginning of a curl command for a given key -// addressed to a random URL in the given cluster. 
-func CURLPrefixArgs(cfg *EtcdProcessClusterConfig, member EtcdProcess, method string, req CURLReq) []string { - var ( - cmdArgs = []string{"curl"} - acurl = member.Config().ClientURL - ) - if req.MetricsURLScheme != "https" { - if req.IsTLS { - if cfg.Client.ConnectionType != ClientTLSAndNonTLS { - panic("should not use cURLPrefixArgsUseTLS when serving only TLS or non-TLS") - } - cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath) - acurl = ToTLS(member.Config().ClientURL) - } else if cfg.Client.ConnectionType == ClientTLS { - if cfg.CN { - cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath, "--key", PrivateKeyPath) - } else { - cmdArgs = append(cmdArgs, "--cacert", CaPath, "--cert", CertPath3, "--key", PrivateKeyPath3) - } - } - } - if req.MetricsURLScheme != "" { - acurl = member.EndpointsMetrics()[0] - } - ep := acurl + req.Endpoint - - if req.Username != "" || req.Password != "" { - cmdArgs = append(cmdArgs, "-L", "-u", fmt.Sprintf("%s:%s", req.Username, req.Password), ep) - } else { - cmdArgs = append(cmdArgs, "-L", ep) - } - if req.Timeout != 0 { - cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.Timeout)) - } - - if req.Header != "" { - cmdArgs = append(cmdArgs, "-H", req.Header) - } - - if req.Ciphers != "" { - cmdArgs = append(cmdArgs, "--ciphers", req.Ciphers) - } - - switch method { - case "POST", "PUT": - dt := req.Value - if !strings.HasPrefix(dt, "{") { // for non-JSON value - dt = "value=" + dt - } - cmdArgs = append(cmdArgs, "-X", method, "-d", dt) - } - return cmdArgs -} - -func CURLPost(clus *EtcdProcessCluster, req CURLReq) error { - ctx, cancel := context.WithTimeout(context.Background(), req.timeoutDuration()) - defer cancel() - return SpawnWithExpectsContext(ctx, CURLPrefixArgs(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "POST", req), nil, req.Expected) -} - -func CURLPut(clus *EtcdProcessCluster, req CURLReq) error { - ctx, cancel := 
context.WithTimeout(context.Background(), req.timeoutDuration()) - defer cancel() - return SpawnWithExpectsContext(ctx, CURLPrefixArgs(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "PUT", req), nil, req.Expected) -} - -func CURLGet(clus *EtcdProcessCluster, req CURLReq) error { - ctx, cancel := context.WithTimeout(context.Background(), req.timeoutDuration()) - defer cancel() - - return SpawnWithExpectsContext(ctx, CURLPrefixArgs(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "GET", req), nil, req.Expected) -} diff --git a/tests/framework/e2e/e2e.go b/tests/framework/e2e/e2e.go deleted file mode 100644 index 1c3b1830335..00000000000 --- a/tests/framework/e2e/e2e.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "os" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/tests/v3/framework/config" - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" -) - -type e2eRunner struct{} - -func NewE2eRunner() intf.TestRunner { - return &e2eRunner{} -} - -func (e e2eRunner) TestMain(m *testing.M) { - InitFlags() - v := m.Run() - if v == 0 && testutil.CheckLeakedGoroutine() { - os.Exit(1) - } - os.Exit(v) -} - -func (e e2eRunner) BeforeTest(t testing.TB) { - BeforeTest(t) -} - -func (e e2eRunner) NewCluster(ctx context.Context, t testing.TB, opts ...config.ClusterOption) intf.Cluster { - cfg := config.NewClusterConfig(opts...) - e2eConfig := NewConfig( - WithClusterSize(cfg.ClusterSize), - WithQuotaBackendBytes(cfg.QuotaBackendBytes), - WithStrictReconfigCheck(cfg.StrictReconfigCheck), - WithAuthTokenOpts(cfg.AuthToken), - WithSnapshotCount(cfg.SnapshotCount), - ) - - if cfg.ClusterContext != nil { - e2eClusterCtx := cfg.ClusterContext.(*ClusterContext) - e2eConfig.Version = e2eClusterCtx.Version - } - - switch cfg.ClientTLS { - case config.NoTLS: - e2eConfig.Client.ConnectionType = ClientNonTLS - case config.AutoTLS: - e2eConfig.Client.AutoTLS = true - e2eConfig.Client.ConnectionType = ClientTLS - case config.ManualTLS: - e2eConfig.Client.AutoTLS = false - e2eConfig.Client.ConnectionType = ClientTLS - default: - t.Fatalf("ClientTLS config %q not supported", cfg.ClientTLS) - } - switch cfg.PeerTLS { - case config.NoTLS: - e2eConfig.IsPeerTLS = false - e2eConfig.IsPeerAutoTLS = false - case config.AutoTLS: - e2eConfig.IsPeerTLS = true - e2eConfig.IsPeerAutoTLS = true - case config.ManualTLS: - e2eConfig.IsPeerTLS = true - e2eConfig.IsPeerAutoTLS = false - default: - t.Fatalf("PeerTLS config %q not supported", cfg.PeerTLS) - } - epc, err := NewEtcdProcessCluster(ctx, t, WithConfig(e2eConfig)) - if err != nil { - t.Fatalf("could not start etcd integrationCluster: %s", err) - } - return &e2eCluster{t, *epc} -} - -type 
e2eCluster struct { - t testing.TB - EtcdProcessCluster -} - -func (c *e2eCluster) Client(opts ...config.ClientOption) (intf.Client, error) { - etcdctl, err := NewEtcdctl(c.Cfg.Client, c.EndpointsV3(), opts...) - return e2eClient{etcdctl}, err -} - -func (c *e2eCluster) Endpoints() []string { - return c.EndpointsV3() -} - -func (c *e2eCluster) Members() (ms []intf.Member) { - for _, proc := range c.EtcdProcessCluster.Procs { - ms = append(ms, e2eMember{EtcdProcess: proc, Cfg: c.Cfg}) - } - return ms -} - -type e2eClient struct { - *EtcdctlV3 -} - -type e2eMember struct { - EtcdProcess - Cfg *EtcdProcessClusterConfig -} - -func (m e2eMember) Client() intf.Client { - etcdctl, err := NewEtcdctl(m.Cfg.Client, m.EndpointsV3()) - if err != nil { - panic(err) - } - return e2eClient{etcdctl} -} - -func (m e2eMember) Start(ctx context.Context) error { - return m.EtcdProcess.Start(ctx) -} - -func (m e2eMember) Stop() { - m.EtcdProcess.Stop() -} diff --git a/tests/framework/e2e/e2e_test.go b/tests/framework/e2e/e2e_test.go deleted file mode 100644 index 00059df81ba..00000000000 --- a/tests/framework/e2e/e2e_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "encoding/json" - "testing" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -func Test_AddTxnResponse(t *testing.T) { - jsonData := `{"header":{"cluster_id":238453183653593855,"member_id":14578408409545168728,"revision":3,"raft_term":2},"succeeded":true,"responses":[{"Response":{"response_range":{"header":{"revision":3},"kvs":[{"key":"a2V5MQ==","create_revision":2,"mod_revision":2,"version":1,"value":"dmFsdWUx"}],"count":1}}},{"Response":{"response_range":{"header":{"revision":3},"kvs":[{"key":"a2V5Mg==","create_revision":3,"mod_revision":3,"version":1,"value":"dmFsdWUy"}],"count":1}}}]}` - var resp clientv3.TxnResponse - AddTxnResponse(&resp, jsonData) - err := json.Unmarshal([]byte(jsonData), &resp) - if err != nil { - t.Errorf("json Unmarshal failed. err: %s", err) - } - enc, err := json.Marshal(resp) - if err != nil { - t.Errorf("json Marshal failed. err: %s", err) - } - if string(enc) != jsonData { - t.Error("could not get original message after encoding") - } -} diff --git a/tests/framework/e2e/etcd_process.go b/tests/framework/e2e/etcd_process.go deleted file mode 100644 index 2703d777749..00000000000 --- a/tests/framework/e2e/etcd_process.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "net/url" - "os" - "strings" - "syscall" - "testing" - "time" - - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/expect" - "go.etcd.io/etcd/pkg/v3/proxy" - "go.etcd.io/etcd/tests/v3/framework/config" -) - -var ( - EtcdServerReadyLines = []string{"ready to serve client requests"} -) - -// EtcdProcess is a process that serves etcd requests. -type EtcdProcess interface { - EndpointsV2() []string - EndpointsV3() []string - EndpointsMetrics() []string - Client(opts ...config.ClientOption) *EtcdctlV3 - - IsRunning() bool - Wait(ctx context.Context) error - Start(ctx context.Context) error - Restart(ctx context.Context) error - Stop() error - Close() error - Config() *EtcdServerProcessConfig - PeerProxy() proxy.Server - Failpoints() *BinaryFailpoints - Logs() LogsExpect - Kill() error -} - -type LogsExpect interface { - ExpectWithContext(context.Context, string) (string, error) - Lines() []string - LineCount() int -} - -type EtcdServerProcess struct { - cfg *EtcdServerProcessConfig - proc *expect.ExpectProcess - proxy proxy.Server - failpoints *BinaryFailpoints - donec chan struct{} // closed when Interact() terminates -} - -type EtcdServerProcessConfig struct { - lg *zap.Logger - ExecPath string - Args []string - TlsArgs []string - EnvVars map[string]string - - Client ClientConfig - DataDirPath string - KeepDataDir bool - - Name string - - PeerURL url.URL - ClientURL string - MetricsURL string - - InitialToken string - InitialCluster string - GoFailPort int - - Proxy *proxy.ServerConfig -} - -func NewEtcdServerProcess(cfg *EtcdServerProcessConfig) (*EtcdServerProcess, error) { - if !fileutil.Exist(cfg.ExecPath) { - return nil, fmt.Errorf("could not find etcd binary: %s", cfg.ExecPath) - } - if !cfg.KeepDataDir { - if err := os.RemoveAll(cfg.DataDirPath); err != nil { - return nil, err - } - } - ep := &EtcdServerProcess{cfg: cfg, donec: make(chan 
struct{})} - if cfg.GoFailPort != 0 { - ep.failpoints = &BinaryFailpoints{member: ep} - } - return ep, nil -} - -func (ep *EtcdServerProcess) EndpointsV2() []string { return []string{ep.cfg.ClientURL} } -func (ep *EtcdServerProcess) EndpointsV3() []string { return ep.EndpointsV2() } -func (ep *EtcdServerProcess) EndpointsMetrics() []string { return []string{ep.cfg.MetricsURL} } - -func (epc *EtcdServerProcess) Client(opts ...config.ClientOption) *EtcdctlV3 { - etcdctl, err := NewEtcdctl(epc.Config().Client, epc.EndpointsV3(), opts...) - if err != nil { - panic(err) - } - return etcdctl -} - -func (ep *EtcdServerProcess) Start(ctx context.Context) error { - ep.donec = make(chan struct{}) - if ep.proc != nil { - panic("already started") - } - if ep.cfg.Proxy != nil && ep.proxy == nil { - ep.cfg.lg.Info("starting proxy...", zap.String("name", ep.cfg.Name), zap.String("from", ep.cfg.Proxy.From.String()), zap.String("to", ep.cfg.Proxy.To.String())) - ep.proxy = proxy.NewServer(*ep.cfg.Proxy) - select { - case <-ep.proxy.Ready(): - case err := <-ep.proxy.Error(): - return err - } - } - ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.Name)) - proc, err := SpawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.ExecPath}, ep.cfg.Args...), ep.cfg.EnvVars, ep.cfg.Name) - if err != nil { - return err - } - ep.proc = proc - err = ep.waitReady(ctx) - if err == nil { - ep.cfg.lg.Info("started server.", zap.String("name", ep.cfg.Name), zap.Int("pid", ep.proc.Pid())) - } - return err -} - -func (ep *EtcdServerProcess) Restart(ctx context.Context) error { - ep.cfg.lg.Info("restarting server...", zap.String("name", ep.cfg.Name)) - if err := ep.Stop(); err != nil { - return err - } - err := ep.Start(ctx) - if err == nil { - ep.cfg.lg.Info("restarted server", zap.String("name", ep.cfg.Name)) - } - return err -} - -func (ep *EtcdServerProcess) Stop() (err error) { - ep.cfg.lg.Info("stopping server...", zap.String("name", ep.cfg.Name)) - if ep == nil || ep.proc == nil { - 
return nil - } - defer func() { - ep.proc = nil - }() - - err = ep.proc.Stop() - if err != nil { - return err - } - err = ep.proc.Close() - if err != nil && !strings.Contains(err.Error(), "unexpected exit code") { - return err - } - <-ep.donec - ep.donec = make(chan struct{}) - if ep.cfg.PeerURL.Scheme == "unix" || ep.cfg.PeerURL.Scheme == "unixs" { - err = os.Remove(ep.cfg.PeerURL.Host + ep.cfg.PeerURL.Path) - if err != nil && !os.IsNotExist(err) { - return err - } - } - ep.cfg.lg.Info("stopped server.", zap.String("name", ep.cfg.Name)) - if ep.proxy != nil { - ep.cfg.lg.Info("stopping proxy...", zap.String("name", ep.cfg.Name)) - err := ep.proxy.Close() - ep.proxy = nil - if err != nil { - return err - } - } - return nil -} - -func (ep *EtcdServerProcess) Close() error { - ep.cfg.lg.Info("closing server...", zap.String("name", ep.cfg.Name)) - if err := ep.Stop(); err != nil { - return err - } - - if !ep.cfg.KeepDataDir { - ep.cfg.lg.Info("removing directory", zap.String("data-dir", ep.cfg.DataDirPath)) - return os.RemoveAll(ep.cfg.DataDirPath) - } - return nil -} - -func (ep *EtcdServerProcess) waitReady(ctx context.Context) error { - defer close(ep.donec) - return WaitReadyExpectProc(ctx, ep.proc, EtcdServerReadyLines) -} - -func (ep *EtcdServerProcess) Config() *EtcdServerProcessConfig { return ep.cfg } - -func (ep *EtcdServerProcess) Logs() LogsExpect { - if ep.proc == nil { - ep.cfg.lg.Panic("Please grab logs before process is stopped") - } - return ep.proc -} - -func (ep *EtcdServerProcess) Kill() error { - ep.cfg.lg.Info("killing server...", zap.String("name", ep.cfg.Name)) - return ep.proc.Signal(syscall.SIGKILL) -} - -func (ep *EtcdServerProcess) Wait(ctx context.Context) error { - ch := make(chan struct{}) - go func() { - defer close(ch) - if ep.proc != nil { - ep.proc.Wait() - ep.cfg.lg.Info("server exited", zap.String("name", ep.cfg.Name)) - } - }() - select { - case <-ch: - ep.proc = nil - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - 
-func (ep *EtcdServerProcess) IsRunning() bool { - if ep.proc == nil { - return false - } - _, err := ep.proc.ExitCode() - if err == expect.ErrProcessRunning { - return true - } - ep.cfg.lg.Info("server exited", zap.String("name", ep.cfg.Name)) - ep.proc = nil - return false -} - -func AssertProcessLogs(t *testing.T, ep EtcdProcess, expectLog string) { - t.Helper() - var err error - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - _, err = ep.Logs().ExpectWithContext(ctx, expectLog) - if err != nil { - t.Fatal(err) - } -} - -func (ep *EtcdServerProcess) PeerProxy() proxy.Server { - return ep.proxy -} - -func (ep *EtcdServerProcess) Failpoints() *BinaryFailpoints { - return ep.failpoints -} - -type BinaryFailpoints struct { - member EtcdProcess - availableCache map[string]struct{} -} - -func (f *BinaryFailpoints) Setup(ctx context.Context, failpoint, payload string) error { - host := fmt.Sprintf("127.0.0.1:%d", f.member.Config().GoFailPort) - failpointUrl := url.URL{ - Scheme: "http", - Host: host, - Path: failpoint, - } - r, err := http.NewRequestWithContext(ctx, "PUT", failpointUrl.String(), bytes.NewBuffer([]byte(payload))) - if err != nil { - return err - } - resp, err := httpClient.Do(r) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusNoContent { - return fmt.Errorf("bad status code: %d", resp.StatusCode) - } - return nil -} - -var httpClient = http.Client{ - Timeout: 10 * time.Millisecond, -} - -func (f *BinaryFailpoints) Available() map[string]struct{} { - if f.availableCache == nil { - fs, err := fetchFailpoints(f.member) - if err != nil { - panic(err) - } - f.availableCache = fs - } - return f.availableCache -} - -func fetchFailpoints(member EtcdProcess) (map[string]struct{}, error) { - address := fmt.Sprintf("127.0.0.1:%d", member.Config().GoFailPort) - failpointUrl := url.URL{ - Scheme: "http", - Host: address, - } - resp, err := http.Get(failpointUrl.String()) - if 
err != nil { - return nil, err - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, err - } - text := strings.ReplaceAll(string(body), "=", "") - failpoints := map[string]struct{}{} - for _, f := range strings.Split(text, "\n") { - failpoints[f] = struct{}{} - } - return failpoints, nil -} diff --git a/tests/framework/e2e/etcd_spawn.go b/tests/framework/e2e/etcd_spawn.go deleted file mode 100644 index ff2e2f4c790..00000000000 --- a/tests/framework/e2e/etcd_spawn.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "os" - "strings" - - "go.uber.org/zap" - - "go.etcd.io/etcd/pkg/v3/expect" -) - -var ( - initBinPath func(string) binPath - additionalArgs func() ([]string, error) -) - -func SpawnCmd(args []string, envVars map[string]string) (*expect.ExpectProcess, error) { - return SpawnNamedCmd(strings.Join(args, "_"), args, envVars) -} - -func SpawnNamedCmd(processName string, args []string, envVars map[string]string) (*expect.ExpectProcess, error) { - return SpawnCmdWithLogger(zap.NewNop(), args, envVars, processName) -} - -func SpawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string, name string) (*expect.ExpectProcess, error) { - wd, err := os.Getwd() - if err != nil { - return nil, err - } - - newArgs, err := additionalArgs() - if err != nil { - return nil, err - } - env := mergeEnvVariables(envVars) - lg.Info("spawning process", - zap.Strings("args", args), - zap.String("working-dir", wd), - zap.String("name", name), - zap.Strings("environment-variables", env)) - return expect.NewExpectWithEnv(args[0], append(args[1:], newArgs...), env, name) -} diff --git a/tests/framework/e2e/etcd_spawn_cov.go b/tests/framework/e2e/etcd_spawn_cov.go deleted file mode 100644 index 05f4c006807..00000000000 --- a/tests/framework/e2e/etcd_spawn_cov.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build cov - -package e2e - -import ( - "fmt" - "os" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -const noOutputLineCount = 2 // cov-enabled binaries emit PASS and coverage count lines - -var ( - coverDir = testutils.MustAbsPath(os.Getenv("COVERDIR")) -) - -func init() { - initBinPath = initBinPathCov - additionalArgs = additionalArgsCov -} - -func initBinPathCov(binDir string) binPath { - return binPath{ - Etcd: binDir + "/etcd_test", - EtcdLastRelease: binDir + "/etcd-last-release", - Etcdctl: binDir + "/etcdctl_test", - Etcdutl: binDir + "/etcdutl_test", - } -} - -func additionalArgsCov() ([]string, error) { - if !fileutil.Exist(coverDir) { - return nil, fmt.Errorf("could not find coverage folder: %s", coverDir) - } - covArgs := []string{ - fmt.Sprintf("-test.coverprofile=e2e.%v.coverprofile", time.Now().UnixNano()), - "-test.outputdir=" + coverDir, - } - return covArgs, nil -} diff --git a/tests/framework/e2e/etcd_spawn_nocov.go b/tests/framework/e2e/etcd_spawn_nocov.go deleted file mode 100644 index 62b4ff6c820..00000000000 --- a/tests/framework/e2e/etcd_spawn_nocov.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !cov - -package e2e - -const noOutputLineCount = 0 // regular binaries emit no extra lines - -func init() { - initBinPath = initBinPathNoCov - additionalArgs = additionalArgsNoCov -} - -func initBinPathNoCov(binDir string) binPath { - return binPath{ - Etcd: binDir + "/etcd", - EtcdLastRelease: binDir + "/etcd-last-release", - Etcdctl: binDir + "/etcdctl", - Etcdutl: binDir + "/etcdutl", - } -} - -func additionalArgsNoCov() ([]string, error) { - return []string{}, nil -} diff --git a/tests/framework/e2e/etcdctl.go b/tests/framework/e2e/etcdctl.go deleted file mode 100644 index f3e0f8112ee..00000000000 --- a/tests/framework/e2e/etcdctl.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "encoding/json" - "fmt" - "io" - "strconv" - "strings" - "time" - - "google.golang.org/grpc" - - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" -) - -type EtcdctlV3 struct { - cfg ClientConfig - endpoints []string - authConfig clientv3.AuthConfig -} - -func NewEtcdctl(cfg ClientConfig, endpoints []string, opts ...config.ClientOption) (*EtcdctlV3, error) { - ctl := &EtcdctlV3{ - cfg: cfg, - endpoints: endpoints, - } - - for _, opt := range opts { - opt(ctl) - } - - if !ctl.authConfig.Empty() { - client, err := clientv3.New(clientv3.Config{ - Endpoints: ctl.endpoints, - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - Username: ctl.authConfig.Username, - Password: ctl.authConfig.Password, - }) - if err != nil { - return nil, err - } - client.Close() - } - - return ctl, nil -} - -func WithAuth(userName, password string) config.ClientOption { - return func(c any) { - ctl := c.(*EtcdctlV3) - ctl.authConfig.Username = userName - ctl.authConfig.Password = password - } -} - -func WithEndpoints(endpoints []string) config.ClientOption { - return func(c any) { - ctl := c.(*EtcdctlV3) - ctl.endpoints = endpoints - } -} - -func (ctl *EtcdctlV3) DowngradeEnable(ctx context.Context, version string) error { - _, err := SpawnWithExpectLines(ctx, ctl.cmdArgs("downgrade", "enable", version), nil, "Downgrade enable success") - return err -} - -func (ctl *EtcdctlV3) Get(ctx context.Context, key string, o config.GetOptions) (*clientv3.GetResponse, error) { - resp := clientv3.GetResponse{} - var args []string - if o.Timeout != 0 { - args = append(args, fmt.Sprintf("--command-timeout=%s", o.Timeout)) - } - if o.Serializable { - args = append(args, "--consistency", "s") - } - args = append(args, "get", key, "-w", "json") - if o.End != "" { - args = append(args, o.End) - } - if o.Revision != 0 { - args = 
append(args, fmt.Sprintf("--rev=%d", o.Revision)) - } - if o.Prefix { - args = append(args, "--prefix") - } - if o.Limit != 0 { - args = append(args, fmt.Sprintf("--limit=%d", o.Limit)) - } - if o.FromKey { - args = append(args, "--from-key") - } - if o.CountOnly { - args = append(args, "-w", "fields", "--count-only") - } else { - args = append(args, "-w", "json") - } - switch o.SortBy { - case clientv3.SortByCreateRevision: - args = append(args, "--sort-by=CREATE") - case clientv3.SortByModRevision: - args = append(args, "--sort-by=MODIFY") - case clientv3.SortByValue: - args = append(args, "--sort-by=VALUE") - case clientv3.SortByVersion: - args = append(args, "--sort-by=VERSION") - case clientv3.SortByKey: - // nothing - default: - return nil, fmt.Errorf("bad sort target %v", o.SortBy) - } - switch o.Order { - case clientv3.SortAscend: - args = append(args, "--order=ASCEND") - case clientv3.SortDescend: - args = append(args, "--order=DESCEND") - case clientv3.SortNone: - // nothing - default: - return nil, fmt.Errorf("bad sort order %v", o.Order) - } - if o.CountOnly { - cmd, err := SpawnCmd(ctl.cmdArgs(args...), nil) - if err != nil { - return nil, err - } - defer cmd.Close() - _, err = cmd.ExpectWithContext(ctx, "Count") - return &resp, err - } - err := ctl.spawnJsonCmd(ctx, &resp, args...) 
- return &resp, err -} - -func (ctl *EtcdctlV3) Put(ctx context.Context, key, value string, opts config.PutOptions) error { - args := ctl.cmdArgs() - args = append(args, "put", key, value) - if opts.LeaseID != 0 { - args = append(args, "--lease", strconv.FormatInt(int64(opts.LeaseID), 16)) - } - _, err := SpawnWithExpectLines(ctx, args, nil, "OK") - return err -} - -func (ctl *EtcdctlV3) Delete(ctx context.Context, key string, o config.DeleteOptions) (*clientv3.DeleteResponse, error) { - args := []string{"del", key} - if o.End != "" { - args = append(args, o.End) - } - if o.Prefix { - args = append(args, "--prefix") - } - if o.FromKey { - args = append(args, "--from-key") - } - var resp clientv3.DeleteResponse - err := ctl.spawnJsonCmd(ctx, &resp, args...) - return &resp, err -} - -func (ctl *EtcdctlV3) Txn(ctx context.Context, compares, ifSucess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) { - args := ctl.cmdArgs() - args = append(args, "txn") - if o.Interactive { - args = append(args, "--interactive") - } - args = append(args, "-w", "json", "--hex=true") - cmd, err := SpawnCmd(args, nil) - if err != nil { - return nil, err - } - defer cmd.Close() - _, err = cmd.ExpectWithContext(ctx, "compares:") - if err != nil { - return nil, err - } - for _, cmp := range compares { - if err := cmd.Send(cmp + "\r"); err != nil { - return nil, err - } - } - if err := cmd.Send("\r"); err != nil { - return nil, err - } - _, err = cmd.ExpectWithContext(ctx, "success requests (get, put, del):") - if err != nil { - return nil, err - } - for _, req := range ifSucess { - if err = cmd.Send(req + "\r"); err != nil { - return nil, err - } - } - if err = cmd.Send("\r"); err != nil { - return nil, err - } - - _, err = cmd.ExpectWithContext(ctx, "failure requests (get, put, del):") - if err != nil { - return nil, err - } - for _, req := range ifFail { - if err = cmd.Send(req + "\r"); err != nil { - return nil, err - } - } - if err = cmd.Send("\r"); err != nil { - 
return nil, err - } - var line string - line, err = cmd.ExpectWithContext(ctx, "header") - if err != nil { - return nil, err - } - var resp clientv3.TxnResponse - AddTxnResponse(&resp, line) - err = json.Unmarshal([]byte(line), &resp) - return &resp, err -} - -// AddTxnResponse looks for ResponseOp json tags and adds the objects for json decoding -func AddTxnResponse(resp *clientv3.TxnResponse, jsonData string) { - if resp == nil { - return - } - if resp.Responses == nil { - resp.Responses = []*etcdserverpb.ResponseOp{} - } - jd := json.NewDecoder(strings.NewReader(jsonData)) - for { - t, e := jd.Token() - if e == io.EOF { - break - } - if t == "response_range" { - resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{ - Response: &etcdserverpb.ResponseOp_ResponseRange{}, - }) - } - if t == "response_put" { - resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{ - Response: &etcdserverpb.ResponseOp_ResponsePut{}, - }) - } - if t == "response_delete_range" { - resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{ - Response: &etcdserverpb.ResponseOp_ResponseDeleteRange{}, - }) - } - if t == "response_txn" { - resp.Responses = append(resp.Responses, &etcdserverpb.ResponseOp{ - Response: &etcdserverpb.ResponseOp_ResponseTxn{}, - }) - } - } -} - -func (ctl *EtcdctlV3) MemberList(ctx context.Context) (*clientv3.MemberListResponse, error) { - var resp clientv3.MemberListResponse - err := ctl.spawnJsonCmd(ctx, &resp, "member", "list") - return &resp, err -} - -func (ctl *EtcdctlV3) MemberAdd(ctx context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error) { - var resp clientv3.MemberAddResponse - err := ctl.spawnJsonCmd(ctx, &resp, "member", "add", name, "--peer-urls", strings.Join(peerAddrs, ",")) - return &resp, err -} - -func (ctl *EtcdctlV3) MemberAddAsLearner(ctx context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error) { - var resp clientv3.MemberAddResponse - err := 
ctl.spawnJsonCmd(ctx, &resp, "member", "add", name, "--learner", "--peer-urls", strings.Join(peerAddrs, ",")) - return &resp, err -} - -func (ctl *EtcdctlV3) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) { - var resp clientv3.MemberRemoveResponse - err := ctl.spawnJsonCmd(ctx, &resp, "member", "remove", fmt.Sprintf("%x", id)) - return &resp, err -} - -func (ctl *EtcdctlV3) cmdArgs(args ...string) []string { - cmdArgs := []string{BinPath.Etcdctl} - for k, v := range ctl.flags() { - cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v)) - } - return append(cmdArgs, args...) -} - -func (ctl *EtcdctlV3) flags() map[string]string { - fmap := make(map[string]string) - if ctl.cfg.ConnectionType == ClientTLS { - if ctl.cfg.AutoTLS { - fmap["insecure-transport"] = "false" - fmap["insecure-skip-tls-verify"] = "true" - } else if ctl.cfg.RevokeCerts { - fmap["cacert"] = CaPath - fmap["cert"] = RevokedCertPath - fmap["key"] = RevokedPrivateKeyPath - } else { - fmap["cacert"] = CaPath - fmap["cert"] = CertPath - fmap["key"] = PrivateKeyPath - } - } - fmap["endpoints"] = strings.Join(ctl.endpoints, ",") - if !ctl.authConfig.Empty() { - fmap["user"] = ctl.authConfig.Username + ":" + ctl.authConfig.Password - } - return fmap -} - -func (ctl *EtcdctlV3) Compact(ctx context.Context, rev int64, o config.CompactOption) (*clientv3.CompactResponse, error) { - args := ctl.cmdArgs("compact", fmt.Sprint(rev)) - if o.Timeout != 0 { - args = append(args, fmt.Sprintf("--command-timeout=%s", o.Timeout)) - } - if o.Physical { - args = append(args, "--physical") - } - - _, err := SpawnWithExpectLines(ctx, args, nil, fmt.Sprintf("compacted revision %v", rev)) - return nil, err -} - -func (ctl *EtcdctlV3) Status(ctx context.Context) ([]*clientv3.StatusResponse, error) { - var epStatus []*struct { - Endpoint string - Status *clientv3.StatusResponse - } - err := ctl.spawnJsonCmd(ctx, &epStatus, "endpoint", "status") - if err != nil { - return nil, err - } - 
resp := make([]*clientv3.StatusResponse, len(epStatus)) - for i, e := range epStatus { - resp[i] = e.Status - } - return resp, err -} - -func (ctl *EtcdctlV3) HashKV(ctx context.Context, rev int64) ([]*clientv3.HashKVResponse, error) { - var epHashKVs []*struct { - Endpoint string - HashKV *clientv3.HashKVResponse - } - err := ctl.spawnJsonCmd(ctx, &epHashKVs, "endpoint", "hashkv", "--rev", fmt.Sprint(rev)) - if err != nil { - return nil, err - } - resp := make([]*clientv3.HashKVResponse, len(epHashKVs)) - for i, e := range epHashKVs { - resp[i] = e.HashKV - } - return resp, err -} - -func (ctl *EtcdctlV3) Health(ctx context.Context) error { - args := ctl.cmdArgs() - args = append(args, "endpoint", "health") - lines := make([]string, len(ctl.endpoints)) - for i := range lines { - lines[i] = "is healthy" - } - _, err := SpawnWithExpectLines(ctx, args, nil, lines...) - return err -} - -func (ctl *EtcdctlV3) Grant(ctx context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error) { - args := ctl.cmdArgs() - args = append(args, "lease", "grant", strconv.FormatInt(ttl, 10), "-w", "json") - cmd, err := SpawnCmd(args, nil) - if err != nil { - return nil, err - } - defer cmd.Close() - var resp clientv3.LeaseGrantResponse - line, err := cmd.ExpectWithContext(ctx, "ID") - if err != nil { - return nil, err - } - err = json.Unmarshal([]byte(line), &resp) - return &resp, err -} - -func (ctl *EtcdctlV3) TimeToLive(ctx context.Context, id clientv3.LeaseID, o config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) { - args := ctl.cmdArgs() - args = append(args, "lease", "timetolive", strconv.FormatInt(int64(id), 16), "-w", "json") - if o.WithAttachedKeys { - args = append(args, "--keys") - } - cmd, err := SpawnCmd(args, nil) - if err != nil { - return nil, err - } - defer cmd.Close() - var resp clientv3.LeaseTimeToLiveResponse - line, err := cmd.ExpectWithContext(ctx, "id") - if err != nil { - return nil, err - } - err = json.Unmarshal([]byte(line), &resp) - return 
&resp, err -} - -func (ctl *EtcdctlV3) Defragment(ctx context.Context, o config.DefragOption) error { - args := append(ctl.cmdArgs(), "defrag") - if o.Timeout != 0 { - args = append(args, fmt.Sprintf("--command-timeout=%s", o.Timeout)) - } - lines := make([]string, len(ctl.endpoints)) - for i := range lines { - lines[i] = "Finished defragmenting etcd member" - } - _, err := SpawnWithExpectLines(ctx, args, map[string]string{}, lines...) - return err -} - -func (ctl *EtcdctlV3) Leases(ctx context.Context) (*clientv3.LeaseLeasesResponse, error) { - args := ctl.cmdArgs("lease", "list", "-w", "json") - cmd, err := SpawnCmd(args, nil) - if err != nil { - return nil, err - } - defer cmd.Close() - var resp clientv3.LeaseLeasesResponse - line, err := cmd.ExpectWithContext(ctx, "id") - if err != nil { - return nil, err - } - err = json.Unmarshal([]byte(line), &resp) - return &resp, err -} - -func (ctl *EtcdctlV3) KeepAliveOnce(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseKeepAliveResponse, error) { - args := ctl.cmdArgs("lease", "keep-alive", strconv.FormatInt(int64(id), 16), "--once", "-w", "json") - cmd, err := SpawnCmd(args, nil) - if err != nil { - return nil, err - } - defer cmd.Close() - var resp clientv3.LeaseKeepAliveResponse - line, err := cmd.ExpectWithContext(ctx, "ID") - if err != nil { - return nil, err - } - err = json.Unmarshal([]byte(line), &resp) - return &resp, err -} - -func (ctl *EtcdctlV3) Revoke(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) { - var resp clientv3.LeaseRevokeResponse - err := ctl.spawnJsonCmd(ctx, &resp, "lease", "revoke", strconv.FormatInt(int64(id), 16)) - return &resp, err -} - -func (ctl *EtcdctlV3) AlarmList(ctx context.Context) (*clientv3.AlarmResponse, error) { - var resp clientv3.AlarmResponse - err := ctl.spawnJsonCmd(ctx, &resp, "alarm", "list") - return &resp, err -} - -func (ctl *EtcdctlV3) AlarmDisarm(ctx context.Context, _ *clientv3.AlarmMember) (*clientv3.AlarmResponse, error) 
{ - args := ctl.cmdArgs() - args = append(args, "alarm", "disarm", "-w", "json") - ep, err := SpawnCmd(args, nil) - if err != nil { - return nil, err - } - defer ep.Close() - var resp clientv3.AlarmResponse - line, err := ep.ExpectWithContext(ctx, "alarm") - if err != nil { - return nil, err - } - err = json.Unmarshal([]byte(line), &resp) - return &resp, err -} - -func (ctl *EtcdctlV3) AuthEnable(ctx context.Context) error { - args := []string{"auth", "enable"} - cmd, err := SpawnCmd(ctl.cmdArgs(args...), nil) - if err != nil { - return err - } - defer cmd.Close() - - _, err = cmd.ExpectWithContext(ctx, "Authentication Enabled") - return err -} - -func (ctl *EtcdctlV3) AuthDisable(ctx context.Context) error { - args := []string{"auth", "disable"} - cmd, err := SpawnCmd(ctl.cmdArgs(args...), nil) - if err != nil { - return err - } - defer cmd.Close() - - _, err = cmd.ExpectWithContext(ctx, "Authentication Disabled") - return err -} - -func (ctl *EtcdctlV3) AuthStatus(ctx context.Context) (*clientv3.AuthStatusResponse, error) { - var resp clientv3.AuthStatusResponse - err := ctl.spawnJsonCmd(ctx, &resp, "auth", "status") - return &resp, err -} - -func (ctl *EtcdctlV3) UserAdd(ctx context.Context, name, password string, opts config.UserAddOptions) (*clientv3.AuthUserAddResponse, error) { - args := ctl.cmdArgs() - args = append(args, "user", "add") - if password == "" { - args = append(args, name) - } else { - args = append(args, fmt.Sprintf("%s:%s", name, password)) - } - - if opts.NoPassword { - args = append(args, "--no-password") - } - - args = append(args, "--interactive=false", "-w", "json") - - cmd, err := SpawnCmd(args, nil) - if err != nil { - return nil, err - } - defer cmd.Close() - - // If no password is provided, and NoPassword isn't set, the CLI will always - // wait for a password, send an enter in this case for an "empty" password. 
- if !opts.NoPassword && password == "" { - err := cmd.Send("\n") - if err != nil { - return nil, err - } - } - - var resp clientv3.AuthUserAddResponse - line, err := cmd.ExpectWithContext(ctx, "header") - if err != nil { - return nil, err - } - err = json.Unmarshal([]byte(line), &resp) - return &resp, err -} - -func (ctl *EtcdctlV3) UserGet(ctx context.Context, name string) (*clientv3.AuthUserGetResponse, error) { - var resp clientv3.AuthUserGetResponse - err := ctl.spawnJsonCmd(ctx, &resp, "user", "get", name) - return &resp, err -} - -func (ctl *EtcdctlV3) UserList(ctx context.Context) (*clientv3.AuthUserListResponse, error) { - var resp clientv3.AuthUserListResponse - err := ctl.spawnJsonCmd(ctx, &resp, "user", "list") - return &resp, err -} - -func (ctl *EtcdctlV3) UserDelete(ctx context.Context, name string) (*clientv3.AuthUserDeleteResponse, error) { - var resp clientv3.AuthUserDeleteResponse - err := ctl.spawnJsonCmd(ctx, &resp, "user", "delete", name) - return &resp, err -} - -func (ctl *EtcdctlV3) UserChangePass(ctx context.Context, user, newPass string) error { - args := ctl.cmdArgs() - args = append(args, "user", "passwd", user, "--interactive=false") - cmd, err := SpawnCmd(args, nil) - if err != nil { - return err - } - defer cmd.Close() - err = cmd.Send(newPass + "\n") - if err != nil { - return err - } - - _, err = cmd.ExpectWithContext(ctx, "Password updated") - return err -} - -func (ctl *EtcdctlV3) UserGrantRole(ctx context.Context, user string, role string) (*clientv3.AuthUserGrantRoleResponse, error) { - var resp clientv3.AuthUserGrantRoleResponse - err := ctl.spawnJsonCmd(ctx, &resp, "user", "grant-role", user, role) - return &resp, err -} - -func (ctl *EtcdctlV3) UserRevokeRole(ctx context.Context, user string, role string) (*clientv3.AuthUserRevokeRoleResponse, error) { - var resp clientv3.AuthUserRevokeRoleResponse - err := ctl.spawnJsonCmd(ctx, &resp, "user", "revoke-role", user, role) - return &resp, err -} - -func (ctl *EtcdctlV3) 
RoleAdd(ctx context.Context, name string) (*clientv3.AuthRoleAddResponse, error) { - var resp clientv3.AuthRoleAddResponse - err := ctl.spawnJsonCmd(ctx, &resp, "role", "add", name) - return &resp, err -} - -func (ctl *EtcdctlV3) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType clientv3.PermissionType) (*clientv3.AuthRoleGrantPermissionResponse, error) { - permissionType := authpb.Permission_Type_name[int32(permType)] - var resp clientv3.AuthRoleGrantPermissionResponse - err := ctl.spawnJsonCmd(ctx, &resp, "role", "grant-permission", name, permissionType, key, rangeEnd) - return &resp, err -} - -func (ctl *EtcdctlV3) RoleGet(ctx context.Context, role string) (*clientv3.AuthRoleGetResponse, error) { - var resp clientv3.AuthRoleGetResponse - err := ctl.spawnJsonCmd(ctx, &resp, "role", "get", role) - return &resp, err -} - -func (ctl *EtcdctlV3) RoleList(ctx context.Context) (*clientv3.AuthRoleListResponse, error) { - var resp clientv3.AuthRoleListResponse - err := ctl.spawnJsonCmd(ctx, &resp, "role", "list") - return &resp, err -} - -func (ctl *EtcdctlV3) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*clientv3.AuthRoleRevokePermissionResponse, error) { - var resp clientv3.AuthRoleRevokePermissionResponse - err := ctl.spawnJsonCmd(ctx, &resp, "role", "revoke-permission", role, key, rangeEnd) - return &resp, err -} - -func (ctl *EtcdctlV3) RoleDelete(ctx context.Context, role string) (*clientv3.AuthRoleDeleteResponse, error) { - var resp clientv3.AuthRoleDeleteResponse - err := ctl.spawnJsonCmd(ctx, &resp, "role", "delete", role) - return &resp, err -} - -func (ctl *EtcdctlV3) spawnJsonCmd(ctx context.Context, output interface{}, args ...string) error { - args = append(args, "-w", "json") - cmd, err := SpawnCmd(append(ctl.cmdArgs(), args...), nil) - if err != nil { - return err - } - defer cmd.Close() - line, err := cmd.ExpectWithContext(ctx, "header") - if err != nil { - return err - } - return 
json.Unmarshal([]byte(line), output) -} - -func (ctl *EtcdctlV3) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan { - args := ctl.cmdArgs() - args = append(args, "watch", key) - if opts.RangeEnd != "" { - args = append(args, opts.RangeEnd) - } - args = append(args, "-w", "json") - if opts.Prefix { - args = append(args, "--prefix") - } - if opts.Revision != 0 { - args = append(args, "--rev", fmt.Sprint(opts.Revision)) - } - proc, err := SpawnCmd(args, nil) - if err != nil { - return nil - } - - ch := make(chan clientv3.WatchResponse) - go func() { - defer proc.Stop() - for { - select { - case <-ctx.Done(): - close(ch) - return - default: - if line := proc.ReadLine(); line != "" { - var resp clientv3.WatchResponse - json.Unmarshal([]byte(line), &resp) - if resp.Canceled { - close(ch) - return - } - if len(resp.Events) > 0 { - ch <- resp - } - } - } - } - }() - - return ch -} diff --git a/tests/framework/e2e/flags.go b/tests/framework/e2e/flags.go deleted file mode 100644 index d51d2898034..00000000000 --- a/tests/framework/e2e/flags.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "flag" - "os" - "runtime" - - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -var ( - CertDir string - - CertPath string - PrivateKeyPath string - CaPath string - - CertPath2 string - PrivateKeyPath2 string - - CertPath3 string - PrivateKeyPath3 string - - CrlPath string - RevokedCertPath string - RevokedPrivateKeyPath string - - BinPath binPath - FixturesDir = testutils.MustAbsPath("../fixtures") -) - -type binPath struct { - Etcd string - EtcdLastRelease string - Etcdctl string - Etcdutl string -} - -func InitFlags() { - os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH) - - binDirDef := testutils.MustAbsPath("../../bin") - certDirDef := FixturesDir - - binDir := flag.String("bin-dir", binDirDef, "The directory for store etcd and etcdctl binaries.") - flag.StringVar(&CertDir, "cert-dir", certDirDef, "The directory for store certificate files.") - flag.Parse() - - BinPath = initBinPath(*binDir) - CertPath = CertDir + "/server.crt" - PrivateKeyPath = CertDir + "/server.key.insecure" - CaPath = CertDir + "/ca.crt" - RevokedCertPath = CertDir + "/server-revoked.crt" - RevokedPrivateKeyPath = CertDir + "/server-revoked.key.insecure" - CrlPath = CertDir + "/revoke.crl" - - CertPath2 = CertDir + "/server2.crt" - PrivateKeyPath2 = CertDir + "/server2.key.insecure" - - CertPath3 = CertDir + "/server3.crt" - PrivateKeyPath3 = CertDir + "/server3.key.insecure" -} diff --git a/tests/framework/e2e/testing.go b/tests/framework/e2e/testing.go deleted file mode 100644 index 7d7de27fddd..00000000000 --- a/tests/framework/e2e/testing.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func BeforeTest(t testing.TB) { - SkipInShortMode(t) - testutil.BeforeTest(t) -} diff --git a/tests/framework/e2e/util.go b/tests/framework/e2e/util.go deleted file mode 100644 index 46ac286f0df..00000000000 --- a/tests/framework/e2e/util.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package e2e - -import ( - "context" - "encoding/json" - "fmt" - "math/rand" - "os" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/pkg/v3/expect" -) - -func WaitReadyExpectProc(ctx context.Context, exproc *expect.ExpectProcess, readyStrs []string) error { - matchSet := func(l string) bool { - for _, s := range readyStrs { - if strings.Contains(l, s) { - return true - } - } - return false - } - _, err := exproc.ExpectFunc(ctx, matchSet) - return err -} - -func SpawnWithExpect(args []string, expected string) error { - return SpawnWithExpects(args, nil, []string{expected}...) -} - -func SpawnWithExpectWithEnv(args []string, envVars map[string]string, expected string) error { - return SpawnWithExpects(args, envVars, []string{expected}...) -} - -func SpawnWithExpects(args []string, envVars map[string]string, xs ...string) error { - return SpawnWithExpectsContext(context.TODO(), args, envVars, xs...) -} - -func SpawnWithExpectsContext(ctx context.Context, args []string, envVars map[string]string, xs ...string) error { - _, err := SpawnWithExpectLines(ctx, args, envVars, xs...) - return err -} - -func SpawnWithExpectLines(ctx context.Context, args []string, envVars map[string]string, xs ...string) ([]string, error) { - proc, err := SpawnCmd(args, envVars) - if err != nil { - return nil, err - } - defer proc.Close() - // process until either stdout or stderr contains - // the expected string - var ( - lines []string - ) - for _, txt := range xs { - l, lerr := proc.ExpectWithContext(ctx, txt) - if lerr != nil { - proc.Close() - return nil, fmt.Errorf("%v %v (expected %q, got %q). 
Try EXPECT_DEBUG=TRUE", args, lerr, txt, lines) - } - lines = append(lines, l) - } - perr := proc.Close() - if perr != nil { - return lines, fmt.Errorf("err: %w, with output lines %v", perr, proc.Lines()) - } - - l := proc.LineCount() - if len(xs) == 0 && l != noOutputLineCount { // expect no output - return nil, fmt.Errorf("unexpected output from %v (got lines %q, line count %d) %v. Try EXPECT_DEBUG=TRUE", args, lines, l, l != noOutputLineCount) - } - return lines, nil -} - -func RunUtilCompletion(args []string, envVars map[string]string) ([]string, error) { - proc, err := SpawnCmd(args, envVars) - if err != nil { - return nil, fmt.Errorf("failed to spawn command %v with error: %w", args, err) - } - - proc.Wait() - err = proc.Close() - if err != nil { - return nil, fmt.Errorf("failed to close command %v with error: %w", args, err) - } - - return proc.Lines(), nil -} - -func RandomLeaseID() int64 { - return rand.New(rand.NewSource(time.Now().UnixNano())).Int63() -} - -func DataMarshal(data interface{}) (d string, e error) { - m, err := json.Marshal(data) - if err != nil { - return "", err - } - return string(m), nil -} - -func CloseWithTimeout(p *expect.ExpectProcess, d time.Duration) error { - errc := make(chan error, 1) - go func() { errc <- p.Close() }() - select { - case err := <-errc: - return err - case <-time.After(d): - p.Stop() - // retry close after stopping to collect SIGQUIT data, if any - CloseWithTimeout(p, time.Second) - } - return fmt.Errorf("took longer than %v to Close process %+v", d, p) -} - -func ToTLS(s string) string { - return strings.Replace(s, "http://", "https://", 1) -} - -func SkipInShortMode(t testing.TB) { - testutil.SkipTestIfShortMode(t, "e2e tests are not running in --short mode") -} - -func mergeEnvVariables(envVars map[string]string) []string { - var env []string - // Environment variables are passed as parameter have higher priority - // than os environment variables. 
- for k, v := range envVars { - env = append(env, fmt.Sprintf("%s=%s", k, v)) - } - - // Now, we can set os environment variables not passed as parameter. - currVars := os.Environ() - for _, v := range currVars { - p := strings.Split(v, "=") - // TODO: Remove PATH when we stop using system binaries (`awk`, `echo`) - if !strings.HasPrefix(p[0], "ETCD_") && !strings.HasPrefix(p[0], "ETCDCTL_") && !strings.HasPrefix(p[0], "EXPECT_") && p[0] != "PATH" { - continue - } - if _, ok := envVars[p[0]]; !ok { - env = append(env, fmt.Sprintf("%s=%s", p[0], p[1])) - } - } - - return env -} diff --git a/tests/framework/integration/bridge.go b/tests/framework/integration/bridge.go deleted file mode 100644 index 74aaf8ab948..00000000000 --- a/tests/framework/integration/bridge.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "io" - "net" - "sync" -) - -type Dialer interface { - Dial() (net.Conn, error) -} - -// bridge proxies connections between listener and dialer, making it possible -// to disconnect grpc network connections without closing the logical grpc connection. 
-type bridge struct { - dialer Dialer - l net.Listener - conns map[*bridgeConn]struct{} - - stopc chan struct{} - pausec chan struct{} - blackholec chan struct{} - wg sync.WaitGroup - - mu sync.Mutex -} - -func newBridge(dialer Dialer, listener net.Listener) (*bridge, error) { - b := &bridge{ - // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number - dialer: dialer, - l: listener, - conns: make(map[*bridgeConn]struct{}), - stopc: make(chan struct{}), - pausec: make(chan struct{}), - blackholec: make(chan struct{}), - } - close(b.pausec) - b.wg.Add(1) - go b.serveListen() - return b, nil -} - -func (b *bridge) Close() { - b.l.Close() - b.mu.Lock() - select { - case <-b.stopc: - default: - close(b.stopc) - } - b.mu.Unlock() - b.wg.Wait() -} - -func (b *bridge) DropConnections() { - b.mu.Lock() - defer b.mu.Unlock() - for bc := range b.conns { - bc.Close() - } - b.conns = make(map[*bridgeConn]struct{}) -} - -func (b *bridge) PauseConnections() { - b.mu.Lock() - b.pausec = make(chan struct{}) - b.mu.Unlock() -} - -func (b *bridge) UnpauseConnections() { - b.mu.Lock() - select { - case <-b.pausec: - default: - close(b.pausec) - } - b.mu.Unlock() -} - -func (b *bridge) serveListen() { - defer func() { - b.l.Close() - b.mu.Lock() - for bc := range b.conns { - bc.Close() - } - b.mu.Unlock() - b.wg.Done() - }() - - for { - inc, ierr := b.l.Accept() - if ierr != nil { - return - } - b.mu.Lock() - pausec := b.pausec - b.mu.Unlock() - select { - case <-b.stopc: - inc.Close() - return - case <-pausec: - } - - outc, oerr := b.dialer.Dial() - if oerr != nil { - inc.Close() - return - } - - bc := &bridgeConn{inc, outc, make(chan struct{})} - b.wg.Add(1) - b.mu.Lock() - b.conns[bc] = struct{}{} - go b.serveConn(bc) - b.mu.Unlock() - } -} - -func (b *bridge) serveConn(bc *bridgeConn) { - defer func() { - close(bc.donec) - bc.Close() - b.mu.Lock() - delete(b.conns, bc) - b.mu.Unlock() - b.wg.Done() - }() - - var wg sync.WaitGroup - wg.Add(2) - go 
func() { - b.ioCopy(bc.out, bc.in) - bc.close() - wg.Done() - }() - go func() { - b.ioCopy(bc.in, bc.out) - bc.close() - wg.Done() - }() - wg.Wait() -} - -type bridgeConn struct { - in net.Conn - out net.Conn - donec chan struct{} -} - -func (bc *bridgeConn) Close() { - bc.close() - <-bc.donec -} - -func (bc *bridgeConn) close() { - bc.in.Close() - bc.out.Close() -} - -func (b *bridge) Blackhole() { - b.mu.Lock() - close(b.blackholec) - b.mu.Unlock() -} - -func (b *bridge) Unblackhole() { - b.mu.Lock() - for bc := range b.conns { - bc.Close() - } - b.conns = make(map[*bridgeConn]struct{}) - b.blackholec = make(chan struct{}) - b.mu.Unlock() -} - -// ref. https://github.com/golang/go/blob/master/src/io/io.go copyBuffer -func (b *bridge) ioCopy(dst io.Writer, src io.Reader) (err error) { - buf := make([]byte, 32*1024) - for { - select { - case <-b.blackholec: - io.Copy(io.Discard, src) - return nil - default: - } - nr, er := src.Read(buf) - if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) - if ew != nil { - return ew - } - if nr != nw { - return io.ErrShortWrite - } - } - if er != nil { - err = er - break - } - } - return err -} diff --git a/tests/framework/integration/cluster.go b/tests/framework/integration/cluster.go deleted file mode 100644 index 1c26d070074..00000000000 --- a/tests/framework/integration/cluster.go +++ /dev/null @@ -1,1676 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "math/rand" - "net" - "net/http" - "net/http/httptest" - "os" - "reflect" - "sort" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/tlsutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/client/pkg/v3/types" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/grpc_testing" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election" - epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock" - lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - "go.etcd.io/etcd/server/v3/verify" - framecfg "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/testutils" - "go.etcd.io/raft/v3" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/soheilhy/cmux" - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" -) - -const ( - // RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss. 
- RequestWaitTimeout = 5 * time.Second - RequestTimeout = 20 * time.Second - - ClusterName = "etcd" - BasePort = 21000 - URLScheme = "unix" - URLSchemeTLS = "unixs" - BaseGRPCPort = 30000 -) - -var ( - ElectionTicks = 10 - - // UniqueCount integration test is used to set unique member ids - UniqueCount = int32(0) - - TestTLSInfo = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("../fixtures/server.key.insecure"), - CertFile: testutils.MustAbsPath("../fixtures/server.crt"), - TrustedCAFile: testutils.MustAbsPath("../fixtures/ca.crt"), - ClientCertAuth: true, - } - - TestTLSInfoWithSpecificUsage = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("../fixtures/server-serverusage.key.insecure"), - CertFile: testutils.MustAbsPath("../fixtures/server-serverusage.crt"), - ClientKeyFile: testutils.MustAbsPath("../fixtures/client-clientusage.key.insecure"), - ClientCertFile: testutils.MustAbsPath("../fixtures/client-clientusage.crt"), - TrustedCAFile: testutils.MustAbsPath("../fixtures/ca.crt"), - ClientCertAuth: true, - } - - TestTLSInfoIP = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("../fixtures/server-ip.key.insecure"), - CertFile: testutils.MustAbsPath("../fixtures/server-ip.crt"), - TrustedCAFile: testutils.MustAbsPath("../fixtures/ca.crt"), - ClientCertAuth: true, - } - - TestTLSInfoExpired = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("./fixtures-expired/server.key.insecure"), - CertFile: testutils.MustAbsPath("./fixtures-expired/server.crt"), - TrustedCAFile: testutils.MustAbsPath("./fixtures-expired/ca.crt"), - ClientCertAuth: true, - } - - TestTLSInfoExpiredIP = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("./fixtures-expired/server-ip.key.insecure"), - CertFile: testutils.MustAbsPath("./fixtures-expired/server-ip.crt"), - TrustedCAFile: testutils.MustAbsPath("./fixtures-expired/ca.crt"), - ClientCertAuth: true, - } - - DefaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s", - 
testutils.MustAbsPath("../fixtures/server.crt"), testutils.MustAbsPath("../fixtures/server.key.insecure")) - - // UniqueNumber is used to generate unique port numbers - // Should only be accessed via atomic package methods. - UniqueNumber int32 -) - -type ClusterConfig struct { - Size int - PeerTLS *transport.TLSInfo - ClientTLS *transport.TLSInfo - - DiscoveryURL string - - AuthToken string - AuthTokenTTL uint - - QuotaBackendBytes int64 - - MaxTxnOps uint - MaxRequestBytes uint - SnapshotCount uint64 - SnapshotCatchUpEntries uint64 - - GRPCKeepAliveMinTime time.Duration - GRPCKeepAliveInterval time.Duration - GRPCKeepAliveTimeout time.Duration - - ClientMaxCallSendMsgSize int - ClientMaxCallRecvMsgSize int - - // UseIP is true to use only IP for gRPC requests. - UseIP bool - // UseBridge adds bridge between client and grpc server. Should be used in tests that - // want to manipulate connection or require connection not breaking despite server stop/restart. - UseBridge bool - // UseTCP configures server listen on tcp socket. If disabled unix socket is used. 
- UseTCP bool - - EnableLeaseCheckpoint bool - LeaseCheckpointInterval time.Duration - LeaseCheckpointPersist bool - - WatchProgressNotifyInterval time.Duration - ExperimentalMaxLearners int - DisableStrictReconfigCheck bool - CorruptCheckTime time.Duration -} - -type Cluster struct { - Cfg *ClusterConfig - Members []*Member - LastMemberNum int - - mu sync.Mutex -} - -func SchemeFromTLSInfo(tls *transport.TLSInfo) string { - if tls == nil { - return URLScheme - } - return URLSchemeTLS -} - -func (c *Cluster) fillClusterForMembers() error { - if c.Cfg.DiscoveryURL != "" { - // Cluster will be discovered - return nil - } - - addrs := make([]string, 0) - for _, m := range c.Members { - scheme := SchemeFromTLSInfo(m.PeerTLSInfo) - for _, l := range m.PeerListeners { - addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String())) - } - } - clusterStr := strings.Join(addrs, ",") - var err error - for _, m := range c.Members { - m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) - if err != nil { - return err - } - } - return nil -} - -func (c *Cluster) Launch(t testutil.TB) { - t.Logf("Launching new cluster...") - errc := make(chan error) - for _, m := range c.Members { - // Members are launched in separate goroutines because if they boot - // using discovery url, they have to wait for others to register to continue. 
- go func(m *Member) { - errc <- m.Launch() - }(m) - } - for range c.Members { - if err := <-errc; err != nil { - c.Terminate(t) - t.Fatalf("error setting up member: %v", err) - } - } - // wait Cluster to be stable to receive future client requests - c.WaitMembersMatch(t, c.ProtoMembers()) - c.waitVersion() - for _, m := range c.Members { - t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL()) - } -} - -// ProtoMembers returns a list of all active members as client.Members -func (c *Cluster) ProtoMembers() []*pb.Member { - var ms []*pb.Member - for _, m := range c.Members { - pScheme := SchemeFromTLSInfo(m.PeerTLSInfo) - cScheme := SchemeFromTLSInfo(m.ClientTLSInfo) - cm := &pb.Member{Name: m.Name} - for _, ln := range m.PeerListeners { - cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String()) - } - for _, ln := range m.ClientListeners { - cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String()) - } - ms = append(ms, cm) - } - return ms -} - -func (c *Cluster) mustNewMember(t testutil.TB) *Member { - memberNumber := c.LastMemberNum - c.LastMemberNum++ - - m := MustNewMember(t, - MemberConfig{ - Name: fmt.Sprintf("m%v", memberNumber), - MemberNumber: memberNumber, - AuthToken: c.Cfg.AuthToken, - AuthTokenTTL: c.Cfg.AuthTokenTTL, - PeerTLS: c.Cfg.PeerTLS, - ClientTLS: c.Cfg.ClientTLS, - QuotaBackendBytes: c.Cfg.QuotaBackendBytes, - MaxTxnOps: c.Cfg.MaxTxnOps, - MaxRequestBytes: c.Cfg.MaxRequestBytes, - SnapshotCount: c.Cfg.SnapshotCount, - SnapshotCatchUpEntries: c.Cfg.SnapshotCatchUpEntries, - GrpcKeepAliveMinTime: c.Cfg.GRPCKeepAliveMinTime, - GrpcKeepAliveInterval: c.Cfg.GRPCKeepAliveInterval, - GrpcKeepAliveTimeout: c.Cfg.GRPCKeepAliveTimeout, - ClientMaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize, - ClientMaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize, - UseIP: c.Cfg.UseIP, - UseBridge: c.Cfg.UseBridge, - UseTCP: c.Cfg.UseTCP, - EnableLeaseCheckpoint: c.Cfg.EnableLeaseCheckpoint, - LeaseCheckpointInterval: 
c.Cfg.LeaseCheckpointInterval, - LeaseCheckpointPersist: c.Cfg.LeaseCheckpointPersist, - WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval, - ExperimentalMaxLearners: c.Cfg.ExperimentalMaxLearners, - DisableStrictReconfigCheck: c.Cfg.DisableStrictReconfigCheck, - CorruptCheckTime: c.Cfg.CorruptCheckTime, - }) - m.DiscoveryURL = c.Cfg.DiscoveryURL - return m -} - -// addMember return PeerURLs of the added member. -func (c *Cluster) addMember(t testutil.TB) types.URLs { - m := c.mustNewMember(t) - - scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) - - // send add request to the Cluster - var err error - for i := 0; i < len(c.Members); i++ { - peerURL := scheme + "://" + m.PeerListeners[0].Addr().String() - if err = c.AddMemberByURL(t, c.Members[i].Client, peerURL); err == nil { - break - } - } - if err != nil { - t.Fatalf("add member failed on all members error: %v", err) - } - - m.InitialPeerURLsMap = types.URLsMap{} - for _, mm := range c.Members { - m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs - } - m.InitialPeerURLsMap[m.Name] = m.PeerURLs - m.NewCluster = false - if err := m.Launch(); err != nil { - t.Fatal(err) - } - c.Members = append(c.Members, m) - // wait Cluster to be stable to receive future client requests - c.WaitMembersMatch(t, c.ProtoMembers()) - return m.PeerURLs -} - -func (c *Cluster) AddMemberByURL(t testutil.TB, cc *clientv3.Client, peerURL string) error { - ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) - _, err := cc.MemberAdd(ctx, []string{peerURL}) - cancel() - if err != nil { - return err - } - - // wait for the add node entry applied in the Cluster - members := append(c.ProtoMembers(), &pb.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}}) - c.WaitMembersMatch(t, members) - return nil -} - -// AddMember return PeerURLs of the added member. 
-func (c *Cluster) AddMember(t testutil.TB) types.URLs { - return c.addMember(t) -} - -func (c *Cluster) RemoveMember(t testutil.TB, cc *clientv3.Client, id uint64) error { - // send remove request to the Cluster - - ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) - _, err := cc.MemberRemove(ctx, id) - cancel() - if err != nil { - return err - } - newMembers := make([]*Member, 0) - for _, m := range c.Members { - if uint64(m.Server.MemberId()) != id { - newMembers = append(newMembers, m) - } else { - m.Client.Close() - select { - case <-m.Server.StopNotify(): - m.Terminate(t) - // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout - // TODO: remove connection write timeout by selecting on http response closeNotifier - // blocking on https://github.com/golang/go/issues/9524 - case <-time.After(time.Second + time.Duration(ElectionTicks)*framecfg.TickDuration + time.Second + rafthttp.ConnWriteTimeout): - t.Fatalf("failed to remove member %s in time", m.Server.MemberId()) - } - } - } - - c.Members = newMembers - c.WaitMembersMatch(t, c.ProtoMembers()) - return nil -} - -func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []*pb.Member) { - ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) - defer cancel() - for _, m := range c.Members { - cc := ToGRPC(m.Client) - select { - case <-m.Server.StopNotify(): - continue - default: - } - for { - resp, err := cc.Cluster.MemberList(ctx, &pb.MemberListRequest{Linearizable: false}) - if errors.Is(err, context.DeadlineExceeded) { - t.Fatal(err) - } - if err != nil { - continue - } - if isMembersEqual(resp.Members, membs) { - break - } - time.Sleep(framecfg.TickDuration) - } - } -} - -// WaitLeader returns index of the member in c.Members that is leader -// or fails the test (if not established in 30s). 
-func (c *Cluster) WaitLeader(t testing.TB) int { - return c.WaitMembersForLeader(t, c.Members) -} - -// WaitMembersForLeader waits until given members agree on the same leader, -// and returns its 'index' in the 'membs' list -func (c *Cluster) WaitMembersForLeader(t testing.TB, membs []*Member) int { - t.Logf("WaitMembersForLeader") - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - l := 0 - for l = c.waitMembersForLeader(ctx, t, membs); l < 0; { - if ctx.Err() != nil { - t.Fatalf("WaitLeader FAILED: %v", ctx.Err()) - } - } - t.Logf("WaitMembersForLeader succeeded. Cluster leader index: %v", l) - - // TODO: Consider second pass check as sometimes leadership is lost - // soon after election: - // - // We perform multiple attempts, as some-times just after successful WaitLLeader - // there is a race and leadership is quickly lost: - // - MsgAppResp message with higher term from 2acc3d3b521981 [term: 3] {"member": "m0"} - // - 9903a56eaf96afac became follower at term 3 {"member": "m0"} - // - 9903a56eaf96afac lost leader 9903a56eaf96afac at term 3 {"member": "m0"} - - return l -} - -// WaitMembersForLeader waits until given members agree on the same leader, -// and returns its 'index' in the 'membs' list -func (c *Cluster) waitMembersForLeader(ctx context.Context, t testing.TB, membs []*Member) int { - possibleLead := make(map[uint64]bool) - var lead uint64 - for _, m := range membs { - possibleLead[uint64(m.Server.MemberId())] = true - } - cc, err := c.ClusterClient(t) - if err != nil { - t.Fatal(err) - } - // ensure leader is up via linearizable get - for { - ctx, cancel := context.WithTimeout(ctx, 10*framecfg.TickDuration+time.Second) - _, err := cc.Get(ctx, "0") - cancel() - if err == nil || strings.Contains(err.Error(), "Key not found") { - break - } - } - - for lead == 0 || !possibleLead[lead] { - lead = 0 - for _, m := range membs { - select { - case <-m.Server.StopNotify(): - continue - default: - } - if lead != 0 && 
lead != m.Server.Lead() { - lead = 0 - time.Sleep(10 * framecfg.TickDuration) - break - } - lead = m.Server.Lead() - } - } - - for i, m := range membs { - if uint64(m.Server.MemberId()) == lead { - t.Logf("waitMembersForLeader found leader. Member: %v lead: %x", i, lead) - return i - } - } - - t.Logf("waitMembersForLeader failed (-1)") - return -1 -} - -func (c *Cluster) WaitNoLeader() { c.WaitMembersNoLeader(c.Members) } - -// WaitMembersNoLeader waits until given members lose leader. -func (c *Cluster) WaitMembersNoLeader(membs []*Member) { - noLeader := false - for !noLeader { - noLeader = true - for _, m := range membs { - select { - case <-m.Server.StopNotify(): - continue - default: - } - if m.Server.Lead() != 0 { - noLeader = false - time.Sleep(10 * framecfg.TickDuration) - break - } - } - } -} - -func (c *Cluster) waitVersion() { - for _, m := range c.Members { - for { - if m.Server.ClusterVersion() != nil { - break - } - time.Sleep(framecfg.TickDuration) - } - } -} - -// isMembersEqual checks whether two members equal except ID field. -// The given wmembs should always set ID field to empty string. 
-func isMembersEqual(membs []*pb.Member, wmembs []*pb.Member) bool { - sort.Sort(SortableMemberSliceByPeerURLs(membs)) - sort.Sort(SortableMemberSliceByPeerURLs(wmembs)) - return cmp.Equal(membs, wmembs, cmpopts.IgnoreFields(pb.Member{}, "ID", "PeerURLs", "ClientURLs")) -} - -func NewLocalListener(t testutil.TB) net.Listener { - c := atomic.AddInt32(&UniqueCount, 1) - // Go 1.8+ allows only numbers in port - addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+BasePort, os.Getpid()) - return NewListenerWithAddr(t, addr) -} - -func NewListenerWithAddr(t testutil.TB, addr string) net.Listener { - t.Logf("Creating listener with addr: %v", addr) - l, err := transport.NewUnixListener(addr) - if err != nil { - t.Fatal(err) - } - return l -} - -type Member struct { - config.ServerConfig - UniqNumber int - MemberNumber int - Port string - PeerListeners, ClientListeners []net.Listener - GrpcListener net.Listener - // PeerTLSInfo enables peer TLS when set - PeerTLSInfo *transport.TLSInfo - // ClientTLSInfo enables client TLS when set - ClientTLSInfo *transport.TLSInfo - DialOptions []grpc.DialOption - - RaftHandler *testutil.PauseableHandler - Server *etcdserver.EtcdServer - ServerClosers []func() - - GrpcServerOpts []grpc.ServerOption - GrpcServer *grpc.Server - GrpcURL string - GrpcBridge *bridge - - // ServerClient is a clientv3 that directly calls the etcdserver. - ServerClient *clientv3.Client - // Client is a clientv3 that communicates via socket, either UNIX or TCP. 
- Client *clientv3.Client - - KeepDataDirTerminate bool - ClientMaxCallSendMsgSize int - ClientMaxCallRecvMsgSize int - UseIP bool - UseBridge bool - UseTCP bool - - IsLearner bool - Closed bool - - GrpcServerRecorder *grpc_testing.GrpcRecorder -} - -func (m *Member) GRPCURL() string { return m.GrpcURL } - -type MemberConfig struct { - Name string - UniqNumber int64 - MemberNumber int - PeerTLS *transport.TLSInfo - ClientTLS *transport.TLSInfo - AuthToken string - AuthTokenTTL uint - QuotaBackendBytes int64 - MaxTxnOps uint - MaxRequestBytes uint - SnapshotCount uint64 - SnapshotCatchUpEntries uint64 - GrpcKeepAliveMinTime time.Duration - GrpcKeepAliveInterval time.Duration - GrpcKeepAliveTimeout time.Duration - ClientMaxCallSendMsgSize int - ClientMaxCallRecvMsgSize int - UseIP bool - UseBridge bool - UseTCP bool - EnableLeaseCheckpoint bool - LeaseCheckpointInterval time.Duration - LeaseCheckpointPersist bool - WatchProgressNotifyInterval time.Duration - ExperimentalMaxLearners int - DisableStrictReconfigCheck bool - CorruptCheckTime time.Duration -} - -// MustNewMember return an inited member with the given name. If peerTLS is -// set, it will use https scheme to communicate between peers. 
-func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member { - var err error - m := &Member{ - MemberNumber: mcfg.MemberNumber, - UniqNumber: int(atomic.AddInt32(&UniqueCount, 1)), - } - - peerScheme := SchemeFromTLSInfo(mcfg.PeerTLS) - clientScheme := SchemeFromTLSInfo(mcfg.ClientTLS) - - pln := NewLocalListener(t) - m.PeerListeners = []net.Listener{pln} - m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()}) - if err != nil { - t.Fatal(err) - } - m.PeerTLSInfo = mcfg.PeerTLS - - cln := NewLocalListener(t) - m.ClientListeners = []net.Listener{cln} - m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()}) - if err != nil { - t.Fatal(err) - } - m.ClientTLSInfo = mcfg.ClientTLS - - m.Name = mcfg.Name - - m.DataDir, err = os.MkdirTemp(t.TempDir(), "etcd") - if err != nil { - t.Fatal(err) - } - clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.Name, peerScheme, pln.Addr().String()) - m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) - if err != nil { - t.Fatal(err) - } - m.InitialClusterToken = ClusterName - m.NewCluster = true - m.BootstrapTimeout = 10 * time.Millisecond - if m.PeerTLSInfo != nil { - m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo - } - m.ElectionTicks = ElectionTicks - m.InitialElectionTickAdvance = true - m.TickMs = uint(framecfg.TickDuration / time.Millisecond) - m.QuotaBackendBytes = mcfg.QuotaBackendBytes - m.MaxTxnOps = mcfg.MaxTxnOps - if m.MaxTxnOps == 0 { - m.MaxTxnOps = embed.DefaultMaxTxnOps - } - m.MaxRequestBytes = mcfg.MaxRequestBytes - if m.MaxRequestBytes == 0 { - m.MaxRequestBytes = embed.DefaultMaxRequestBytes - } - m.SnapshotCount = etcdserver.DefaultSnapshotCount - if mcfg.SnapshotCount != 0 { - m.SnapshotCount = mcfg.SnapshotCount - } - m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries - if mcfg.SnapshotCatchUpEntries != 0 { - m.SnapshotCatchUpEntries = mcfg.SnapshotCatchUpEntries - } - - // for the purpose of integration testing, simple token is 
enough - m.AuthToken = "simple" - if mcfg.AuthToken != "" { - m.AuthToken = mcfg.AuthToken - } - if mcfg.AuthTokenTTL != 0 { - m.TokenTTL = mcfg.AuthTokenTTL - } - - m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speedy up integration testing - - m.GrpcServerOpts = []grpc.ServerOption{} - if mcfg.GrpcKeepAliveMinTime > time.Duration(0) { - m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: mcfg.GrpcKeepAliveMinTime, - PermitWithoutStream: false, - })) - } - if mcfg.GrpcKeepAliveInterval > time.Duration(0) && - mcfg.GrpcKeepAliveTimeout > time.Duration(0) { - m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: mcfg.GrpcKeepAliveInterval, - Timeout: mcfg.GrpcKeepAliveTimeout, - })) - } - m.ClientMaxCallSendMsgSize = mcfg.ClientMaxCallSendMsgSize - m.ClientMaxCallRecvMsgSize = mcfg.ClientMaxCallRecvMsgSize - m.UseIP = mcfg.UseIP - m.UseBridge = mcfg.UseBridge - m.UseTCP = mcfg.UseTCP - m.EnableLeaseCheckpoint = mcfg.EnableLeaseCheckpoint - m.LeaseCheckpointInterval = mcfg.LeaseCheckpointInterval - m.LeaseCheckpointPersist = mcfg.LeaseCheckpointPersist - - m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval - - m.InitialCorruptCheck = true - if mcfg.CorruptCheckTime > time.Duration(0) { - m.CorruptCheckTime = mcfg.CorruptCheckTime - } - m.WarningApplyDuration = embed.DefaultWarningApplyDuration - m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration - m.ExperimentalMaxLearners = membership.DefaultMaxLearners - if mcfg.ExperimentalMaxLearners != 0 { - m.ExperimentalMaxLearners = mcfg.ExperimentalMaxLearners - } - m.V2Deprecation = config.V2_DEPR_DEFAULT - m.GrpcServerRecorder = &grpc_testing.GrpcRecorder{} - m.Logger = memberLogger(t, mcfg.Name) - m.StrictReconfigCheck = !mcfg.DisableStrictReconfigCheck - if err := m.listenGRPC(); err != nil { - t.Fatalf("listenGRPC FAILED: %v", err) - } - t.Cleanup(func() { - 
// if we didn't cleanup the logger, the consecutive test - // might reuse this (t). - raft.ResetDefaultLogger() - }) - return m -} - -func memberLogger(t testutil.TB, name string) *zap.Logger { - level := zapcore.InfoLevel - if os.Getenv("CLUSTER_DEBUG") != "" { - level = zapcore.DebugLevel - } - - options := zaptest.WrapOptions(zap.Fields(zap.String("member", name))) - return zaptest.NewLogger(t, zaptest.Level(level), options).Named(name) -} - -// listenGRPC starts a grpc server over a unix domain socket on the member -func (m *Member) listenGRPC() error { - // prefix with localhost so cert has right domain - network, host, port := m.grpcAddr() - grpcAddr := net.JoinHostPort(host, port) - wd, err := os.Getwd() - if err != nil { - return err - } - m.Logger.Info("LISTEN GRPC", zap.String("grpcAddr", grpcAddr), zap.String("m.Name", m.Name), zap.String("workdir", wd)) - grpcListener, err := net.Listen(network, grpcAddr) - if err != nil { - return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err) - } - - addr := grpcListener.Addr().String() - host, port, err = net.SplitHostPort(addr) - if err != nil { - return fmt.Errorf("failed to parse grpc listen port from address %s (%v)", addr, err) - } - m.Port = port - m.GrpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), addr) - m.Logger.Info("LISTEN GRPC SUCCESS", zap.String("grpcAddr", m.GrpcURL), zap.String("m.Name", m.Name), - zap.String("workdir", wd), zap.String("port", m.Port)) - - if m.UseBridge { - _, err = m.addBridge() - if err != nil { - grpcListener.Close() - return err - } - } - m.GrpcListener = grpcListener - return nil -} - -func (m *Member) clientScheme() string { - switch { - case m.UseTCP && m.ClientTLSInfo != nil: - return "https" - case m.UseTCP && m.ClientTLSInfo == nil: - return "http" - case !m.UseTCP && m.ClientTLSInfo != nil: - return "unixs" - case !m.UseTCP && m.ClientTLSInfo == nil: - return "unix" - } - m.Logger.Panic("Failed to determine client schema") - return "" -} - -func (m 
*Member) addBridge() (*bridge, error) { - network, host, port := m.grpcAddr() - grpcAddr := net.JoinHostPort(host, m.Port) - bridgePort := fmt.Sprintf("%s%s", port, "0") - if m.UseTCP { - bridgePort = "0" - } - bridgeAddr := net.JoinHostPort(host, bridgePort) - m.Logger.Info("LISTEN BRIDGE", zap.String("grpc-address", bridgeAddr), zap.String("member", m.Name)) - bridgeListener, err := transport.NewUnixListener(bridgeAddr) - if err != nil { - return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err) - } - m.GrpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener) - if err != nil { - bridgeListener.Close() - return nil, err - } - addr := bridgeListener.Addr().String() - m.Logger.Info("LISTEN BRIDGE SUCCESS", zap.String("grpc-address", addr), zap.String("member", m.Name)) - m.GrpcURL = m.clientScheme() + "://" + addr - return m.GrpcBridge, nil -} - -func (m *Member) Bridge() *bridge { - if !m.UseBridge { - m.Logger.Panic("Bridge not available. 
Please configure using bridge before creating Cluster.") - } - return m.GrpcBridge -} - -func (m *Member) grpcAddr() (network, host, port string) { - // prefix with localhost so cert has right domain - host = "localhost" - if m.UseIP { // for IP-only TLS certs - host = "127.0.0.1" - } - network = "unix" - if m.UseTCP { - network = "tcp" - } - - if m.Port != "" { - return network, host, m.Port - } - - port = m.Name - if m.UseTCP { - // let net.Listen choose the port automatically - port = fmt.Sprintf("%d", 0) - } - return network, host, port -} - -func (m *Member) GrpcPortNumber() string { - return m.Port -} - -type dialer struct { - network string - addr string -} - -func (d dialer) Dial() (net.Conn, error) { - return net.Dial(d.network, d.addr) -} - -func (m *Member) ElectionTimeout() time.Duration { - return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond -} - -func (m *Member) ID() types.ID { return m.Server.MemberId() } - -// NewClientV3 creates a new grpc client connection to the member -func NewClientV3(m *Member) (*clientv3.Client, error) { - if m.GrpcURL == "" { - return nil, fmt.Errorf("member not configured for grpc") - } - - cfg := clientv3.Config{ - Endpoints: []string{m.GrpcURL}, - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize, - MaxCallRecvMsgSize: m.ClientMaxCallRecvMsgSize, - Logger: m.Logger.Named("client"), - } - - if m.ClientTLSInfo != nil { - tls, err := m.ClientTLSInfo.ClientConfig() - if err != nil { - return nil, err - } - cfg.TLS = tls - } - if m.DialOptions != nil { - cfg.DialOptions = append(cfg.DialOptions, m.DialOptions...) - } - return newClientV3(cfg) -} - -// Clone returns a member with the same server configuration. The returned -// member will not set PeerListeners and ClientListeners. 
-func (m *Member) Clone(t testutil.TB) *Member { - mm := &Member{} - mm.ServerConfig = m.ServerConfig - - var err error - clientURLStrs := m.ClientURLs.StringSlice() - mm.ClientURLs, err = types.NewURLs(clientURLStrs) - if err != nil { - // this should never fail - panic(err) - } - peerURLStrs := m.PeerURLs.StringSlice() - mm.PeerURLs, err = types.NewURLs(peerURLStrs) - if err != nil { - // this should never fail - panic(err) - } - clusterStr := m.InitialPeerURLsMap.String() - mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) - if err != nil { - // this should never fail - panic(err) - } - mm.InitialClusterToken = m.InitialClusterToken - mm.ElectionTicks = m.ElectionTicks - mm.PeerTLSInfo = m.PeerTLSInfo - mm.ClientTLSInfo = m.ClientTLSInfo - mm.Logger = memberLogger(t, mm.Name+"c") - return mm -} - -// Launch starts a member based on ServerConfig, PeerListeners -// and ClientListeners. -func (m *Member) Launch() error { - m.Logger.Info( - "launching a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - ) - var err error - if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil { - return fmt.Errorf("failed to initialize the etcd server: %v", err) - } - m.Server.SyncTicker = time.NewTicker(500 * time.Millisecond) - m.Server.Start() - - var peerTLScfg *tls.Config - if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() { - if peerTLScfg, err = m.PeerTLSInfo.ServerConfig(); err != nil { - return err - } - } - - if m.GrpcListener != nil { - var ( - tlscfg *tls.Config - ) - if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() { - tlscfg, err = m.ClientTLSInfo.ServerConfig() - if err != nil { - return err - } - } - m.GrpcServer = v3rpc.Server(m.Server, tlscfg, m.GrpcServerRecorder.UnaryInterceptor(), m.GrpcServerOpts...) 
- m.ServerClient = v3client.New(m.Server) - lockpb.RegisterLockServer(m.GrpcServer, v3lock.NewLockServer(m.ServerClient)) - epb.RegisterElectionServer(m.GrpcServer, v3election.NewElectionServer(m.ServerClient)) - go m.GrpcServer.Serve(m.GrpcListener) - } - - m.RaftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.Server)} - - h := (http.Handler)(m.RaftHandler) - if m.GrpcListener != nil { - h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - m.RaftHandler.ServeHTTP(w, r) - }) - } - - for _, ln := range m.PeerListeners { - cm := cmux.New(ln) - // don't hang on matcher after closing listener - cm.SetReadTimeout(time.Second) - - // serve http1/http2 rafthttp/grpc - ll := cm.Match(cmux.Any()) - if peerTLScfg != nil { - if ll, err = transport.NewTLSListener(ll, m.PeerTLSInfo); err != nil { - return err - } - } - hs := &httptest.Server{ - Listener: ll, - Config: &http.Server{ - Handler: h, - TLSConfig: peerTLScfg, - ErrorLog: log.New(io.Discard, "net/http", 0), - }, - TLS: peerTLScfg, - } - hs.Start() - - donec := make(chan struct{}) - go func() { - defer close(donec) - cm.Serve() - }() - closer := func() { - ll.Close() - hs.CloseClientConnections() - hs.Close() - <-donec - } - m.ServerClosers = append(m.ServerClosers, closer) - } - for _, ln := range m.ClientListeners { - handler := http.NewServeMux() - etcdhttp.HandleDebug(handler) - etcdhttp.HandleVersion(handler, m.Server) - etcdhttp.HandleMetrics(handler) - etcdhttp.HandleHealth(m.Logger, handler, m.Server) - hs := &httptest.Server{ - Listener: ln, - Config: &http.Server{ - Handler: handler, - ErrorLog: log.New(io.Discard, "net/http", 0), - }, - } - if m.ClientTLSInfo == nil { - hs.Start() - } else { - info := m.ClientTLSInfo - hs.TLS, err = info.ServerConfig() - if err != nil { - return err - } - - // baseConfig is called on initial TLS handshake start. - // - // Previously, - // 1. Server has non-empty (*tls.Config).Certificates on client hello - // 2. 
Server calls (*tls.Config).GetCertificate iff: - // - Server'Server (*tls.Config).Certificates is not empty, or - // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName - // - // When (*tls.Config).Certificates is always populated on initial handshake, - // client is expected to provide a valid matching SNI to pass the TLS - // verification, thus trigger server (*tls.Config).GetCertificate to reload - // TLS assets. However, a cert whose SAN field does not include domain names - // but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus - // it was never able to trigger TLS reload on initial handshake; first - // ceritifcate object was being used, never being updated. - // - // Now, (*tls.Config).Certificates is created empty on initial TLS client - // handshake, in order to trigger (*tls.Config).GetCertificate and populate - // rest of the certificates on every new TLS connection, even when client - // SNI is empty (e.g. cert only includes IPs). - // - // This introduces another problem with "httptest.Server": - // when server initial certificates are empty, certificates - // are overwritten by Go'Server internal test certs, which have - // different SAN fields (e.g. example.com). To work around, - // re-overwrite (*tls.Config).Certificates before starting - // test server. 
- tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil) - if err != nil { - return err - } - hs.TLS.Certificates = []tls.Certificate{*tlsCert} - - hs.StartTLS() - } - closer := func() { - ln.Close() - hs.CloseClientConnections() - hs.Close() - } - m.ServerClosers = append(m.ServerClosers, closer) - } - if m.GrpcURL != "" && m.Client == nil { - m.Client, err = NewClientV3(m) - if err != nil { - return err - } - } - - m.Logger.Info( - "launched a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - ) - return nil -} - -func (m *Member) RecordedRequests() []grpc_testing.RequestInfo { - return m.GrpcServerRecorder.RecordedRequests() -} - -func (m *Member) WaitOK(t testutil.TB) { - m.WaitStarted(t) - for m.Server.Leader() == 0 { - time.Sleep(framecfg.TickDuration) - } -} - -func (m *Member) WaitStarted(t testutil.TB) { - for { - ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) - _, err := m.Client.Get(ctx, "/", clientv3.WithSerializable()) - if err != nil { - time.Sleep(framecfg.TickDuration) - continue - } - cancel() - break - } -} - -func WaitClientV3(t testutil.TB, kv clientv3.KV) { - WaitClientV3WithKey(t, kv, "/") -} - -func WaitClientV3WithKey(t testutil.TB, kv clientv3.KV, key string) { - timeout := time.Now().Add(RequestTimeout) - var err error - for time.Now().Before(timeout) { - ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) - _, err = kv.Get(ctx, key) - cancel() - if err == nil { - return - } - time.Sleep(framecfg.TickDuration) - } - if err != nil { - t.Fatalf("timed out waiting for client: %v", err) - } -} - -func (m *Member) URL() string { return m.ClientURLs[0].String() } - -func (m *Member) Pause() { - m.RaftHandler.Pause() - m.Server.PauseSending() -} - -func (m *Member) Resume() { - m.RaftHandler.Resume() - m.Server.ResumeSending() -} - -// 
Close stops the member'Server etcdserver and closes its connections -func (m *Member) Close() { - if m.GrpcBridge != nil { - m.GrpcBridge.Close() - m.GrpcBridge = nil - } - if m.ServerClient != nil { - m.ServerClient.Close() - m.ServerClient = nil - } - if m.GrpcServer != nil { - ch := make(chan struct{}) - go func() { - defer close(ch) - // close listeners to stop accepting new connections, - // will block on any existing transports - m.GrpcServer.GracefulStop() - }() - // wait until all pending RPCs are finished - select { - case <-ch: - case <-time.After(2 * time.Second): - // took too long, manually close open transports - // e.g. watch streams - m.GrpcServer.Stop() - <-ch - } - m.GrpcServer = nil - } - if m.Server != nil { - m.Server.HardStop() - } - for _, f := range m.ServerClosers { - f() - } - if !m.Closed { - // Avoid verification of the same file multiple times - // (that might not exist any longer) - verify.MustVerifyIfEnabled(verify.Config{ - Logger: m.Logger, - DataDir: m.DataDir, - ExactIndex: false, - }) - } - m.Closed = true -} - -// Stop stops the member, but the data dir of the member is preserved. -func (m *Member) Stop(_ testutil.TB) { - m.Logger.Info( - "stopping a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - ) - m.Close() - m.ServerClosers = nil - m.Logger.Info( - "stopped a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - ) -} - -// CheckLeaderTransition waits for leader transition, returning the new leader ID. 
-func CheckLeaderTransition(m *Member, oldLead uint64) uint64 { - interval := time.Duration(m.Server.Cfg.TickMs) * time.Millisecond - for m.Server.Lead() == 0 || (m.Server.Lead() == oldLead) { - time.Sleep(interval) - } - return m.Server.Lead() -} - -// StopNotify unblocks when a member stop completes -func (m *Member) StopNotify() <-chan struct{} { - return m.Server.StopNotify() -} - -// Restart starts the member using the preserved data dir. -func (m *Member) Restart(t testutil.TB) error { - m.Logger.Info( - "restarting a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - ) - newPeerListeners := make([]net.Listener, 0) - for _, ln := range m.PeerListeners { - newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String())) - } - m.PeerListeners = newPeerListeners - newClientListeners := make([]net.Listener, 0) - for _, ln := range m.ClientListeners { - newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String())) - } - m.ClientListeners = newClientListeners - - if m.GrpcListener != nil { - if err := m.listenGRPC(); err != nil { - t.Fatal(err) - } - } - - err := m.Launch() - m.Logger.Info( - "restarted a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - zap.Error(err), - ) - return err -} - -// Terminate stops the member and removes the data dir. 
-func (m *Member) Terminate(t testutil.TB) { - m.Logger.Info( - "terminating a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - ) - m.Close() - if !m.KeepDataDirTerminate { - if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil { - t.Fatal(err) - } - } - m.Logger.Info( - "terminated a member", - zap.String("name", m.Name), - zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), - zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-url", m.GrpcURL), - ) -} - -// Metric gets the metric value for a member -func (m *Member) Metric(metricName string, expectLabels ...string) (string, error) { - cfgtls := transport.TLSInfo{} - tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second) - if err != nil { - return "", err - } - cli := &http.Client{Transport: tr} - resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics") - if err != nil { - return "", err - } - defer resp.Body.Close() - b, rerr := io.ReadAll(resp.Body) - if rerr != nil { - return "", rerr - } - lines := strings.Split(string(b), "\n") - for _, l := range lines { - if !strings.HasPrefix(l, metricName) { - continue - } - ok := true - for _, lv := range expectLabels { - if !strings.Contains(l, lv) { - ok = false - break - } - } - if !ok { - continue - } - return strings.Split(l, " ")[1], nil - } - return "", nil -} - -// InjectPartition drops connections from m to others, vice versa. -func (m *Member) InjectPartition(t testutil.TB, others ...*Member) { - for _, other := range others { - m.Server.CutPeer(other.Server.MemberId()) - other.Server.CutPeer(m.Server.MemberId()) - t.Logf("network partition injected between: %v <-> %v", m.Server.MemberId(), other.Server.MemberId()) - } -} - -// RecoverPartition recovers connections from m to others, vice versa. 
-func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) { - for _, other := range others { - m.Server.MendPeer(other.Server.MemberId()) - other.Server.MendPeer(m.Server.MemberId()) - t.Logf("network partition between: %v <-> %v", m.Server.MemberId(), other.Server.MemberId()) - } -} - -func (m *Member) ReadyNotify() <-chan struct{} { - return m.Server.ReadyNotify() -} - -type SortableMemberSliceByPeerURLs []*pb.Member - -func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) } -func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool { - return p[i].PeerURLs[0] < p[j].PeerURLs[0] -} -func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// NewCluster returns a launched Cluster with a grpc client connection -// for each Cluster member. -func NewCluster(t testutil.TB, cfg *ClusterConfig) *Cluster { - t.Helper() - - assertInTestContext(t) - - testutil.SkipTestIfShortMode(t, "Cannot start etcd Cluster in --short tests") - - c := &Cluster{Cfg: cfg} - ms := make([]*Member, cfg.Size) - for i := 0; i < cfg.Size; i++ { - ms[i] = c.mustNewMember(t) - } - c.Members = ms - if err := c.fillClusterForMembers(); err != nil { - t.Fatalf("fillClusterForMembers failed: %v", err) - } - c.Launch(t) - - return c -} - -func (c *Cluster) TakeClient(idx int) { - c.mu.Lock() - c.Members[idx].Client = nil - c.mu.Unlock() -} - -func (c *Cluster) Terminate(t testutil.TB) { - if t != nil { - t.Logf("========= Cluster termination started =====================") - } - for _, m := range c.Members { - if m.Client != nil { - m.Client.Close() - } - } - var wg sync.WaitGroup - wg.Add(len(c.Members)) - for _, m := range c.Members { - go func(mm *Member) { - defer wg.Done() - mm.Terminate(t) - }(m) - } - wg.Wait() - if t != nil { - t.Logf("========= Cluster termination succeeded ===================") - } -} - -func (c *Cluster) RandClient() *clientv3.Client { - return c.Members[rand.Intn(len(c.Members))].Client -} - -func (c *Cluster) Client(i int) 
*clientv3.Client { - return c.Members[i].Client -} - -func (c *Cluster) Endpoints() []string { - var endpoints []string - for _, m := range c.Members { - endpoints = append(endpoints, m.GrpcURL) - } - return endpoints -} - -func (c *Cluster) ClusterClient(t testing.TB, opts ...framecfg.ClientOption) (client *clientv3.Client, err error) { - cfg, err := c.newClientCfg() - if err != nil { - return nil, err - } - for _, opt := range opts { - opt(cfg) - } - client, err = newClientV3(*cfg) - if err != nil { - return nil, err - } - t.Cleanup(func() { - client.Close() - }) - return client, nil -} - -func WithAuth(userName, password string) framecfg.ClientOption { - return func(c any) { - cfg := c.(*clientv3.Config) - cfg.Username = userName - cfg.Password = password - } -} - -func (c *Cluster) newClientCfg() (*clientv3.Config, error) { - cfg := &clientv3.Config{ - Endpoints: c.Endpoints(), - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - MaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize, - MaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize, - } - if c.Cfg.ClientTLS != nil { - tls, err := c.Cfg.ClientTLS.ClientConfig() - if err != nil { - return nil, err - } - cfg.TLS = tls - } - return cfg, nil -} - -// NewClientV3 creates a new grpc client connection to the member -func (c *Cluster) NewClientV3(memberIndex int) (*clientv3.Client, error) { - return NewClientV3(c.Members[memberIndex]) -} - -func makeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client { - var mu sync.Mutex - *clients = nil - return func() *clientv3.Client { - cli, err := clus.NewClientV3(chooseMemberIndex()) - if err != nil { - t.Fatalf("cannot create client: %v", err) - } - mu.Lock() - *clients = append(*clients, cli) - mu.Unlock() - return cli - } -} - -// MakeSingleNodeClients creates factory of clients that all connect to member 0. -// All the created clients are put on the 'clients' list. 
The factory is thread-safe. -func MakeSingleNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client { - return makeClients(t, clus, clients, func() int { return 0 }) -} - -// MakeMultiNodeClients creates factory of clients that all connect to random members. -// All the created clients are put on the 'clients' list. The factory is thread-safe. -func MakeMultiNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client { - return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) }) -} - -// CloseClients closes all the clients from the 'clients' list. -func CloseClients(t testutil.TB, clients []*clientv3.Client) { - for _, cli := range clients { - if err := cli.Close(); err != nil { - t.Fatal(err) - } - } -} - -type GrpcAPI struct { - // Cluster is the Cluster API for the client'Server connection. - Cluster pb.ClusterClient - // KV is the keyvalue API for the client'Server connection. - KV pb.KVClient - // Lease is the lease API for the client'Server connection. - Lease pb.LeaseClient - // Watch is the watch API for the client'Server connection. - Watch pb.WatchClient - // Maintenance is the maintenance API for the client'Server connection. - Maintenance pb.MaintenanceClient - // Auth is the authentication API for the client'Server connection. - Auth pb.AuthClient - // Lock is the lock API for the client'Server connection. - Lock lockpb.LockClient - // Election is the election API for the client'Server connection. - Election epb.ElectionClient -} - -// GetLearnerMembers returns the list of learner members in Cluster using MemberList API. 
-func (c *Cluster) GetLearnerMembers() ([]*pb.Member, error) { - cli := c.Client(0) - resp, err := cli.MemberList(context.Background()) - if err != nil { - return nil, fmt.Errorf("failed to list member %v", err) - } - var learners []*pb.Member - for _, m := range resp.Members { - if m.IsLearner { - learners = append(learners, m) - } - } - return learners, nil -} - -// AddAndLaunchLearnerMember creates a learner member, adds it to Cluster -// via v3 MemberAdd API, and then launches the new member. -func (c *Cluster) AddAndLaunchLearnerMember(t testutil.TB) { - m := c.mustNewMember(t) - m.IsLearner = true - - scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) - peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()} - - cli := c.Client(0) - _, err := cli.MemberAddAsLearner(context.Background(), peerURLs) - if err != nil { - t.Fatalf("failed to add learner member %v", err) - } - - m.InitialPeerURLsMap = types.URLsMap{} - for _, mm := range c.Members { - m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs - } - m.InitialPeerURLsMap[m.Name] = m.PeerURLs - m.NewCluster = false - - if err := m.Launch(); err != nil { - t.Fatal(err) - } - - c.Members = append(c.Members, m) - - c.waitMembersMatch(t) -} - -// getMembers returns a list of members in Cluster, in format of etcdserverpb.Member -func (c *Cluster) getMembers() []*pb.Member { - var mems []*pb.Member - for _, m := range c.Members { - mem := &pb.Member{ - Name: m.Name, - PeerURLs: m.PeerURLs.StringSlice(), - ClientURLs: m.ClientURLs.StringSlice(), - IsLearner: m.IsLearner, - } - mems = append(mems, mem) - } - return mems -} - -// waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the -// local 'c.Members', which is the local recording of members in the testing Cluster. With -// the exception that the local recording c.Members does not have info on Member.ID, which -// is generated when the member is been added to Cluster. 
-// -// Note: -// A successful match means the Member.clientURLs are matched. This means member has already -// finished publishing its server attributes to Cluster. Publishing attributes is a Cluster-wide -// write request (in v2 server). Therefore, at this point, any raft log entries prior to this -// would have already been applied. -// -// If a new member was added to an existing Cluster, at this point, it has finished publishing -// its own server attributes to the Cluster. And therefore by the same argument, it has already -// applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point, -// the new member has the correct view of the Cluster configuration. -// -// Special note on learner member: -// Learner member is only added to a Cluster via v3rpc MemberAdd API (as of v3.4). When starting -// the learner member, its initial view of the Cluster created by peerURLs map does not have info -// on whether or not the new member itself is learner. But at this point, a successful match does -// indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode entry -// which was used to add the learner itself to the Cluster, and therefore it has the correct info -// on learner. 
-func (c *Cluster) waitMembersMatch(t testutil.TB) { - wMembers := c.getMembers() - sort.Sort(SortableProtoMemberSliceByPeerURLs(wMembers)) - cli := c.Client(0) - for { - resp, err := cli.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - - if len(resp.Members) != len(wMembers) { - continue - } - sort.Sort(SortableProtoMemberSliceByPeerURLs(resp.Members)) - for _, m := range resp.Members { - m.ID = 0 - } - if reflect.DeepEqual(resp.Members, wMembers) { - return - } - - time.Sleep(framecfg.TickDuration) - } -} - -type SortableProtoMemberSliceByPeerURLs []*pb.Member - -func (p SortableProtoMemberSliceByPeerURLs) Len() int { return len(p) } -func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool { - return p[i].PeerURLs[0] < p[j].PeerURLs[0] -} -func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// MustNewMember creates a new member instance based on the response of V3 Member Add API. -func (c *Cluster) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *Member { - m := c.mustNewMember(t) - m.IsLearner = resp.Member.IsLearner - m.NewCluster = false - - m.InitialPeerURLsMap = types.URLsMap{} - for _, mm := range c.Members { - m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs - } - m.InitialPeerURLsMap[m.Name] = types.MustNewURLs(resp.Member.PeerURLs) - c.Members = append(c.Members, m) - return m -} diff --git a/tests/framework/integration/cluster_direct.go b/tests/framework/integration/cluster_direct.go deleted file mode 100644 index 3bd8e5a5b01..00000000000 --- a/tests/framework/integration/cluster_direct.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !cluster_proxy - -package integration - -import ( - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" -) - -const ThroughProxy = false - -func ToGRPC(c *clientv3.Client) GrpcAPI { - return GrpcAPI{ - pb.NewClusterClient(c.ActiveConnection()), - pb.NewKVClient(c.ActiveConnection()), - pb.NewLeaseClient(c.ActiveConnection()), - pb.NewWatchClient(c.ActiveConnection()), - pb.NewMaintenanceClient(c.ActiveConnection()), - pb.NewAuthClient(c.ActiveConnection()), - v3lockpb.NewLockClient(c.ActiveConnection()), - v3electionpb.NewElectionClient(c.ActiveConnection()), - } -} - -func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { - return clientv3.New(cfg) -} diff --git a/tests/framework/integration/cluster_proxy.go b/tests/framework/integration/cluster_proxy.go deleted file mode 100644 index a70499070b4..00000000000 --- a/tests/framework/integration/cluster_proxy.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build cluster_proxy - -package integration - -import ( - "context" - "sync" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/namespace" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter" -) - -const ThroughProxy = true - -var ( - pmu sync.Mutex - proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy) -) - -const proxyNamespace = "proxy-namespace" - -type grpcClientProxy struct { - ctx context.Context - ctxCancel func() - grpc GrpcAPI - wdonec <-chan struct{} - kvdonec <-chan struct{} - lpdonec <-chan struct{} -} - -func ToGRPC(c *clientv3.Client) GrpcAPI { - pmu.Lock() - defer pmu.Unlock() - - // dedicated context bound to 'grpc-proxy' lifetype - // (so in practice lifetime of the client connection to the proxy). - // TODO: Refactor to a separate clientv3.Client instance instead of the context alone. 
- ctx, ctxCancel := context.WithCancel(context.WithValue(context.TODO(), "_name", "grpcProxyContext")) - - lg := c.GetLogger() - - if v, ok := proxies[c]; ok { - return v.grpc - } - - // test namespacing proxy - c.KV = namespace.NewKV(c.KV, proxyNamespace) - c.Watcher = namespace.NewWatcher(c.Watcher, proxyNamespace) - c.Lease = namespace.NewLease(c.Lease, proxyNamespace) - // test coalescing/caching proxy - kvp, kvpch := grpcproxy.NewKvProxy(c) - wp, wpch := grpcproxy.NewWatchProxy(ctx, lg, c) - lp, lpch := grpcproxy.NewLeaseProxy(ctx, c) - mp := grpcproxy.NewMaintenanceProxy(c) - clp, _ := grpcproxy.NewClusterProxy(lg, c, "", "") // without registering proxy URLs - authp := grpcproxy.NewAuthProxy(c) - lockp := grpcproxy.NewLockProxy(c) - electp := grpcproxy.NewElectionProxy(c) - - grpc := GrpcAPI{ - adapter.ClusterServerToClusterClient(clp), - adapter.KvServerToKvClient(kvp), - adapter.LeaseServerToLeaseClient(lp), - adapter.WatchServerToWatchClient(wp), - adapter.MaintenanceServerToMaintenanceClient(mp), - adapter.AuthServerToAuthClient(authp), - adapter.LockServerToLockClient(lockp), - adapter.ElectionServerToElectionClient(electp), - } - proxies[c] = grpcClientProxy{ctx: ctx, ctxCancel: ctxCancel, grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch} - return grpc -} - -type proxyCloser struct { - clientv3.Watcher - proxyCtxCancel func() - wdonec <-chan struct{} - kvdonec <-chan struct{} - lclose func() - lpdonec <-chan struct{} -} - -func (pc *proxyCloser) Close() error { - pc.proxyCtxCancel() - <-pc.kvdonec - err := pc.Watcher.Close() - <-pc.wdonec - pc.lclose() - <-pc.lpdonec - return err -} - -func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { - c, err := clientv3.New(cfg) - if err != nil { - return nil, err - } - rpc := ToGRPC(c) - c.KV = clientv3.NewKVFromKVClient(rpc.KV, c) - pmu.Lock() - lc := c.Lease - c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, c, cfg.DialTimeout) - c.Watcher = &proxyCloser{ - Watcher: 
clientv3.NewWatchFromWatchClient(rpc.Watch, c), - wdonec: proxies[c].wdonec, - kvdonec: proxies[c].kvdonec, - lclose: func() { lc.Close() }, - lpdonec: proxies[c].lpdonec, - proxyCtxCancel: proxies[c].ctxCancel, - } - pmu.Unlock() - return c, nil -} diff --git a/tests/framework/integration/integration.go b/tests/framework/integration/integration.go deleted file mode 100644 index df9db28b84e..00000000000 --- a/tests/framework/integration/integration.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "fmt" - "strings" - "testing" - - healthpb "google.golang.org/grpc/health/grpc_health_v1" - - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - etcdctlcmd "go.etcd.io/etcd/etcdctl/v3/ctlv3/command" - - "go.etcd.io/etcd/tests/v3/framework/config" - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" -) - -type integrationRunner struct{} - -func NewIntegrationRunner() intf.TestRunner { - return &integrationRunner{} -} - -func (e integrationRunner) TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} - -func (e integrationRunner) BeforeTest(t testing.TB) { - BeforeTest(t) -} - -func (e integrationRunner) NewCluster(ctx context.Context, t testing.TB, opts ...config.ClusterOption) intf.Cluster { - var err error - cfg := config.NewClusterConfig(opts...) - integrationCfg := ClusterConfig{ - Size: cfg.ClusterSize, - QuotaBackendBytes: cfg.QuotaBackendBytes, - DisableStrictReconfigCheck: !cfg.StrictReconfigCheck, - AuthToken: cfg.AuthToken, - SnapshotCount: uint64(cfg.SnapshotCount), - } - integrationCfg.ClientTLS, err = tlsInfo(t, cfg.ClientTLS) - if err != nil { - t.Fatalf("ClientTLS: %s", err) - } - integrationCfg.PeerTLS, err = tlsInfo(t, cfg.PeerTLS) - if err != nil { - t.Fatalf("PeerTLS: %s", err) - } - return &integrationCluster{ - Cluster: NewCluster(t, &integrationCfg), - t: t, - ctx: ctx, - } -} - -func tlsInfo(t testing.TB, cfg config.TLSConfig) (*transport.TLSInfo, error) { - switch cfg { - case config.NoTLS: - return nil, nil - case config.AutoTLS: - tls, err := transport.SelfCert(zap.NewNop(), t.TempDir(), []string{"localhost"}, 1) - if err != nil { - return nil, fmt.Errorf("failed to generate cert: %s", err) - } - return &tls, nil - case config.ManualTLS: - return &TestTLSInfo, nil - default: - return nil, fmt.Errorf("config %q not supported", cfg) - } -} - -type integrationCluster struct { - 
*Cluster - t testing.TB - ctx context.Context -} - -func (c *integrationCluster) Members() (ms []intf.Member) { - for _, m := range c.Cluster.Members { - ms = append(ms, integrationMember{Member: m, t: c.t}) - } - return ms -} - -type integrationMember struct { - *Member - t testing.TB -} - -func (m integrationMember) Client() intf.Client { - return integrationClient{Client: m.Member.Client} -} - -func (m integrationMember) Start(ctx context.Context) error { - return m.Member.Restart(m.t) -} - -func (m integrationMember) Stop() { - m.Member.Stop(m.t) -} - -func (c *integrationCluster) Close() error { - c.Terminate(c.t) - return nil -} - -func (c *integrationCluster) Client(opts ...config.ClientOption) (intf.Client, error) { - cc, err := c.ClusterClient(c.t, opts...) - if err != nil { - return nil, err - } - return integrationClient{Client: cc}, nil -} - -type integrationClient struct { - *clientv3.Client -} - -func (c integrationClient) Get(ctx context.Context, key string, o config.GetOptions) (*clientv3.GetResponse, error) { - if o.Timeout != 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, o.Timeout) - defer cancel() - } - var clientOpts []clientv3.OpOption - if o.Revision != 0 { - clientOpts = append(clientOpts, clientv3.WithRev(int64(o.Revision))) - } - if o.End != "" { - clientOpts = append(clientOpts, clientv3.WithRange(o.End)) - } - if o.Serializable { - clientOpts = append(clientOpts, clientv3.WithSerializable()) - } - if o.Prefix { - clientOpts = append(clientOpts, clientv3.WithPrefix()) - } - if o.Limit != 0 { - clientOpts = append(clientOpts, clientv3.WithLimit(int64(o.Limit))) - } - if o.FromKey { - clientOpts = append(clientOpts, clientv3.WithFromKey()) - } - if o.CountOnly { - clientOpts = append(clientOpts, clientv3.WithCountOnly()) - } - if o.SortBy != clientv3.SortByKey || o.Order != clientv3.SortNone { - clientOpts = append(clientOpts, clientv3.WithSort(o.SortBy, o.Order)) - } - return c.Client.Get(ctx, key, 
clientOpts...) -} - -func (c integrationClient) Put(ctx context.Context, key, value string, opts config.PutOptions) error { - var clientOpts []clientv3.OpOption - if opts.LeaseID != 0 { - clientOpts = append(clientOpts, clientv3.WithLease(opts.LeaseID)) - } - _, err := c.Client.Put(ctx, key, value, clientOpts...) - return err -} - -func (c integrationClient) Delete(ctx context.Context, key string, o config.DeleteOptions) (*clientv3.DeleteResponse, error) { - var clientOpts []clientv3.OpOption - if o.Prefix { - clientOpts = append(clientOpts, clientv3.WithPrefix()) - } - if o.FromKey { - clientOpts = append(clientOpts, clientv3.WithFromKey()) - } - if o.End != "" { - clientOpts = append(clientOpts, clientv3.WithRange(o.End)) - } - return c.Client.Delete(ctx, key, clientOpts...) -} - -func (c integrationClient) Compact(ctx context.Context, rev int64, o config.CompactOption) (*clientv3.CompactResponse, error) { - if o.Timeout != 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, o.Timeout) - defer cancel() - } - var clientOpts []clientv3.CompactOption - if o.Physical { - clientOpts = append(clientOpts, clientv3.WithCompactPhysical()) - } - return c.Client.Compact(ctx, rev, clientOpts...) 
-} - -func (c integrationClient) Status(ctx context.Context) ([]*clientv3.StatusResponse, error) { - endpoints := c.Client.Endpoints() - var resp []*clientv3.StatusResponse - for _, ep := range endpoints { - status, err := c.Client.Status(ctx, ep) - if err != nil { - return nil, err - } - resp = append(resp, status) - } - return resp, nil -} - -func (c integrationClient) HashKV(ctx context.Context, rev int64) ([]*clientv3.HashKVResponse, error) { - endpoints := c.Client.Endpoints() - var resp []*clientv3.HashKVResponse - for _, ep := range endpoints { - hashKV, err := c.Client.HashKV(ctx, ep, rev) - if err != nil { - return nil, err - } - resp = append(resp, hashKV) - } - return resp, nil -} - -func (c integrationClient) Health(ctx context.Context) error { - cli := healthpb.NewHealthClient(c.Client.ActiveConnection()) - resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) - if err != nil { - return err - } - if resp.Status != healthpb.HealthCheckResponse_SERVING { - return fmt.Errorf("status expected %s, got %s", healthpb.HealthCheckResponse_SERVING, resp.Status) - } - return nil -} - -func (c integrationClient) Defragment(ctx context.Context, o config.DefragOption) error { - if o.Timeout != 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, o.Timeout) - defer cancel() - } - for _, ep := range c.Endpoints() { - _, err := c.Client.Defragment(ctx, ep) - if err != nil { - return err - } - } - return nil -} - -func (c integrationClient) TimeToLive(ctx context.Context, id clientv3.LeaseID, o config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) { - var leaseOpts []clientv3.LeaseOption - if o.WithAttachedKeys { - leaseOpts = append(leaseOpts, clientv3.WithAttachedKeys()) - } - - return c.Client.TimeToLive(ctx, id, leaseOpts...) 
-} - -func (c integrationClient) Leases(ctx context.Context) (*clientv3.LeaseLeasesResponse, error) { - return c.Client.Leases(ctx) -} - -func (c integrationClient) KeepAliveOnce(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseKeepAliveResponse, error) { - return c.Client.KeepAliveOnce(ctx, id) -} - -func (c integrationClient) Revoke(ctx context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) { - return c.Client.Revoke(ctx, id) -} - -func (c integrationClient) AuthEnable(ctx context.Context) error { - _, err := c.Client.AuthEnable(ctx) - return err -} - -func (c integrationClient) AuthDisable(ctx context.Context) error { - _, err := c.Client.AuthDisable(ctx) - return err -} - -func (c integrationClient) AuthStatus(ctx context.Context) (*clientv3.AuthStatusResponse, error) { - return c.Client.AuthStatus(ctx) -} - -func (c integrationClient) UserAdd(ctx context.Context, name, password string, opts config.UserAddOptions) (*clientv3.AuthUserAddResponse, error) { - return c.Client.UserAddWithOptions(ctx, name, password, &clientv3.UserAddOptions{ - NoPassword: opts.NoPassword, - }) -} - -func (c integrationClient) UserGet(ctx context.Context, name string) (*clientv3.AuthUserGetResponse, error) { - return c.Client.UserGet(ctx, name) -} - -func (c integrationClient) UserList(ctx context.Context) (*clientv3.AuthUserListResponse, error) { - return c.Client.UserList(ctx) -} - -func (c integrationClient) UserDelete(ctx context.Context, name string) (*clientv3.AuthUserDeleteResponse, error) { - return c.Client.UserDelete(ctx, name) -} - -func (c integrationClient) UserChangePass(ctx context.Context, user, newPass string) error { - _, err := c.Client.UserChangePassword(ctx, user, newPass) - return err -} - -func (c integrationClient) UserGrantRole(ctx context.Context, user string, role string) (*clientv3.AuthUserGrantRoleResponse, error) { - return c.Client.UserGrantRole(ctx, user, role) -} - -func (c integrationClient) UserRevokeRole(ctx 
context.Context, user string, role string) (*clientv3.AuthUserRevokeRoleResponse, error) { - return c.Client.UserRevokeRole(ctx, user, role) -} - -func (c integrationClient) RoleAdd(ctx context.Context, name string) (*clientv3.AuthRoleAddResponse, error) { - return c.Client.RoleAdd(ctx, name) -} - -func (c integrationClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType clientv3.PermissionType) (*clientv3.AuthRoleGrantPermissionResponse, error) { - return c.Client.RoleGrantPermission(ctx, name, key, rangeEnd, permType) -} - -func (c integrationClient) RoleGet(ctx context.Context, role string) (*clientv3.AuthRoleGetResponse, error) { - return c.Client.RoleGet(ctx, role) -} - -func (c integrationClient) RoleList(ctx context.Context) (*clientv3.AuthRoleListResponse, error) { - return c.Client.RoleList(ctx) -} - -func (c integrationClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*clientv3.AuthRoleRevokePermissionResponse, error) { - return c.Client.RoleRevokePermission(ctx, role, key, rangeEnd) -} - -func (c integrationClient) RoleDelete(ctx context.Context, role string) (*clientv3.AuthRoleDeleteResponse, error) { - return c.Client.RoleDelete(ctx, role) -} - -func (c integrationClient) Txn(ctx context.Context, compares, ifSucess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) { - txn := c.Client.Txn(ctx) - var cmps []clientv3.Cmp - for _, c := range compares { - cmp, err := etcdctlcmd.ParseCompare(c) - if err != nil { - return nil, err - } - cmps = append(cmps, *cmp) - } - succOps, err := getOps(ifSucess) - if err != nil { - return nil, err - } - failOps, err := getOps(ifFail) - if err != nil { - return nil, err - } - txnrsp, err := txn. - If(cmps...). - Then(succOps...). - Else(failOps...). 
- Commit() - return txnrsp, err -} - -func getOps(ss []string) ([]clientv3.Op, error) { - var ops []clientv3.Op - for _, s := range ss { - s = strings.TrimSpace(s) - args := etcdctlcmd.Argify(s) - switch args[0] { - case "get": - ops = append(ops, clientv3.OpGet(args[1])) - case "put": - ops = append(ops, clientv3.OpPut(args[1], args[2])) - case "del": - ops = append(ops, clientv3.OpDelete(args[1])) - } - } - return ops, nil -} - -func (c integrationClient) Watch(ctx context.Context, key string, opts config.WatchOptions) clientv3.WatchChan { - var opOpts []clientv3.OpOption - if opts.Prefix { - opOpts = append(opOpts, clientv3.WithPrefix()) - } - if opts.Revision != 0 { - opOpts = append(opOpts, clientv3.WithRev(opts.Revision)) - } - if opts.RangeEnd != "" { - opOpts = append(opOpts, clientv3.WithRange(opts.RangeEnd)) - } - - return c.Client.Watch(ctx, key, opOpts...) -} - -func (c integrationClient) MemberAdd(ctx context.Context, _ string, peerAddrs []string) (*clientv3.MemberAddResponse, error) { - return c.Client.MemberAdd(ctx, peerAddrs) -} - -func (c integrationClient) MemberAddAsLearner(ctx context.Context, _ string, peerAddrs []string) (*clientv3.MemberAddResponse, error) { - return c.Client.MemberAddAsLearner(ctx, peerAddrs) -} - -func (c integrationClient) MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) { - return c.Client.MemberRemove(ctx, id) -} diff --git a/tests/framework/integration/testing.go b/tests/framework/integration/testing.go deleted file mode 100644 index 7a4613a8ac2..00000000000 --- a/tests/framework/integration/testing.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "os" - "testing" - - grpc_logsettable "github.com/grpc-ecosystem/go-grpc-middleware/logging/settable" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zapgrpc" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/verify" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/embed" -) - -var grpc_logger grpc_logsettable.SettableLoggerV2 -var insideTestContext bool - -func init() { - grpc_logger = grpc_logsettable.ReplaceGrpcLoggerV2() -} - -type testOptions struct { - goLeakDetection bool - skipInShort bool -} - -func newTestOptions(opts ...TestOption) *testOptions { - o := &testOptions{goLeakDetection: true, skipInShort: true} - for _, opt := range opts { - opt(o) - } - return o -} - -type TestOption func(opt *testOptions) - -// WithoutGoLeakDetection disables checking whether a testcase leaked a goroutine. -func WithoutGoLeakDetection() TestOption { - return func(opt *testOptions) { opt.goLeakDetection = false } -} - -func WithoutSkipInShort() TestOption { - return func(opt *testOptions) { opt.skipInShort = false } -} - -// BeforeTestExternal initializes test context and is targeted for external APIs. -// In general the `integration` package is not targeted to be used outside of -// etcd project, but till the dedicated package is developed, this is -// the best entry point so far (without backward compatibility promise). 
-func BeforeTestExternal(t testutil.TB) { - BeforeTest(t, WithoutSkipInShort(), WithoutGoLeakDetection()) -} - -func BeforeTest(t testutil.TB, opts ...TestOption) { - t.Helper() - options := newTestOptions(opts...) - - if insideTestContext { - t.Fatal("already in test context. BeforeTest was likely already called") - } - - if options.skipInShort { - testutil.SkipTestIfShortMode(t, "Cannot create clusters in --short tests") - } - - if options.goLeakDetection { - testutil.RegisterLeakDetection(t) - } - - previousWD, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - previousInsideTestContext := insideTestContext - - // Integration tests should verify written state as much as possible. - revertFunc := verify.EnableAllVerifications() - - // Registering cleanup early, such it will get executed even if the helper fails. - t.Cleanup(func() { - grpc_logger.Reset() - insideTestContext = previousInsideTestContext - os.Chdir(previousWD) - revertFunc() - }) - - grpc_logger.Set(zapgrpc.NewLogger(zaptest.NewLogger(t).Named("grpc"))) - insideTestContext = true - - os.Chdir(t.TempDir()) -} - -func assertInTestContext(t testutil.TB) { - if !insideTestContext { - t.Errorf("the function can be called only in the test context. 
Was integration.BeforeTest() called ?") - } -} - -func NewEmbedConfig(t testing.TB, name string) *embed.Config { - cfg := embed.NewConfig() - cfg.Name = name - lg := zaptest.NewLogger(t, zaptest.Level(zapcore.InfoLevel)).Named(cfg.Name) - cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(lg) - cfg.Dir = t.TempDir() - return cfg -} - -func NewClient(t testing.TB, cfg clientv3.Config) (*clientv3.Client, error) { - if cfg.Logger == nil { - cfg.Logger = zaptest.NewLogger(t).Named("client") - } - return clientv3.New(cfg) -} diff --git a/tests/framework/interfaces/interface.go b/tests/framework/interfaces/interface.go deleted file mode 100644 index 6bafbd1e8e6..00000000000 --- a/tests/framework/interfaces/interface.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package interfaces - -import ( - "context" - "testing" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" -) - -type TestRunner interface { - TestMain(m *testing.M) - BeforeTest(testing.TB) - NewCluster(context.Context, testing.TB, ...config.ClusterOption) Cluster -} - -type Cluster interface { - Members() []Member - Client(opts ...config.ClientOption) (Client, error) - WaitLeader(t testing.TB) int - Close() error - Endpoints() []string -} - -type Member interface { - Client() Client - Start(ctx context.Context) error - Stop() -} - -type Client interface { - Put(context context.Context, key, value string, opts config.PutOptions) error - Get(context context.Context, key string, opts config.GetOptions) (*clientv3.GetResponse, error) - Delete(context context.Context, key string, opts config.DeleteOptions) (*clientv3.DeleteResponse, error) - Compact(context context.Context, rev int64, opts config.CompactOption) (*clientv3.CompactResponse, error) - Status(context context.Context) ([]*clientv3.StatusResponse, error) - HashKV(context context.Context, rev int64) ([]*clientv3.HashKVResponse, error) - Health(context context.Context) error - Defragment(context context.Context, opts config.DefragOption) error - AlarmList(context context.Context) (*clientv3.AlarmResponse, error) - AlarmDisarm(context context.Context, alarmMember *clientv3.AlarmMember) (*clientv3.AlarmResponse, error) - Grant(context context.Context, ttl int64) (*clientv3.LeaseGrantResponse, error) - TimeToLive(context context.Context, id clientv3.LeaseID, opts config.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) - Leases(context context.Context) (*clientv3.LeaseLeasesResponse, error) - KeepAliveOnce(context context.Context, id clientv3.LeaseID) (*clientv3.LeaseKeepAliveResponse, error) - Revoke(context context.Context, id clientv3.LeaseID) (*clientv3.LeaseRevokeResponse, error) - - AuthEnable(context context.Context) error - AuthDisable(context context.Context) 
error - AuthStatus(context context.Context) (*clientv3.AuthStatusResponse, error) - UserAdd(context context.Context, name, password string, opts config.UserAddOptions) (*clientv3.AuthUserAddResponse, error) - UserGet(context context.Context, name string) (*clientv3.AuthUserGetResponse, error) - UserList(context context.Context) (*clientv3.AuthUserListResponse, error) - UserDelete(context context.Context, name string) (*clientv3.AuthUserDeleteResponse, error) - UserChangePass(context context.Context, user, newPass string) error - UserGrantRole(context context.Context, user string, role string) (*clientv3.AuthUserGrantRoleResponse, error) - UserRevokeRole(context context.Context, user string, role string) (*clientv3.AuthUserRevokeRoleResponse, error) - RoleAdd(context context.Context, name string) (*clientv3.AuthRoleAddResponse, error) - RoleGrantPermission(context context.Context, name string, key, rangeEnd string, permType clientv3.PermissionType) (*clientv3.AuthRoleGrantPermissionResponse, error) - RoleGet(context context.Context, role string) (*clientv3.AuthRoleGetResponse, error) - RoleList(context context.Context) (*clientv3.AuthRoleListResponse, error) - RoleRevokePermission(context context.Context, role string, key, rangeEnd string) (*clientv3.AuthRoleRevokePermissionResponse, error) - RoleDelete(context context.Context, role string) (*clientv3.AuthRoleDeleteResponse, error) - - Txn(context context.Context, compares, ifSucess, ifFail []string, o config.TxnOptions) (*clientv3.TxnResponse, error) - - MemberList(context context.Context) (*clientv3.MemberListResponse, error) - MemberAdd(context context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error) - MemberAddAsLearner(context context.Context, name string, peerAddrs []string) (*clientv3.MemberAddResponse, error) - MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) - - Watch(ctx context.Context, key string, opts config.WatchOptions) 
clientv3.WatchChan -} diff --git a/tests/framework/testrunner.go b/tests/framework/testrunner.go deleted file mode 100644 index d5da5e79cbf..00000000000 --- a/tests/framework/testrunner.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package framework - -import ( - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" - - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/framework/integration" - "go.etcd.io/etcd/tests/v3/framework/unit" -) - -var ( - // UnitTestRunner only runs in `--short` mode, will fail otherwise. Attempts in cluster creation will result in tests being skipped. - UnitTestRunner intf.TestRunner = unit.NewUnitRunner() - // E2eTestRunner runs etcd and etcdctl binaries in a separate process. - E2eTestRunner = e2e.NewE2eRunner() - // IntegrationTestRunner runs etcdserver.EtcdServer in separate goroutine and uses client libraries to communicate. - IntegrationTestRunner = integration.NewIntegrationRunner() -) diff --git a/tests/framework/testutils/execute.go b/tests/framework/testutils/execute.go deleted file mode 100644 index d9c3d335879..00000000000 --- a/tests/framework/testutils/execute.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testutils - -import ( - "context" - "fmt" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func ExecuteWithTimeout(t *testing.T, timeout time.Duration, f func()) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - ExecuteUntil(ctx, t, f) -} - -func ExecuteUntil(ctx context.Context, t *testing.T, f func()) { - deadline, deadlineSet := ctx.Deadline() - timeout := time.Until(deadline) - donec := make(chan struct{}) - go func() { - defer close(donec) - f() - }() - - select { - case <-ctx.Done(): - msg := ctx.Err().Error() - if deadlineSet { - msg = fmt.Sprintf("test timed out after %v, err: %v", timeout, msg) - } - testutil.FatalStack(t, msg) - case <-donec: - } -} diff --git a/tests/framework/testutils/helpters.go b/tests/framework/testutils/helpters.go deleted file mode 100644 index 91363176c2f..00000000000 --- a/tests/framework/testutils/helpters.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package testutils - -import ( - "errors" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" -) - -type KV struct { - Key, Val string -} - -func KeysFromGetResponse(resp *clientv3.GetResponse) (kvs []string) { - for _, kv := range resp.Kvs { - kvs = append(kvs, string(kv.Key)) - } - return kvs -} - -func KeyValuesFromGetResponse(resp *clientv3.GetResponse) (kvs []KV) { - for _, kv := range resp.Kvs { - kvs = append(kvs, KV{Key: string(kv.Key), Val: string(kv.Value)}) - } - return kvs -} - -func KeyValuesFromWatchResponse(resp clientv3.WatchResponse) (kvs []KV) { - for _, event := range resp.Events { - kvs = append(kvs, KV{Key: string(event.Kv.Key), Val: string(event.Kv.Value)}) - } - return kvs -} - -func KeyValuesFromWatchChan(wch clientv3.WatchChan, wantedLen int, timeout time.Duration) (kvs []KV, err error) { - for { - select { - case watchResp, ok := <-wch: - if ok { - kvs = append(kvs, KeyValuesFromWatchResponse(watchResp)...) - if len(kvs) == wantedLen { - return kvs, nil - } - } - case <-time.After(timeout): - return nil, errors.New("closed watcher channel should not block") - } - } -} - -func MustClient(c intf.Client, err error) intf.Client { - if err != nil { - panic(err) - } - return c -} diff --git a/tests/framework/testutils/path.go b/tests/framework/testutils/path.go deleted file mode 100644 index 3b9e6521369..00000000000 --- a/tests/framework/testutils/path.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package testutils - -import "path/filepath" - -func MustAbsPath(path string) string { - abs, err := filepath.Abs(path) - if err != nil { - panic(err) - } - return abs -} diff --git a/tests/framework/unit/unit.go b/tests/framework/unit/unit.go deleted file mode 100644 index f822b7dd1f9..00000000000 --- a/tests/framework/unit/unit.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package unit - -import ( - "context" - "flag" - "fmt" - "os" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/tests/v3/framework/config" - intf "go.etcd.io/etcd/tests/v3/framework/interfaces" -) - -type unitRunner struct{} - -var _ intf.TestRunner = (*unitRunner)(nil) - -func NewUnitRunner() intf.TestRunner { - return &unitRunner{} -} - -func (e unitRunner) TestMain(m *testing.M) { - flag.Parse() - if !testing.Short() { - fmt.Println(`No test mode selected, please selected either e2e mode with "--tags e2e" or integration mode with "--tags integration"`) - os.Exit(1) - } -} - -func (e unitRunner) BeforeTest(t testing.TB) { -} - -func (e unitRunner) NewCluster(ctx context.Context, t testing.TB, opts ...config.ClusterOption) intf.Cluster { - testutil.SkipTestIfShortMode(t, "Cannot create clusters in --short tests") - return nil -} diff --git a/tests/functional/Dockerfile b/tests/functional/Dockerfile deleted file mode 100644 index fca3646f711..00000000000 --- a/tests/functional/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM fedora:35 - -RUN dnf check-update || true \ - && dnf install --assumeyes \ - git curl wget mercurial meld gcc gcc-c++ which \ - gcc automake autoconf dh-autoreconf libtool libtool-ltdl \ - tar unzip gzip \ - && dnf check-update || true \ - && dnf upgrade --assumeyes || true \ - && dnf autoremove --assumeyes || true \ - && dnf clean all || true - -ENV GOROOT /usr/local/go -ENV GOPATH /go -ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} -ENV GO_VERSION 1.19.5 -ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang -RUN rm -rf ${GOROOT} \ - && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ - && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ - && go version - -RUN mkdir -p ${GOPATH}/src/go.etcd.io/etcd -ADD . 
${GOPATH}/src/go.etcd.io/etcd -ADD ./tests/functional/functional.yaml /functional.yaml - -RUN go get -v go.etcd.io/gofail \ - && pushd ${GOPATH}/src/go.etcd.io/etcd \ - && GO_BUILD_FLAGS="-v" ./scripts/build.sh \ - && mkdir -p /bin \ - && cp ./bin/etcd /bin/etcd \ - && cp ./bin/etcdctl /bin/etcdctl \ - && GO_BUILD_FLAGS="-v" FAILPOINTS=1 ./scripts/build.sh \ - && cp ./bin/etcd /bin/etcd-failpoints \ - && ./tests/functional/build \ - && cp ./bin/etcd-agent /bin/etcd-agent \ - && cp ./bin/etcd-proxy /bin/etcd-proxy \ - && cp ./bin/etcd-runner /bin/etcd-runner \ - && cp ./bin/etcd-tester /bin/etcd-tester \ - && go build -v -o /bin/benchmark ./tools/benchmark \ - && popd \ - && rm -rf ${GOPATH}/src/go.etcd.io/etcd diff --git a/tests/functional/Procfile-proxy b/tests/functional/Procfile-proxy deleted file mode 100644 index 66730ee779a..00000000000 --- a/tests/functional/Procfile-proxy +++ /dev/null @@ -1,14 +0,0 @@ -s1: bin/etcd --name s1 --data-dir /tmp/etcd-proxy-data.s1 --listen-client-urls http://127.0.0.1:1379 --advertise-client-urls http://127.0.0.1:13790 --listen-peer-urls http://127.0.0.1:1380 --initial-advertise-peer-urls http://127.0.0.1:13800 --initial-cluster-token tkn --initial-cluster 's1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800' --initial-cluster-state new - -s1-client-proxy: bin/etcd-proxy --from localhost:13790 --to localhost:1379 --http-port 1378 -s1-peer-proxy: bin/etcd-proxy --from localhost:13800 --to localhost:1380 --http-port 1381 - -s2: bin/etcd --name s2 --data-dir /tmp/etcd-proxy-data.s2 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:23790 --listen-peer-urls http://127.0.0.1:2380 --initial-advertise-peer-urls http://127.0.0.1:23800 --initial-cluster-token tkn --initial-cluster 's1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800' --initial-cluster-state new - -s2-client-proxy: bin/etcd-proxy --from localhost:23790 --to localhost:2379 --http-port 
2378 -s2-peer-proxy: bin/etcd-proxy --from localhost:23800 --to localhost:2380 --http-port 2381 - -s3: bin/etcd --name s3 --data-dir /tmp/etcd-proxy-data.s3 --listen-client-urls http://127.0.0.1:3379 --advertise-client-urls http://127.0.0.1:33790 --listen-peer-urls http://127.0.0.1:3380 --initial-advertise-peer-urls http://127.0.0.1:33800 --initial-cluster-token tkn --initial-cluster 's1=http://127.0.0.1:13800,s2=http://127.0.0.1:23800,s3=http://127.0.0.1:33800' --initial-cluster-state new - -s3-client-proxy: bin/etcd-proxy --from localhost:33790 --to localhost:3379 --http-port 3378 -s3-client-proxy: bin/etcd-proxy --from localhost:33800 --to localhost:3380 --http-port 3381 diff --git a/tests/functional/README.md b/tests/functional/README.md deleted file mode 100644 index 933abe14272..00000000000 --- a/tests/functional/README.md +++ /dev/null @@ -1,218 +0,0 @@ -## etcd Functional Testing - -[`functional`](https://godoc.org/github.com/coreos/etcd/functional) verifies the correct behavior of etcd under various system and network malfunctions. It sets up an etcd cluster under high pressure loads and continuously injects failures into the cluster. Then it expects the etcd cluster to recover within a few seconds. This has been extremely helpful to find critical bugs. - -See [`rpcpb.Case`](https://godoc.org/github.com/coreos/etcd/functional/rpcpb#Case) for all failure cases. - -See [functional.yaml](https://github.com/etcd-io/etcd/blob/main/tests/functional/functional.yaml) for an example configuration. - -### Run locally - -```bash -PASSES=functional ./test -``` - -### Run with Docker - -```bash -pushd .. -make build-docker-functional push-docker-functional pull-docker-functional -popd -``` - -And run [example scripts](./scripts). 
- -```bash -# run 3 agents for 3-node local etcd cluster -./functional/scripts/docker-local-agent.sh 1 -./functional/scripts/docker-local-agent.sh 2 -./functional/scripts/docker-local-agent.sh 3 - -# to run only 1 tester round -./functional/scripts/docker-local-tester.sh -``` - -## etcd Proxy - -Proxy layer that simulates various network conditions. - -Test locally - -```bash -$ ./scripts/build.sh -$ ./bin/etcd - -$ make build-functional - -$ ./bin/etcd-proxy --help -$ ./bin/etcd-proxy --from localhost:23790 --to localhost:2379 --http-port 2378 --verbose - -$ ./bin/etcdctl --endpoints localhost:2379 put foo bar -$ ./bin/etcdctl --endpoints localhost:23790 put foo bar -``` - -Proxy overhead per request is under 500μs - -```bash -$ go build -v -o ./bin/benchmark ./tools/benchmark - -$ ./bin/benchmark \ - --endpoints localhost:2379 \ - --conns 5 \ - --clients 15 \ - put \ - --key-size 48 \ - --val-size 50000 \ - --total 10000 - -< tcp://localhost:2379] - -$ ./bin/etcdctl \ - --endpoints localhost:23790 \ - put foo bar -# Error: context deadline exceeded - -$ curl -L http://localhost:2378/pause-tx -X DELETE -# unpaused forwarding [tcp://localhost:23790 -> tcp://localhost:2379] -``` - -Drop client packets - -```bash -$ curl -L http://localhost:2378/blackhole-tx -X PUT -# blackholed; dropping packets [tcp://localhost:23790 -> tcp://localhost:2379] - -$ ./bin/etcdctl --endpoints localhost:23790 put foo bar -# Error: context deadline exceeded - -$ curl -L http://localhost:2378/blackhole-tx -X DELETE -# unblackholed; restart forwarding [tcp://localhost:23790 -> tcp://localhost:2379] -``` - -Trigger leader election - -```bash -$ ./scripts/build.sh -$ make build-functional - -$ rm -rf /tmp/etcd-proxy-data.s* -$ goreman -f ./functional/Procfile-proxy start - -$ ./bin/etcdctl \ - --endpoints localhost:13790,localhost:23790,localhost:33790 \ - member list - -# isolate s1 when s1 is the current leader -$ curl -L http://localhost:1381/blackhole-tx -X PUT -$ curl -L 
http://localhost:1381/blackhole-rx -X PUT -# s1 becomes follower after election timeout -``` diff --git a/tests/functional/agent/doc.go b/tests/functional/agent/doc.go deleted file mode 100644 index 0195c4c7404..00000000000 --- a/tests/functional/agent/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package agent implements functional-tester agent server. -package agent diff --git a/tests/functional/agent/handler.go b/tests/functional/agent/handler.go deleted file mode 100644 index f31b306adb7..00000000000 --- a/tests/functional/agent/handler.go +++ /dev/null @@ -1,702 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package agent - -import ( - "errors" - "fmt" - "net/url" - "os" - "os/exec" - "path/filepath" - "syscall" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/proxy" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -// return error for system errors (e.g. fail to create files) -// return status error in response for wrong configuration/operation (e.g. start etcd twice) -func (srv *Server) handleTesterRequest(req *rpcpb.Request) (resp *rpcpb.Response, err error) { - defer func() { - if err == nil && req != nil { - srv.last = req.Operation - srv.lg.Info("handler success", zap.String("operation", req.Operation.String())) - } - }() - if req != nil { - srv.Member = req.Member - srv.Tester = req.Tester - } - - switch req.Operation { - case rpcpb.Operation_INITIAL_START_ETCD: - return srv.handle_INITIAL_START_ETCD(req) - case rpcpb.Operation_RESTART_ETCD: - return srv.handle_RESTART_ETCD(req) - - case rpcpb.Operation_SIGTERM_ETCD: - return srv.handle_SIGTERM_ETCD() - case rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA: - return srv.handle_SIGQUIT_ETCD_AND_REMOVE_DATA() - - case rpcpb.Operation_SAVE_SNAPSHOT: - return srv.handle_SAVE_SNAPSHOT() - case rpcpb.Operation_RESTORE_RESTART_FROM_SNAPSHOT: - return srv.handle_RESTORE_RESTART_FROM_SNAPSHOT(req) - case rpcpb.Operation_RESTART_FROM_SNAPSHOT: - return srv.handle_RESTART_FROM_SNAPSHOT(req) - - case rpcpb.Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA: - return srv.handle_SIGQUIT_ETCD_AND_ARCHIVE_DATA() - - case rpcpb.Operation_BLACKHOLE_PEER_PORT_TX_RX: - return srv.handle_BLACKHOLE_PEER_PORT_TX_RX(), nil - case rpcpb.Operation_UNBLACKHOLE_PEER_PORT_TX_RX: - return srv.handle_UNBLACKHOLE_PEER_PORT_TX_RX(), nil - case rpcpb.Operation_DELAY_PEER_PORT_TX_RX: - return srv.handle_DELAY_PEER_PORT_TX_RX(), nil - case rpcpb.Operation_UNDELAY_PEER_PORT_TX_RX: - return srv.handle_UNDELAY_PEER_PORT_TX_RX(), nil - - default: - msg := fmt.Sprintf("operation not found (%v)", req.Operation) 
- return &rpcpb.Response{Success: false, Status: msg}, errors.New(msg) - } -} - -// just archive the first file -func (srv *Server) createEtcdLogFile() error { - var err error - if srv.etcdLogFile, err = os.Create(srv.Member.Etcd.LogOutputs[0]); err != nil { - return err - } - srv.lg.Info("created etcd log file", zap.String("path", srv.Member.Etcd.LogOutputs[0])) - return nil -} - -func (srv *Server) createEtcd(fromSnapshot bool, failpoints string) error { - if !fileutil.Exist(srv.Member.EtcdExec) { - return fmt.Errorf("unknown etcd exec path %q does not exist", srv.Member.EtcdExec) - } - - etcdPath, etcdFlags := srv.Member.EtcdExec, srv.Member.Etcd.Flags() - if fromSnapshot { - etcdFlags = srv.Member.EtcdOnSnapshotRestore.Flags() - } - u, _ := url.Parse(srv.Member.FailpointHTTPAddr) - srv.lg.Info( - "creating etcd command", - zap.String("etcd-exec", etcdPath), - zap.Strings("etcd-flags", etcdFlags), - zap.String("GOFAIL_FAILPOINTS", failpoints), - zap.String("failpoint-http-addr", srv.Member.FailpointHTTPAddr), - zap.String("failpoint-addr", u.Host), - ) - srv.etcdCmd = exec.Command(etcdPath, etcdFlags...) 
- srv.etcdCmd.Env = []string{"GOFAIL_HTTP=" + u.Host} - if failpoints != "" { - srv.etcdCmd.Env = append(srv.etcdCmd.Env, "GOFAIL_FAILPOINTS="+failpoints) - } - srv.etcdCmd.Stdout = srv.etcdLogFile - srv.etcdCmd.Stderr = srv.etcdLogFile - return nil -} - -// start but do not wait for it to complete -func (srv *Server) runEtcd() error { - errc := make(chan error) - go func() { - time.Sleep(1 * time.Second) - // server advertise client/peer listener had to start first - // before setting up proxy listener - errc <- srv.startProxy() - }() - - if srv.etcdCmd != nil { - srv.lg.Info( - "starting etcd command", - zap.String("command-path", srv.etcdCmd.Path), - ) - err := srv.etcdCmd.Start() - - srv.lg.Info( - "started etcd command", - zap.String("command-path", srv.etcdCmd.Path), - zap.Strings("command-args", srv.etcdCmd.Args), - zap.Strings("envs", srv.etcdCmd.Env), - zap.Error(err), - ) - if err != nil { - return err - } - - return <-errc - } - - select { - case <-srv.etcdServer.Server.ReadyNotify(): - srv.lg.Info("embedded etcd is ready") - case <-time.After(time.Minute): - srv.etcdServer.Close() - return fmt.Errorf("took too long to start %v", <-srv.etcdServer.Err()) - } - return <-errc -} - -// SIGQUIT to exit with stackstrace -func (srv *Server) stopEtcd(sig os.Signal) error { - srv.stopProxy() - - if srv.etcdCmd != nil { - srv.lg.Info( - "stopping etcd command", - zap.String("command-path", srv.etcdCmd.Path), - zap.String("signal", sig.String()), - ) - - if err := srv.etcdCmd.Process.Signal(sig); err != nil { - return err - } - - errc := make(chan error) - go func() { - _, ew := srv.etcdCmd.Process.Wait() - errc <- ew - close(errc) - }() - - select { - case <-time.After(5 * time.Second): - srv.etcdCmd.Process.Kill() - case e := <-errc: - return e - } - - err := <-errc - - srv.lg.Info( - "stopped etcd command", - zap.String("command-path", srv.etcdCmd.Path), - zap.String("signal", sig.String()), - zap.Error(err), - ) - return err - } - - srv.lg.Info("stopping 
embedded etcd") - srv.etcdServer.Server.HardStop() - srv.etcdServer.Close() - srv.lg.Info("stopped embedded etcd") - return nil -} - -func (srv *Server) startProxy() error { - if srv.Member.EtcdClientProxy { - advertiseClientURL, advertiseClientURLPort, err := getURLAndPort(srv.Member.Etcd.AdvertiseClientURLs[0]) - if err != nil { - return err - } - listenClientURL, _, err := getURLAndPort(srv.Member.Etcd.ListenClientURLs[0]) - if err != nil { - return err - } - - srv.lg.Info("Checking client target's connectivity", zap.String("target", listenClientURL.Host)) - if err := checkTCPConnect(srv.lg, listenClientURL.Host); err != nil { - return fmt.Errorf("check client target failed, %w", err) - } - - srv.lg.Info("starting proxy on client traffic", zap.String("url", advertiseClientURL.String())) - srv.advertiseClientPortToProxy[advertiseClientURLPort] = proxy.NewServer(proxy.ServerConfig{ - Logger: srv.lg, - From: *advertiseClientURL, - To: *listenClientURL, - }) - select { - case err = <-srv.advertiseClientPortToProxy[advertiseClientURLPort].Error(): - srv.lg.Info("starting client proxy failed", zap.Error(err)) - return err - case <-time.After(2 * time.Second): - srv.lg.Info("started proxy on client traffic", zap.String("url", advertiseClientURL.String())) - } - } - - if srv.Member.EtcdPeerProxy { - advertisePeerURL, advertisePeerURLPort, err := getURLAndPort(srv.Member.Etcd.AdvertisePeerURLs[0]) - if err != nil { - return err - } - listenPeerURL, _, err := getURLAndPort(srv.Member.Etcd.ListenPeerURLs[0]) - if err != nil { - return err - } - - srv.lg.Info("Checking peer target's connectivity", zap.String("target", listenPeerURL.Host)) - if err := checkTCPConnect(srv.lg, listenPeerURL.Host); err != nil { - return fmt.Errorf("check peer target failed, %w", err) - } - - srv.lg.Info("starting proxy on peer traffic", zap.String("url", advertisePeerURL.String())) - srv.advertisePeerPortToProxy[advertisePeerURLPort] = proxy.NewServer(proxy.ServerConfig{ - Logger: srv.lg, - 
From: *advertisePeerURL, - To: *listenPeerURL, - }) - select { - case err = <-srv.advertisePeerPortToProxy[advertisePeerURLPort].Error(): - srv.lg.Info("starting peer proxy failed", zap.Error(err)) - return err - case <-time.After(2 * time.Second): - srv.lg.Info("started proxy on peer traffic", zap.String("url", advertisePeerURL.String())) - } - } - return nil -} - -func (srv *Server) stopProxy() { - if srv.Member.EtcdClientProxy && len(srv.advertiseClientPortToProxy) > 0 { - for port, px := range srv.advertiseClientPortToProxy { - if err := px.Close(); err != nil { - srv.lg.Warn("failed to close proxy", zap.Int("port", port)) - continue - } - select { - case <-px.Done(): - // enough time to release port - time.Sleep(time.Second) - case <-time.After(time.Second): - } - srv.lg.Info("closed proxy", - zap.Int("port", port), - zap.String("from", px.From()), - zap.String("to", px.To()), - ) - } - srv.advertiseClientPortToProxy = make(map[int]proxy.Server) - } - if srv.Member.EtcdPeerProxy && len(srv.advertisePeerPortToProxy) > 0 { - for port, px := range srv.advertisePeerPortToProxy { - if err := px.Close(); err != nil { - srv.lg.Warn("failed to close proxy", zap.Int("port", port)) - continue - } - select { - case <-px.Done(): - // enough time to release port - time.Sleep(time.Second) - case <-time.After(time.Second): - } - srv.lg.Info("closed proxy", - zap.Int("port", port), - zap.String("from", px.From()), - zap.String("to", px.To()), - ) - } - srv.advertisePeerPortToProxy = make(map[int]proxy.Server) - } -} - -// if started with manual TLS, stores TLS assets -// from tester/client to disk before starting etcd process -func (srv *Server) saveTLSAssets() error { - const defaultFileMode os.FileMode = 0644 - - if err := safeDataToFile(srv.Member.PeerCertPath, []byte(srv.Member.PeerCertData), defaultFileMode); err != nil { - return err - } - if err := safeDataToFile(srv.Member.PeerKeyPath, []byte(srv.Member.PeerKeyData), defaultFileMode); err != nil { - return err - } - 
if err := safeDataToFile(srv.Member.PeerTrustedCAPath, []byte(srv.Member.PeerTrustedCAData), defaultFileMode); err != nil { - return err - } - if srv.Member.PeerCertPath != "" && - srv.Member.PeerKeyPath != "" && - srv.Member.PeerTrustedCAPath != "" { - srv.lg.Info( - "wrote", - zap.String("peer-cert", srv.Member.PeerCertPath), - zap.String("peer-key", srv.Member.PeerKeyPath), - zap.String("peer-trusted-ca", srv.Member.PeerTrustedCAPath), - ) - } - - if err := safeDataToFile(srv.Member.ClientCertPath, []byte(srv.Member.ClientCertData), defaultFileMode); err != nil { - return err - } - if err := safeDataToFile(srv.Member.ClientKeyPath, []byte(srv.Member.ClientKeyData), defaultFileMode); err != nil { - return err - } - if err := safeDataToFile(srv.Member.ClientTrustedCAPath, []byte(srv.Member.ClientTrustedCAData), defaultFileMode); err != nil { - return err - } - if srv.Member.ClientCertPath != "" && - srv.Member.ClientKeyPath != "" && - srv.Member.ClientTrustedCAPath != "" { - srv.lg.Info( - "wrote", - zap.String("client-cert", srv.Member.ClientCertPath), - zap.String("client-key", srv.Member.ClientKeyPath), - zap.String("client-trusted-ca", srv.Member.ClientTrustedCAPath), - ) - } - return nil -} - -func (srv *Server) loadAutoTLSAssets() error { - if srv.Member.Etcd.PeerAutoTLS { - // in case of slow disk - time.Sleep(time.Second) - - fdir := filepath.Join(srv.Member.Etcd.DataDir, "fixtures", "peer") - - srv.lg.Info( - "loading peer auto TLS assets", - zap.String("dir", fdir), - zap.String("endpoint", srv.EtcdClientEndpoint), - ) - // load peer cert.pem - certPath := filepath.Join(fdir, "cert.pem") - certData, err := loadFileData(certPath) - if err != nil { - return err - } - srv.Member.PeerCertData = string(certData) - // load peer key.pem - keyPath := filepath.Join(fdir, "key.pem") - keyData, err := loadFileData(keyPath) - if err != nil { - return err - } - srv.Member.PeerKeyData = string(keyData) - - srv.lg.Info( - "loaded peer auto TLS assets", - 
zap.String("peer-cert-path", certPath), - zap.Int("peer-cert-length", len(certData)), - zap.String("peer-key-path", keyPath), - zap.Int("peer-key-length", len(keyData)), - ) - } - - if srv.Member.Etcd.ClientAutoTLS { - // in case of slow disk - time.Sleep(time.Second) - - fdir := filepath.Join(srv.Member.Etcd.DataDir, "fixtures", "client") - - srv.lg.Info( - "loading client TLS assets", - zap.String("dir", fdir), - zap.String("endpoint", srv.EtcdClientEndpoint), - ) - // load client cert.pem - certPath := filepath.Join(fdir, "cert.pem") - certData, err := loadFileData(certPath) - if err != nil { - return err - } - srv.Member.ClientCertData = string(certData) - // load client key.pem - keyPath := filepath.Join(fdir, "key.pem") - keyData, err := loadFileData(keyPath) - if err != nil { - return err - } - srv.Member.ClientKeyData = string(keyData) - - srv.lg.Info( - "loaded client TLS assets", - zap.String("client-cert-path", certPath), - zap.Int("client-cert-length", len(certData)), - zap.String("client-key-path", keyPath), - zap.Int("client-key-length", len(keyData)), - ) - } - - return nil -} - -func (srv *Server) handle_INITIAL_START_ETCD(req *rpcpb.Request) (*rpcpb.Response, error) { - if srv.last != rpcpb.Operation_NOT_STARTED { - return &rpcpb.Response{ - Success: false, - Status: fmt.Sprintf("%q is not valid; last server operation was %q", rpcpb.Operation_INITIAL_START_ETCD.String(), srv.last.String()), - Member: req.Member, - }, nil - } - - if err := fileutil.TouchDirAll(srv.lg, srv.Member.BaseDir); err != nil { - return nil, err - } - srv.lg.Info("created base directory", zap.String("path", srv.Member.BaseDir)) - - if srv.etcdServer == nil { - if err := srv.createEtcdLogFile(); err != nil { - return nil, err - } - } - - if err := srv.saveTLSAssets(); err != nil { - return nil, err - } - if err := srv.createEtcd(false, req.Member.Failpoints); err != nil { - return nil, err - } - if err := srv.runEtcd(); err != nil { - return nil, err - } - if err := 
srv.loadAutoTLSAssets(); err != nil { - return nil, err - } - - return &rpcpb.Response{ - Success: true, - Status: "start etcd PASS", - Member: srv.Member, - }, nil -} - -func (srv *Server) handle_RESTART_ETCD(req *rpcpb.Request) (*rpcpb.Response, error) { - var err error - if !fileutil.Exist(srv.Member.BaseDir) { - if err = fileutil.TouchDirAll(srv.lg, srv.Member.BaseDir); err != nil { - return nil, err - } - } - - if err = srv.saveTLSAssets(); err != nil { - return nil, err - } - if err = srv.createEtcd(false, req.Member.Failpoints); err != nil { - return nil, err - } - if err = srv.runEtcd(); err != nil { - return nil, err - } - if err = srv.loadAutoTLSAssets(); err != nil { - return nil, err - } - - return &rpcpb.Response{ - Success: true, - Status: "restart etcd PASS", - Member: srv.Member, - }, nil -} - -func (srv *Server) handle_SIGTERM_ETCD() (*rpcpb.Response, error) { - if err := srv.stopEtcd(syscall.SIGTERM); err != nil { - return nil, err - } - - if srv.etcdServer != nil { - srv.etcdServer.GetLogger().Sync() - } else { - srv.etcdLogFile.Sync() - } - - return &rpcpb.Response{ - Success: true, - Status: "killed etcd", - }, nil -} - -func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA() (*rpcpb.Response, error) { - if err := srv.stopEtcd(syscall.SIGQUIT); err != nil { - return nil, err - } - - if srv.etcdServer != nil { - srv.etcdServer.GetLogger().Sync() - } else { - srv.etcdLogFile.Sync() - srv.etcdLogFile.Close() - } - - // for debugging purposes, rename instead of removing - if err := os.RemoveAll(srv.Member.BaseDir + ".backup"); err != nil { - return nil, err - } - if err := os.Rename(srv.Member.BaseDir, srv.Member.BaseDir+".backup"); err != nil { - return nil, err - } - srv.lg.Info( - "renamed", - zap.String("base-dir", srv.Member.BaseDir), - zap.String("new-dir", srv.Member.BaseDir+".backup"), - ) - - // create a new log file for next new member restart - if !fileutil.Exist(srv.Member.BaseDir) { - if err := fileutil.TouchDirAll(srv.lg, 
srv.Member.BaseDir); err != nil { - return nil, err - } - } - - return &rpcpb.Response{ - Success: true, - Status: "killed etcd and removed base directory", - }, nil -} - -func (srv *Server) handle_SAVE_SNAPSHOT() (*rpcpb.Response, error) { - if err := srv.Member.SaveSnapshot(srv.lg); err != nil { - return nil, err - } - return &rpcpb.Response{ - Success: true, - Status: "saved snapshot", - SnapshotInfo: srv.Member.SnapshotInfo, - }, nil -} - -func (srv *Server) handle_RESTORE_RESTART_FROM_SNAPSHOT(req *rpcpb.Request) (resp *rpcpb.Response, err error) { - if err = srv.Member.RestoreSnapshot(srv.lg); err != nil { - return nil, err - } - resp, err = srv.handle_RESTART_FROM_SNAPSHOT(req) - if resp != nil && err == nil { - resp.Status = "restored snapshot and " + resp.Status - } - return resp, err -} - -func (srv *Server) handle_RESTART_FROM_SNAPSHOT(req *rpcpb.Request) (resp *rpcpb.Response, err error) { - if err = srv.saveTLSAssets(); err != nil { - return nil, err - } - if err = srv.createEtcd(true, req.Member.Failpoints); err != nil { - return nil, err - } - if err = srv.runEtcd(); err != nil { - return nil, err - } - if err = srv.loadAutoTLSAssets(); err != nil { - return nil, err - } - - return &rpcpb.Response{ - Success: true, - Status: "restarted etcd from snapshot", - SnapshotInfo: srv.Member.SnapshotInfo, - }, nil -} - -func (srv *Server) handle_SIGQUIT_ETCD_AND_ARCHIVE_DATA() (*rpcpb.Response, error) { - if err := srv.stopEtcd(syscall.SIGQUIT); err != nil { - return nil, err - } - - if srv.etcdServer != nil { - srv.etcdServer.GetLogger().Sync() - } else { - srv.etcdLogFile.Sync() - srv.etcdLogFile.Close() - } - - // TODO: support separate WAL directory - if err := archive(srv.lg, srv.Member.BaseDir, srv.Member.Etcd.LogOutputs[0], srv.Member.Etcd.DataDir); err != nil { - return nil, err - } - srv.lg.Info("archived data", zap.String("base-dir", srv.Member.BaseDir)) - - if srv.etcdServer == nil { - if err := srv.createEtcdLogFile(); err != nil { - return nil, 
err - } - } - - // TODO: Verify whether this cleaning of 'cache pages' is needed. - srv.lg.Info("cleaning up page cache") - if err := cleanPageCache(); err != nil { - srv.lg.Warn("failed to clean up page cache", zap.String("error", err.Error())) - } - srv.lg.Info("cleaned up page cache") - - return &rpcpb.Response{ - Success: true, - Status: "cleaned up etcd", - }, nil -} - -func (srv *Server) handle_BLACKHOLE_PEER_PORT_TX_RX() *rpcpb.Response { - for port, px := range srv.advertisePeerPortToProxy { - srv.lg.Info("blackholing", zap.Int("peer-port", port)) - px.BlackholeTx() - px.BlackholeRx() - srv.lg.Info("blackholed", zap.Int("peer-port", port)) - } - return &rpcpb.Response{ - Success: true, - Status: "blackholed peer port tx/rx", - } -} - -func (srv *Server) handle_UNBLACKHOLE_PEER_PORT_TX_RX() *rpcpb.Response { - for port, px := range srv.advertisePeerPortToProxy { - srv.lg.Info("unblackholing", zap.Int("peer-port", port)) - px.UnblackholeTx() - px.UnblackholeRx() - srv.lg.Info("unblackholed", zap.Int("peer-port", port)) - } - return &rpcpb.Response{ - Success: true, - Status: "unblackholed peer port tx/rx", - } -} - -func (srv *Server) handle_DELAY_PEER_PORT_TX_RX() *rpcpb.Response { - lat := time.Duration(srv.Tester.UpdatedDelayLatencyMs) * time.Millisecond - rv := time.Duration(srv.Tester.DelayLatencyMsRv) * time.Millisecond - - for port, px := range srv.advertisePeerPortToProxy { - srv.lg.Info("delaying", - zap.Int("peer-port", port), - zap.Duration("latency", lat), - zap.Duration("random-variable", rv), - ) - px.DelayTx(lat, rv) - px.DelayRx(lat, rv) - srv.lg.Info("delayed", - zap.Int("peer-port", port), - zap.Duration("latency", lat), - zap.Duration("random-variable", rv), - ) - } - - return &rpcpb.Response{ - Success: true, - Status: "delayed peer port tx/rx", - } -} - -func (srv *Server) handle_UNDELAY_PEER_PORT_TX_RX() *rpcpb.Response { - for port, px := range srv.advertisePeerPortToProxy { - srv.lg.Info("undelaying", zap.Int("peer-port", port)) - 
px.UndelayTx() - px.UndelayRx() - srv.lg.Info("undelayed", zap.Int("peer-port", port)) - } - return &rpcpb.Response{ - Success: true, - Status: "undelayed peer port tx/rx", - } -} diff --git a/tests/functional/agent/server.go b/tests/functional/agent/server.go deleted file mode 100644 index f76f7258c43..00000000000 --- a/tests/functional/agent/server.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package agent - -import ( - "math" - "net" - "os" - "os/exec" - - "go.etcd.io/etcd/pkg/v3/proxy" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" - "google.golang.org/grpc" -) - -// Server implements "rpcpb.TransportServer" -// and other etcd operations as an agent -// no need to lock fields since request operations are -// serialized in tester-side -type Server struct { - lg *zap.Logger - - grpcServer *grpc.Server - - network string - address string - ln net.Listener - - rpcpb.TransportServer - last rpcpb.Operation - - *rpcpb.Member - *rpcpb.Tester - - etcdServer *embed.Etcd - etcdCmd *exec.Cmd - etcdLogFile *os.File - - // forward incoming advertise URLs traffic to listen URLs - advertiseClientPortToProxy map[int]proxy.Server - advertisePeerPortToProxy map[int]proxy.Server -} - -// NewServer returns a new agent server. 
-func NewServer( - lg *zap.Logger, - network string, - address string, -) *Server { - return &Server{ - lg: lg, - network: network, - address: address, - last: rpcpb.Operation_NOT_STARTED, - advertiseClientPortToProxy: make(map[int]proxy.Server), - advertisePeerPortToProxy: make(map[int]proxy.Server), - } -} - -const ( - maxRequestBytes = 1.5 * 1024 * 1024 - grpcOverheadBytes = 512 * 1024 - maxStreams = math.MaxUint32 - maxSendBytes = math.MaxInt32 -) - -// StartServe starts serving agent server. -func (srv *Server) StartServe() error { - var err error - srv.ln, err = net.Listen(srv.network, srv.address) - if err != nil { - return err - } - - var opts []grpc.ServerOption - opts = append(opts, grpc.MaxRecvMsgSize(int(maxRequestBytes+grpcOverheadBytes))) - opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) - opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) - srv.grpcServer = grpc.NewServer(opts...) - - rpcpb.RegisterTransportServer(srv.grpcServer, srv) - - srv.lg.Info( - "gRPC server started", - zap.String("address", srv.address), - zap.String("listener-address", srv.ln.Addr().String()), - ) - err = srv.grpcServer.Serve(srv.ln) - if err != nil { - srv.lg.Warn( - "gRPC server is stopped with error", - zap.String("address", srv.address), - zap.Error(err), - ) - } else { - srv.lg.Info( - "gRPC server is stopped", - zap.String("address", srv.address), - ) - } - return err -} - -// Stop stops serving gRPC server. -func (srv *Server) Stop() { - srv.lg.Info("gRPC server stopping", zap.String("address", srv.address)) - srv.grpcServer.Stop() - srv.lg.Info("gRPC server stopped", zap.String("address", srv.address)) -} - -// Transport communicates with etcd tester. 
-func (srv *Server) Transport(stream rpcpb.Transport_TransportServer) (reterr error) { - errc := make(chan error, 1) - go func() { - for { - var req *rpcpb.Request - var err error - req, err = stream.Recv() - if err != nil { - errc <- err - // TODO: handle error and retry - return - } - - var resp *rpcpb.Response - resp, err = srv.handleTesterRequest(req) - if err != nil { - errc <- err - // TODO: handle error and retry - return - } - - if err = stream.Send(resp); err != nil { - errc <- err - // TODO: handle error and retry - return - } - } - }() - - select { - case reterr = <-errc: - case <-stream.Context().Done(): - reterr = stream.Context().Err() - } - return reterr -} diff --git a/tests/functional/agent/utils.go b/tests/functional/agent/utils.go deleted file mode 100644 index 98d88bd913a..00000000000 --- a/tests/functional/agent/utils.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package agent - -import ( - "fmt" - "io" - "net" - "net/url" - "os" - "os/exec" - "path/filepath" - "strconv" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - - "go.uber.org/zap" -) - -// TODO: support separate WAL directory -func archive(lg *zap.Logger, baseDir, etcdLogPath, dataDir string) error { - dir := filepath.Join(baseDir, "etcd-failure-archive", time.Now().Format(time.RFC3339)) - if existDir(dir) { - dir = filepath.Join(baseDir, "etcd-failure-archive", time.Now().Add(time.Second).Format(time.RFC3339)) - } - if err := fileutil.TouchDirAll(lg, dir); err != nil { - return err - } - - dst := filepath.Join(dir, "etcd.log") - if err := copyFile(etcdLogPath, dst); err != nil { - if !os.IsNotExist(err) { - return err - } - } - if err := os.Rename(dataDir, filepath.Join(dir, filepath.Base(dataDir))); err != nil { - if !os.IsNotExist(err) { - return err - } - } - - return nil -} - -func existDir(fpath string) bool { - st, err := os.Stat(fpath) - if err != nil { - if os.IsNotExist(err) { - return false - } - } else { - return st.IsDir() - } - return false -} - -func getURLAndPort(addr string) (urlAddr *url.URL, port int, err error) { - urlAddr, err = url.Parse(addr) - if err != nil { - return nil, -1, err - } - var s string - _, s, err = net.SplitHostPort(urlAddr.Host) - if err != nil { - return nil, -1, err - } - port, err = strconv.Atoi(s) - if err != nil { - return nil, -1, err - } - return urlAddr, port, err -} - -func copyFile(src, dst string) error { - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - w, err := os.Create(dst) - if err != nil { - return err - } - defer w.Close() - - if _, err = io.Copy(w, f); err != nil { - return err - } - return w.Sync() -} - -func safeDataToFile(filePath string, fileData []byte, mode os.FileMode) error { - if filePath != "" { - if len(fileData) == 0 { - return fmt.Errorf("got empty data for %q", filePath) - } - if err := os.WriteFile(filePath, fileData, mode); err != nil { - return 
fmt.Errorf("writing file %q failed, %w", filePath, err) - } - } - return nil -} - -func loadFileData(filePath string) ([]byte, error) { - if !fileutil.Exist(filePath) { - return nil, fmt.Errorf("cannot find %q", filePath) - } - data, err := os.ReadFile(filePath) - if err != nil { - return nil, fmt.Errorf("read file %q failed, %w", filePath, err) - } - return data, nil -} - -func checkTCPConnect(lg *zap.Logger, target string) error { - for i := 0; i < 10; i++ { - if conn, err := net.Dial("tcp", target); err != nil { - lg.Error("The target isn't reachable", zap.Int("retries", i), zap.String("target", target), zap.Error(err)) - } else { - if conn != nil { - conn.Close() - lg.Info("The target is reachable", zap.Int("retries", i), zap.String("target", target)) - return nil - } - lg.Error("The target isn't reachable due to the returned conn is nil", zap.Int("retries", i), zap.String("target", target)) - } - time.Sleep(time.Second) - } - return fmt.Errorf("timed out waiting for the target (%s) to be reachable", target) -} - -func cleanPageCache() error { - // https://www.kernel.org/doc/Documentation/sysctl/vm.txt - // https://github.com/torvalds/linux/blob/master/fs/drop_caches.c - cmd := exec.Command("/bin/sh", "-c", `echo "echo 1 > /proc/sys/vm/drop_caches" | sudo -s -n`) - return cmd.Run() -} diff --git a/tests/functional/agent/utils_test.go b/tests/functional/agent/utils_test.go deleted file mode 100644 index 16230030438..00000000000 --- a/tests/functional/agent/utils_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package agent - -import ( - "net/url" - "reflect" - "testing" -) - -func TestGetURLAndPort(t *testing.T) { - addr := "https://127.0.0.1:2379" - urlAddr, port, err := getURLAndPort(addr) - if err != nil { - t.Fatal(err) - } - exp := &url.URL{Scheme: "https", Host: "127.0.0.1:2379"} - if !reflect.DeepEqual(urlAddr, exp) { - t.Fatalf("expected %+v, got %+v", exp, urlAddr) - } - if port != 2379 { - t.Fatalf("port expected 2379, got %d", port) - } -} diff --git a/tests/functional/build.sh b/tests/functional/build.sh deleted file mode 100755 index 1c28b6d9a69..00000000000 --- a/tests/functional/build.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -if ! 
[[ "$0" =~ "tests/functional/build" ]]; then - echo "must be run from repository root" - exit 255 -fi - -outdir="${BINDIR:-../bin}" - -( - cd ./tests - CGO_ENABLED=0 go build -trimpath -v -installsuffix cgo -ldflags "-s -w" -o "${outdir}/etcd-agent" ./functional/cmd/etcd-agent - CGO_ENABLED=0 go build -trimpath -v -installsuffix cgo -ldflags "-s -w" -o "${outdir}/etcd-proxy" ./functional/cmd/etcd-proxy - CGO_ENABLED=0 go build -trimpath -v -installsuffix cgo -ldflags "-s -w" -o "${outdir}/etcd-runner" ./functional/cmd/etcd-runner - CGO_ENABLED=0 go test -v -installsuffix cgo -ldflags "-s -w" -c -o "${outdir}/etcd-tester" ./functional/cmd/etcd-tester -) diff --git a/tests/functional/cmd/etcd-agent/main.go b/tests/functional/cmd/etcd-agent/main.go deleted file mode 100644 index 64aaa26b239..00000000000 --- a/tests/functional/cmd/etcd-agent/main.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// etcd-agent is a program that runs functional-tester agent. 
-package main - -import ( - "flag" - - "go.uber.org/zap/zapcore" - - "go.etcd.io/etcd/tests/v3/functional/agent" - - "go.uber.org/zap" -) - -var logger *zap.Logger - -func main() { - network := flag.String("network", "tcp", "network to serve agent server") - address := flag.String("address", "127.0.0.1:9027", "address to serve agent server") - flag.Parse() - - lcfg := zap.NewDevelopmentConfig() - lcfg.Level = zap.NewAtomicLevelAt(zapcore.InfoLevel) - logger, err := lcfg.Build() - if err != nil { - panic(err) - } - - logger = logger.Named("agent").With(zap.String("address", *address)) - - defer logger.Sync() - - srv := agent.NewServer(logger, *network, *address) - err = srv.StartServe() - logger.Info("agent exiting", zap.Error(err)) -} diff --git a/tests/functional/cmd/etcd-proxy/main.go b/tests/functional/cmd/etcd-proxy/main.go deleted file mode 100644 index 4c7110849b4..00000000000 --- a/tests/functional/cmd/etcd-proxy/main.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// etcd-proxy is a proxy layer that simulates various network conditions. 
-package main - -import ( - "context" - "flag" - "fmt" - "io" - "log" - "net/http" - "net/url" - "os" - "os/signal" - "syscall" - "time" - - "go.etcd.io/etcd/pkg/v3/proxy" - - "go.uber.org/zap" -) - -var from string -var to string -var httpPort int -var verbose bool - -func main() { - // TODO: support TLS - flag.StringVar(&from, "from", "localhost:23790", "Address URL to proxy from.") - flag.StringVar(&to, "to", "localhost:2379", "Address URL to forward.") - flag.IntVar(&httpPort, "http-port", 2378, "Port to serve etcd-proxy API.") - flag.BoolVar(&verbose, "verbose", false, "'true' to run proxy in verbose mode.") - - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %q:\n", os.Args[0]) - fmt.Fprintln(os.Stderr, ` -etcd-proxy simulates various network conditions for etcd testing purposes. -See README.md for more examples. - -Example: - -# build etcd -$ ./scripts/build.sh -$ ./bin/etcd - -# build etcd-proxy -$ make build-etcd-proxy - -# to test etcd with proxy layer -$ ./bin/etcd-proxy --help -$ ./bin/etcd-proxy --from localhost:23790 --to localhost:2379 --http-port 2378 --verbose - -$ ./bin/etcdctl --endpoints localhost:2379 put foo bar -$ ./bin/etcdctl --endpoints localhost:23790 put foo bar`) - flag.PrintDefaults() - } - - flag.Parse() - - cfg := proxy.ServerConfig{ - From: url.URL{Scheme: "tcp", Host: from}, - To: url.URL{Scheme: "tcp", Host: to}, - } - if verbose { - var err error - cfg.Logger, err = zap.NewDevelopment() - if err != nil { - panic(err) - } - cfg.Logger = cfg.Logger.Named("proxy").With( - zap.String("from", from), - zap.String("to", to), - zap.Int("port", httpPort)) - } - p := proxy.NewServer(cfg) - - select { - case <-p.Ready(): - case err := <-p.Error(): - panic(err) - } - - defer p.Close() - - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte(fmt.Sprintf("proxying [%s -> %s]\n", p.From(), p.To()))) - }) - mux.HandleFunc("/delay-tx", func(w http.ResponseWriter, req 
*http.Request) { - switch req.Method { - case http.MethodGet: - w.Write([]byte(fmt.Sprintf("current send latency %v\n", p.LatencyTx()))) - case http.MethodPut, http.MethodPost: - if err := req.ParseForm(); err != nil { - w.Write([]byte(fmt.Sprintf("wrong form %q\n", err.Error()))) - return - } - lat, err := time.ParseDuration(req.PostForm.Get("latency")) - if err != nil { - w.Write([]byte(fmt.Sprintf("wrong latency form %q\n", err.Error()))) - return - } - rv, err := time.ParseDuration(req.PostForm.Get("random-variable")) - if err != nil { - w.Write([]byte(fmt.Sprintf("wrong random-variable form %q\n", err.Error()))) - return - } - p.DelayTx(lat, rv) - w.Write([]byte(fmt.Sprintf("added send latency %v±%v (current latency %v)\n", lat, rv, p.LatencyTx()))) - case http.MethodDelete: - lat := p.LatencyTx() - p.UndelayTx() - w.Write([]byte(fmt.Sprintf("removed latency %v\n", lat))) - default: - w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method))) - } - }) - mux.HandleFunc("/delay-rx", func(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case http.MethodGet: - w.Write([]byte(fmt.Sprintf("current receive latency %v\n", p.LatencyRx()))) - case http.MethodPut, http.MethodPost: - if err := req.ParseForm(); err != nil { - w.Write([]byte(fmt.Sprintf("wrong form %q\n", err.Error()))) - return - } - lat, err := time.ParseDuration(req.PostForm.Get("latency")) - if err != nil { - w.Write([]byte(fmt.Sprintf("wrong latency form %q\n", err.Error()))) - return - } - rv, err := time.ParseDuration(req.PostForm.Get("random-variable")) - if err != nil { - w.Write([]byte(fmt.Sprintf("wrong random-variable form %q\n", err.Error()))) - return - } - p.DelayRx(lat, rv) - w.Write([]byte(fmt.Sprintf("added receive latency %v±%v (current latency %v)\n", lat, rv, p.LatencyRx()))) - case http.MethodDelete: - lat := p.LatencyRx() - p.UndelayRx() - w.Write([]byte(fmt.Sprintf("removed latency %v\n", lat))) - default: - w.Write([]byte(fmt.Sprintf("unsupported method 
%q\n", req.Method))) - } - }) - mux.HandleFunc("/pause-tx", func(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case http.MethodPut, http.MethodPost: - p.PauseTx() - w.Write([]byte(fmt.Sprintf("paused forwarding [%s -> %s]\n", p.From(), p.To()))) - case http.MethodDelete: - p.UnpauseTx() - w.Write([]byte(fmt.Sprintf("unpaused forwarding [%s -> %s]\n", p.From(), p.To()))) - default: - w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method))) - } - }) - mux.HandleFunc("/pause-rx", func(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case http.MethodPut, http.MethodPost: - p.PauseRx() - w.Write([]byte(fmt.Sprintf("paused forwarding [%s <- %s]\n", p.From(), p.To()))) - case http.MethodDelete: - p.UnpauseRx() - w.Write([]byte(fmt.Sprintf("unpaused forwarding [%s <- %s]\n", p.From(), p.To()))) - default: - w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method))) - } - }) - mux.HandleFunc("/blackhole-tx", func(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case http.MethodPut, http.MethodPost: - p.BlackholeTx() - w.Write([]byte(fmt.Sprintf("blackholed; dropping packets [%s -> %s]\n", p.From(), p.To()))) - case http.MethodDelete: - p.UnblackholeTx() - w.Write([]byte(fmt.Sprintf("unblackholed; restart forwarding [%s -> %s]\n", p.From(), p.To()))) - default: - w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method))) - } - }) - mux.HandleFunc("/blackhole-rx", func(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case http.MethodPut, http.MethodPost: - p.BlackholeRx() - w.Write([]byte(fmt.Sprintf("blackholed; dropping packets [%s <- %s]\n", p.From(), p.To()))) - case http.MethodDelete: - p.UnblackholeRx() - w.Write([]byte(fmt.Sprintf("unblackholed; restart forwarding [%s <- %s]\n", p.From(), p.To()))) - default: - w.Write([]byte(fmt.Sprintf("unsupported method %q\n", req.Method))) - } - }) - srv := &http.Server{ - Addr: fmt.Sprintf(":%d", httpPort), - Handler: mux, 
- ErrorLog: log.New(io.Discard, "net/http", 0), - } - defer srv.Close() - - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt, syscall.SIGTERM) - defer signal.Stop(sig) - - go func() { - s := <-sig - fmt.Printf("\n\nreceived signal %q, shutting down HTTP server\n\n", s) - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - err := srv.Shutdown(ctx) - cancel() - fmt.Printf("gracefully stopped HTTP server with %v\n\n", err) - os.Exit(0) - }() - - fmt.Printf("\nserving HTTP server http://localhost:%d\n\n", httpPort) - err := srv.ListenAndServe() - fmt.Printf("HTTP server exit with error %v\n", err) -} diff --git a/tests/functional/cmd/etcd-runner/main.go b/tests/functional/cmd/etcd-runner/main.go deleted file mode 100644 index 3afe40e1f22..00000000000 --- a/tests/functional/cmd/etcd-runner/main.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// etcd-runner is a program for testing etcd clientv3 features -// against a fault injected cluster. 
-package main - -import "go.etcd.io/etcd/tests/v3/functional/runner" - -func main() { - runner.Start() -} diff --git a/tests/functional/cmd/etcd-tester/etcd_tester_test.go b/tests/functional/cmd/etcd-tester/etcd_tester_test.go deleted file mode 100644 index e545c57362b..00000000000 --- a/tests/functional/cmd/etcd-tester/etcd_tester_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// etcd-tester is a program that runs functional-tester client. 
-package main - -import ( - "flag" - "testing" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/tests/v3/functional/tester" -) - -var config = flag.String("config", "../../functional.yaml", "path to tester configuration") - -func TestFunctional(t *testing.T) { - testutil.SkipTestIfShortMode(t, "functional tests are skipped in --short mode") - - lg := zaptest.NewLogger(t, zaptest.Level(zapcore.InfoLevel)).Named("tester") - - clus, err := tester.NewCluster(lg, *config) - if err != nil { - t.Fatalf("failed to create a cluster: %v", err) - } - - if err = clus.Send_INITIAL_START_ETCD(); err != nil { - t.Fatal("Bootstrap failed", zap.Error(err)) - } - - t.Log("wait health after bootstrap") - if err = clus.WaitHealth(); err != nil { - t.Fatal("WaitHealth failed", zap.Error(err)) - } - - if err := clus.Run(t); err == nil { - // Only stop etcd and cleanup data when test is successful. - clus.Send_SIGQUIT_ETCD_AND_REMOVE_DATA() - } -} diff --git a/tests/functional/functional.yaml b/tests/functional/functional.yaml deleted file mode 100644 index 30917bc42e7..00000000000 --- a/tests/functional/functional.yaml +++ /dev/null @@ -1,259 +0,0 @@ -agent-configs: -- etcd-exec: ./bin/etcd - agent-addr: 127.0.0.1:19027 - failpoint-http-addr: http://127.0.0.1:7381 - base-dir: /tmp/etcd-functional-1 - etcd-client-proxy: false - etcd-peer-proxy: true - etcd-client-endpoint: 127.0.0.1:1379 - etcd: - name: s1 - data-dir: /tmp/etcd-functional-1/etcd.data - wal-dir: /tmp/etcd-functional-1/etcd.data/member/wal - heartbeat-interval: 100 - election-timeout: 1000 - listen-client-urls: ["https://127.0.0.1:1379"] - advertise-client-urls: ["https://127.0.0.1:1379"] - auto-tls: true - client-cert-auth: false - cert-file: "" - key-file: "" - trusted-ca-file: "" - listen-peer-urls: ["https://127.0.0.1:1380"] - initial-advertise-peer-urls: ["https://127.0.0.1:1381"] - peer-auto-tls: true - peer-client-cert-auth: 
false - peer-cert-file: "" - peer-key-file: "" - peer-trusted-ca-file: "" - initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381 - initial-cluster-state: new - initial-cluster-token: tkn - snapshot-count: 2000 - quota-backend-bytes: 10740000000 # 10 GiB - pre-vote: true - initial-corrupt-check: true - logger: zap - log-outputs: [/tmp/etcd-functional-1/etcd.log] - log-level: info - socket-reuse-address: true - socket-reuse-port: true - client-cert-data: "" - client-cert-path: "" - client-key-data: "" - client-key-path: "" - client-trusted-ca-data: "" - client-trusted-ca-path: "" - peer-cert-data: "" - peer-cert-path: "" - peer-key-data: "" - peer-key-path: "" - peer-trusted-ca-data: "" - peer-trusted-ca-path: "" - snapshot-path: /tmp/etcd-functional-1.snapshot.db - -- etcd-exec: ./bin/etcd - agent-addr: 127.0.0.1:29027 - failpoint-http-addr: http://127.0.0.1:7382 - base-dir: /tmp/etcd-functional-2 - etcd-client-proxy: false - etcd-peer-proxy: true - etcd-client-endpoint: 127.0.0.1:2379 - etcd: - name: s2 - data-dir: /tmp/etcd-functional-2/etcd.data - wal-dir: /tmp/etcd-functional-2/etcd.data/member/wal - heartbeat-interval: 100 - election-timeout: 1000 - listen-client-urls: ["https://127.0.0.1:2379"] - advertise-client-urls: ["https://127.0.0.1:2379"] - auto-tls: true - client-cert-auth: false - cert-file: "" - key-file: "" - trusted-ca-file: "" - listen-peer-urls: ["https://127.0.0.1:2380"] - initial-advertise-peer-urls: ["https://127.0.0.1:2381"] - peer-auto-tls: true - peer-client-cert-auth: false - peer-cert-file: "" - peer-key-file: "" - peer-trusted-ca-file: "" - initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381 - initial-cluster-state: new - initial-cluster-token: tkn - snapshot-count: 2000 - quota-backend-bytes: 10740000000 # 10 GiB - pre-vote: true - initial-corrupt-check: true - logger: zap - log-outputs: [/tmp/etcd-functional-2/etcd.log] - log-level: info - 
socket-reuse-address: true - socket-reuse-port: true - client-cert-data: "" - client-cert-path: "" - client-key-data: "" - client-key-path: "" - client-trusted-ca-data: "" - client-trusted-ca-path: "" - peer-cert-data: "" - peer-cert-path: "" - peer-key-data: "" - peer-key-path: "" - peer-trusted-ca-data: "" - peer-trusted-ca-path: "" - snapshot-path: /tmp/etcd-functional-2.snapshot.db - -- etcd-exec: ./bin/etcd - agent-addr: 127.0.0.1:39027 - failpoint-http-addr: http://127.0.0.1:7383 - base-dir: /tmp/etcd-functional-3 - etcd-client-proxy: false - etcd-peer-proxy: true - etcd-client-endpoint: 127.0.0.1:3379 - etcd: - name: s3 - data-dir: /tmp/etcd-functional-3/etcd.data - wal-dir: /tmp/etcd-functional-3/etcd.data/member/wal - heartbeat-interval: 100 - election-timeout: 1000 - listen-client-urls: ["https://127.0.0.1:3379"] - advertise-client-urls: ["https://127.0.0.1:3379"] - auto-tls: true - client-cert-auth: false - cert-file: "" - key-file: "" - trusted-ca-file: "" - listen-peer-urls: ["https://127.0.0.1:3380"] - initial-advertise-peer-urls: ["https://127.0.0.1:3381"] - peer-auto-tls: true - peer-client-cert-auth: false - peer-cert-file: "" - peer-key-file: "" - peer-trusted-ca-file: "" - initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381 - initial-cluster-state: new - initial-cluster-token: tkn - snapshot-count: 2000 - quota-backend-bytes: 10740000000 # 10 GiB - pre-vote: true - initial-corrupt-check: true - logger: zap - log-outputs: [/tmp/etcd-functional-3/etcd.log] - log-level: info - socket-reuse-address: true - socket-reuse-port: true - client-cert-data: "" - client-cert-path: "" - client-key-data: "" - client-key-path: "" - client-trusted-ca-data: "" - client-trusted-ca-path: "" - peer-cert-data: "" - peer-cert-path: "" - peer-key-data: "" - peer-key-path: "" - peer-trusted-ca-data: "" - peer-trusted-ca-path: "" - snapshot-path: /tmp/etcd-functional-3.snapshot.db - -tester-config: - data-dir: 
/tmp/etcd-tester-data - network: tcp - addr: 127.0.0.1:9028 - - # slow enough to trigger election - delay-latency-ms: 5000 - delay-latency-ms-rv: 500 - - round-limit: 1 - exit-on-failure: true - enable-pprof: true - - case-delay-ms: 7000 - case-shuffle: true - - # For full descriptions, - # https://pkg.go.dev/go.etcd.io/etcd/tests/v3/functional/rpcpb#Case - cases: - - SIGTERM_ONE_FOLLOWER - - SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT - - SIGTERM_LEADER - - SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT - - SIGTERM_QUORUM - - SIGTERM_ALL - - SIGQUIT_AND_REMOVE_ONE_FOLLOWER - - SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT - - BLACKHOLE_PEER_PORT_TX_RX_LEADER - - BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT - - BLACKHOLE_PEER_PORT_TX_RX_QUORUM - - BLACKHOLE_PEER_PORT_TX_RX_ALL - - DELAY_PEER_PORT_TX_RX_LEADER - - RANDOM_DELAY_PEER_PORT_TX_RX_LEADER - - DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT - - RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT - - DELAY_PEER_PORT_TX_RX_QUORUM - - RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM - - DELAY_PEER_PORT_TX_RX_ALL - - RANDOM_DELAY_PEER_PORT_TX_RX_ALL - - NO_FAIL_WITH_STRESS - - NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS - # - FAILPOINTS_WITH_DISK_IO_LATENCY - - # TODO: use iptables for discarding outbound rafthttp traffic to peer port - # - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER - # - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT - # - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER - # - RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER - # - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT - # - RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT - # - SIGQUIT_AND_REMOVE_LEADER - # - SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT - # - SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH - - failpoint-commands: - - panic("etcd-tester") - # - panic("etcd-tester"),1*sleep(1000) - # - sleep(3000) - - runner-exec-path: ./bin/etcd-runner - external-exec-path: "" - - # make up 
±70% of workloads with writes - stressers: - - type: KV_WRITE_SMALL - weight: 0.35 - - type: KV_WRITE_LARGE - weight: 0.002 - - type: KV_READ_ONE_KEY - weight: 0.07 - - type: KV_READ_RANGE - weight: 0.07 - - type: KV_DELETE_ONE_KEY - weight: 0.07 - - type: KV_DELETE_RANGE - weight: 0.07 - - type: KV_TXN_WRITE_DELETE - weight: 0.35 - - type: LEASE - weight: 0.0 - - # - ELECTION_RUNNER - # - WATCH_RUNNER - # - LOCK_RACER_RUNNER - # - LEASE_RUNNER - - checkers: - - KV_HASH - - LEASE_EXPIRE - #- SHORT_TTL_LEASE_EXPIRE - - stress-key-size: 100 - stress-key-size-large: 32769 - stress-key-suffix-range: 250000 - stress-key-suffix-range-txn: 100 - stress-key-txn-ops: 10 - - stress-clients: 100 - stress-qps: 2000 diff --git a/tests/functional/rpcpb/etcd_config.go b/tests/functional/rpcpb/etcd_config.go deleted file mode 100644 index e1752ede3ac..00000000000 --- a/tests/functional/rpcpb/etcd_config.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpcpb - -import ( - "fmt" - "reflect" - "strings" -) - -var etcdFields = []string{ - "Name", - "DataDir", - "WALDir", - - "HeartbeatIntervalMs", - "ElectionTimeoutMs", - - "ListenClientURLs", - "AdvertiseClientURLs", - "ClientAutoTLS", - "ClientCertAuth", - "ClientCertFile", - "ClientKeyFile", - "ClientTrustedCAFile", - - "ListenPeerURLs", - "AdvertisePeerURLs", - "PeerAutoTLS", - "PeerClientCertAuth", - "PeerCertFile", - "PeerKeyFile", - "PeerTrustedCAFile", - - "InitialCluster", - "InitialClusterState", - "InitialClusterToken", - - "SnapshotCount", - "QuotaBackendBytes", - - "PreVote", - "InitialCorruptCheck", - - "Logger", - "LogOutputs", - "LogLevel", - - "SocketReuseAddress", - "SocketReusePort", -} - -// Flags returns etcd flags in string slice. -func (e *Etcd) Flags() (fs []string) { - tp := reflect.TypeOf(*e) - vo := reflect.ValueOf(*e) - for _, name := range etcdFields { - field, ok := tp.FieldByName(name) - if !ok { - panic(fmt.Errorf("field %q not found", name)) - } - fv := reflect.Indirect(vo).FieldByName(name) - var sv string - switch fv.Type().Kind() { - case reflect.String: - sv = fv.String() - case reflect.Slice: - n := fv.Len() - sl := make([]string, n) - for i := 0; i < n; i++ { - sl[i] = fv.Index(i).String() - } - sv = strings.Join(sl, ",") - case reflect.Int64: - sv = fmt.Sprintf("%d", fv.Int()) - case reflect.Bool: - sv = fmt.Sprintf("%v", fv.Bool()) - default: - panic(fmt.Errorf("field %q (%v) cannot be parsed", name, fv.Type().Kind())) - } - - fname := field.Tag.Get("yaml") - - // TODO: remove this - if fname == "initial-corrupt-check" { - fname = "experimental-" + fname - } - - if sv != "" { - fs = append(fs, fmt.Sprintf("--%s=%s", fname, sv)) - } - } - return fs -} diff --git a/tests/functional/rpcpb/etcd_config_test.go b/tests/functional/rpcpb/etcd_config_test.go deleted file mode 100644 index d88044562d9..00000000000 --- a/tests/functional/rpcpb/etcd_config_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 The etcd 
Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcpb - -import ( - "reflect" - "testing" -) - -func TestEtcd(t *testing.T) { - e := &Etcd{ - Name: "s1", - DataDir: "/tmp/etcd-functionl-1/etcd.data", - WALDir: "/tmp/etcd-functionl-1/etcd.data/member/wal", - - HeartbeatIntervalMs: 100, - ElectionTimeoutMs: 1000, - - ListenClientURLs: []string{"https://127.0.0.1:1379"}, - AdvertiseClientURLs: []string{"https://127.0.0.1:13790"}, - ClientAutoTLS: true, - ClientCertAuth: false, - ClientCertFile: "", - ClientKeyFile: "", - ClientTrustedCAFile: "", - - ListenPeerURLs: []string{"https://127.0.0.1:1380"}, - AdvertisePeerURLs: []string{"https://127.0.0.1:13800"}, - PeerAutoTLS: true, - PeerClientCertAuth: false, - PeerCertFile: "", - PeerKeyFile: "", - PeerTrustedCAFile: "", - - InitialCluster: "s1=https://127.0.0.1:13800,s2=https://127.0.0.1:23800,s3=https://127.0.0.1:33800", - InitialClusterState: "new", - InitialClusterToken: "tkn", - - SnapshotCount: 10000, - QuotaBackendBytes: 10740000000, - - PreVote: true, - InitialCorruptCheck: true, - - Logger: "zap", - LogOutputs: []string{"/tmp/etcd-functional-1/etcd.log"}, - LogLevel: "info", - SocketReuseAddress: true, - SocketReusePort: true, - } - - exps := []string{ - "--name=s1", - "--data-dir=/tmp/etcd-functionl-1/etcd.data", - "--wal-dir=/tmp/etcd-functionl-1/etcd.data/member/wal", - "--heartbeat-interval=100", - "--election-timeout=1000", - "--listen-client-urls=https://127.0.0.1:1379", - 
"--advertise-client-urls=https://127.0.0.1:13790", - "--auto-tls=true", - "--client-cert-auth=false", - "--listen-peer-urls=https://127.0.0.1:1380", - "--initial-advertise-peer-urls=https://127.0.0.1:13800", - "--peer-auto-tls=true", - "--peer-client-cert-auth=false", - "--initial-cluster=s1=https://127.0.0.1:13800,s2=https://127.0.0.1:23800,s3=https://127.0.0.1:33800", - "--initial-cluster-state=new", - "--initial-cluster-token=tkn", - "--snapshot-count=10000", - "--quota-backend-bytes=10740000000", - "--pre-vote=true", - "--experimental-initial-corrupt-check=true", - "--logger=zap", - "--log-outputs=/tmp/etcd-functional-1/etcd.log", - "--log-level=info", - "--socket-reuse-address=true", - "--socket-reuse-port=true", - } - fs := e.Flags() - if !reflect.DeepEqual(exps, fs) { - t.Fatalf("expected %q, got %q", exps, fs) - } -} diff --git a/tests/functional/rpcpb/member.go b/tests/functional/rpcpb/member.go deleted file mode 100644 index 1d9f2905786..00000000000 --- a/tests/functional/rpcpb/member.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rpcpb - -import ( - "context" - "crypto/tls" - "fmt" - "net/url" - "os" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/etcdutl/v3/snapshot" - - "github.com/dustin/go-humanize" - "go.uber.org/zap" - grpc "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -// ElectionTimeout returns an election timeout duration. -func (m *Member) ElectionTimeout() time.Duration { - return time.Duration(m.Etcd.ElectionTimeoutMs) * time.Millisecond -} - -// DialEtcdGRPCServer creates a raw gRPC connection to an etcd member. -func (m *Member) DialEtcdGRPCServer(opts ...grpc.DialOption) (*grpc.ClientConn, error) { - dialOpts := []grpc.DialOption{ - grpc.WithTimeout(5 * time.Second), - grpc.WithBlock(), - } - - secure := false - for _, cu := range m.Etcd.AdvertiseClientURLs { - u, err := url.Parse(cu) - if err != nil { - return nil, err - } - if u.Scheme == "https" { // TODO: handle unix - secure = true - } - } - - if secure { - // assume save TLS assets are already stord on disk - tlsInfo := transport.TLSInfo{ - CertFile: m.ClientCertPath, - KeyFile: m.ClientKeyPath, - TrustedCAFile: m.ClientTrustedCAPath, - - // TODO: remove this with generated certs - // only need it for auto TLS - InsecureSkipVerify: true, - } - tlsConfig, err := tlsInfo.ClientConfig() - if err != nil { - return nil, err - } - creds := credentials.NewTLS(tlsConfig) - dialOpts = append(dialOpts, grpc.WithTransportCredentials(creds)) - } else { - dialOpts = append(dialOpts, grpc.WithInsecure()) - } - dialOpts = append(dialOpts, opts...) - return grpc.Dial(m.EtcdClientEndpoint, dialOpts...) -} - -// CreateEtcdClientConfig creates a client configuration from member. 
-func (m *Member) CreateEtcdClientConfig(opts ...grpc.DialOption) (cfg *clientv3.Config, err error) { - secure := false - for _, cu := range m.Etcd.AdvertiseClientURLs { - var u *url.URL - u, err = url.Parse(cu) - if err != nil { - return nil, err - } - if u.Scheme == "https" { // TODO: handle unix - secure = true - } - } - - // TODO: make this configurable - level := "error" - if os.Getenv("ETCD_CLIENT_DEBUG") != "" { - level = "debug" - } - lcfg := logutil.DefaultZapLoggerConfig - lcfg.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(level)) - - cfg = &clientv3.Config{ - Endpoints: []string{m.EtcdClientEndpoint}, - DialTimeout: 10 * time.Second, - DialOptions: opts, - LogConfig: &lcfg, - } - if secure { - // assume save TLS assets are already stord on disk - tlsInfo := transport.TLSInfo{ - CertFile: m.ClientCertPath, - KeyFile: m.ClientKeyPath, - TrustedCAFile: m.ClientTrustedCAPath, - - // TODO: remove this with generated certs - // only need it for auto TLS - InsecureSkipVerify: true, - } - var tlsConfig *tls.Config - tlsConfig, err = tlsInfo.ClientConfig() - if err != nil { - return nil, err - } - cfg.TLS = tlsConfig - } - return cfg, err -} - -// CreateEtcdClient creates a client from member. -func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error) { - cfg, err := m.CreateEtcdClientConfig(opts...) - if err != nil { - return nil, err - } - return clientv3.New(*cfg) -} - -// CheckCompact ensures that historical data before given revision has been compacted. 
-func (m *Member) CheckCompact(rev int64) error { - cli, err := m.CreateEtcdClient() - if err != nil { - return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - wch := cli.Watch(ctx, "\x00", clientv3.WithFromKey(), clientv3.WithRev(rev-1)) - wr, ok := <-wch - cancel() - - if !ok { - return fmt.Errorf("watch channel terminated (endpoint %q)", m.EtcdClientEndpoint) - } - if wr.CompactRevision != rev { - return fmt.Errorf("got compact revision %v, wanted %v (endpoint %q)", wr.CompactRevision, rev, m.EtcdClientEndpoint) - } - - return nil -} - -// Defrag runs defragmentation on this member. -func (m *Member) Defrag() error { - cli, err := m.CreateEtcdClient() - if err != nil { - return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - _, err = cli.Defragment(ctx, m.EtcdClientEndpoint) - cancel() - return err -} - -// RevHash fetches current revision and hash on this member. -func (m *Member) RevHash() (int64, int64, error) { - conn, err := m.DialEtcdGRPCServer() - if err != nil { - return 0, 0, err - } - defer conn.Close() - - mt := pb.NewMaintenanceClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - resp, err := mt.Hash(ctx, &pb.HashRequest{}, grpc.WaitForReady(true)) - cancel() - - if err != nil { - return 0, 0, err - } - - return resp.Header.Revision, int64(resp.Hash), nil -} - -// Rev fetches current revision on this member. -func (m *Member) Rev(ctx context.Context) (int64, error) { - cli, err := m.CreateEtcdClient() - if err != nil { - return 0, fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - defer cli.Close() - - resp, err := cli.Status(ctx, m.EtcdClientEndpoint) - if err != nil { - return 0, err - } - return resp.Header.Revision, nil -} - -// Compact compacts member storage with given revision. 
-// It blocks until it's physically done. -func (m *Member) Compact(rev int64, timeout time.Duration) error { - cli, err := m.CreateEtcdClient() - if err != nil { - return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - _, err = cli.Compact(ctx, rev, clientv3.WithCompactPhysical()) - cancel() - return err -} - -// IsLeader returns true if this member is the current cluster leader. -func (m *Member) IsLeader() (bool, error) { - cli, err := m.CreateEtcdClient() - if err != nil { - return false, fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - defer cli.Close() - - resp, err := cli.Status(context.Background(), m.EtcdClientEndpoint) - if err != nil { - return false, err - } - return resp.Header.MemberId == resp.Leader, nil -} - -// WriteHealthKey writes a health key to this member. -func (m *Member) WriteHealthKey() error { - cli, err := m.CreateEtcdClient() - if err != nil { - return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - defer cli.Close() - - // give enough time-out in case expensive requests (range/delete) are pending - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - _, err = cli.Put(ctx, "health", "good") - cancel() - if err != nil { - return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - return nil -} - -// SaveSnapshot downloads a snapshot file from this member, locally. -// It's meant to requested remotely, so that local member can store -// snapshot file on local disk. 
-func (m *Member) SaveSnapshot(lg *zap.Logger) (err error) { - // remove existing snapshot first - if err = os.RemoveAll(m.SnapshotPath); err != nil { - return err - } - - var ccfg *clientv3.Config - ccfg, err = m.CreateEtcdClientConfig() - if err != nil { - return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) - } - - lg.Info( - "snapshot save START", - zap.String("member-name", m.Etcd.Name), - zap.Strings("member-client-urls", m.Etcd.AdvertiseClientURLs), - zap.String("snapshot-path", m.SnapshotPath), - ) - now := time.Now() - mgr := snapshot.NewV3(lg) - version, err := mgr.Save(context.Background(), *ccfg, m.SnapshotPath) - if err != nil { - return err - } - took := time.Since(now) - - var fi os.FileInfo - fi, err = os.Stat(m.SnapshotPath) - if err != nil { - return err - } - var st snapshot.Status - st, err = mgr.Status(m.SnapshotPath) - if err != nil { - return err - } - m.SnapshotInfo = &SnapshotInfo{ - MemberName: m.Etcd.Name, - MemberClientURLs: m.Etcd.AdvertiseClientURLs, - SnapshotPath: m.SnapshotPath, - SnapshotFileSize: humanize.Bytes(uint64(fi.Size())), - SnapshotTotalSize: humanize.Bytes(uint64(st.TotalSize)), - SnapshotTotalKey: int64(st.TotalKey), - SnapshotHash: int64(st.Hash), - SnapshotRevision: st.Revision, - Took: fmt.Sprintf("%v", took), - Version: version, - } - lg.Info( - "snapshot save END", - zap.String("member-name", m.SnapshotInfo.MemberName), - zap.String("member-version", m.SnapshotInfo.Version), - zap.Strings("member-client-urls", m.SnapshotInfo.MemberClientURLs), - zap.String("snapshot-path", m.SnapshotPath), - zap.String("snapshot-file-size", m.SnapshotInfo.SnapshotFileSize), - zap.String("snapshot-total-size", m.SnapshotInfo.SnapshotTotalSize), - zap.Int64("snapshot-total-key", m.SnapshotInfo.SnapshotTotalKey), - zap.Int64("snapshot-hash", m.SnapshotInfo.SnapshotHash), - zap.Int64("snapshot-revision", m.SnapshotInfo.SnapshotRevision), - zap.String("took", m.SnapshotInfo.Took), - ) - return nil -} - -// RestoreSnapshot restores a 
cluster from a given snapshot file on disk. -// It's meant to requested remotely, so that local member can load the -// snapshot file from local disk. -func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error) { - if err = os.RemoveAll(m.EtcdOnSnapshotRestore.DataDir); err != nil { - return err - } - if err = os.RemoveAll(m.EtcdOnSnapshotRestore.WALDir); err != nil { - return err - } - - lg.Info( - "snapshot restore START", - zap.String("member-name", m.Etcd.Name), - zap.Strings("member-client-urls", m.Etcd.AdvertiseClientURLs), - zap.String("snapshot-path", m.SnapshotPath), - ) - now := time.Now() - mgr := snapshot.NewV3(lg) - err = mgr.Restore(snapshot.RestoreConfig{ - SnapshotPath: m.SnapshotInfo.SnapshotPath, - Name: m.EtcdOnSnapshotRestore.Name, - OutputDataDir: m.EtcdOnSnapshotRestore.DataDir, - OutputWALDir: m.EtcdOnSnapshotRestore.WALDir, - PeerURLs: m.EtcdOnSnapshotRestore.AdvertisePeerURLs, - InitialCluster: m.EtcdOnSnapshotRestore.InitialCluster, - InitialClusterToken: m.EtcdOnSnapshotRestore.InitialClusterToken, - SkipHashCheck: false, - // TODO: set SkipHashCheck it true, to recover from existing db file - }) - took := time.Since(now) - lg.Info( - "snapshot restore END", - zap.String("member-name", m.SnapshotInfo.MemberName), - zap.String("member-version", m.SnapshotInfo.Version), - zap.Strings("member-client-urls", m.SnapshotInfo.MemberClientURLs), - zap.String("snapshot-path", m.SnapshotPath), - zap.String("snapshot-file-size", m.SnapshotInfo.SnapshotFileSize), - zap.String("snapshot-total-size", m.SnapshotInfo.SnapshotTotalSize), - zap.Int64("snapshot-total-key", m.SnapshotInfo.SnapshotTotalKey), - zap.Int64("snapshot-hash", m.SnapshotInfo.SnapshotHash), - zap.Int64("snapshot-revision", m.SnapshotInfo.SnapshotRevision), - zap.String("took", took.String()), - zap.Error(err), - ) - return err -} diff --git a/tests/functional/rpcpb/rpc.pb.go b/tests/functional/rpcpb/rpc.pb.go deleted file mode 100644 index 376ee76864c..00000000000 --- 
a/tests/functional/rpcpb/rpc.pb.go +++ /dev/null @@ -1,6058 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: rpcpb/rpc.proto - -package rpcpb - -import ( - context "context" - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type StresserType int32 - -const ( - StresserType_KV_WRITE_SMALL StresserType = 0 - StresserType_KV_WRITE_LARGE StresserType = 1 - StresserType_KV_READ_ONE_KEY StresserType = 2 - StresserType_KV_READ_RANGE StresserType = 3 - StresserType_KV_DELETE_ONE_KEY StresserType = 4 - StresserType_KV_DELETE_RANGE StresserType = 5 - StresserType_KV_TXN_WRITE_DELETE StresserType = 6 - StresserType_LEASE StresserType = 10 - StresserType_ELECTION_RUNNER StresserType = 20 - StresserType_WATCH_RUNNER StresserType = 31 - StresserType_LOCK_RACER_RUNNER StresserType = 41 - StresserType_LEASE_RUNNER StresserType = 51 -) - -var StresserType_name = map[int32]string{ - 0: "KV_WRITE_SMALL", - 1: "KV_WRITE_LARGE", - 2: "KV_READ_ONE_KEY", - 3: "KV_READ_RANGE", - 4: "KV_DELETE_ONE_KEY", - 5: "KV_DELETE_RANGE", - 6: "KV_TXN_WRITE_DELETE", - 10: "LEASE", - 20: "ELECTION_RUNNER", - 31: "WATCH_RUNNER", - 41: "LOCK_RACER_RUNNER", - 51: "LEASE_RUNNER", -} - -var StresserType_value = map[string]int32{ - "KV_WRITE_SMALL": 0, - "KV_WRITE_LARGE": 1, - 
"KV_READ_ONE_KEY": 2, - "KV_READ_RANGE": 3, - "KV_DELETE_ONE_KEY": 4, - "KV_DELETE_RANGE": 5, - "KV_TXN_WRITE_DELETE": 6, - "LEASE": 10, - "ELECTION_RUNNER": 20, - "WATCH_RUNNER": 31, - "LOCK_RACER_RUNNER": 41, - "LEASE_RUNNER": 51, -} - -func (x StresserType) String() string { - return proto.EnumName(StresserType_name, int32(x)) -} - -func (StresserType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{0} -} - -type Checker int32 - -const ( - Checker_KV_HASH Checker = 0 - Checker_LEASE_EXPIRE Checker = 1 - Checker_RUNNER Checker = 2 - Checker_NO_CHECK Checker = 3 - Checker_SHORT_TTL_LEASE_EXPIRE Checker = 4 -) - -var Checker_name = map[int32]string{ - 0: "KV_HASH", - 1: "LEASE_EXPIRE", - 2: "RUNNER", - 3: "NO_CHECK", - 4: "SHORT_TTL_LEASE_EXPIRE", -} - -var Checker_value = map[string]int32{ - "KV_HASH": 0, - "LEASE_EXPIRE": 1, - "RUNNER": 2, - "NO_CHECK": 3, - "SHORT_TTL_LEASE_EXPIRE": 4, -} - -func (x Checker) String() string { - return proto.EnumName(Checker_name, int32(x)) -} - -func (Checker) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{1} -} - -type Operation int32 - -const ( - // NOT_STARTED is the agent status before etcd first start. - Operation_NOT_STARTED Operation = 0 - // INITIAL_START_ETCD is only called to start etcd, the very first time. - Operation_INITIAL_START_ETCD Operation = 10 - // RESTART_ETCD is sent to restart killed etcd. - Operation_RESTART_ETCD Operation = 11 - // SIGTERM_ETCD pauses etcd process while keeping data directories - // and previous etcd configurations. - Operation_SIGTERM_ETCD Operation = 20 - // SIGQUIT_ETCD_AND_REMOVE_DATA kills etcd process and removes all data - // directories to simulate destroying the whole machine. - Operation_SIGQUIT_ETCD_AND_REMOVE_DATA Operation = 21 - // SAVE_SNAPSHOT is sent to trigger local member to download its snapshot - // onto its local disk with the specified path from tester. 
- Operation_SAVE_SNAPSHOT Operation = 30 - // RESTORE_RESTART_FROM_SNAPSHOT is sent to trigger local member to - // restore a cluster from existing snapshot from disk, and restart - // an etcd instance from recovered data. - Operation_RESTORE_RESTART_FROM_SNAPSHOT Operation = 31 - // RESTART_FROM_SNAPSHOT is sent to trigger local member to restart - // and join an existing cluster that has been recovered from a snapshot. - // Local member joins this cluster with fresh data. - Operation_RESTART_FROM_SNAPSHOT Operation = 32 - // SIGQUIT_ETCD_AND_ARCHIVE_DATA is sent when consistency check failed, - // thus need to archive etcd data directories. - Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA Operation = 40 - // BLACKHOLE_PEER_PORT_TX_RX drops all outgoing/incoming packets from/to - // the peer port on target member's peer port. - Operation_BLACKHOLE_PEER_PORT_TX_RX Operation = 100 - // UNBLACKHOLE_PEER_PORT_TX_RX removes outgoing/incoming packet dropping. - Operation_UNBLACKHOLE_PEER_PORT_TX_RX Operation = 101 - // DELAY_PEER_PORT_TX_RX delays all outgoing/incoming packets from/to - // the peer port on target member's peer port. - Operation_DELAY_PEER_PORT_TX_RX Operation = 200 - // UNDELAY_PEER_PORT_TX_RX removes all outgoing/incoming delays. 
- Operation_UNDELAY_PEER_PORT_TX_RX Operation = 201 -) - -var Operation_name = map[int32]string{ - 0: "NOT_STARTED", - 10: "INITIAL_START_ETCD", - 11: "RESTART_ETCD", - 20: "SIGTERM_ETCD", - 21: "SIGQUIT_ETCD_AND_REMOVE_DATA", - 30: "SAVE_SNAPSHOT", - 31: "RESTORE_RESTART_FROM_SNAPSHOT", - 32: "RESTART_FROM_SNAPSHOT", - 40: "SIGQUIT_ETCD_AND_ARCHIVE_DATA", - 100: "BLACKHOLE_PEER_PORT_TX_RX", - 101: "UNBLACKHOLE_PEER_PORT_TX_RX", - 200: "DELAY_PEER_PORT_TX_RX", - 201: "UNDELAY_PEER_PORT_TX_RX", -} - -var Operation_value = map[string]int32{ - "NOT_STARTED": 0, - "INITIAL_START_ETCD": 10, - "RESTART_ETCD": 11, - "SIGTERM_ETCD": 20, - "SIGQUIT_ETCD_AND_REMOVE_DATA": 21, - "SAVE_SNAPSHOT": 30, - "RESTORE_RESTART_FROM_SNAPSHOT": 31, - "RESTART_FROM_SNAPSHOT": 32, - "SIGQUIT_ETCD_AND_ARCHIVE_DATA": 40, - "BLACKHOLE_PEER_PORT_TX_RX": 100, - "UNBLACKHOLE_PEER_PORT_TX_RX": 101, - "DELAY_PEER_PORT_TX_RX": 200, - "UNDELAY_PEER_PORT_TX_RX": 201, -} - -func (x Operation) String() string { - return proto.EnumName(Operation_name, int32(x)) -} - -func (Operation) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{2} -} - -// Case defines various system faults or test case in distributed systems, -// in order to verify correct behavior of etcd servers and clients. -type Case int32 - -const ( - // SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader) - // but does not delete its data directories on disk for next restart. - // It waits "delay-ms" before recovering this failure. - // The expected behavior is that the follower comes back online - // and rejoins the cluster, and then each member continues to process - // client requests ('Put' request that requires Raft consensus). - Case_SIGTERM_ONE_FOLLOWER Case = 0 - // SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen - // follower but does not delete its data directories on disk for next - // restart. 
And waits until most up-to-date node (leader) applies the - // snapshot count of entries since the stop operation. - // The expected behavior is that the follower comes back online and - // rejoins the cluster, and then active leader sends snapshot - // to the follower to force it to follow the leader's log. - // As always, after recovery, each member must be able to process - // client requests. - Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 1 - // SIGTERM_LEADER stops the active leader node but does not delete its - // data directories on disk for next restart. Then it waits "delay-ms" - // before recovering this failure, in order to trigger election timeouts. - // The expected behavior is that a new leader gets elected, and the - // old leader comes back online and rejoins the cluster as a follower. - // As always, after recovery, each member must be able to process - // client requests. - Case_SIGTERM_LEADER Case = 2 - // SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node - // but does not delete its data directories on disk for next restart. - // And waits until most up-to-date node ("new" leader) applies the - // snapshot count of entries since the stop operation. - // The expected behavior is that cluster elects a new leader, and the - // old leader comes back online and rejoins the cluster as a follower. - // And it receives the snapshot from the new leader to overwrite its - // store. As always, after recovery, each member must be able to - // process client requests. - Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 3 - // SIGTERM_QUORUM stops majority number of nodes to make the whole cluster - // inoperable but does not delete data directories on stopped nodes - // for next restart. And it waits "delay-ms" before recovering failure. - // The expected behavior is that nodes come back online, thus cluster - // comes back operative as well. As always, after recovery, each member - // must be able to process client requests. 
- Case_SIGTERM_QUORUM Case = 4 - // SIGTERM_ALL stops the whole cluster but does not delete data directories - // on disk for next restart. And it waits "delay-ms" before recovering - // this failure. - // The expected behavior is that nodes come back online, thus cluster - // comes back operative as well. As always, after recovery, each member - // must be able to process client requests. - Case_SIGTERM_ALL Case = 5 - // SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower - // (non-leader), deletes its data directories on disk, and removes - // this member from cluster (membership reconfiguration). On recovery, - // tester adds a new member, and this member joins the existing cluster - // with fresh data. It waits "delay-ms" before recovering this - // failure. This simulates destroying one follower machine, where operator - // needs to add a new member from a fresh machine. - // The expected behavior is that a new member joins the existing cluster, - // and then each member continues to process client requests. - Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER Case = 10 - // SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly - // chosen follower, deletes its data directories on disk, and removes - // this member from cluster (membership reconfiguration). On recovery, - // tester adds a new member, and this member joins the existing cluster - // restart. On member remove, cluster waits until most up-to-date node - // (leader) applies the snapshot count of entries since the stop operation. - // This simulates destroying a leader machine, where operator needs to add - // a new member from a fresh machine. - // The expected behavior is that a new member joins the existing cluster, - // and receives a snapshot from the active leader. As always, after - // recovery, each member must be able to process client requests. 
- Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 11 - // SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its - // data directories on disk, and removes this member from cluster. - // On recovery, tester adds a new member, and this member joins the - // existing cluster with fresh data. It waits "delay-ms" before - // recovering this failure. This simulates destroying a leader machine, - // where operator needs to add a new member from a fresh machine. - // The expected behavior is that a new member joins the existing cluster, - // and then each member continues to process client requests. - Case_SIGQUIT_AND_REMOVE_LEADER Case = 12 - // SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader, - // deletes its data directories on disk, and removes this member from - // cluster (membership reconfiguration). On recovery, tester adds a new - // member, and this member joins the existing cluster restart. On member - // remove, cluster waits until most up-to-date node (new leader) applies - // the snapshot count of entries since the stop operation. This simulates - // destroying a leader machine, where operator needs to add a new member - // from a fresh machine. - // The expected behavior is that on member remove, cluster elects a new - // leader, and a new member joins the existing cluster and receives a - // snapshot from the newly elected leader. As always, after recovery, each - // member must be able to process client requests. - Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 13 - // SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first - // stops majority number of nodes, deletes data directories on those quorum - // nodes, to make the whole cluster inoperable. Now that quorum and their - // data are totally destroyed, cluster cannot even remove unavailable nodes - // (e.g. 2 out of 3 are lost, so no leader can be elected). - // Let's assume 3-node cluster of node A, B, and C. 
One day, node A and B - // are destroyed and all their data are gone. The only viable solution is - // to recover from C's latest snapshot. - // - // To simulate: - // 1. Assume node C is the current leader with most up-to-date data. - // 2. Download snapshot from node C, before destroying node A and B. - // 3. Destroy node A and B, and make the whole cluster inoperable. - // 4. Now node C cannot operate either. - // 5. SIGTERM node C and remove its data directories. - // 6. Restore a new seed member from node C's latest snapshot file. - // 7. Add another member to establish 2-node cluster. - // 8. Add another member to establish 3-node cluster. - // 9. Add more if any. - // - // The expected behavior is that etcd successfully recovers from such - // disastrous situation as only 1-node survives out of 3-node cluster, - // new members joins the existing cluster, and previous data from snapshot - // are still preserved after recovery process. As always, after recovery, - // each member must be able to process client requests. - Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH Case = 14 - // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming - // packets from/to the peer port on a randomly chosen follower - // (non-leader), and waits for "delay-ms" until recovery. - // The expected behavior is that once dropping operation is undone, - // each member must be able to process client requests. - Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 100 - // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops - // all outgoing/incoming packets from/to the peer port on a randomly - // chosen follower (non-leader), and waits for most up-to-date node - // (leader) applies the snapshot count of entries since the blackhole - // operation. - // The expected behavior is that once packet drop operation is undone, - // the slow follower tries to catch up, possibly receiving the snapshot - // from the active leader. 
As always, after recovery, each member must - // be able to process client requests. - Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 101 - // BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets - // from/to the peer port on the active leader (isolated), and waits for - // "delay-ms" until recovery, in order to trigger election timeout. - // The expected behavior is that after election timeout, a new leader gets - // elected, and once dropping operation is undone, the old leader comes - // back and rejoins the cluster as a follower. As always, after recovery, - // each member must be able to process client requests. - Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER Case = 102 - // BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all - // outgoing/incoming packets from/to the peer port on the active leader, - // and waits for most up-to-date node (leader) applies the snapshot - // count of entries since the blackhole operation. - // The expected behavior is that cluster elects a new leader, and once - // dropping operation is undone, the old leader comes back and rejoins - // the cluster as a follower. The slow follower tries to catch up, likely - // receiving the snapshot from the new active leader. As always, after - // recovery, each member must be able to process client requests. - Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 103 - // BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets - // from/to the peer ports on majority nodes of cluster, thus losing its - // leader and cluster being inoperable. And it waits for "delay-ms" - // until recovery. - // The expected behavior is that once packet drop operation is undone, - // nodes come back online, thus cluster comes back operative. As always, - // after recovery, each member must be able to process client requests. 
- Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM Case = 104 - // BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets - // from/to the peer ports on all nodes, thus making cluster totally - // inoperable. It waits for "delay-ms" until recovery. - // The expected behavior is that once packet drop operation is undone, - // nodes come back online, thus cluster comes back operative. As always, - // after recovery, each member must be able to process client requests. - Case_BLACKHOLE_PEER_PORT_TX_RX_ALL Case = 105 - // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets - // from/to the peer port on a randomly chosen follower (non-leader). - // It waits for "delay-ms" until recovery. - // The expected behavior is that once packet delay operation is undone, - // the follower comes back and tries to catch up with latest changes from - // cluster. And as always, after recovery, each member must be able to - // process client requests. - Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 200 - // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming - // packets from/to the peer port on a randomly chosen follower - // (non-leader) with a randomized time duration (thus isolated). It - // waits for "delay-ms" until recovery. - // The expected behavior is that once packet delay operation is undone, - // each member must be able to process client requests. - Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 201 - // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on a randomly chosen - // follower (non-leader), and waits for most up-to-date node (leader) - // applies the snapshot count of entries since the delay operation. - // The expected behavior is that the delayed follower gets isolated - // and behind the current active leader, and once delay operation is undone, - // the slow follower comes back and catches up possibly receiving snapshot - // from the active leader. 
As always, after recovery, each member must be - // able to process client requests. - Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 202 - // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on a randomly chosen - // follower (non-leader) with a randomized time duration, and waits for - // most up-to-date node (leader) applies the snapshot count of entries - // since the delay operation. - // The expected behavior is that the delayed follower gets isolated - // and behind the current active leader, and once delay operation is undone, - // the slow follower comes back and catches up, possibly receiving a - // snapshot from the active leader. As always, after recovery, each member - // must be able to process client requests. - Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 203 - // DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to - // the peer port on the active leader. And waits for "delay-ms" until - // recovery. - // The expected behavior is that cluster may elect a new leader, and - // once packet delay operation is undone, the (old) leader comes back - // and tries to catch up with latest changes from cluster. As always, - // after recovery, each member must be able to process client requests. - Case_DELAY_PEER_PORT_TX_RX_LEADER Case = 204 - // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets - // from/to the peer port on the active leader with a randomized time - // duration. And waits for "delay-ms" until recovery. - // The expected behavior is that cluster may elect a new leader, and - // once packet delay operation is undone, the (old) leader comes back - // and tries to catch up with latest changes from cluster. As always, - // after recovery, each member must be able to process client requests. 
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER Case = 205 - // DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on the active leader, - // and waits for most up-to-date node (current or new leader) applies the - // snapshot count of entries since the delay operation. - // The expected behavior is that cluster may elect a new leader, and - // the old leader gets isolated and behind the current active leader, - // and once delay operation is undone, the slow follower comes back - // and catches up, likely receiving a snapshot from the active leader. - // As always, after recovery, each member must be able to process client - // requests. - Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 206 - // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on the active leader, - // with a randomized time duration. And it waits for most up-to-date node - // (current or new leader) applies the snapshot count of entries since the - // delay operation. - // The expected behavior is that cluster may elect a new leader, and - // the old leader gets isolated and behind the current active leader, - // and once delay operation is undone, the slow follower comes back - // and catches up, likely receiving a snapshot from the active leader. - // As always, after recovery, each member must be able to process client - // requests. - Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 207 - // DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to - // the peer ports on majority nodes of cluster. And it waits for - // "delay-ms" until recovery, likely to trigger election timeouts. - // The expected behavior is that cluster may elect a new leader, while - // quorum of nodes struggle with slow networks, and once delay operation - // is undone, nodes come back and cluster comes back operative. 
As always, - // after recovery, each member must be able to process client requests. - Case_DELAY_PEER_PORT_TX_RX_QUORUM Case = 208 - // RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets - // from/to the peer ports on majority nodes of cluster, with randomized - // time durations. And it waits for "delay-ms" until recovery, likely - // to trigger election timeouts. - // The expected behavior is that cluster may elect a new leader, while - // quorum of nodes struggle with slow networks, and once delay operation - // is undone, nodes come back and cluster comes back operative. As always, - // after recovery, each member must be able to process client requests. - Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM Case = 209 - // DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the - // peer ports on all nodes. And it waits for "delay-ms" until recovery, - // likely to trigger election timeouts. - // The expected behavior is that cluster may become totally inoperable, - // struggling with slow networks across the whole cluster. Once delay - // operation is undone, nodes come back and cluster comes back operative. - // As always, after recovery, each member must be able to process client - // requests. - Case_DELAY_PEER_PORT_TX_RX_ALL Case = 210 - // RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets - // from/to the peer ports on all nodes, with randomized time durations. - // And it waits for "delay-ms" until recovery, likely to trigger - // election timeouts. - // The expected behavior is that cluster may become totally inoperable, - // struggling with slow networks across the whole cluster. Once delay - // operation is undone, nodes come back and cluster comes back operative. - // As always, after recovery, each member must be able to process client - // requests. 
- Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL Case = 211 - // NO_FAIL_WITH_STRESS stops injecting failures while testing the - // consistency and correctness under pressure loads, for the duration of - // "delay-ms". Goal is to ensure cluster be still making progress - // on recovery, and verify system does not deadlock following a sequence - // of failure injections. - // The expected behavior is that cluster remains fully operative in healthy - // condition. As always, after recovery, each member must be able to process - // client requests. - Case_NO_FAIL_WITH_STRESS Case = 300 - // NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor - // sends stressig client requests to the cluster, for the duration of - // "delay-ms". Goal is to ensure cluster be still making progress - // on recovery, and verify system does not deadlock following a sequence - // of failure injections. - // The expected behavior is that cluster remains fully operative in healthy - // condition, and clients requests during liveness period succeed without - // errors. - // Note: this is how Google Chubby does failure injection testing - // https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf. - Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS Case = 301 - // FAILPOINTS injects failpoints to etcd server runtime, triggering panics - // in critical code paths. - Case_FAILPOINTS Case = 400 - // FAILPOINTS_WITH_DISK_IO_LATENCY injects high disk I/O latency failure in raftAfterSave code paths. - Case_FAILPOINTS_WITH_DISK_IO_LATENCY Case = 401 - // EXTERNAL runs external failure injection scripts. 
- Case_EXTERNAL Case = 500 -) - -var Case_name = map[int32]string{ - 0: "SIGTERM_ONE_FOLLOWER", - 1: "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - 2: "SIGTERM_LEADER", - 3: "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT", - 4: "SIGTERM_QUORUM", - 5: "SIGTERM_ALL", - 10: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER", - 11: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - 12: "SIGQUIT_AND_REMOVE_LEADER", - 13: "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT", - 14: "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH", - 100: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER", - 101: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - 102: "BLACKHOLE_PEER_PORT_TX_RX_LEADER", - 103: "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT", - 104: "BLACKHOLE_PEER_PORT_TX_RX_QUORUM", - 105: "BLACKHOLE_PEER_PORT_TX_RX_ALL", - 200: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER", - 201: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER", - 202: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - 203: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - 204: "DELAY_PEER_PORT_TX_RX_LEADER", - 205: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER", - 206: "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT", - 207: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT", - 208: "DELAY_PEER_PORT_TX_RX_QUORUM", - 209: "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM", - 210: "DELAY_PEER_PORT_TX_RX_ALL", - 211: "RANDOM_DELAY_PEER_PORT_TX_RX_ALL", - 300: "NO_FAIL_WITH_STRESS", - 301: "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS", - 400: "FAILPOINTS", - 401: "FAILPOINTS_WITH_DISK_IO_LATENCY", - 500: "EXTERNAL", -} - -var Case_value = map[string]int32{ - "SIGTERM_ONE_FOLLOWER": 0, - "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 1, - "SIGTERM_LEADER": 2, - "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT": 3, - "SIGTERM_QUORUM": 4, - "SIGTERM_ALL": 5, - "SIGQUIT_AND_REMOVE_ONE_FOLLOWER": 10, - "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 11, - "SIGQUIT_AND_REMOVE_LEADER": 12, 
- "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT": 13, - "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH": 14, - "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": 100, - "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 101, - "BLACKHOLE_PEER_PORT_TX_RX_LEADER": 102, - "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 103, - "BLACKHOLE_PEER_PORT_TX_RX_QUORUM": 104, - "BLACKHOLE_PEER_PORT_TX_RX_ALL": 105, - "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 200, - "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 201, - "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 202, - "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 203, - "DELAY_PEER_PORT_TX_RX_LEADER": 204, - "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER": 205, - "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 206, - "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 207, - "DELAY_PEER_PORT_TX_RX_QUORUM": 208, - "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM": 209, - "DELAY_PEER_PORT_TX_RX_ALL": 210, - "RANDOM_DELAY_PEER_PORT_TX_RX_ALL": 211, - "NO_FAIL_WITH_STRESS": 300, - "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS": 301, - "FAILPOINTS": 400, - "FAILPOINTS_WITH_DISK_IO_LATENCY": 401, - "EXTERNAL": 500, -} - -func (x Case) String() string { - return proto.EnumName(Case_name, int32(x)) -} - -func (Case) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{3} -} - -type Request struct { - Operation Operation `protobuf:"varint,1,opt,name=Operation,proto3,enum=rpcpb.Operation" json:"Operation,omitempty"` - // Member contains the same Member object from tester configuration. - Member *Member `protobuf:"bytes,2,opt,name=Member,proto3" json:"Member,omitempty"` - // Tester contains tester configuration. 
- Tester *Tester `protobuf:"bytes,3,opt,name=Tester,proto3" json:"Tester,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(m, src) -} -func (m *Request) XXX_Size() int { - return m.Size() -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -// SnapshotInfo contains SAVE_SNAPSHOT request results. 
-type SnapshotInfo struct { - MemberName string `protobuf:"bytes,1,opt,name=MemberName,proto3" json:"MemberName,omitempty"` - MemberClientURLs []string `protobuf:"bytes,2,rep,name=MemberClientURLs,proto3" json:"MemberClientURLs,omitempty"` - SnapshotPath string `protobuf:"bytes,3,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty"` - SnapshotFileSize string `protobuf:"bytes,4,opt,name=SnapshotFileSize,proto3" json:"SnapshotFileSize,omitempty"` - SnapshotTotalSize string `protobuf:"bytes,5,opt,name=SnapshotTotalSize,proto3" json:"SnapshotTotalSize,omitempty"` - SnapshotTotalKey int64 `protobuf:"varint,6,opt,name=SnapshotTotalKey,proto3" json:"SnapshotTotalKey,omitempty"` - SnapshotHash int64 `protobuf:"varint,7,opt,name=SnapshotHash,proto3" json:"SnapshotHash,omitempty"` - SnapshotRevision int64 `protobuf:"varint,8,opt,name=SnapshotRevision,proto3" json:"SnapshotRevision,omitempty"` - Took string `protobuf:"bytes,9,opt,name=Took,proto3" json:"Took,omitempty"` - Version string `protobuf:"bytes,10,opt,name=Version,proto3" json:"Version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotInfo) Reset() { *m = SnapshotInfo{} } -func (m *SnapshotInfo) String() string { return proto.CompactTextString(m) } -func (*SnapshotInfo) ProtoMessage() {} -func (*SnapshotInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{1} -} -func (m *SnapshotInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotInfo.Merge(m, src) -} -func (m *SnapshotInfo) XXX_Size() int { 
- return m.Size() -} -func (m *SnapshotInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotInfo proto.InternalMessageInfo - -type Response struct { - Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"` - Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` - // Member contains the same Member object from tester request. - Member *Member `protobuf:"bytes,3,opt,name=Member,proto3" json:"Member,omitempty"` - // SnapshotInfo contains SAVE_SNAPSHOT request results. - SnapshotInfo *SnapshotInfo `protobuf:"bytes,4,opt,name=SnapshotInfo,proto3" json:"SnapshotInfo,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{2} -} -func (m *Response) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(m, src) -} -func (m *Response) XXX_Size() int { - return m.Size() -} -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Response proto.InternalMessageInfo - -type Member struct { - // EtcdExec is the executable etcd binary path in agent server. - EtcdExec string `protobuf:"bytes,1,opt,name=EtcdExec,proto3" json:"EtcdExec,omitempty" yaml:"etcd-exec"` - // AgentAddr is the agent HTTP server address. 
- AgentAddr string `protobuf:"bytes,11,opt,name=AgentAddr,proto3" json:"AgentAddr,omitempty" yaml:"agent-addr"` - // FailpointHTTPAddr is the agent's failpoints HTTP server address. - FailpointHTTPAddr string `protobuf:"bytes,12,opt,name=FailpointHTTPAddr,proto3" json:"FailpointHTTPAddr,omitempty" yaml:"failpoint-http-addr"` - // BaseDir is the base directory where all logs and etcd data are stored. - BaseDir string `protobuf:"bytes,101,opt,name=BaseDir,proto3" json:"BaseDir,omitempty" yaml:"base-dir"` - // EtcdClientProxy is true when client traffic needs to be proxied. - // If true, listen client URL port must be different than advertise client URL port. - EtcdClientProxy bool `protobuf:"varint,201,opt,name=EtcdClientProxy,proto3" json:"EtcdClientProxy,omitempty" yaml:"etcd-client-proxy"` - // EtcdPeerProxy is true when peer traffic needs to be proxied. - // If true, listen peer URL port must be different than advertise peer URL port. - EtcdPeerProxy bool `protobuf:"varint,202,opt,name=EtcdPeerProxy,proto3" json:"EtcdPeerProxy,omitempty" yaml:"etcd-peer-proxy"` - // EtcdClientEndpoint is the etcd client endpoint. - EtcdClientEndpoint string `protobuf:"bytes,301,opt,name=EtcdClientEndpoint,proto3" json:"EtcdClientEndpoint,omitempty" yaml:"etcd-client-endpoint"` - // Etcd defines etcd binary configuration flags. - Etcd *Etcd `protobuf:"bytes,302,opt,name=Etcd,proto3" json:"Etcd,omitempty" yaml:"etcd"` - // EtcdOnSnapshotRestore defines one-time use configuration during etcd - // snapshot recovery process. - EtcdOnSnapshotRestore *Etcd `protobuf:"bytes,303,opt,name=EtcdOnSnapshotRestore,proto3" json:"EtcdOnSnapshotRestore,omitempty"` - // ClientCertData contains cert file contents from this member's etcd server. 
- ClientCertData string `protobuf:"bytes,401,opt,name=ClientCertData,proto3" json:"ClientCertData,omitempty" yaml:"client-cert-data"` - ClientCertPath string `protobuf:"bytes,402,opt,name=ClientCertPath,proto3" json:"ClientCertPath,omitempty" yaml:"client-cert-path"` - // ClientKeyData contains key file contents from this member's etcd server. - ClientKeyData string `protobuf:"bytes,403,opt,name=ClientKeyData,proto3" json:"ClientKeyData,omitempty" yaml:"client-key-data"` - ClientKeyPath string `protobuf:"bytes,404,opt,name=ClientKeyPath,proto3" json:"ClientKeyPath,omitempty" yaml:"client-key-path"` - // ClientTrustedCAData contains trusted CA file contents from this member's etcd server. - ClientTrustedCAData string `protobuf:"bytes,405,opt,name=ClientTrustedCAData,proto3" json:"ClientTrustedCAData,omitempty" yaml:"client-trusted-ca-data"` - ClientTrustedCAPath string `protobuf:"bytes,406,opt,name=ClientTrustedCAPath,proto3" json:"ClientTrustedCAPath,omitempty" yaml:"client-trusted-ca-path"` - // PeerCertData contains cert file contents from this member's etcd server. - PeerCertData string `protobuf:"bytes,501,opt,name=PeerCertData,proto3" json:"PeerCertData,omitempty" yaml:"peer-cert-data"` - PeerCertPath string `protobuf:"bytes,502,opt,name=PeerCertPath,proto3" json:"PeerCertPath,omitempty" yaml:"peer-cert-path"` - // PeerKeyData contains key file contents from this member's etcd server. - PeerKeyData string `protobuf:"bytes,503,opt,name=PeerKeyData,proto3" json:"PeerKeyData,omitempty" yaml:"peer-key-data"` - PeerKeyPath string `protobuf:"bytes,504,opt,name=PeerKeyPath,proto3" json:"PeerKeyPath,omitempty" yaml:"peer-key-path"` - // PeerTrustedCAData contains trusted CA file contents from this member's etcd server. 
- PeerTrustedCAData string `protobuf:"bytes,505,opt,name=PeerTrustedCAData,proto3" json:"PeerTrustedCAData,omitempty" yaml:"peer-trusted-ca-data"` - PeerTrustedCAPath string `protobuf:"bytes,506,opt,name=PeerTrustedCAPath,proto3" json:"PeerTrustedCAPath,omitempty" yaml:"peer-trusted-ca-path"` - // SnapshotPath is the snapshot file path to store or restore from. - SnapshotPath string `protobuf:"bytes,601,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty" yaml:"snapshot-path"` - // SnapshotInfo contains last SAVE_SNAPSHOT request results. - SnapshotInfo *SnapshotInfo `protobuf:"bytes,602,opt,name=SnapshotInfo,proto3" json:"SnapshotInfo,omitempty"` - // Failpoints is the GOFAIL_FAILPOINTS environment variable value to use when starting etcd. - Failpoints string `protobuf:"bytes,701,opt,name=Failpoints,proto3" json:"Failpoints,omitempty" yaml:"failpoints"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{3} -} -func (m *Member) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Member.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Member) XXX_Merge(src proto.Message) { - xxx_messageInfo_Member.Merge(m, src) -} -func (m *Member) XXX_Size() int { - return m.Size() -} -func (m *Member) XXX_DiscardUnknown() { - xxx_messageInfo_Member.DiscardUnknown(m) -} - -var xxx_messageInfo_Member proto.InternalMessageInfo - -type Tester struct { - DataDir string `protobuf:"bytes,1,opt,name=DataDir,proto3" 
json:"DataDir,omitempty" yaml:"data-dir"` - Network string `protobuf:"bytes,2,opt,name=Network,proto3" json:"Network,omitempty" yaml:"network"` - Addr string `protobuf:"bytes,3,opt,name=Addr,proto3" json:"Addr,omitempty" yaml:"addr"` - // DelayLatencyMsRv is the delay latency in milliseconds, - // to inject to simulated slow network. - DelayLatencyMs uint32 `protobuf:"varint,11,opt,name=DelayLatencyMs,proto3" json:"DelayLatencyMs,omitempty" yaml:"delay-latency-ms"` - // DelayLatencyMsRv is the delay latency random variable in milliseconds. - DelayLatencyMsRv uint32 `protobuf:"varint,12,opt,name=DelayLatencyMsRv,proto3" json:"DelayLatencyMsRv,omitempty" yaml:"delay-latency-ms-rv"` - // UpdatedDelayLatencyMs is the update delay latency in milliseconds, - // to inject to simulated slow network. It's the final latency to apply, - // in case the latency numbers are randomly generated from given delay latency field. - UpdatedDelayLatencyMs uint32 `protobuf:"varint,13,opt,name=UpdatedDelayLatencyMs,proto3" json:"UpdatedDelayLatencyMs,omitempty" yaml:"updated-delay-latency-ms"` - // RoundLimit is the limit of rounds to run failure set (-1 to run without limits). - RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"` - // ExitOnCaseFail is true, then exit tester on first failure. - ExitOnCaseFail bool `protobuf:"varint,22,opt,name=ExitOnCaseFail,proto3" json:"ExitOnCaseFail,omitempty" yaml:"exit-on-failure"` - // EnablePprof is true to enable profiler. - EnablePprof bool `protobuf:"varint,23,opt,name=EnablePprof,proto3" json:"EnablePprof,omitempty" yaml:"enable-pprof"` - // CaseDelayMs is the delay duration after failure is injected. - // Useful when triggering snapshot or no-op failure cases. - CaseDelayMs uint32 `protobuf:"varint,31,opt,name=CaseDelayMs,proto3" json:"CaseDelayMs,omitempty" yaml:"case-delay-ms"` - // CaseShuffle is true to randomize failure injecting order. 
- CaseShuffle bool `protobuf:"varint,32,opt,name=CaseShuffle,proto3" json:"CaseShuffle,omitempty" yaml:"case-shuffle"` - // Cases is the selected test cases to schedule. - // If empty, run all failure cases. - Cases []string `protobuf:"bytes,33,rep,name=Cases,proto3" json:"Cases,omitempty" yaml:"cases"` - // FailpointCommands is the list of "gofail" commands - // (e.g. panic("etcd-tester"),1*sleep(1000). - FailpointCommands []string `protobuf:"bytes,34,rep,name=FailpointCommands,proto3" json:"FailpointCommands,omitempty" yaml:"failpoint-commands"` - // RunnerExecPath is a path of etcd-runner binary. - RunnerExecPath string `protobuf:"bytes,41,opt,name=RunnerExecPath,proto3" json:"RunnerExecPath,omitempty" yaml:"runner-exec-path"` - // ExternalExecPath is a path of script for enabling/disabling an external fault injector. - ExternalExecPath string `protobuf:"bytes,42,opt,name=ExternalExecPath,proto3" json:"ExternalExecPath,omitempty" yaml:"external-exec-path"` - // Stressers is the list of stresser types: - // KV, LEASE, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER. - Stressers []*Stresser `protobuf:"bytes,101,rep,name=Stressers,proto3" json:"Stressers,omitempty" yaml:"stressers"` - // Checkers is the list of consistency checker types: - // KV_HASH, LEASE_EXPIRE, NO_CHECK, RUNNER. - // Leave empty to skip consistency checks. - Checkers []string `protobuf:"bytes,102,rep,name=Checkers,proto3" json:"Checkers,omitempty" yaml:"checkers"` - // StressKeySize is the size of each small key written into etcd. - StressKeySize int32 `protobuf:"varint,201,opt,name=StressKeySize,proto3" json:"StressKeySize,omitempty" yaml:"stress-key-size"` - // StressKeySizeLarge is the size of each large key written into etcd. - StressKeySizeLarge int32 `protobuf:"varint,202,opt,name=StressKeySizeLarge,proto3" json:"StressKeySizeLarge,omitempty" yaml:"stress-key-size-large"` - // StressKeySuffixRange is the count of key range written into etcd. 
- // Stress keys are created with "fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)". - StressKeySuffixRange int32 `protobuf:"varint,203,opt,name=StressKeySuffixRange,proto3" json:"StressKeySuffixRange,omitempty" yaml:"stress-key-suffix-range"` - // StressKeySuffixRangeTxn is the count of key range written into etcd txn (max 100). - // Stress keys are created with "fmt.Sprintf("/k%03d", i)". - StressKeySuffixRangeTxn int32 `protobuf:"varint,204,opt,name=StressKeySuffixRangeTxn,proto3" json:"StressKeySuffixRangeTxn,omitempty" yaml:"stress-key-suffix-range-txn"` - // StressKeyTxnOps is the number of operations per a transaction (max 64). - StressKeyTxnOps int32 `protobuf:"varint,205,opt,name=StressKeyTxnOps,proto3" json:"StressKeyTxnOps,omitempty" yaml:"stress-key-txn-ops"` - // StressClients is the number of concurrent stressing clients - // with "one" shared TCP connection. - StressClients int32 `protobuf:"varint,301,opt,name=StressClients,proto3" json:"StressClients,omitempty" yaml:"stress-clients"` - // StressQPS is the maximum number of stresser requests per second. 
- StressQPS int32 `protobuf:"varint,302,opt,name=StressQPS,proto3" json:"StressQPS,omitempty" yaml:"stress-qps"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Tester) Reset() { *m = Tester{} } -func (m *Tester) String() string { return proto.CompactTextString(m) } -func (*Tester) ProtoMessage() {} -func (*Tester) Descriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{4} -} -func (m *Tester) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Tester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Tester.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Tester) XXX_Merge(src proto.Message) { - xxx_messageInfo_Tester.Merge(m, src) -} -func (m *Tester) XXX_Size() int { - return m.Size() -} -func (m *Tester) XXX_DiscardUnknown() { - xxx_messageInfo_Tester.DiscardUnknown(m) -} - -var xxx_messageInfo_Tester proto.InternalMessageInfo - -type Stresser struct { - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty" yaml:"type"` - Weight float64 `protobuf:"fixed64,2,opt,name=Weight,proto3" json:"Weight,omitempty" yaml:"weight"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stresser) Reset() { *m = Stresser{} } -func (m *Stresser) String() string { return proto.CompactTextString(m) } -func (*Stresser) ProtoMessage() {} -func (*Stresser) Descriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{5} -} -func (m *Stresser) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stresser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stresser.Marshal(b, m, deterministic) - } else { - b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stresser) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stresser.Merge(m, src) -} -func (m *Stresser) XXX_Size() int { - return m.Size() -} -func (m *Stresser) XXX_DiscardUnknown() { - xxx_messageInfo_Stresser.DiscardUnknown(m) -} - -var xxx_messageInfo_Stresser proto.InternalMessageInfo - -type Etcd struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"` - DataDir string `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"` - WALDir string `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"` - // HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval. - // Default value is 100, which is 100ms. - HeartbeatIntervalMs int64 `protobuf:"varint,11,opt,name=HeartbeatIntervalMs,proto3" json:"HeartbeatIntervalMs,omitempty" yaml:"heartbeat-interval"` - // ElectionTimeoutMs is the time (in milliseconds) for an election to timeout. - // Default value is 1000, which is 1s. 
- ElectionTimeoutMs int64 `protobuf:"varint,12,opt,name=ElectionTimeoutMs,proto3" json:"ElectionTimeoutMs,omitempty" yaml:"election-timeout"` - ListenClientURLs []string `protobuf:"bytes,21,rep,name=ListenClientURLs,proto3" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"` - AdvertiseClientURLs []string `protobuf:"bytes,22,rep,name=AdvertiseClientURLs,proto3" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"` - ClientAutoTLS bool `protobuf:"varint,23,opt,name=ClientAutoTLS,proto3" json:"ClientAutoTLS,omitempty" yaml:"auto-tls"` - ClientCertAuth bool `protobuf:"varint,24,opt,name=ClientCertAuth,proto3" json:"ClientCertAuth,omitempty" yaml:"client-cert-auth"` - ClientCertFile string `protobuf:"bytes,25,opt,name=ClientCertFile,proto3" json:"ClientCertFile,omitempty" yaml:"cert-file"` - ClientKeyFile string `protobuf:"bytes,26,opt,name=ClientKeyFile,proto3" json:"ClientKeyFile,omitempty" yaml:"key-file"` - ClientTrustedCAFile string `protobuf:"bytes,27,opt,name=ClientTrustedCAFile,proto3" json:"ClientTrustedCAFile,omitempty" yaml:"trusted-ca-file"` - ListenPeerURLs []string `protobuf:"bytes,31,rep,name=ListenPeerURLs,proto3" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"` - AdvertisePeerURLs []string `protobuf:"bytes,32,rep,name=AdvertisePeerURLs,proto3" json:"AdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"` - PeerAutoTLS bool `protobuf:"varint,33,opt,name=PeerAutoTLS,proto3" json:"PeerAutoTLS,omitempty" yaml:"peer-auto-tls"` - PeerClientCertAuth bool `protobuf:"varint,34,opt,name=PeerClientCertAuth,proto3" json:"PeerClientCertAuth,omitempty" yaml:"peer-client-cert-auth"` - PeerCertFile string `protobuf:"bytes,35,opt,name=PeerCertFile,proto3" json:"PeerCertFile,omitempty" yaml:"peer-cert-file"` - PeerKeyFile string `protobuf:"bytes,36,opt,name=PeerKeyFile,proto3" json:"PeerKeyFile,omitempty" yaml:"peer-key-file"` - PeerTrustedCAFile string `protobuf:"bytes,37,opt,name=PeerTrustedCAFile,proto3" 
json:"PeerTrustedCAFile,omitempty" yaml:"peer-trusted-ca-file"` - InitialCluster string `protobuf:"bytes,41,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"` - InitialClusterState string `protobuf:"bytes,42,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"` - InitialClusterToken string `protobuf:"bytes,43,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"` - SnapshotCount int64 `protobuf:"varint,51,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"` - QuotaBackendBytes int64 `protobuf:"varint,52,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"` - PreVote bool `protobuf:"varint,63,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"` - InitialCorruptCheck bool `protobuf:"varint,64,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"` - Logger string `protobuf:"bytes,71,opt,name=Logger,proto3" json:"Logger,omitempty" yaml:"logger"` - // LogOutputs is the log file to store current etcd server logs. 
- LogOutputs []string `protobuf:"bytes,72,rep,name=LogOutputs,proto3" json:"LogOutputs,omitempty" yaml:"log-outputs"` - LogLevel string `protobuf:"bytes,73,opt,name=LogLevel,proto3" json:"LogLevel,omitempty" yaml:"log-level"` - SocketReuseAddress bool `protobuf:"varint,81,opt,name=SocketReuseAddress,proto3" json:"SocketReuseAddress,omitempty" yaml:"socket-reuse-address"` - SocketReusePort bool `protobuf:"varint,82,opt,name=SocketReusePort,proto3" json:"SocketReusePort,omitempty" yaml:"socket-reuse-port"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Etcd) Reset() { *m = Etcd{} } -func (m *Etcd) String() string { return proto.CompactTextString(m) } -func (*Etcd) ProtoMessage() {} -func (*Etcd) Descriptor() ([]byte, []int) { - return fileDescriptor_4fbc93a8dcc3881e, []int{6} -} -func (m *Etcd) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Etcd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Etcd.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Etcd) XXX_Merge(src proto.Message) { - xxx_messageInfo_Etcd.Merge(m, src) -} -func (m *Etcd) XXX_Size() int { - return m.Size() -} -func (m *Etcd) XXX_DiscardUnknown() { - xxx_messageInfo_Etcd.DiscardUnknown(m) -} - -var xxx_messageInfo_Etcd proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("rpcpb.StresserType", StresserType_name, StresserType_value) - proto.RegisterEnum("rpcpb.Checker", Checker_name, Checker_value) - proto.RegisterEnum("rpcpb.Operation", Operation_name, Operation_value) - proto.RegisterEnum("rpcpb.Case", Case_name, Case_value) - proto.RegisterType((*Request)(nil), "rpcpb.Request") - proto.RegisterType((*SnapshotInfo)(nil), "rpcpb.SnapshotInfo") - proto.RegisterType((*Response)(nil), "rpcpb.Response") - 
proto.RegisterType((*Member)(nil), "rpcpb.Member") - proto.RegisterType((*Tester)(nil), "rpcpb.Tester") - proto.RegisterType((*Stresser)(nil), "rpcpb.Stresser") - proto.RegisterType((*Etcd)(nil), "rpcpb.Etcd") -} - -func init() { proto.RegisterFile("rpcpb/rpc.proto", fileDescriptor_4fbc93a8dcc3881e) } - -var fileDescriptor_4fbc93a8dcc3881e = []byte{ - // 3073 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x5a, 0x4b, 0x74, 0xdb, 0xc6, - 0xd5, 0x36, 0x44, 0x49, 0x96, 0x46, 0x2f, 0x68, 0x24, 0xd9, 0xf0, 0x4b, 0x90, 0xe1, 0x38, 0xbf, - 0xac, 0x04, 0xb6, 0x7f, 0x3b, 0x27, 0x0f, 0xa7, 0x89, 0x03, 0x52, 0x90, 0xc4, 0x12, 0x22, 0xe9, - 0x21, 0x24, 0xdb, 0x5d, 0x14, 0x07, 0x22, 0x47, 0x12, 0x8f, 0x29, 0x80, 0x01, 0x86, 0x8e, 0x94, - 0x65, 0x37, 0xdd, 0x36, 0x6d, 0xda, 0xd3, 0x45, 0xf7, 0xdd, 0x34, 0xed, 0xa2, 0xeb, 0xee, 0x9d, - 0x57, 0x9b, 0xb6, 0xab, 0x76, 0xc1, 0xd3, 0xa6, 0x9b, 0xae, 0x79, 0xfa, 0x5e, 0xf4, 0xf4, 0xcc, - 0x0c, 0x40, 0x0e, 0x40, 0x52, 0xf6, 0xca, 0xc2, 0xbd, 0xdf, 0xf7, 0xcd, 0xc5, 0xdc, 0x8b, 0xb9, - 0x77, 0x64, 0x81, 0xb9, 0xa0, 0x59, 0x6d, 0xee, 0xdd, 0x0a, 0x9a, 0xd5, 0x9b, 0xcd, 0xc0, 0x27, - 0x3e, 0x1c, 0x63, 0x86, 0x8b, 0x8b, 0x07, 0xfe, 0x81, 0xcf, 0x2c, 0xb7, 0xe8, 0x4f, 0xdc, 0xa9, - 0x7d, 0x57, 0x02, 0x67, 0x11, 0x7e, 0xbf, 0x85, 0x43, 0x02, 0x6f, 0x82, 0xc9, 0x52, 0x13, 0x07, - 0x2e, 0xa9, 0xfb, 0x9e, 0x22, 0xad, 0x48, 0xab, 0xb3, 0x77, 0xe4, 0x9b, 0x8c, 0x7c, 0xb3, 0x6b, - 0x47, 0x3d, 0x08, 0xbc, 0x0e, 0xc6, 0xb7, 0xf1, 0xd1, 0x1e, 0x0e, 0x94, 0x91, 0x15, 0x69, 0x75, - 0xea, 0xce, 0x4c, 0x04, 0xe6, 0x46, 0x14, 0x39, 0x29, 0xcc, 0xc6, 0x21, 0xc1, 0x81, 0x92, 0x49, - 0xc0, 0xb8, 0x11, 0x45, 0x4e, 0xed, 0x3b, 0x19, 0x30, 0x5d, 0xf1, 0xdc, 0x66, 0x78, 0xe8, 0x93, - 0xbc, 0xb7, 0xef, 0xc3, 0x65, 0x00, 0xb8, 0x42, 0xd1, 0x3d, 0xc2, 0x2c, 0x9e, 0x49, 0x24, 0x58, - 0xe0, 0x1a, 0x90, 0xf9, 0x53, 0xae, 0x51, 0xc7, 0x1e, 0xd9, 0x41, 0x56, 0xa8, 0x8c, 0xac, 0x64, - 0x56, 0x27, 0x51, 0x9f, 
0x1d, 0x6a, 0x3d, 0xed, 0xb2, 0x4b, 0x0e, 0x59, 0x24, 0x93, 0x28, 0x61, - 0xa3, 0x7a, 0xf1, 0xf3, 0x46, 0xbd, 0x81, 0x2b, 0xf5, 0x0f, 0xb1, 0x32, 0xca, 0x70, 0x7d, 0x76, - 0xf8, 0x2a, 0x98, 0x8f, 0x6d, 0xb6, 0x4f, 0xdc, 0x06, 0x03, 0x8f, 0x31, 0x70, 0xbf, 0x43, 0x54, - 0x66, 0xc6, 0x02, 0x3e, 0x51, 0xc6, 0x57, 0xa4, 0xd5, 0x0c, 0xea, 0xb3, 0x8b, 0x91, 0x6e, 0xb9, - 0xe1, 0xa1, 0x72, 0x96, 0xe1, 0x12, 0x36, 0x51, 0x0f, 0xe1, 0xa7, 0xf5, 0x90, 0xe6, 0x6b, 0x22, - 0xa9, 0x17, 0xdb, 0x21, 0x04, 0xa3, 0xb6, 0xef, 0x3f, 0x51, 0x26, 0x59, 0x70, 0xec, 0x67, 0xa8, - 0x80, 0xb3, 0xbb, 0x38, 0x60, 0x34, 0xc0, 0xcc, 0xf1, 0xa3, 0xf6, 0x13, 0x09, 0x4c, 0x20, 0x1c, - 0x36, 0x7d, 0x2f, 0xc4, 0x14, 0x56, 0x69, 0x55, 0xab, 0x38, 0x0c, 0xd9, 0xee, 0x4f, 0xa0, 0xf8, - 0x11, 0x9e, 0x03, 0xe3, 0x15, 0xe2, 0x92, 0x56, 0xc8, 0x32, 0x3f, 0x89, 0xa2, 0x27, 0xa1, 0x22, - 0x32, 0xa7, 0x55, 0xc4, 0x1b, 0xc9, 0x4c, 0xb3, 0x5d, 0x9e, 0xba, 0xb3, 0x10, 0x81, 0x45, 0x17, - 0x4a, 0x00, 0xb5, 0xcf, 0xa6, 0xe3, 0x05, 0xe0, 0x6d, 0x30, 0x61, 0x92, 0x6a, 0xcd, 0x3c, 0xc6, - 0x55, 0x5e, 0x1b, 0xd9, 0xc5, 0x4e, 0x5b, 0x95, 0x4f, 0xdc, 0xa3, 0xc6, 0x3d, 0x0d, 0x93, 0x6a, - 0x4d, 0xc7, 0xc7, 0xb8, 0xaa, 0xa1, 0x2e, 0x0a, 0xde, 0x05, 0x93, 0xc6, 0x01, 0xf6, 0x88, 0x51, - 0xab, 0x05, 0xca, 0x14, 0xa3, 0x2c, 0x75, 0xda, 0xea, 0x3c, 0xa7, 0xb8, 0xd4, 0xa5, 0xbb, 0xb5, - 0x5a, 0xa0, 0xa1, 0x1e, 0x0e, 0x5a, 0x60, 0x7e, 0xc3, 0xad, 0x37, 0x9a, 0x7e, 0xdd, 0x23, 0x5b, - 0xb6, 0x5d, 0x66, 0xe4, 0x69, 0x46, 0x5e, 0xee, 0xb4, 0xd5, 0x8b, 0x9c, 0xbc, 0x1f, 0x43, 0xf4, - 0x43, 0x42, 0x9a, 0x91, 0x4a, 0x3f, 0x11, 0xea, 0xe0, 0x6c, 0xd6, 0x0d, 0xf1, 0x7a, 0x3d, 0x50, - 0x30, 0xd3, 0x58, 0xe8, 0xb4, 0xd5, 0x39, 0xae, 0xb1, 0xe7, 0x86, 0x58, 0xaf, 0xd5, 0x03, 0x0d, - 0xc5, 0x18, 0xb8, 0x09, 0xe6, 0x68, 0xf4, 0xbc, 0x8e, 0xcb, 0x81, 0x7f, 0x7c, 0xa2, 0x7c, 0xca, - 0x32, 0x91, 0xbd, 0xdc, 0x69, 0xab, 0x8a, 0xf0, 0xae, 0x55, 0x06, 0xd1, 0x9b, 0x14, 0xa3, 0xa1, - 0x34, 0x0b, 0x1a, 0x60, 0x86, 0x9a, 0xca, 0x18, 0x07, 0x5c, 
0xe6, 0x33, 0x2e, 0x73, 0xb1, 0xd3, - 0x56, 0xcf, 0x09, 0x32, 0x4d, 0x8c, 0x83, 0x58, 0x24, 0xc9, 0x80, 0x65, 0x00, 0x7b, 0xaa, 0xa6, - 0x57, 0x63, 0x2f, 0xa6, 0x7c, 0xc2, 0xf2, 0x9f, 0x55, 0x3b, 0x6d, 0xf5, 0x52, 0x7f, 0x38, 0x38, - 0x82, 0x69, 0x68, 0x00, 0x17, 0xfe, 0x3f, 0x18, 0xa5, 0x56, 0xe5, 0xe7, 0xfc, 0xf4, 0x98, 0x8a, - 0xd2, 0x4f, 0x6d, 0xd9, 0xb9, 0x4e, 0x5b, 0x9d, 0xea, 0x09, 0x6a, 0x88, 0x41, 0x61, 0x16, 0x2c, - 0xd1, 0x7f, 0x4b, 0x5e, 0xaf, 0xcc, 0x43, 0xe2, 0x07, 0x58, 0xf9, 0x45, 0xbf, 0x06, 0x1a, 0x0c, - 0x85, 0xeb, 0x60, 0x96, 0x07, 0x92, 0xc3, 0x01, 0x59, 0x77, 0x89, 0xab, 0x7c, 0xc4, 0x4e, 0x83, - 0xec, 0xa5, 0x4e, 0x5b, 0x3d, 0xcf, 0xd7, 0x8c, 0xe2, 0xaf, 0xe2, 0x80, 0xe8, 0x35, 0x97, 0xb8, - 0x1a, 0x4a, 0x71, 0x92, 0x2a, 0xec, 0x48, 0xf9, 0xfe, 0xa9, 0x2a, 0x4d, 0x97, 0x1c, 0x26, 0x54, - 0xd8, 0x91, 0x63, 0x80, 0x19, 0x6e, 0x29, 0xe0, 0x13, 0x16, 0xca, 0x0f, 0xb8, 0x88, 0x90, 0x97, - 0x48, 0xe4, 0x09, 0x3e, 0x89, 0x22, 0x49, 0x32, 0x12, 0x12, 0x2c, 0x8e, 0x8f, 0x4f, 0x93, 0xe0, - 0x61, 0x24, 0x19, 0xd0, 0x06, 0x0b, 0xdc, 0x60, 0x07, 0xad, 0x90, 0xe0, 0x5a, 0xce, 0x60, 0xb1, - 0xfc, 0x90, 0x0b, 0x5d, 0xed, 0xb4, 0xd5, 0x2b, 0x09, 0x21, 0xc2, 0x61, 0x7a, 0xd5, 0x8d, 0x42, - 0x1a, 0x44, 0x1f, 0xa0, 0xca, 0xc2, 0xfb, 0xd1, 0x0b, 0xa8, 0xf2, 0x28, 0x07, 0xd1, 0xe1, 0xbb, - 0x60, 0x9a, 0xd6, 0x64, 0x37, 0x77, 0x7f, 0xe7, 0x72, 0x17, 0x3a, 0x6d, 0x75, 0x89, 0xcb, 0xb1, - 0x1a, 0x16, 0x32, 0x97, 0xc0, 0x8b, 0x7c, 0x16, 0xce, 0x3f, 0x4e, 0xe1, 0xf3, 0x30, 0x12, 0x78, - 0xf8, 0x36, 0x98, 0xa2, 0xcf, 0x71, 0xbe, 0xfe, 0xc9, 0xe9, 0x4a, 0xa7, 0xad, 0x2e, 0x0a, 0xf4, - 0x5e, 0xb6, 0x44, 0xb4, 0x40, 0x66, 0x6b, 0xff, 0x6b, 0x38, 0x99, 0x2f, 0x2d, 0xa2, 0x61, 0x11, - 0xcc, 0xd3, 0xc7, 0x64, 0x8e, 0xfe, 0x9d, 0x49, 0x7f, 0x7f, 0x4c, 0xa2, 0x2f, 0x43, 0xfd, 0xd4, - 0x3e, 0x3d, 0x16, 0xd2, 0x7f, 0x9e, 0xab, 0xc7, 0x23, 0xeb, 0xa7, 0xc2, 0x77, 0x52, 0x2d, 0xf6, - 0x0f, 0xa3, 0xe9, 0xb7, 0x0b, 0x23, 0x77, 0xbc, 0xb1, 0x89, 0xee, 0xfb, 0x66, 0xaa, 0x27, 0xfc, - 
0xf1, 0x45, 0x9b, 0x02, 0x7c, 0x1d, 0x80, 0xee, 0x49, 0x1b, 0x2a, 0xbf, 0x1a, 0x4b, 0x9f, 0xec, - 0xdd, 0xc3, 0x39, 0xd4, 0x90, 0x80, 0xd4, 0x7e, 0x39, 0x1d, 0x0f, 0x26, 0xf4, 0x5c, 0xa6, 0x7b, - 0x42, 0xcf, 0x65, 0x29, 0x7d, 0x2e, 0xd3, 0x0d, 0x8c, 0xce, 0xe5, 0x08, 0x03, 0x5f, 0x05, 0x67, - 0x8b, 0x98, 0x7c, 0xe0, 0x07, 0x4f, 0x78, 0xff, 0xcb, 0xc2, 0x4e, 0x5b, 0x9d, 0xe5, 0x70, 0x8f, - 0x3b, 0x34, 0x14, 0x43, 0xe0, 0x35, 0x30, 0xca, 0xba, 0x06, 0xdf, 0x5a, 0xe1, 0x64, 0xe3, 0x6d, - 0x82, 0x39, 0x61, 0x0e, 0xcc, 0xae, 0xe3, 0x86, 0x7b, 0x62, 0xb9, 0x04, 0x7b, 0xd5, 0x93, 0xed, - 0x90, 0x75, 0xa8, 0x19, 0xf1, 0x38, 0xa9, 0x51, 0xbf, 0xde, 0xe0, 0x00, 0xfd, 0x28, 0xd4, 0x50, - 0x8a, 0x02, 0xbf, 0x09, 0xe4, 0xa4, 0x05, 0x3d, 0x65, 0xbd, 0x6a, 0x46, 0xec, 0x55, 0x69, 0x19, - 0x3d, 0x78, 0xaa, 0xa1, 0x3e, 0x1e, 0x7c, 0x0c, 0x96, 0x76, 0x9a, 0x35, 0x97, 0xe0, 0x5a, 0x2a, - 0xae, 0x19, 0x26, 0x78, 0xad, 0xd3, 0x56, 0x55, 0x2e, 0xd8, 0xe2, 0x30, 0xbd, 0x3f, 0xbe, 0xc1, - 0x0a, 0x34, 0x61, 0xc8, 0x6f, 0x79, 0x35, 0xab, 0x7e, 0x54, 0x27, 0xca, 0xd2, 0x8a, 0xb4, 0x3a, - 0x96, 0x3d, 0xd7, 0x69, 0xab, 0x90, 0xeb, 0x05, 0xd4, 0xa7, 0x37, 0xa8, 0x53, 0x43, 0x02, 0x12, - 0x66, 0xc1, 0xac, 0x79, 0x5c, 0x27, 0x25, 0x2f, 0xe7, 0x86, 0x98, 0x26, 0x52, 0x39, 0xd7, 0xd7, - 0xc5, 0x8e, 0xeb, 0x44, 0xf7, 0x3d, 0x9d, 0xe6, 0xbc, 0x15, 0x60, 0x0d, 0xa5, 0x18, 0xf0, 0x2d, - 0x30, 0x65, 0x7a, 0xee, 0x5e, 0x03, 0x97, 0x9b, 0x81, 0xbf, 0xaf, 0x9c, 0x67, 0x02, 0xe7, 0x3b, - 0x6d, 0x75, 0x21, 0x12, 0x60, 0x4e, 0xbd, 0x49, 0xbd, 0x1a, 0x12, 0xb1, 0xf0, 0x1e, 0x98, 0xa2, - 0x32, 0xec, 0x65, 0xb6, 0x43, 0x45, 0x65, 0xfb, 0x20, 0x94, 0x77, 0x95, 0x35, 0x70, 0xb6, 0x09, - 0xf4, 0xe5, 0x45, 0x30, 0x5d, 0x96, 0x3e, 0x56, 0x0e, 0x5b, 0xfb, 0xfb, 0x0d, 0xac, 0xac, 0xa4, - 0x97, 0x65, 0xdc, 0x90, 0x7b, 0x23, 0x6a, 0x84, 0x85, 0x2f, 0x83, 0x31, 0xfa, 0x18, 0x2a, 0x57, - 0xe9, 0x6c, 0x9b, 0x95, 0x3b, 0x6d, 0x75, 0xba, 0x47, 0x0a, 0x35, 0xc4, 0xdd, 0xb0, 0x20, 0x4c, - 0x2a, 0x39, 0xff, 0xe8, 0xc8, 0xf5, 
0x6a, 0xa1, 0xa2, 0x31, 0xce, 0x95, 0x4e, 0x5b, 0xbd, 0x90, - 0x9e, 0x54, 0xaa, 0x11, 0x46, 0x1c, 0x54, 0x62, 0x1e, 0x2d, 0x47, 0xd4, 0xf2, 0x3c, 0x1c, 0xd0, - 0xc9, 0x89, 0x7d, 0xce, 0x37, 0xd2, 0xdd, 0x2d, 0x60, 0x7e, 0x36, 0x65, 0xc5, 0xdd, 0x2d, 0x49, - 0x81, 0x79, 0x20, 0x9b, 0xc7, 0x04, 0x07, 0x9e, 0xdb, 0xe8, 0xca, 0xac, 0x31, 0x19, 0x21, 0x20, - 0x1c, 0x21, 0x44, 0xa1, 0x3e, 0x1a, 0xcc, 0x81, 0xc9, 0x0a, 0x09, 0x70, 0x18, 0xe2, 0x20, 0x54, - 0xf0, 0x4a, 0x66, 0x75, 0xea, 0xce, 0x5c, 0x7c, 0x32, 0x44, 0x76, 0x71, 0xfe, 0x0b, 0x63, 0xac, - 0x86, 0x7a, 0x3c, 0x78, 0x0b, 0x4c, 0xe4, 0x0e, 0x71, 0xf5, 0x09, 0xd5, 0xd8, 0x67, 0x1b, 0x23, - 0x7c, 0xe6, 0xd5, 0xc8, 0xa3, 0xa1, 0x2e, 0x88, 0xf6, 0x56, 0xce, 0x2e, 0xe0, 0x13, 0x36, 0xe1, - 0xb3, 0xe9, 0x6b, 0x4c, 0x2c, 0x38, 0xbe, 0x12, 0x3b, 0xb3, 0xc3, 0xfa, 0x87, 0x58, 0x43, 0x49, - 0x06, 0x7c, 0x00, 0x60, 0xc2, 0x60, 0xb9, 0xc1, 0x01, 0xe6, 0xe3, 0xd7, 0x58, 0x76, 0xa5, 0xd3, - 0x56, 0x2f, 0x0f, 0xd4, 0xd1, 0x1b, 0x14, 0xa7, 0xa1, 0x01, 0x64, 0xf8, 0x10, 0x2c, 0xf6, 0xac, - 0xad, 0xfd, 0xfd, 0xfa, 0x31, 0x72, 0xbd, 0x03, 0xac, 0x7c, 0xce, 0x45, 0xb5, 0x4e, 0x5b, 0x5d, - 0xee, 0x17, 0x65, 0x40, 0x3d, 0xa0, 0x48, 0x0d, 0x0d, 0x14, 0x80, 0x2e, 0x38, 0x3f, 0xc8, 0x6e, - 0x1f, 0x7b, 0xca, 0x17, 0x5c, 0xfb, 0xe5, 0x4e, 0x5b, 0xd5, 0x4e, 0xd5, 0xd6, 0xc9, 0xb1, 0xa7, - 0xa1, 0x61, 0x3a, 0x70, 0x0b, 0xcc, 0x75, 0x5d, 0xf6, 0xb1, 0x57, 0x6a, 0x86, 0xca, 0x97, 0x5c, - 0x5a, 0x28, 0x09, 0x41, 0x9a, 0x1c, 0x7b, 0xba, 0xdf, 0x0c, 0x35, 0x94, 0xa6, 0xc1, 0xf7, 0xe2, - 0xdc, 0xf0, 0x29, 0x21, 0xe4, 0xa3, 0xe8, 0x98, 0xd8, 0xc9, 0x23, 0x1d, 0x3e, 0x5f, 0x84, 0xdd, - 0xd4, 0x44, 0x04, 0xf8, 0x5a, 0x5c, 0x53, 0x0f, 0xca, 0x15, 0x3e, 0x84, 0x8e, 0x89, 0x6d, 0x23, - 0x62, 0xbf, 0xdf, 0xec, 0x15, 0xd1, 0x83, 0x72, 0x45, 0xfb, 0x16, 0x98, 0x88, 0x2b, 0x8a, 0x9e, - 0xec, 0xf6, 0x49, 0x33, 0xba, 0x9b, 0x8a, 0x27, 0x3b, 0x39, 0x69, 0x62, 0x0d, 0x31, 0x27, 0xbc, - 0x01, 0xc6, 0x1f, 0xe2, 0xfa, 0xc1, 0x21, 0x61, 0xbd, 0x42, 0xca, 0xce, 
0x77, 0xda, 0xea, 0x0c, - 0x87, 0x7d, 0xc0, 0xec, 0x1a, 0x8a, 0x00, 0xda, 0x4f, 0x65, 0x3e, 0x12, 0x53, 0xe1, 0xde, 0xa5, - 0x57, 0x14, 0xf6, 0xdc, 0x23, 0x2a, 0xcc, 0xee, 0xbf, 0x42, 0xd3, 0x1a, 0x79, 0x81, 0xa6, 0xb5, - 0x06, 0xc6, 0x1f, 0x1a, 0x16, 0x45, 0x67, 0xd2, 0x3d, 0xeb, 0x03, 0xb7, 0xc1, 0xc1, 0x11, 0x02, - 0x96, 0xc0, 0xc2, 0x16, 0x76, 0x03, 0xb2, 0x87, 0x5d, 0x92, 0xf7, 0x08, 0x0e, 0x9e, 0xba, 0x8d, - 0xa8, 0x25, 0x65, 0xc4, 0x4c, 0x1d, 0xc6, 0x20, 0xbd, 0x1e, 0xa1, 0x34, 0x34, 0x88, 0x09, 0xf3, - 0x60, 0xde, 0x6c, 0xe0, 0x2a, 0xa9, 0xfb, 0x9e, 0x5d, 0x3f, 0xc2, 0x7e, 0x8b, 0x6c, 0x87, 0xac, - 0x35, 0x65, 0xc4, 0x23, 0x05, 0x47, 0x10, 0x9d, 0x70, 0x8c, 0x86, 0xfa, 0x59, 0xf4, 0x54, 0xb1, - 0xea, 0x21, 0xc1, 0x9e, 0x70, 0xed, 0x5f, 0x4a, 0x1f, 0x73, 0x0d, 0x86, 0x88, 0xef, 0x21, 0xad, - 0xa0, 0x11, 0x6a, 0xa8, 0x8f, 0x06, 0x11, 0x58, 0x30, 0x6a, 0x4f, 0x71, 0x40, 0xea, 0x21, 0x16, - 0xd4, 0xce, 0x31, 0x35, 0xe1, 0xe3, 0x74, 0x63, 0x50, 0x52, 0x70, 0x10, 0x19, 0xbe, 0x15, 0xcf, - 0xe3, 0x46, 0x8b, 0xf8, 0xb6, 0x55, 0x89, 0x5a, 0x8c, 0x90, 0x1b, 0xb7, 0x45, 0x7c, 0x9d, 0x50, - 0x81, 0x24, 0x92, 0x1e, 0xba, 0xbd, 0xfb, 0x81, 0xd1, 0x22, 0x87, 0x8a, 0xc2, 0xb8, 0x43, 0xae, - 0x14, 0x6e, 0x2b, 0x75, 0xa5, 0xa0, 0x14, 0xf8, 0x0d, 0x51, 0x64, 0xa3, 0xde, 0xc0, 0xca, 0x85, - 0xf4, 0xed, 0x98, 0xb1, 0xf7, 0xeb, 0xb4, 0xd3, 0xa4, 0xb0, 0xbd, 0xe8, 0x0b, 0xf8, 0x84, 0x91, - 0x2f, 0xa6, 0x2b, 0x8b, 0x7e, 0x95, 0x9c, 0x9b, 0x44, 0x42, 0xab, 0x6f, 0xde, 0x67, 0x02, 0x97, - 0xd2, 0xb7, 0x11, 0x61, 0x96, 0xe4, 0x3a, 0x83, 0x68, 0x74, 0x2f, 0x78, 0xba, 0xe8, 0xa0, 0xc9, - 0xb2, 0xa2, 0xb2, 0xac, 0x08, 0x7b, 0x11, 0xe5, 0x98, 0x0d, 0xa8, 0x3c, 0x21, 0x29, 0x0a, 0xb4, - 0xc1, 0x7c, 0x37, 0x45, 0x5d, 0x9d, 0x15, 0xa6, 0x23, 0x9c, 0x64, 0x75, 0xaf, 0x4e, 0xea, 0x6e, - 0x43, 0xef, 0x65, 0x59, 0x90, 0xec, 0x17, 0xa0, 0x73, 0x00, 0xfd, 0x39, 0xce, 0xef, 0x55, 0x96, - 0xa3, 0xf4, 0x10, 0xdf, 0x4b, 0xb2, 0x08, 0xa6, 0xb7, 0x68, 0x76, 0x9d, 0x48, 0xa6, 0x59, 0x63, - 0x12, 0x42, 
0xc1, 0xf1, 0x3b, 0x48, 0x5f, 0xae, 0x07, 0x70, 0xe9, 0xd8, 0x1d, 0x5f, 0x50, 0xd8, - 0x7e, 0x5f, 0x1b, 0x7e, 0x9f, 0xe1, 0xdb, 0x9d, 0x80, 0xc7, 0x2f, 0x13, 0xa7, 0xfb, 0xa5, 0xa1, - 0x37, 0x12, 0x4e, 0x16, 0xc1, 0x70, 0x3b, 0x75, 0x83, 0x60, 0x0a, 0xd7, 0x9f, 0x77, 0x81, 0xe0, - 0x42, 0xfd, 0x4c, 0x3a, 0xde, 0xe5, 0x79, 0x2a, 0x72, 0x8d, 0x16, 0xfb, 0x7d, 0xe1, 0x8d, 0x74, - 0xed, 0xc4, 0xa9, 0xaa, 0x72, 0x80, 0x86, 0x52, 0x0c, 0xfa, 0x45, 0x27, 0x2d, 0x15, 0xe2, 0x12, - 0x1c, 0x4d, 0x1d, 0xc2, 0x06, 0xa7, 0x84, 0xf4, 0x90, 0xc2, 0x34, 0x34, 0x88, 0xdc, 0xaf, 0x69, - 0xfb, 0x4f, 0xb0, 0xa7, 0xbc, 0xf2, 0x3c, 0x4d, 0x42, 0x61, 0x7d, 0x9a, 0x8c, 0x0c, 0xef, 0x83, - 0x99, 0xf8, 0x0e, 0x93, 0xf3, 0x5b, 0x1e, 0x51, 0xee, 0xb2, 0xb3, 0x50, 0x6c, 0x5e, 0xf1, 0x65, - 0xa9, 0x4a, 0xfd, 0xb4, 0x79, 0x89, 0x78, 0x68, 0x81, 0xf9, 0x07, 0x2d, 0x9f, 0xb8, 0x59, 0xb7, - 0xfa, 0x04, 0x7b, 0xb5, 0xec, 0x09, 0xc1, 0xa1, 0xf2, 0x1a, 0x13, 0x11, 0x66, 0xfd, 0xf7, 0x29, - 0x44, 0xdf, 0xe3, 0x18, 0x7d, 0x8f, 0x82, 0x34, 0xd4, 0x4f, 0xa4, 0xad, 0xa4, 0x1c, 0xe0, 0x5d, - 0x9f, 0x60, 0xe5, 0x7e, 0xfa, 0xb8, 0x6a, 0x06, 0x58, 0x7f, 0xea, 0xd3, 0xdd, 0x89, 0x31, 0xe2, - 0x8e, 0xf8, 0x41, 0xd0, 0x6a, 0x12, 0x36, 0x31, 0x29, 0xef, 0xa5, 0xcb, 0xb8, 0xbb, 0x23, 0x1c, - 0xa5, 0xb3, 0x19, 0x4b, 0xd8, 0x11, 0x81, 0x4c, 0xdb, 0xa4, 0xe5, 0x1f, 0x1c, 0xe0, 0x40, 0xd9, - 0x64, 0x1b, 0x2b, 0xb4, 0xc9, 0x06, 0xb3, 0x6b, 0x28, 0x02, 0xd0, 0xfb, 0x83, 0xe5, 0x1f, 0x94, - 0x5a, 0xa4, 0xd9, 0x22, 0xa1, 0xb2, 0xc5, 0xbe, 0x67, 0xe1, 0xfe, 0xd0, 0xf0, 0x0f, 0x74, 0x9f, - 0x3b, 0x35, 0x24, 0x20, 0xe1, 0x6d, 0x30, 0x61, 0xf9, 0x07, 0x16, 0x7e, 0x8a, 0x1b, 0x4a, 0x3e, - 0x7d, 0x28, 0x52, 0x56, 0x83, 0xba, 0x34, 0xd4, 0x45, 0xc1, 0x12, 0x80, 0x15, 0xbf, 0xfa, 0x04, - 0x13, 0x84, 0x5b, 0x21, 0xa6, 0x17, 0x35, 0x1c, 0x86, 0xca, 0x03, 0xf6, 0x9e, 0x42, 0x89, 0x87, - 0x0c, 0xa3, 0x07, 0x14, 0xc4, 0x7e, 0xf9, 0x87, 0xc3, 0x90, 0xce, 0x6e, 0x7d, 0x54, 0xb8, 0x01, - 0xe6, 0x04, 0x6b, 0xd9, 0x0f, 0x88, 0x82, 0xd2, 
0xbf, 0xd0, 0x4b, 0xa8, 0x35, 0xfd, 0x80, 0xd0, - 0xe9, 0x27, 0x49, 0x5a, 0xfb, 0xaf, 0x04, 0xa6, 0xe3, 0x31, 0x84, 0x4d, 0x19, 0x10, 0xcc, 0x16, - 0x76, 0x9d, 0x87, 0x28, 0x6f, 0x9b, 0x4e, 0x65, 0xdb, 0xb0, 0x2c, 0xf9, 0x4c, 0xc2, 0x66, 0x19, - 0x68, 0xd3, 0x94, 0x25, 0xb8, 0x00, 0xe6, 0x0a, 0xbb, 0x0e, 0x32, 0x8d, 0x75, 0xa7, 0x54, 0x34, - 0x9d, 0x82, 0xf9, 0x58, 0x1e, 0x81, 0xf3, 0x60, 0x26, 0x36, 0x22, 0xa3, 0xb8, 0x69, 0xca, 0x19, - 0xb8, 0x04, 0xe6, 0x0b, 0xbb, 0xce, 0xba, 0x69, 0x99, 0xb6, 0xd9, 0x45, 0x8e, 0x46, 0xf4, 0xc8, - 0xcc, 0xb1, 0x63, 0xf0, 0x3c, 0x58, 0x28, 0xec, 0x3a, 0xf6, 0xa3, 0x62, 0xb4, 0x16, 0x77, 0xcb, - 0xe3, 0x70, 0x12, 0x8c, 0x59, 0xa6, 0x51, 0x31, 0x65, 0x40, 0x89, 0xa6, 0x65, 0xe6, 0xec, 0x7c, - 0xa9, 0xe8, 0xa0, 0x9d, 0x62, 0xd1, 0x44, 0xf2, 0x22, 0x94, 0xc1, 0xf4, 0x43, 0xc3, 0xce, 0x6d, - 0xc5, 0x16, 0x95, 0x2e, 0x6b, 0x95, 0x72, 0x05, 0x07, 0x19, 0x39, 0x13, 0xc5, 0xe6, 0x1b, 0x14, - 0xc8, 0x84, 0x62, 0xcb, 0xdd, 0xb5, 0x6f, 0x83, 0xb3, 0xd1, 0x98, 0x0e, 0xa7, 0xc0, 0xd9, 0xc2, - 0xae, 0xb3, 0x65, 0x54, 0xb6, 0xe4, 0x33, 0x3d, 0xa4, 0xf9, 0xa8, 0x9c, 0x47, 0xf4, 0x8d, 0x01, - 0x18, 0x8f, 0x58, 0x23, 0x70, 0x1a, 0x4c, 0x14, 0x4b, 0x4e, 0x6e, 0xcb, 0xcc, 0x15, 0xe4, 0x0c, - 0xbc, 0x08, 0xce, 0x55, 0xb6, 0x4a, 0xc8, 0x76, 0x6c, 0xdb, 0x72, 0x12, 0xac, 0xd1, 0xb5, 0xbf, - 0x8e, 0x08, 0xff, 0x19, 0x02, 0xe7, 0xc0, 0x54, 0xb1, 0x64, 0x3b, 0x15, 0xdb, 0x40, 0xb6, 0xb9, - 0x2e, 0x9f, 0x81, 0xe7, 0x00, 0xcc, 0x17, 0xf3, 0x76, 0xde, 0xb0, 0xb8, 0xd1, 0x31, 0xed, 0xdc, - 0xba, 0x0c, 0xe8, 0xf2, 0xc8, 0x14, 0x2c, 0x53, 0xd4, 0x52, 0xc9, 0x6f, 0xda, 0x26, 0xda, 0xe6, - 0x96, 0x45, 0xb8, 0x02, 0x2e, 0x57, 0xf2, 0x9b, 0x0f, 0x76, 0xf2, 0x1c, 0xe3, 0x18, 0xc5, 0x75, - 0x07, 0x99, 0xdb, 0xa5, 0x5d, 0xd3, 0x59, 0x37, 0x6c, 0x43, 0x5e, 0xa2, 0xf9, 0xa8, 0x18, 0xbb, - 0xa6, 0x53, 0x29, 0x1a, 0xe5, 0xca, 0x56, 0xc9, 0x96, 0x97, 0xe1, 0x55, 0x70, 0x85, 0x0a, 0x97, - 0x90, 0xe9, 0xc4, 0x0b, 0x6c, 0xa0, 0xd2, 0x76, 0x0f, 0xa2, 0xc2, 0x0b, 0x60, 0x69, 
0xb0, 0x6b, - 0x85, 0xb2, 0xfb, 0x96, 0x34, 0x50, 0x6e, 0x2b, 0x1f, 0xaf, 0xb9, 0x0a, 0xaf, 0x80, 0x0b, 0x59, - 0xcb, 0xc8, 0x15, 0xb6, 0x4a, 0x96, 0xe9, 0x94, 0x4d, 0x13, 0x39, 0x65, 0xb6, 0x35, 0x8f, 0x1c, - 0xf4, 0x48, 0xae, 0x41, 0x15, 0x5c, 0xda, 0x29, 0x0e, 0x07, 0x60, 0x78, 0x11, 0x2c, 0xad, 0x9b, - 0x96, 0xf1, 0xb8, 0xcf, 0xf5, 0x4c, 0x82, 0x97, 0xc1, 0xf9, 0x9d, 0xe2, 0x60, 0xef, 0xa7, 0xd2, - 0xda, 0xc7, 0x53, 0x60, 0x94, 0x5e, 0x61, 0xa1, 0x02, 0x16, 0xe3, 0xad, 0xa2, 0x15, 0xb7, 0x51, - 0xb2, 0xac, 0xd2, 0x43, 0x13, 0xc9, 0x67, 0xe0, 0x2d, 0xf0, 0xca, 0x20, 0x8f, 0xb3, 0x53, 0xb4, - 0xf3, 0x96, 0x63, 0xa3, 0xfc, 0xe6, 0xa6, 0x89, 0x7a, 0x2f, 0x2c, 0xd1, 0xd2, 0x8f, 0x09, 0x96, - 0x69, 0xac, 0xb3, 0xe4, 0xdf, 0x00, 0xd7, 0x93, 0xb6, 0x61, 0xf4, 0x8c, 0x48, 0x7f, 0xb0, 0x53, - 0x42, 0x3b, 0xdb, 0xf2, 0x28, 0xad, 0x81, 0xd8, 0x46, 0x3f, 0xaf, 0x31, 0x78, 0x0d, 0xa8, 0xf1, - 0xa6, 0x0a, 0x29, 0x4c, 0x44, 0x0e, 0xe0, 0x3d, 0xf0, 0xfa, 0x73, 0x40, 0xc3, 0xa2, 0x98, 0xa2, - 0x29, 0x19, 0xc0, 0x8d, 0xde, 0x67, 0x1a, 0xbe, 0x06, 0x6e, 0x0f, 0x75, 0x0f, 0x13, 0x9d, 0x81, - 0x1b, 0x20, 0x3b, 0x80, 0xc5, 0xdf, 0x32, 0xb2, 0xf0, 0x32, 0x8b, 0x84, 0x62, 0x6a, 0x54, 0x53, - 0x39, 0x44, 0x3f, 0x58, 0x79, 0x16, 0xae, 0x81, 0x97, 0x87, 0x96, 0x43, 0x72, 0x13, 0x6a, 0xd0, - 0x00, 0xef, 0xbc, 0x18, 0x76, 0x58, 0xd8, 0x18, 0xbe, 0x04, 0x56, 0x86, 0x4b, 0x44, 0x5b, 0xb2, - 0x0f, 0xdf, 0x06, 0x6f, 0x3c, 0x0f, 0x35, 0x6c, 0x89, 0x83, 0xd3, 0x97, 0x88, 0xca, 0xe0, 0x90, - 0x7e, 0x4a, 0xc3, 0x51, 0xb4, 0x30, 0xea, 0xf0, 0xff, 0x80, 0x36, 0xb0, 0xd8, 0x93, 0xdb, 0xf2, - 0x4c, 0x82, 0x37, 0xc1, 0x0d, 0x64, 0x14, 0xd7, 0x4b, 0xdb, 0xce, 0x0b, 0xe0, 0x3f, 0x95, 0xe0, - 0xbb, 0xe0, 0xad, 0xe7, 0x03, 0x87, 0xbd, 0xe0, 0x67, 0x12, 0x34, 0xc1, 0x7b, 0x2f, 0xbc, 0xde, - 0x30, 0x99, 0xcf, 0x25, 0x78, 0x15, 0x5c, 0x1e, 0xcc, 0x8f, 0xf2, 0xf0, 0x85, 0x04, 0x57, 0xc1, - 0xb5, 0x53, 0x57, 0x8a, 0x90, 0x5f, 0x4a, 0xf0, 0x4d, 0x70, 0xf7, 0x34, 0xc8, 0xb0, 0x30, 0x7e, - 0x2d, 0xc1, 0xfb, 0xe0, 
0xde, 0x0b, 0xac, 0x31, 0x4c, 0xe0, 0x37, 0xa7, 0xbc, 0x47, 0x94, 0xec, - 0xaf, 0x9e, 0xff, 0x1e, 0x11, 0xf2, 0xb7, 0x12, 0x5c, 0x06, 0x17, 0x06, 0x43, 0x68, 0x4d, 0xfc, - 0x4e, 0x82, 0xd7, 0xc1, 0xca, 0xa9, 0x4a, 0x14, 0xf6, 0x7b, 0x09, 0x2a, 0x60, 0xa1, 0x58, 0x72, - 0x36, 0x8c, 0xbc, 0xe5, 0x3c, 0xcc, 0xdb, 0x5b, 0x4e, 0xc5, 0x46, 0x66, 0xa5, 0x22, 0xff, 0x6c, - 0x84, 0x86, 0x92, 0xf0, 0x14, 0x4b, 0x91, 0xd3, 0xd9, 0x28, 0x21, 0xc7, 0xca, 0xef, 0x9a, 0x45, - 0x8a, 0xfc, 0x64, 0x04, 0xce, 0x01, 0x40, 0x61, 0xe5, 0x52, 0xbe, 0x68, 0x57, 0xe4, 0xef, 0x65, - 0xe0, 0x4b, 0x40, 0xed, 0x19, 0x38, 0x7b, 0x3d, 0x5f, 0x29, 0x38, 0xf9, 0x92, 0x63, 0x19, 0xb6, - 0x59, 0xcc, 0x3d, 0x96, 0x3f, 0xca, 0xc0, 0x19, 0x30, 0x61, 0x3e, 0xb2, 0x4d, 0x54, 0x34, 0x2c, - 0xf9, 0x6f, 0x99, 0x3b, 0xf7, 0xc1, 0xa4, 0x1d, 0xb8, 0x5e, 0x48, 0x27, 0x10, 0x78, 0x47, 0x7c, - 0x98, 0x8d, 0x7e, 0xf3, 0x16, 0xfd, 0xdd, 0xc0, 0xc5, 0xb9, 0xee, 0x33, 0xff, 0x8f, 0x63, 0xed, - 0xcc, 0xaa, 0x74, 0x5b, 0xca, 0x2e, 0x3e, 0xfb, 0xf3, 0xf2, 0x99, 0x67, 0x5f, 0x2f, 0x4b, 0x5f, - 0x7d, 0xbd, 0x2c, 0xfd, 0xe9, 0xeb, 0x65, 0xe9, 0xc7, 0x7f, 0x59, 0x3e, 0xb3, 0x37, 0xce, 0xfe, - 0xee, 0xe0, 0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x2c, 0x48, 0xfe, 0xa7, 0x20, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TransportClient is the client API for Transport service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type TransportClient interface { - Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error) -} - -type transportClient struct { - cc *grpc.ClientConn -} - -func NewTransportClient(cc *grpc.ClientConn) TransportClient { - return &transportClient{cc} -} - -func (c *transportClient) Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error) { - stream, err := c.cc.NewStream(ctx, &_Transport_serviceDesc.Streams[0], "/rpcpb.Transport/Transport", opts...) - if err != nil { - return nil, err - } - x := &transportTransportClient{stream} - return x, nil -} - -type Transport_TransportClient interface { - Send(*Request) error - Recv() (*Response, error) - grpc.ClientStream -} - -type transportTransportClient struct { - grpc.ClientStream -} - -func (x *transportTransportClient) Send(m *Request) error { - return x.ClientStream.SendMsg(m) -} - -func (x *transportTransportClient) Recv() (*Response, error) { - m := new(Response) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TransportServer is the server API for Transport service. -type TransportServer interface { - Transport(Transport_TransportServer) error -} - -// UnimplementedTransportServer can be embedded to have forward compatible implementations. 
-type UnimplementedTransportServer struct { -} - -func (*UnimplementedTransportServer) Transport(srv Transport_TransportServer) error { - return status.Errorf(codes.Unimplemented, "method Transport not implemented") -} - -func RegisterTransportServer(s *grpc.Server, srv TransportServer) { - s.RegisterService(&_Transport_serviceDesc, srv) -} - -func _Transport_Transport_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TransportServer).Transport(&transportTransportServer{stream}) -} - -type Transport_TransportServer interface { - Send(*Response) error - Recv() (*Request, error) - grpc.ServerStream -} - -type transportTransportServer struct { - grpc.ServerStream -} - -func (x *transportTransportServer) Send(m *Response) error { - return x.ServerStream.SendMsg(m) -} - -func (x *transportTransportServer) Recv() (*Request, error) { - m := new(Request) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Transport_serviceDesc = grpc.ServiceDesc{ - ServiceName: "rpcpb.Transport", - HandlerType: (*TransportServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Transport", - Handler: _Transport_Transport_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpcpb/rpc.proto", -} - -func (m *Request) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Request) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Tester != nil { - { - size, err := m.Tester.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, 
err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Member != nil { - { - size, err := m.Member.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Operation != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Operation)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *SnapshotInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x52 - } - if len(m.Took) > 0 { - i -= len(m.Took) - copy(dAtA[i:], m.Took) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Took))) - i-- - dAtA[i] = 0x4a - } - if m.SnapshotRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotRevision)) - i-- - dAtA[i] = 0x40 - } - if m.SnapshotHash != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotHash)) - i-- - dAtA[i] = 0x38 - } - if m.SnapshotTotalKey != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotTotalKey)) - i-- - dAtA[i] = 0x30 - } - if len(m.SnapshotTotalSize) > 0 { - i -= len(m.SnapshotTotalSize) - copy(dAtA[i:], m.SnapshotTotalSize) - i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotTotalSize))) - i-- - dAtA[i] = 0x2a - } - if len(m.SnapshotFileSize) > 0 { - i -= len(m.SnapshotFileSize) - copy(dAtA[i:], m.SnapshotFileSize) - i = encodeVarintRpc(dAtA, i, 
uint64(len(m.SnapshotFileSize))) - i-- - dAtA[i] = 0x22 - } - if len(m.SnapshotPath) > 0 { - i -= len(m.SnapshotPath) - copy(dAtA[i:], m.SnapshotPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotPath))) - i-- - dAtA[i] = 0x1a - } - if len(m.MemberClientURLs) > 0 { - for iNdEx := len(m.MemberClientURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.MemberClientURLs[iNdEx]) - copy(dAtA[i:], m.MemberClientURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.MemberClientURLs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.MemberName) > 0 { - i -= len(m.MemberName) - copy(dAtA[i:], m.MemberName) - i = encodeVarintRpc(dAtA, i, uint64(len(m.MemberName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Response) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Response) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.SnapshotInfo != nil { - { - size, err := m.SnapshotInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Member != nil { - { - size, err := m.Member.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Status) > 0 { - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - } - if m.Success { - i-- - if m.Success { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return 
len(dAtA) - i, nil -} - -func (m *Member) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Member) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Failpoints) > 0 { - i -= len(m.Failpoints) - copy(dAtA[i:], m.Failpoints) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Failpoints))) - i-- - dAtA[i] = 0x2b - i-- - dAtA[i] = 0xea - } - if m.SnapshotInfo != nil { - { - size, err := m.SnapshotInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x25 - i-- - dAtA[i] = 0xd2 - } - if len(m.SnapshotPath) > 0 { - i -= len(m.SnapshotPath) - copy(dAtA[i:], m.SnapshotPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotPath))) - i-- - dAtA[i] = 0x25 - i-- - dAtA[i] = 0xca - } - if len(m.PeerTrustedCAPath) > 0 { - i -= len(m.PeerTrustedCAPath) - copy(dAtA[i:], m.PeerTrustedCAPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerTrustedCAPath))) - i-- - dAtA[i] = 0x1f - i-- - dAtA[i] = 0xd2 - } - if len(m.PeerTrustedCAData) > 0 { - i -= len(m.PeerTrustedCAData) - copy(dAtA[i:], m.PeerTrustedCAData) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerTrustedCAData))) - i-- - dAtA[i] = 0x1f - i-- - dAtA[i] = 0xca - } - if len(m.PeerKeyPath) > 0 { - i -= len(m.PeerKeyPath) - copy(dAtA[i:], m.PeerKeyPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerKeyPath))) - i-- - dAtA[i] = 0x1f - i-- - dAtA[i] = 0xc2 - } - if len(m.PeerKeyData) > 0 { - i -= len(m.PeerKeyData) - copy(dAtA[i:], m.PeerKeyData) - i = encodeVarintRpc(dAtA, i, 
uint64(len(m.PeerKeyData))) - i-- - dAtA[i] = 0x1f - i-- - dAtA[i] = 0xba - } - if len(m.PeerCertPath) > 0 { - i -= len(m.PeerCertPath) - copy(dAtA[i:], m.PeerCertPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerCertPath))) - i-- - dAtA[i] = 0x1f - i-- - dAtA[i] = 0xb2 - } - if len(m.PeerCertData) > 0 { - i -= len(m.PeerCertData) - copy(dAtA[i:], m.PeerCertData) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerCertData))) - i-- - dAtA[i] = 0x1f - i-- - dAtA[i] = 0xaa - } - if len(m.ClientTrustedCAPath) > 0 { - i -= len(m.ClientTrustedCAPath) - copy(dAtA[i:], m.ClientTrustedCAPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientTrustedCAPath))) - i-- - dAtA[i] = 0x19 - i-- - dAtA[i] = 0xb2 - } - if len(m.ClientTrustedCAData) > 0 { - i -= len(m.ClientTrustedCAData) - copy(dAtA[i:], m.ClientTrustedCAData) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientTrustedCAData))) - i-- - dAtA[i] = 0x19 - i-- - dAtA[i] = 0xaa - } - if len(m.ClientKeyPath) > 0 { - i -= len(m.ClientKeyPath) - copy(dAtA[i:], m.ClientKeyPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientKeyPath))) - i-- - dAtA[i] = 0x19 - i-- - dAtA[i] = 0xa2 - } - if len(m.ClientKeyData) > 0 { - i -= len(m.ClientKeyData) - copy(dAtA[i:], m.ClientKeyData) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientKeyData))) - i-- - dAtA[i] = 0x19 - i-- - dAtA[i] = 0x9a - } - if len(m.ClientCertPath) > 0 { - i -= len(m.ClientCertPath) - copy(dAtA[i:], m.ClientCertPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientCertPath))) - i-- - dAtA[i] = 0x19 - i-- - dAtA[i] = 0x92 - } - if len(m.ClientCertData) > 0 { - i -= len(m.ClientCertData) - copy(dAtA[i:], m.ClientCertData) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientCertData))) - i-- - dAtA[i] = 0x19 - i-- - dAtA[i] = 0x8a - } - if m.EtcdOnSnapshotRestore != nil { - { - size, err := m.EtcdOnSnapshotRestore.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 
- i-- - dAtA[i] = 0xfa - } - if m.Etcd != nil { - { - size, err := m.Etcd.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i-- - dAtA[i] = 0xf2 - } - if len(m.EtcdClientEndpoint) > 0 { - i -= len(m.EtcdClientEndpoint) - copy(dAtA[i:], m.EtcdClientEndpoint) - i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdClientEndpoint))) - i-- - dAtA[i] = 0x12 - i-- - dAtA[i] = 0xea - } - if m.EtcdPeerProxy { - i-- - if m.EtcdPeerProxy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0xc - i-- - dAtA[i] = 0xd0 - } - if m.EtcdClientProxy { - i-- - if m.EtcdClientProxy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0xc - i-- - dAtA[i] = 0xc8 - } - if len(m.BaseDir) > 0 { - i -= len(m.BaseDir) - copy(dAtA[i:], m.BaseDir) - i = encodeVarintRpc(dAtA, i, uint64(len(m.BaseDir))) - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xaa - } - if len(m.FailpointHTTPAddr) > 0 { - i -= len(m.FailpointHTTPAddr) - copy(dAtA[i:], m.FailpointHTTPAddr) - i = encodeVarintRpc(dAtA, i, uint64(len(m.FailpointHTTPAddr))) - i-- - dAtA[i] = 0x62 - } - if len(m.AgentAddr) > 0 { - i -= len(m.AgentAddr) - copy(dAtA[i:], m.AgentAddr) - i = encodeVarintRpc(dAtA, i, uint64(len(m.AgentAddr))) - i-- - dAtA[i] = 0x5a - } - if len(m.EtcdExec) > 0 { - i -= len(m.EtcdExec) - copy(dAtA[i:], m.EtcdExec) - i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdExec))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Tester) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Tester) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Tester) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - 
i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.StressQPS != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StressQPS)) - i-- - dAtA[i] = 0x12 - i-- - dAtA[i] = 0xf0 - } - if m.StressClients != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StressClients)) - i-- - dAtA[i] = 0x12 - i-- - dAtA[i] = 0xe8 - } - if m.StressKeyTxnOps != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StressKeyTxnOps)) - i-- - dAtA[i] = 0xc - i-- - dAtA[i] = 0xe8 - } - if m.StressKeySuffixRangeTxn != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySuffixRangeTxn)) - i-- - dAtA[i] = 0xc - i-- - dAtA[i] = 0xe0 - } - if m.StressKeySuffixRange != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySuffixRange)) - i-- - dAtA[i] = 0xc - i-- - dAtA[i] = 0xd8 - } - if m.StressKeySizeLarge != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySizeLarge)) - i-- - dAtA[i] = 0xc - i-- - dAtA[i] = 0xd0 - } - if m.StressKeySize != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StressKeySize)) - i-- - dAtA[i] = 0xc - i-- - dAtA[i] = 0xc8 - } - if len(m.Checkers) > 0 { - for iNdEx := len(m.Checkers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Checkers[iNdEx]) - copy(dAtA[i:], m.Checkers[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Checkers[iNdEx]))) - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xb2 - } - } - if len(m.Stressers) > 0 { - for iNdEx := len(m.Stressers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Stressers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ExternalExecPath) > 0 { - i -= len(m.ExternalExecPath) - copy(dAtA[i:], m.ExternalExecPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ExternalExecPath))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xd2 - } - if len(m.RunnerExecPath) > 0 { - i -= len(m.RunnerExecPath) - copy(dAtA[i:], m.RunnerExecPath) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RunnerExecPath))) - i-- 
- dAtA[i] = 0x2 - i-- - dAtA[i] = 0xca - } - if len(m.FailpointCommands) > 0 { - for iNdEx := len(m.FailpointCommands) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.FailpointCommands[iNdEx]) - copy(dAtA[i:], m.FailpointCommands[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.FailpointCommands[iNdEx]))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - } - if len(m.Cases) > 0 { - for iNdEx := len(m.Cases) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cases[iNdEx]) - copy(dAtA[i:], m.Cases[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Cases[iNdEx]))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - } - if m.CaseShuffle { - i-- - if m.CaseShuffle { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x80 - } - if m.CaseDelayMs != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.CaseDelayMs)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf8 - } - if m.EnablePprof { - i-- - if m.EnablePprof { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb8 - } - if m.ExitOnCaseFail { - i-- - if m.ExitOnCaseFail { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.RoundLimit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RoundLimit)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if m.UpdatedDelayLatencyMs != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.UpdatedDelayLatencyMs)) - i-- - dAtA[i] = 0x68 - } - if m.DelayLatencyMsRv != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.DelayLatencyMsRv)) - i-- - dAtA[i] = 0x60 - } - if m.DelayLatencyMs != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.DelayLatencyMs)) - i-- - dAtA[i] = 0x58 - } - if len(m.Addr) > 0 { - i -= len(m.Addr) - copy(dAtA[i:], m.Addr) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Addr))) - i-- - dAtA[i] = 0x1a - } - if len(m.Network) > 0 { - i -= len(m.Network) - copy(dAtA[i:], m.Network) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Network))) - i-- - dAtA[i] = 0x12 - } - if len(m.DataDir) > 0 { - i -= len(m.DataDir) - 
copy(dAtA[i:], m.DataDir) - i = encodeVarintRpc(dAtA, i, uint64(len(m.DataDir))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Stresser) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Stresser) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stresser) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Weight != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Weight)))) - i-- - dAtA[i] = 0x11 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Etcd) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Etcd) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Etcd) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.SocketReusePort { - i-- - if m.SocketReusePort { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x5 - i-- - dAtA[i] = 0x90 - } - if m.SocketReuseAddress { - i-- - if m.SocketReuseAddress { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x5 - i-- - dAtA[i] = 0x88 - } - if len(m.LogLevel) > 0 { - i -= len(m.LogLevel) - copy(dAtA[i:], m.LogLevel) - i = encodeVarintRpc(dAtA, 
i, uint64(len(m.LogLevel))) - i-- - dAtA[i] = 0x4 - i-- - dAtA[i] = 0xca - } - if len(m.LogOutputs) > 0 { - for iNdEx := len(m.LogOutputs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LogOutputs[iNdEx]) - copy(dAtA[i:], m.LogOutputs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.LogOutputs[iNdEx]))) - i-- - dAtA[i] = 0x4 - i-- - dAtA[i] = 0xc2 - } - } - if len(m.Logger) > 0 { - i -= len(m.Logger) - copy(dAtA[i:], m.Logger) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Logger))) - i-- - dAtA[i] = 0x4 - i-- - dAtA[i] = 0xba - } - if m.InitialCorruptCheck { - i-- - if m.InitialCorruptCheck { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x4 - i-- - dAtA[i] = 0x80 - } - if m.PreVote { - i-- - if m.PreVote { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x3 - i-- - dAtA[i] = 0xf8 - } - if m.QuotaBackendBytes != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.QuotaBackendBytes)) - i-- - dAtA[i] = 0x3 - i-- - dAtA[i] = 0xa0 - } - if m.SnapshotCount != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotCount)) - i-- - dAtA[i] = 0x3 - i-- - dAtA[i] = 0x98 - } - if len(m.InitialClusterToken) > 0 { - i -= len(m.InitialClusterToken) - copy(dAtA[i:], m.InitialClusterToken) - i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterToken))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xda - } - if len(m.InitialClusterState) > 0 { - i -= len(m.InitialClusterState) - copy(dAtA[i:], m.InitialClusterState) - i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialClusterState))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xd2 - } - if len(m.InitialCluster) > 0 { - i -= len(m.InitialCluster) - copy(dAtA[i:], m.InitialCluster) - i = encodeVarintRpc(dAtA, i, uint64(len(m.InitialCluster))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xca - } - if len(m.PeerTrustedCAFile) > 0 { - i -= len(m.PeerTrustedCAFile) - copy(dAtA[i:], m.PeerTrustedCAFile) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerTrustedCAFile))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xaa - } - if len(m.PeerKeyFile) 
> 0 { - i -= len(m.PeerKeyFile) - copy(dAtA[i:], m.PeerKeyFile) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerKeyFile))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if len(m.PeerCertFile) > 0 { - i -= len(m.PeerCertFile) - copy(dAtA[i:], m.PeerCertFile) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerCertFile))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if m.PeerClientCertAuth { - i-- - if m.PeerClientCertAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x90 - } - if m.PeerAutoTLS { - i-- - if m.PeerAutoTLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x88 - } - if len(m.AdvertisePeerURLs) > 0 { - for iNdEx := len(m.AdvertisePeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdvertisePeerURLs[iNdEx]) - copy(dAtA[i:], m.AdvertisePeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.AdvertisePeerURLs[iNdEx]))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x82 - } - } - if len(m.ListenPeerURLs) > 0 { - for iNdEx := len(m.ListenPeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ListenPeerURLs[iNdEx]) - copy(dAtA[i:], m.ListenPeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ListenPeerURLs[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xfa - } - } - if len(m.ClientTrustedCAFile) > 0 { - i -= len(m.ClientTrustedCAFile) - copy(dAtA[i:], m.ClientTrustedCAFile) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientTrustedCAFile))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda - } - if len(m.ClientKeyFile) > 0 { - i -= len(m.ClientKeyFile) - copy(dAtA[i:], m.ClientKeyFile) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientKeyFile))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - } - if len(m.ClientCertFile) > 0 { - i -= len(m.ClientCertFile) - copy(dAtA[i:], m.ClientCertFile) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientCertFile))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca - } - if m.ClientCertAuth { - i-- - if m.ClientCertAuth { - dAtA[i] = 1 - } else { - 
dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - } - if m.ClientAutoTLS { - i-- - if m.ClientAutoTLS { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb8 - } - if len(m.AdvertiseClientURLs) > 0 { - for iNdEx := len(m.AdvertiseClientURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdvertiseClientURLs[iNdEx]) - copy(dAtA[i:], m.AdvertiseClientURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.AdvertiseClientURLs[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - } - if len(m.ListenClientURLs) > 0 { - for iNdEx := len(m.ListenClientURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ListenClientURLs[iNdEx]) - copy(dAtA[i:], m.ListenClientURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ListenClientURLs[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if m.ElectionTimeoutMs != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ElectionTimeoutMs)) - i-- - dAtA[i] = 0x60 - } - if m.HeartbeatIntervalMs != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.HeartbeatIntervalMs)) - i-- - dAtA[i] = 0x58 - } - if len(m.WALDir) > 0 { - i -= len(m.WALDir) - copy(dAtA[i:], m.WALDir) - i = encodeVarintRpc(dAtA, i, uint64(len(m.WALDir))) - i-- - dAtA[i] = 0x1a - } - if len(m.DataDir) > 0 { - i -= len(m.DataDir) - copy(dAtA[i:], m.DataDir) - i = encodeVarintRpc(dAtA, i, uint64(len(m.DataDir))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { - offset -= sovRpc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Request) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Operation != 0 { - n += 1 + sovRpc(uint64(m.Operation)) - } - if m.Member != nil { - l = m.Member.Size() - n 
+= 1 + l + sovRpc(uint64(l)) - } - if m.Tester != nil { - l = m.Tester.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.MemberName) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.MemberClientURLs) > 0 { - for _, s := range m.MemberClientURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - l = len(m.SnapshotPath) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.SnapshotFileSize) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.SnapshotTotalSize) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.SnapshotTotalKey != 0 { - n += 1 + sovRpc(uint64(m.SnapshotTotalKey)) - } - if m.SnapshotHash != 0 { - n += 1 + sovRpc(uint64(m.SnapshotHash)) - } - if m.SnapshotRevision != 0 { - n += 1 + sovRpc(uint64(m.SnapshotRevision)) - } - l = len(m.Took) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Response) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Success { - n += 2 - } - l = len(m.Status) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Member != nil { - l = m.Member.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.SnapshotInfo != nil { - l = m.SnapshotInfo.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Member) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.EtcdExec) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.AgentAddr) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.FailpointHTTPAddr) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.BaseDir) - if l > 0 { - n += 2 + l + 
sovRpc(uint64(l)) - } - if m.EtcdClientProxy { - n += 3 - } - if m.EtcdPeerProxy { - n += 3 - } - l = len(m.EtcdClientEndpoint) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if m.Etcd != nil { - l = m.Etcd.Size() - n += 2 + l + sovRpc(uint64(l)) - } - if m.EtcdOnSnapshotRestore != nil { - l = m.EtcdOnSnapshotRestore.Size() - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientCertData) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientCertPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientKeyData) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientKeyPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientTrustedCAData) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientTrustedCAPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerCertData) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerCertPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerKeyData) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerKeyPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerTrustedCAData) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerTrustedCAPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.SnapshotPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if m.SnapshotInfo != nil { - l = m.SnapshotInfo.Size() - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.Failpoints) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Tester) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DataDir) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Network) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Addr) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.DelayLatencyMs != 0 { - n += 1 + 
sovRpc(uint64(m.DelayLatencyMs)) - } - if m.DelayLatencyMsRv != 0 { - n += 1 + sovRpc(uint64(m.DelayLatencyMsRv)) - } - if m.UpdatedDelayLatencyMs != 0 { - n += 1 + sovRpc(uint64(m.UpdatedDelayLatencyMs)) - } - if m.RoundLimit != 0 { - n += 2 + sovRpc(uint64(m.RoundLimit)) - } - if m.ExitOnCaseFail { - n += 3 - } - if m.EnablePprof { - n += 3 - } - if m.CaseDelayMs != 0 { - n += 2 + sovRpc(uint64(m.CaseDelayMs)) - } - if m.CaseShuffle { - n += 3 - } - if len(m.Cases) > 0 { - for _, s := range m.Cases { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - if len(m.FailpointCommands) > 0 { - for _, s := range m.FailpointCommands { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - l = len(m.RunnerExecPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ExternalExecPath) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if len(m.Stressers) > 0 { - for _, e := range m.Stressers { - l = e.Size() - n += 2 + l + sovRpc(uint64(l)) - } - } - if len(m.Checkers) > 0 { - for _, s := range m.Checkers { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - if m.StressKeySize != 0 { - n += 2 + sovRpc(uint64(m.StressKeySize)) - } - if m.StressKeySizeLarge != 0 { - n += 2 + sovRpc(uint64(m.StressKeySizeLarge)) - } - if m.StressKeySuffixRange != 0 { - n += 2 + sovRpc(uint64(m.StressKeySuffixRange)) - } - if m.StressKeySuffixRangeTxn != 0 { - n += 2 + sovRpc(uint64(m.StressKeySuffixRangeTxn)) - } - if m.StressKeyTxnOps != 0 { - n += 2 + sovRpc(uint64(m.StressKeyTxnOps)) - } - if m.StressClients != 0 { - n += 2 + sovRpc(uint64(m.StressClients)) - } - if m.StressQPS != 0 { - n += 2 + sovRpc(uint64(m.StressQPS)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Stresser) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Weight != 0 { - n += 9 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return 
n -} - -func (m *Etcd) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.DataDir) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.WALDir) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.HeartbeatIntervalMs != 0 { - n += 1 + sovRpc(uint64(m.HeartbeatIntervalMs)) - } - if m.ElectionTimeoutMs != 0 { - n += 1 + sovRpc(uint64(m.ElectionTimeoutMs)) - } - if len(m.ListenClientURLs) > 0 { - for _, s := range m.ListenClientURLs { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - if len(m.AdvertiseClientURLs) > 0 { - for _, s := range m.AdvertiseClientURLs { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - if m.ClientAutoTLS { - n += 3 - } - if m.ClientCertAuth { - n += 3 - } - l = len(m.ClientCertFile) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientKeyFile) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.ClientTrustedCAFile) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if len(m.ListenPeerURLs) > 0 { - for _, s := range m.ListenPeerURLs { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - if len(m.AdvertisePeerURLs) > 0 { - for _, s := range m.AdvertisePeerURLs { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - if m.PeerAutoTLS { - n += 3 - } - if m.PeerClientCertAuth { - n += 3 - } - l = len(m.PeerCertFile) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerKeyFile) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.PeerTrustedCAFile) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.InitialCluster) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.InitialClusterState) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - l = len(m.InitialClusterToken) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if m.SnapshotCount != 0 { - n += 2 + sovRpc(uint64(m.SnapshotCount)) - } - if m.QuotaBackendBytes != 0 { - n += 2 + 
sovRpc(uint64(m.QuotaBackendBytes)) - } - if m.PreVote { - n += 3 - } - if m.InitialCorruptCheck { - n += 3 - } - l = len(m.Logger) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if len(m.LogOutputs) > 0 { - for _, s := range m.LogOutputs { - l = len(s) - n += 2 + l + sovRpc(uint64(l)) - } - } - l = len(m.LogLevel) - if l > 0 { - n += 2 + l + sovRpc(uint64(l)) - } - if m.SocketReuseAddress { - n += 3 - } - if m.SocketReusePort { - n += 3 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRpc(x uint64) (n int) { - return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Request) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Request: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) - } - m.Operation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Operation |= Operation(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Member == nil { - m.Member = &Member{} - } - if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tester", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tester == nil { - m.Tester = &Tester{} - } - if err := m.Tester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MemberName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberClientURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MemberClientURLs = append(m.MemberClientURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SnapshotPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotFileSize", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SnapshotFileSize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTotalSize", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SnapshotTotalSize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTotalKey", wireType) - } - m.SnapshotTotalKey = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SnapshotTotalKey |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotHash", wireType) - } - m.SnapshotHash = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SnapshotHash |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotRevision", wireType) - } - m.SnapshotRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SnapshotRevision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Took", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Took = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Response) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Response: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Success = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Member == nil { - m.Member = &Member{} - } - if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SnapshotInfo == nil { - m.SnapshotInfo = &SnapshotInfo{} - } - if err := m.SnapshotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Member) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Member: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EtcdExec", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EtcdExec = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentAddr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex 
> l { - return io.ErrUnexpectedEOF - } - m.AgentAddr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailpointHTTPAddr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FailpointHTTPAddr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 101: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BaseDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BaseDir = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 201: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientProxy", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EtcdClientProxy = bool(v != 0) - case 202: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EtcdPeerProxy", wireType) - } - 
var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EtcdPeerProxy = bool(v != 0) - case 301: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientEndpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EtcdClientEndpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 302: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Etcd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Etcd == nil { - m.Etcd = &Etcd{} - } - if err := m.Etcd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 303: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EtcdOnSnapshotRestore", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EtcdOnSnapshotRestore == nil { - m.EtcdOnSnapshotRestore = &Etcd{} - } - if err := m.EtcdOnSnapshotRestore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 401: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCertData", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCertData = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 402: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCertPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCertPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 403: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientKeyData", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientKeyData = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 404: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientKeyPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientKeyPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 405: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientTrustedCAData", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientTrustedCAData = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 406: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ClientTrustedCAPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientTrustedCAPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 501: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerCertData", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerCertData = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 502: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerCertPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerCertPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 503: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerKeyData", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerKeyData = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 504: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerKeyPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerKeyPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 505: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerTrustedCAData", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerTrustedCAData = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 506: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerTrustedCAPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerTrustedCAPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 601: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SnapshotPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 602: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.SnapshotInfo == nil { - m.SnapshotInfo = &SnapshotInfo{} - } - if err := m.SnapshotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 701: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Failpoints", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Failpoints = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Tester) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Tester: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Tester: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataDir = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l 
{ - return io.ErrUnexpectedEOF - } - m.Network = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DelayLatencyMs", wireType) - } - m.DelayLatencyMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DelayLatencyMs |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DelayLatencyMsRv", wireType) - } - m.DelayLatencyMsRv = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DelayLatencyMsRv |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedDelayLatencyMs", wireType) - } - m.UpdatedDelayLatencyMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedDelayLatencyMs |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 21: - if wireType != 0 { 
- return fmt.Errorf("proto: wrong wireType = %d for field RoundLimit", wireType) - } - m.RoundLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RoundLimit |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitOnCaseFail", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExitOnCaseFail = bool(v != 0) - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnablePprof", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EnablePprof = bool(v != 0) - case 31: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CaseDelayMs", wireType) - } - m.CaseDelayMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CaseDelayMs |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 32: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CaseShuffle", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CaseShuffle = bool(v != 0) - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Cases", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cases = append(m.Cases, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailpointCommands", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FailpointCommands = append(m.FailpointCommands, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 41: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunnerExecPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RunnerExecPath = string(dAtA[iNdEx:postIndex]) - iNdEx 
= postIndex - case 42: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalExecPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalExecPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 101: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stressers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stressers = append(m.Stressers, &Stresser{}) - if err := m.Stressers[len(m.Stressers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 102: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkers = append(m.Checkers, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 201: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StressKeySize", wireType) - } - m.StressKeySize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StressKeySize |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 202: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StressKeySizeLarge", wireType) - } - m.StressKeySizeLarge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StressKeySizeLarge |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 203: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StressKeySuffixRange", wireType) - } - m.StressKeySuffixRange = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StressKeySuffixRange |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 204: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StressKeySuffixRangeTxn", wireType) - } - m.StressKeySuffixRangeTxn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StressKeySuffixRangeTxn |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 205: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StressKeyTxnOps", wireType) - } - m.StressKeyTxnOps = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StressKeyTxnOps |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 301: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StressClients", wireType) - } - m.StressClients = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StressClients |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 302: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StressQPS", wireType) - } - m.StressQPS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StressQPS |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Stresser) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Stresser: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Stresser: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Weight = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Etcd) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Etcd: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Etcd: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataDir = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WALDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WALDir = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatIntervalMs", wireType) - } - m.HeartbeatIntervalMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HeartbeatIntervalMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ElectionTimeoutMs", wireType) - } - m.ElectionTimeoutMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ElectionTimeoutMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListenClientURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ListenClientURLs = append(m.ListenClientURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdvertiseClientURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdvertiseClientURLs = append(m.AdvertiseClientURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientAutoTLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClientAutoTLS = bool(v != 0) - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCertAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ClientCertAuth = bool(v != 0) - case 25: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCertFile", wireType) - } - var stringLen 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCertFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 26: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientKeyFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientKeyFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 27: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientTrustedCAFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientTrustedCAFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 31: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ListenPeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ListenPeerURLs = append(m.ListenPeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 32: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdvertisePeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdvertisePeerURLs = append(m.AdvertisePeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 33: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerAutoTLS", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PeerAutoTLS = bool(v != 0) - case 34: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerClientCertAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PeerClientCertAuth = bool(v != 0) - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerCertFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerCertFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerKeyFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerKeyFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 37: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerTrustedCAFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerTrustedCAFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 41: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialCluster", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InitialCluster = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 42: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterState", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InitialClusterState = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 43: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialClusterToken", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.InitialClusterToken = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 51: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType) - } - m.SnapshotCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SnapshotCount |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 52: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QuotaBackendBytes", wireType) - } - m.QuotaBackendBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.QuotaBackendBytes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 63: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PreVote", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PreVote = bool(v != 0) - case 64: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialCorruptCheck", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.InitialCorruptCheck = bool(v != 0) - case 71: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Logger", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Logger = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 72: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogOutputs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LogOutputs = append(m.LogOutputs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 73: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogLevel", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LogLevel = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 81: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field SocketReuseAddress", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SocketReuseAddress = bool(v != 0) - case 82: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SocketReusePort", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SocketReusePort = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRpc - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupRpc - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthRpc - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group") -) diff --git a/tests/functional/rpcpb/rpc.proto b/tests/functional/rpcpb/rpc.proto deleted file mode 100644 index 1ff2ed62f84..00000000000 --- a/tests/functional/rpcpb/rpc.proto +++ /dev/null @@ -1,635 +0,0 @@ -syntax = "proto3"; -package rpcpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option 
(gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message Request { - Operation Operation = 1; - // Member contains the same Member object from tester configuration. - Member Member = 2; - // Tester contains tester configuration. - Tester Tester = 3; -} - -// SnapshotInfo contains SAVE_SNAPSHOT request results. -message SnapshotInfo { - string MemberName = 1; - repeated string MemberClientURLs = 2; - string SnapshotPath = 3; - string SnapshotFileSize = 4; - string SnapshotTotalSize = 5; - int64 SnapshotTotalKey = 6; - int64 SnapshotHash = 7; - int64 SnapshotRevision = 8; - string Took = 9; - string Version = 10; -} - -message Response { - bool Success = 1; - string Status = 2; - - // Member contains the same Member object from tester request. - Member Member = 3; - - // SnapshotInfo contains SAVE_SNAPSHOT request results. - SnapshotInfo SnapshotInfo = 4; -} - -service Transport { - rpc Transport(stream Request) returns (stream Response) {} -} - -message Member { - // EtcdExec is the executable etcd binary path in agent server. - string EtcdExec = 1 [(gogoproto.moretags) = "yaml:\"etcd-exec\""]; - - // AgentAddr is the agent HTTP server address. - string AgentAddr = 11 [(gogoproto.moretags) = "yaml:\"agent-addr\""]; - // FailpointHTTPAddr is the agent's failpoints HTTP server address. - string FailpointHTTPAddr = 12 [(gogoproto.moretags) = "yaml:\"failpoint-http-addr\""]; - - // BaseDir is the base directory where all logs and etcd data are stored. - string BaseDir = 101 [(gogoproto.moretags) = "yaml:\"base-dir\""]; - - // EtcdClientProxy is true when client traffic needs to be proxied. - // If true, listen client URL port must be different than advertise client URL port. - bool EtcdClientProxy = 201 [(gogoproto.moretags) = "yaml:\"etcd-client-proxy\""]; - // EtcdPeerProxy is true when peer traffic needs to be proxied. 
- // If true, listen peer URL port must be different than advertise peer URL port. - bool EtcdPeerProxy = 202 [(gogoproto.moretags) = "yaml:\"etcd-peer-proxy\""]; - - // EtcdClientEndpoint is the etcd client endpoint. - string EtcdClientEndpoint = 301 [(gogoproto.moretags) = "yaml:\"etcd-client-endpoint\""]; - // Etcd defines etcd binary configuration flags. - Etcd Etcd = 302 [(gogoproto.moretags) = "yaml:\"etcd\""]; - // EtcdOnSnapshotRestore defines one-time use configuration during etcd - // snapshot recovery process. - Etcd EtcdOnSnapshotRestore = 303; - - // ClientCertData contains cert file contents from this member's etcd server. - string ClientCertData = 401 [(gogoproto.moretags) = "yaml:\"client-cert-data\""]; - string ClientCertPath = 402 [(gogoproto.moretags) = "yaml:\"client-cert-path\""]; - // ClientKeyData contains key file contents from this member's etcd server. - string ClientKeyData = 403 [(gogoproto.moretags) = "yaml:\"client-key-data\""]; - string ClientKeyPath = 404 [(gogoproto.moretags) = "yaml:\"client-key-path\""]; - // ClientTrustedCAData contains trusted CA file contents from this member's etcd server. - string ClientTrustedCAData = 405 [(gogoproto.moretags) = "yaml:\"client-trusted-ca-data\""]; - string ClientTrustedCAPath = 406 [(gogoproto.moretags) = "yaml:\"client-trusted-ca-path\""]; - - // PeerCertData contains cert file contents from this member's etcd server. - string PeerCertData = 501 [(gogoproto.moretags) = "yaml:\"peer-cert-data\""]; - string PeerCertPath = 502 [(gogoproto.moretags) = "yaml:\"peer-cert-path\""]; - // PeerKeyData contains key file contents from this member's etcd server. - string PeerKeyData = 503 [(gogoproto.moretags) = "yaml:\"peer-key-data\""]; - string PeerKeyPath = 504 [(gogoproto.moretags) = "yaml:\"peer-key-path\""]; - // PeerTrustedCAData contains trusted CA file contents from this member's etcd server. 
- string PeerTrustedCAData = 505 [(gogoproto.moretags) = "yaml:\"peer-trusted-ca-data\""]; - string PeerTrustedCAPath = 506 [(gogoproto.moretags) = "yaml:\"peer-trusted-ca-path\""]; - - // SnapshotPath is the snapshot file path to store or restore from. - string SnapshotPath = 601 [(gogoproto.moretags) = "yaml:\"snapshot-path\""]; - // SnapshotInfo contains last SAVE_SNAPSHOT request results. - SnapshotInfo SnapshotInfo = 602; - - // Failpoints is the GOFAIL_FAILPOINTS environment variable value to use when starting etcd. - string Failpoints = 701 [(gogoproto.moretags) = "yaml:\"failpoints\""]; -} - -message Tester { - string DataDir = 1 [(gogoproto.moretags) = "yaml:\"data-dir\""]; - string Network = 2 [(gogoproto.moretags) = "yaml:\"network\""]; - string Addr = 3 [(gogoproto.moretags) = "yaml:\"addr\""]; - - // DelayLatencyMsRv is the delay latency in milliseconds, - // to inject to simulated slow network. - uint32 DelayLatencyMs = 11 [(gogoproto.moretags) = "yaml:\"delay-latency-ms\""]; - // DelayLatencyMsRv is the delay latency random variable in milliseconds. - uint32 DelayLatencyMsRv = 12 [(gogoproto.moretags) = "yaml:\"delay-latency-ms-rv\""]; - // UpdatedDelayLatencyMs is the update delay latency in milliseconds, - // to inject to simulated slow network. It's the final latency to apply, - // in case the latency numbers are randomly generated from given delay latency field. - uint32 UpdatedDelayLatencyMs = 13 [(gogoproto.moretags) = "yaml:\"updated-delay-latency-ms\""]; - - // RoundLimit is the limit of rounds to run failure set (-1 to run without limits). - int32 RoundLimit = 21 [(gogoproto.moretags) = "yaml:\"round-limit\""]; - // ExitOnCaseFail is true, then exit tester on first failure. - bool ExitOnCaseFail = 22 [(gogoproto.moretags) = "yaml:\"exit-on-failure\""]; - // EnablePprof is true to enable profiler. - bool EnablePprof = 23 [(gogoproto.moretags) = "yaml:\"enable-pprof\""]; - - // CaseDelayMs is the delay duration after failure is injected. 
- // Useful when triggering snapshot or no-op failure cases. - uint32 CaseDelayMs = 31 [(gogoproto.moretags) = "yaml:\"case-delay-ms\""]; - // CaseShuffle is true to randomize failure injecting order. - bool CaseShuffle = 32 [(gogoproto.moretags) = "yaml:\"case-shuffle\""]; - // Cases is the selected test cases to schedule. - // If empty, run all failure cases. - repeated string Cases = 33 [(gogoproto.moretags) = "yaml:\"cases\""]; - // FailpointCommands is the list of "gofail" commands - // (e.g. panic("etcd-tester"),1*sleep(1000). - repeated string FailpointCommands = 34 [(gogoproto.moretags) = "yaml:\"failpoint-commands\""]; - - // RunnerExecPath is a path of etcd-runner binary. - string RunnerExecPath = 41 [(gogoproto.moretags) = "yaml:\"runner-exec-path\""]; - // ExternalExecPath is a path of script for enabling/disabling an external fault injector. - string ExternalExecPath = 42 [(gogoproto.moretags) = "yaml:\"external-exec-path\""]; - - // Stressers is the list of stresser types: - // KV, LEASE, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER. - repeated Stresser Stressers = 101 [(gogoproto.moretags) = "yaml:\"stressers\""]; - // Checkers is the list of consistency checker types: - // KV_HASH, LEASE_EXPIRE, NO_CHECK, RUNNER. - // Leave empty to skip consistency checks. - repeated string Checkers = 102 [(gogoproto.moretags) = "yaml:\"checkers\""]; - - // StressKeySize is the size of each small key written into etcd. - int32 StressKeySize = 201 [(gogoproto.moretags) = "yaml:\"stress-key-size\""]; - // StressKeySizeLarge is the size of each large key written into etcd. - int32 StressKeySizeLarge = 202 [(gogoproto.moretags) = "yaml:\"stress-key-size-large\""]; - // StressKeySuffixRange is the count of key range written into etcd. - // Stress keys are created with "fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)". 
- int32 StressKeySuffixRange = 203 [(gogoproto.moretags) = "yaml:\"stress-key-suffix-range\""]; - // StressKeySuffixRangeTxn is the count of key range written into etcd txn (max 100). - // Stress keys are created with "fmt.Sprintf("/k%03d", i)". - int32 StressKeySuffixRangeTxn = 204 [(gogoproto.moretags) = "yaml:\"stress-key-suffix-range-txn\""]; - // StressKeyTxnOps is the number of operations per a transaction (max 64). - int32 StressKeyTxnOps = 205 [(gogoproto.moretags) = "yaml:\"stress-key-txn-ops\""]; - - // StressClients is the number of concurrent stressing clients - // with "one" shared TCP connection. - int32 StressClients = 301 [(gogoproto.moretags) = "yaml:\"stress-clients\""]; - // StressQPS is the maximum number of stresser requests per second. - int32 StressQPS = 302 [(gogoproto.moretags) = "yaml:\"stress-qps\""]; -} - -enum StresserType { - KV_WRITE_SMALL = 0; - KV_WRITE_LARGE = 1; - KV_READ_ONE_KEY = 2; - KV_READ_RANGE = 3; - KV_DELETE_ONE_KEY = 4; - KV_DELETE_RANGE = 5; - KV_TXN_WRITE_DELETE = 6; - - LEASE = 10; - - ELECTION_RUNNER = 20; - WATCH_RUNNER = 31; - LOCK_RACER_RUNNER = 41; - LEASE_RUNNER = 51; -} - -message Stresser { - string Type = 1 [(gogoproto.moretags) = "yaml:\"type\""]; - double Weight = 2 [(gogoproto.moretags) = "yaml:\"weight\""]; -} - -enum Checker { - KV_HASH = 0; - LEASE_EXPIRE = 1; - RUNNER = 2; - NO_CHECK = 3; - SHORT_TTL_LEASE_EXPIRE = 4; -} - -message Etcd { - string Name = 1 [(gogoproto.moretags) = "yaml:\"name\""]; - string DataDir = 2 [(gogoproto.moretags) = "yaml:\"data-dir\""]; - string WALDir = 3 [(gogoproto.moretags) = "yaml:\"wal-dir\""]; - - // HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval. - // Default value is 100, which is 100ms. - int64 HeartbeatIntervalMs = 11 [(gogoproto.moretags) = "yaml:\"heartbeat-interval\""]; - // ElectionTimeoutMs is the time (in milliseconds) for an election to timeout. - // Default value is 1000, which is 1s. 
- int64 ElectionTimeoutMs = 12 [(gogoproto.moretags) = "yaml:\"election-timeout\""]; - - repeated string ListenClientURLs = 21 [(gogoproto.moretags) = "yaml:\"listen-client-urls\""]; - repeated string AdvertiseClientURLs = 22 [(gogoproto.moretags) = "yaml:\"advertise-client-urls\""]; - bool ClientAutoTLS = 23 [(gogoproto.moretags) = "yaml:\"auto-tls\""]; - bool ClientCertAuth = 24 [(gogoproto.moretags) = "yaml:\"client-cert-auth\""]; - string ClientCertFile = 25 [(gogoproto.moretags) = "yaml:\"cert-file\""]; - string ClientKeyFile = 26 [(gogoproto.moretags) = "yaml:\"key-file\""]; - string ClientTrustedCAFile = 27 [(gogoproto.moretags) = "yaml:\"trusted-ca-file\""]; - - repeated string ListenPeerURLs = 31 [(gogoproto.moretags) = "yaml:\"listen-peer-urls\""]; - repeated string AdvertisePeerURLs = 32 [(gogoproto.moretags) = "yaml:\"initial-advertise-peer-urls\""]; - bool PeerAutoTLS = 33 [(gogoproto.moretags) = "yaml:\"peer-auto-tls\""]; - bool PeerClientCertAuth = 34 [(gogoproto.moretags) = "yaml:\"peer-client-cert-auth\""]; - string PeerCertFile = 35 [(gogoproto.moretags) = "yaml:\"peer-cert-file\""]; - string PeerKeyFile = 36 [(gogoproto.moretags) = "yaml:\"peer-key-file\""]; - string PeerTrustedCAFile = 37 [(gogoproto.moretags) = "yaml:\"peer-trusted-ca-file\""]; - - string InitialCluster = 41 [(gogoproto.moretags) = "yaml:\"initial-cluster\""]; - string InitialClusterState = 42 [(gogoproto.moretags) = "yaml:\"initial-cluster-state\""]; - string InitialClusterToken = 43 [(gogoproto.moretags) = "yaml:\"initial-cluster-token\""]; - - int64 SnapshotCount = 51 [(gogoproto.moretags) = "yaml:\"snapshot-count\""]; - int64 QuotaBackendBytes = 52 [(gogoproto.moretags) = "yaml:\"quota-backend-bytes\""]; - - bool PreVote = 63 [(gogoproto.moretags) = "yaml:\"pre-vote\""]; - bool InitialCorruptCheck = 64 [(gogoproto.moretags) = "yaml:\"initial-corrupt-check\""]; - - string Logger = 71 [(gogoproto.moretags) = "yaml:\"logger\""]; - // LogOutputs is the log file to store current 
etcd server logs. - repeated string LogOutputs = 72 [(gogoproto.moretags) = "yaml:\"log-outputs\""]; - string LogLevel = 73 [(gogoproto.moretags) = "yaml:\"log-level\""]; - - bool SocketReuseAddress = 81 [(gogoproto.moretags) = "yaml:\"socket-reuse-address\""]; - bool SocketReusePort = 82 [(gogoproto.moretags) = "yaml:\"socket-reuse-port\""]; -} - -enum Operation { - // NOT_STARTED is the agent status before etcd first start. - NOT_STARTED = 0; - - // INITIAL_START_ETCD is only called to start etcd, the very first time. - INITIAL_START_ETCD = 10; - // RESTART_ETCD is sent to restart killed etcd. - RESTART_ETCD = 11; - - // SIGTERM_ETCD pauses etcd process while keeping data directories - // and previous etcd configurations. - SIGTERM_ETCD = 20; - // SIGQUIT_ETCD_AND_REMOVE_DATA kills etcd process and removes all data - // directories to simulate destroying the whole machine. - SIGQUIT_ETCD_AND_REMOVE_DATA = 21; - - // SAVE_SNAPSHOT is sent to trigger local member to download its snapshot - // onto its local disk with the specified path from tester. - SAVE_SNAPSHOT = 30; - // RESTORE_RESTART_FROM_SNAPSHOT is sent to trigger local member to - // restore a cluster from existing snapshot from disk, and restart - // an etcd instance from recovered data. - RESTORE_RESTART_FROM_SNAPSHOT = 31; - // RESTART_FROM_SNAPSHOT is sent to trigger local member to restart - // and join an existing cluster that has been recovered from a snapshot. - // Local member joins this cluster with fresh data. - RESTART_FROM_SNAPSHOT = 32; - - // SIGQUIT_ETCD_AND_ARCHIVE_DATA is sent when consistency check failed, - // thus need to archive etcd data directories. - SIGQUIT_ETCD_AND_ARCHIVE_DATA = 40; - - // BLACKHOLE_PEER_PORT_TX_RX drops all outgoing/incoming packets from/to - // the peer port on target member's peer port. - BLACKHOLE_PEER_PORT_TX_RX = 100; - // UNBLACKHOLE_PEER_PORT_TX_RX removes outgoing/incoming packet dropping. 
- UNBLACKHOLE_PEER_PORT_TX_RX = 101; - - // DELAY_PEER_PORT_TX_RX delays all outgoing/incoming packets from/to - // the peer port on target member's peer port. - DELAY_PEER_PORT_TX_RX = 200; - // UNDELAY_PEER_PORT_TX_RX removes all outgoing/incoming delays. - UNDELAY_PEER_PORT_TX_RX = 201; -} - -// Case defines various system faults or test case in distributed systems, -// in order to verify correct behavior of etcd servers and clients. -enum Case { - // SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader) - // but does not delete its data directories on disk for next restart. - // It waits "delay-ms" before recovering this failure. - // The expected behavior is that the follower comes back online - // and rejoins the cluster, and then each member continues to process - // client requests ('Put' request that requires Raft consensus). - SIGTERM_ONE_FOLLOWER = 0; - - // SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen - // follower but does not delete its data directories on disk for next - // restart. And waits until most up-to-date node (leader) applies the - // snapshot count of entries since the stop operation. - // The expected behavior is that the follower comes back online and - // rejoins the cluster, and then active leader sends snapshot - // to the follower to force it to follow the leader's log. - // As always, after recovery, each member must be able to process - // client requests. - SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 1; - - // SIGTERM_LEADER stops the active leader node but does not delete its - // data directories on disk for next restart. Then it waits "delay-ms" - // before recovering this failure, in order to trigger election timeouts. - // The expected behavior is that a new leader gets elected, and the - // old leader comes back online and rejoins the cluster as a follower. - // As always, after recovery, each member must be able to process - // client requests. 
- SIGTERM_LEADER = 2; - - // SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node - // but does not delete its data directories on disk for next restart. - // And waits until most up-to-date node ("new" leader) applies the - // snapshot count of entries since the stop operation. - // The expected behavior is that cluster elects a new leader, and the - // old leader comes back online and rejoins the cluster as a follower. - // And it receives the snapshot from the new leader to overwrite its - // store. As always, after recovery, each member must be able to - // process client requests. - SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT = 3; - - // SIGTERM_QUORUM stops majority number of nodes to make the whole cluster - // inoperable but does not delete data directories on stopped nodes - // for next restart. And it waits "delay-ms" before recovering failure. - // The expected behavior is that nodes come back online, thus cluster - // comes back operative as well. As always, after recovery, each member - // must be able to process client requests. - SIGTERM_QUORUM = 4; - - // SIGTERM_ALL stops the whole cluster but does not delete data directories - // on disk for next restart. And it waits "delay-ms" before recovering - // this failure. - // The expected behavior is that nodes come back online, thus cluster - // comes back operative as well. As always, after recovery, each member - // must be able to process client requests. - SIGTERM_ALL = 5; - - // SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower - // (non-leader), deletes its data directories on disk, and removes - // this member from cluster (membership reconfiguration). On recovery, - // tester adds a new member, and this member joins the existing cluster - // with fresh data. It waits "delay-ms" before recovering this - // failure. This simulates destroying one follower machine, where operator - // needs to add a new member from a fresh machine. 
- // The expected behavior is that a new member joins the existing cluster, - // and then each member continues to process client requests. - SIGQUIT_AND_REMOVE_ONE_FOLLOWER = 10; - - // SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly - // chosen follower, deletes its data directories on disk, and removes - // this member from cluster (membership reconfiguration). On recovery, - // tester adds a new member, and this member joins the existing cluster - // restart. On member remove, cluster waits until most up-to-date node - // (leader) applies the snapshot count of entries since the stop operation. - // This simulates destroying a leader machine, where operator needs to add - // a new member from a fresh machine. - // The expected behavior is that a new member joins the existing cluster, - // and receives a snapshot from the active leader. As always, after - // recovery, each member must be able to process client requests. - SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 11; - - // SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its - // data directories on disk, and removes this member from cluster. - // On recovery, tester adds a new member, and this member joins the - // existing cluster with fresh data. It waits "delay-ms" before - // recovering this failure. This simulates destroying a leader machine, - // where operator needs to add a new member from a fresh machine. - // The expected behavior is that a new member joins the existing cluster, - // and then each member continues to process client requests. - SIGQUIT_AND_REMOVE_LEADER = 12; - - // SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader, - // deletes its data directories on disk, and removes this member from - // cluster (membership reconfiguration). On recovery, tester adds a new - // member, and this member joins the existing cluster restart. 
On member - // remove, cluster waits until most up-to-date node (new leader) applies - // the snapshot count of entries since the stop operation. This simulates - // destroying a leader machine, where operator needs to add a new member - // from a fresh machine. - // The expected behavior is that on member remove, cluster elects a new - // leader, and a new member joins the existing cluster and receives a - // snapshot from the newly elected leader. As always, after recovery, each - // member must be able to process client requests. - SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT = 13; - - // SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first - // stops majority number of nodes, deletes data directories on those quorum - // nodes, to make the whole cluster inoperable. Now that quorum and their - // data are totally destroyed, cluster cannot even remove unavailable nodes - // (e.g. 2 out of 3 are lost, so no leader can be elected). - // Let's assume 3-node cluster of node A, B, and C. One day, node A and B - // are destroyed and all their data are gone. The only viable solution is - // to recover from C's latest snapshot. - // - // To simulate: - // 1. Assume node C is the current leader with most up-to-date data. - // 2. Download snapshot from node C, before destroying node A and B. - // 3. Destroy node A and B, and make the whole cluster inoperable. - // 4. Now node C cannot operate either. - // 5. SIGTERM node C and remove its data directories. - // 6. Restore a new seed member from node C's latest snapshot file. - // 7. Add another member to establish 2-node cluster. - // 8. Add another member to establish 3-node cluster. - // 9. Add more if any. - // - // The expected behavior is that etcd successfully recovers from such - // disastrous situation as only 1-node survives out of 3-node cluster, - // new members joins the existing cluster, and previous data from snapshot - // are still preserved after recovery process. 
As always, after recovery, - // each member must be able to process client requests. - SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH = 14; - - // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming - // packets from/to the peer port on a randomly chosen follower - // (non-leader), and waits for "delay-ms" until recovery. - // The expected behavior is that once dropping operation is undone, - // each member must be able to process client requests. - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER = 100; - - // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops - // all outgoing/incoming packets from/to the peer port on a randomly - // chosen follower (non-leader), and waits for most up-to-date node - // (leader) applies the snapshot count of entries since the blackhole - // operation. - // The expected behavior is that once packet drop operation is undone, - // the slow follower tries to catch up, possibly receiving the snapshot - // from the active leader. As always, after recovery, each member must - // be able to process client requests. - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 101; - - // BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets - // from/to the peer port on the active leader (isolated), and waits for - // "delay-ms" until recovery, in order to trigger election timeout. - // The expected behavior is that after election timeout, a new leader gets - // elected, and once dropping operation is undone, the old leader comes - // back and rejoins the cluster as a follower. As always, after recovery, - // each member must be able to process client requests. - BLACKHOLE_PEER_PORT_TX_RX_LEADER = 102; - - // BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all - // outgoing/incoming packets from/to the peer port on the active leader, - // and waits for most up-to-date node (leader) applies the snapshot - // count of entries since the blackhole operation. 
- // The expected behavior is that cluster elects a new leader, and once - // dropping operation is undone, the old leader comes back and rejoins - // the cluster as a follower. The slow follower tries to catch up, likely - // receiving the snapshot from the new active leader. As always, after - // recovery, each member must be able to process client requests. - BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 103; - - // BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets - // from/to the peer ports on majority nodes of cluster, thus losing its - // leader and cluster being inoperable. And it waits for "delay-ms" - // until recovery. - // The expected behavior is that once packet drop operation is undone, - // nodes come back online, thus cluster comes back operative. As always, - // after recovery, each member must be able to process client requests. - BLACKHOLE_PEER_PORT_TX_RX_QUORUM = 104; - - // BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets - // from/to the peer ports on all nodes, thus making cluster totally - // inoperable. It waits for "delay-ms" until recovery. - // The expected behavior is that once packet drop operation is undone, - // nodes come back online, thus cluster comes back operative. As always, - // after recovery, each member must be able to process client requests. - BLACKHOLE_PEER_PORT_TX_RX_ALL = 105; - - // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets - // from/to the peer port on a randomly chosen follower (non-leader). - // It waits for "delay-ms" until recovery. - // The expected behavior is that once packet delay operation is undone, - // the follower comes back and tries to catch up with latest changes from - // cluster. And as always, after recovery, each member must be able to - // process client requests. 
- DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 200; - - // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming - // packets from/to the peer port on a randomly chosen follower - // (non-leader) with a randomized time duration (thus isolated). It - // waits for "delay-ms" until recovery. - // The expected behavior is that once packet delay operation is undone, - // each member must be able to process client requests. - RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER = 201; - - // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on a randomly chosen - // follower (non-leader), and waits for most up-to-date node (leader) - // applies the snapshot count of entries since the delay operation. - // The expected behavior is that the delayed follower gets isolated - // and behind the current active leader, and once delay operation is undone, - // the slow follower comes back and catches up possibly receiving snapshot - // from the active leader. As always, after recovery, each member must be - // able to process client requests. - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 202; - - // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on a randomly chosen - // follower (non-leader) with a randomized time duration, and waits for - // most up-to-date node (leader) applies the snapshot count of entries - // since the delay operation. - // The expected behavior is that the delayed follower gets isolated - // and behind the current active leader, and once delay operation is undone, - // the slow follower comes back and catches up, possibly receiving a - // snapshot from the active leader. As always, after recovery, each member - // must be able to process client requests. 
- RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT = 203; - - // DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to - // the peer port on the active leader. And waits for "delay-ms" until - // recovery. - // The expected behavior is that cluster may elect a new leader, and - // once packet delay operation is undone, the (old) leader comes back - // and tries to catch up with latest changes from cluster. As always, - // after recovery, each member must be able to process client requests. - DELAY_PEER_PORT_TX_RX_LEADER = 204; - - // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets - // from/to the peer port on the active leader with a randomized time - // duration. And waits for "delay-ms" until recovery. - // The expected behavior is that cluster may elect a new leader, and - // once packet delay operation is undone, the (old) leader comes back - // and tries to catch up with latest changes from cluster. As always, - // after recovery, each member must be able to process client requests. - RANDOM_DELAY_PEER_PORT_TX_RX_LEADER = 205; - - // DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on the active leader, - // and waits for most up-to-date node (current or new leader) applies the - // snapshot count of entries since the delay operation. - // The expected behavior is that cluster may elect a new leader, and - // the old leader gets isolated and behind the current active leader, - // and once delay operation is undone, the slow follower comes back - // and catches up, likely receiving a snapshot from the active leader. - // As always, after recovery, each member must be able to process client - // requests. - DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 206; - - // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays - // outgoing/incoming packets from/to the peer port on the active leader, - // with a randomized time duration. 
And it waits for most up-to-date node - // (current or new leader) applies the snapshot count of entries since the - // delay operation. - // The expected behavior is that cluster may elect a new leader, and - // the old leader gets isolated and behind the current active leader, - // and once delay operation is undone, the slow follower comes back - // and catches up, likely receiving a snapshot from the active leader. - // As always, after recovery, each member must be able to process client - // requests. - RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT = 207; - - // DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to - // the peer ports on majority nodes of cluster. And it waits for - // "delay-ms" until recovery, likely to trigger election timeouts. - // The expected behavior is that cluster may elect a new leader, while - // quorum of nodes struggle with slow networks, and once delay operation - // is undone, nodes come back and cluster comes back operative. As always, - // after recovery, each member must be able to process client requests. - DELAY_PEER_PORT_TX_RX_QUORUM = 208; - - // RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets - // from/to the peer ports on majority nodes of cluster, with randomized - // time durations. And it waits for "delay-ms" until recovery, likely - // to trigger election timeouts. - // The expected behavior is that cluster may elect a new leader, while - // quorum of nodes struggle with slow networks, and once delay operation - // is undone, nodes come back and cluster comes back operative. As always, - // after recovery, each member must be able to process client requests. - RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM = 209; - - // DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the - // peer ports on all nodes. And it waits for "delay-ms" until recovery, - // likely to trigger election timeouts. 
- // The expected behavior is that cluster may become totally inoperable, - // struggling with slow networks across the whole cluster. Once delay - // operation is undone, nodes come back and cluster comes back operative. - // As always, after recovery, each member must be able to process client - // requests. - DELAY_PEER_PORT_TX_RX_ALL = 210; - - // RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets - // from/to the peer ports on all nodes, with randomized time durations. - // And it waits for "delay-ms" until recovery, likely to trigger - // election timeouts. - // The expected behavior is that cluster may become totally inoperable, - // struggling with slow networks across the whole cluster. Once delay - // operation is undone, nodes come back and cluster comes back operative. - // As always, after recovery, each member must be able to process client - // requests. - RANDOM_DELAY_PEER_PORT_TX_RX_ALL = 211; - - // NO_FAIL_WITH_STRESS stops injecting failures while testing the - // consistency and correctness under pressure loads, for the duration of - // "delay-ms". Goal is to ensure cluster be still making progress - // on recovery, and verify system does not deadlock following a sequence - // of failure injections. - // The expected behavior is that cluster remains fully operative in healthy - // condition. As always, after recovery, each member must be able to process - // client requests. - NO_FAIL_WITH_STRESS = 300; - - // NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor - // sends stressig client requests to the cluster, for the duration of - // "delay-ms". Goal is to ensure cluster be still making progress - // on recovery, and verify system does not deadlock following a sequence - // of failure injections. - // The expected behavior is that cluster remains fully operative in healthy - // condition, and clients requests during liveness period succeed without - // errors. 
- // Note: this is how Google Chubby does failure injection testing - // https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf. - NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS = 301; - - // FAILPOINTS injects failpoints to etcd server runtime, triggering panics - // in critical code paths. - FAILPOINTS = 400; - - // FAILPOINTS_WITH_DISK_IO_LATENCY injects high disk I/O latency failure in raftAfterSave code paths. - FAILPOINTS_WITH_DISK_IO_LATENCY = 401; - - // EXTERNAL runs external failure injection scripts. - EXTERNAL = 500; -} diff --git a/tests/functional/runner/election_command.go b/tests/functional/runner/election_command.go deleted file mode 100644 index 4a0d194bdda..00000000000 --- a/tests/functional/runner/election_command.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runner - -import ( - "context" - "errors" - "fmt" - - "go.etcd.io/etcd/client/v3/concurrency" - - "github.com/spf13/cobra" -) - -// NewElectionCommand returns the cobra command for "election runner". 
-func NewElectionCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "election [election name (defaults to 'elector')]", - Short: "Performs election operation", - Run: runElectionFunc, - } - cmd.Flags().IntVar(&totalClientConnections, "total-client-connections", 10, "total number of client connections") - return cmd -} - -func runElectionFunc(cmd *cobra.Command, args []string) { - election := "elector" - if len(args) == 1 { - election = args[0] - } - if len(args) > 1 { - ExitWithError(ExitBadArgs, errors.New("election takes at most one argument")) - } - - rcs := make([]roundClient, totalClientConnections) - validatec := make(chan struct{}, len(rcs)) - // nextc closes when election is ready for next round. - nextc := make(chan struct{}) - eps := endpointsFromFlag(cmd) - - for i := range rcs { - v := fmt.Sprintf("%d", i) - observedLeader := "" - validateWaiters := 0 - var rcNextc chan struct{} - setRcNextc := func() { - rcNextc = nextc - } - - rcs[i].c = newClient(eps, dialTimeout) - var ( - s *concurrency.Session - err error - ) - for { - s, err = concurrency.NewSession(rcs[i].c) - if err == nil { - break - } - } - - e := concurrency.NewElection(s, election) - rcs[i].acquire = func() (err error) { - ctx, cancel := context.WithCancel(context.Background()) - donec := make(chan struct{}) - go func() { - defer close(donec) - for ctx.Err() == nil { - if ol, ok := <-e.Observe(ctx); ok { - observedLeader = string(ol.Kvs[0].Value) - break - } - } - if observedLeader != v { - cancel() - } - }() - err = e.Campaign(ctx, v) - cancel() - <-donec - if err == nil { - observedLeader = v - } - if observedLeader == v { - validateWaiters = len(rcs) - } - select { - case <-ctx.Done(): - return nil - default: - return err - } - } - rcs[i].validate = func() error { - l, err := e.Leader(context.TODO()) - if err == nil && string(l.Kvs[0].Value) != observedLeader { - return fmt.Errorf("expected leader %q, got %q", observedLeader, l.Kvs[0].Value) - } - if err != nil { - return err - } 
- setRcNextc() - validatec <- struct{}{} - return nil - } - rcs[i].release = func() error { - for validateWaiters > 0 { - select { - case <-validatec: - validateWaiters-- - default: - return fmt.Errorf("waiting on followers") - } - } - if err := e.Resign(context.TODO()); err != nil { - return err - } - if observedLeader == v { - oldNextc := nextc - nextc = make(chan struct{}) - close(oldNextc) - - } - <-rcNextc - observedLeader = "" - return nil - } - } - // each client creates 1 key from Campaign() and delete it from Resign() - // a round involves in 2*len(rcs) requests. - doRounds(rcs, rounds, 2*len(rcs)) -} diff --git a/tests/functional/runner/error.go b/tests/functional/runner/error.go deleted file mode 100644 index b9c279bce68..00000000000 --- a/tests/functional/runner/error.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runner - -import ( - "fmt" - "os" - - "go.etcd.io/etcd/client/v2" -) - -const ( - // http://tldp.org/LDP/abs/html/exitcodes.html - ExitSuccess = iota - ExitError - ExitBadConnection - ExitInvalidInput // for txn, watch command - ExitBadFeature // provided a valid flag with an unsupported value - ExitInterrupted - ExitIO - ExitBadArgs = 128 -) - -func ExitWithError(code int, err error) { - fmt.Fprintln(os.Stderr, "Error: ", err) - if cerr, ok := err.(*client.ClusterError); ok { - fmt.Fprintln(os.Stderr, cerr.Detail()) - } - os.Exit(code) -} diff --git a/tests/functional/runner/global.go b/tests/functional/runner/global.go deleted file mode 100644 index 1e5023f397e..00000000000 --- a/tests/functional/runner/global.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runner - -import ( - "context" - "fmt" - "log" - "sync" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - - "github.com/spf13/cobra" - "golang.org/x/time/rate" -) - -// shared flags -var ( - totalClientConnections int // total number of client connections to be made with server - endpoints []string - dialTimeout time.Duration - rounds int // total number of rounds to run; set to <= 0 to run forever. - reqRate int // maximum number of requests per second. 
-) - -type roundClient struct { - c *clientv3.Client - progress int - acquire func() error - validate func() error - release func() error -} - -func newClient(eps []string, timeout time.Duration) *clientv3.Client { - c, err := clientv3.New(clientv3.Config{ - Endpoints: eps, - DialTimeout: timeout * time.Second, - }) - if err != nil { - log.Fatal(err) - } - return c -} - -func doRounds(rcs []roundClient, rounds int, requests int) { - var wg sync.WaitGroup - - wg.Add(len(rcs)) - finished := make(chan struct{}) - limiter := rate.NewLimiter(rate.Limit(reqRate), reqRate) - for i := range rcs { - go func(rc *roundClient) { - defer wg.Done() - for rc.progress < rounds || rounds <= 0 { - if err := limiter.WaitN(context.Background(), requests/len(rcs)); err != nil { - log.Panicf("rate limiter error %v", err) - } - - for rc.acquire() != nil { /* spin */ - } - - if err := rc.validate(); err != nil { - log.Fatal(err) - } - - time.Sleep(10 * time.Millisecond) - rc.progress++ - finished <- struct{}{} - - for rc.release() != nil { /* spin */ - } - } - }(&rcs[i]) - } - - start := time.Now() - for i := 1; i < len(rcs)*rounds+1 || rounds <= 0; i++ { - select { - case <-finished: - if i%100 == 0 { - fmt.Printf("finished %d, took %v\n", i, time.Since(start)) - start = time.Now() - } - case <-time.After(time.Minute): - log.Panic("no progress after 1 minute!") - } - } - wg.Wait() - - for _, rc := range rcs { - rc.c.Close() - } -} - -func endpointsFromFlag(cmd *cobra.Command) []string { - eps, err := cmd.Flags().GetStringSlice("endpoints") - if err != nil { - ExitWithError(ExitError, err) - } - return eps -} diff --git a/tests/functional/runner/help.go b/tests/functional/runner/help.go deleted file mode 100644 index eb64b533f52..00000000000 --- a/tests/functional/runner/help.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// copied from https://github.com/rkt/rkt/blob/master/rkt/help.go - -package runner - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" - "text/tabwriter" - "text/template" - - "go.etcd.io/etcd/api/v3/version" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -var ( - commandUsageTemplate *template.Template - templFuncs = template.FuncMap{ - "descToLines": func(s string) []string { - // trim leading/trailing whitespace and split into slice of lines - return strings.Split(strings.Trim(s, "\n\t "), "\n") - }, - "cmdName": func(cmd *cobra.Command, startCmd *cobra.Command) string { - parts := []string{cmd.Name()} - for cmd.HasParent() && cmd.Parent().Name() != startCmd.Name() { - cmd = cmd.Parent() - parts = append([]string{cmd.Name()}, parts...) - } - return strings.Join(parts, " ") - }, - } -) - -func init() { - commandUsage := ` -{{ $cmd := .Cmd }}\ -{{ $cmdname := cmdName .Cmd .Cmd.Root }}\ -NAME: -{{ if not .Cmd.HasParent }}\ -{{printf "\t%s - %s" .Cmd.Name .Cmd.Short}} -{{else}}\ -{{printf "\t%s - %s" $cmdname .Cmd.Short}} -{{end}}\ - -USAGE: -{{printf "\t%s" .Cmd.UseLine}} -{{ if not .Cmd.HasParent }}\ - -VERSION: -{{printf "\t%s" .Version}} -{{end}}\ -{{if .Cmd.HasSubCommands}}\ - -API VERSION: -{{printf "\t%s" .APIVersion}} -{{end}}\ -{{if .Cmd.HasSubCommands}}\ - - -COMMANDS: -{{range .SubCommands}}\ -{{ $cmdname := cmdName . 
$cmd }}\ -{{ if .Runnable }}\ -{{printf "\t%s\t%s" $cmdname .Short}} -{{end}}\ -{{end}}\ -{{end}}\ -{{ if .Cmd.Long }}\ - -DESCRIPTION: -{{range $line := descToLines .Cmd.Long}}{{printf "\t%s" $line}} -{{end}}\ -{{end}}\ -{{if .Cmd.HasLocalFlags}}\ - -OPTIONS: -{{.LocalFlags}}\ -{{end}}\ -{{if .Cmd.HasInheritedFlags}}\ - -GLOBAL OPTIONS: -{{.GlobalFlags}}\ -{{end}} -`[1:] - - commandUsageTemplate = template.Must(template.New("command_usage").Funcs(templFuncs).Parse(strings.ReplaceAll(commandUsage, "\\\n", ""))) -} - -func etcdFlagUsages(flagSet *pflag.FlagSet) string { - x := new(bytes.Buffer) - - flagSet.VisitAll(func(flag *pflag.Flag) { - if len(flag.Deprecated) > 0 { - return - } - var format string - if len(flag.Shorthand) > 0 { - format = " -%s, --%s" - } else { - format = " %s --%s" - } - if len(flag.NoOptDefVal) > 0 { - format = format + "[" - } - if flag.Value.Type() == "string" { - // put quotes on the value - format = format + "=%q" - } else { - format = format + "=%s" - } - if len(flag.NoOptDefVal) > 0 { - format = format + "]" - } - format = format + "\t%s\n" - shorthand := flag.Shorthand - fmt.Fprintf(x, format, shorthand, flag.Name, flag.DefValue, flag.Usage) - }) - - return x.String() -} - -func getSubCommands(cmd *cobra.Command) []*cobra.Command { - var subCommands []*cobra.Command - for _, subCmd := range cmd.Commands() { - subCommands = append(subCommands, subCmd) - subCommands = append(subCommands, getSubCommands(subCmd)...) 
- } - return subCommands -} - -func usageFunc(cmd *cobra.Command) error { - subCommands := getSubCommands(cmd) - tabOut := getTabOutWithWriter(os.Stdout) - commandUsageTemplate.Execute(tabOut, struct { - Cmd *cobra.Command - LocalFlags string - GlobalFlags string - SubCommands []*cobra.Command - Version string - APIVersion string - }{ - cmd, - etcdFlagUsages(cmd.LocalFlags()), - etcdFlagUsages(cmd.InheritedFlags()), - subCommands, - version.Version, - version.APIVersion, - }) - tabOut.Flush() - return nil -} - -func getTabOutWithWriter(writer io.Writer) *tabwriter.Writer { - aTabOut := new(tabwriter.Writer) - aTabOut.Init(writer, 0, 8, 1, '\t', 0) - return aTabOut -} diff --git a/tests/functional/runner/lease_renewer_command.go b/tests/functional/runner/lease_renewer_command.go deleted file mode 100644 index b61600ca131..00000000000 --- a/tests/functional/runner/lease_renewer_command.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runner - -import ( - "context" - "errors" - "log" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - - "github.com/spf13/cobra" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var ( - leaseTTL int64 -) - -// NewLeaseRenewerCommand returns the cobra command for "lease-renewer runner". 
-func NewLeaseRenewerCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "lease-renewer", - Short: "Performs lease renew operation", - Run: runLeaseRenewerFunc, - } - cmd.Flags().Int64Var(&leaseTTL, "ttl", 5, "lease's ttl") - return cmd -} - -func runLeaseRenewerFunc(cmd *cobra.Command, args []string) { - if len(args) > 0 { - ExitWithError(ExitBadArgs, errors.New("lease-renewer does not take any argument")) - } - - eps := endpointsFromFlag(cmd) - c := newClient(eps, dialTimeout) - ctx := context.Background() - - for { - var ( - l *clientv3.LeaseGrantResponse - lk *clientv3.LeaseKeepAliveResponse - err error - ) - for { - l, err = c.Lease.Grant(ctx, leaseTTL) - if err == nil { - break - } - } - expire := time.Now().Add(time.Duration(l.TTL-1) * time.Second) - - for { - lk, err = c.Lease.KeepAliveOnce(ctx, l.ID) - if ev, ok := status.FromError(err); ok && ev.Code() == codes.NotFound { - if time.Since(expire) < 0 { - log.Fatalf("bad renew! exceeded: %v", time.Since(expire)) - } - log.Fatalf("lost lease %d, expire: %v\n", l.ID, expire) - } - - if err != nil { - continue - } - expire = time.Now().Add(time.Duration(lk.TTL-1) * time.Second) - log.Printf("renewed lease %d, expire: %v\n", lk.ID, expire) - time.Sleep(time.Duration(lk.TTL-2) * time.Second) - } - } -} diff --git a/tests/functional/runner/lock_racer_command.go b/tests/functional/runner/lock_racer_command.go deleted file mode 100644 index c6f1b941006..00000000000 --- a/tests/functional/runner/lock_racer_command.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runner - -import ( - "context" - "errors" - "fmt" - "sync" - - "go.etcd.io/etcd/client/v3/concurrency" - - "github.com/spf13/cobra" -) - -// NewLockRacerCommand returns the cobra command for "lock-racer runner". -func NewLockRacerCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "lock-racer [name of lock (defaults to 'racers')]", - Short: "Performs lock race operation", - Run: runRacerFunc, - } - cmd.Flags().IntVar(&totalClientConnections, "total-client-connections", 10, "total number of client connections") - return cmd -} - -func runRacerFunc(cmd *cobra.Command, args []string) { - racers := "racers" - if len(args) == 1 { - racers = args[0] - } - - if len(args) > 1 { - ExitWithError(ExitBadArgs, errors.New("lock-racer takes at most one argument")) - } - - rcs := make([]roundClient, totalClientConnections) - ctx := context.Background() - // mu ensures validate and release funcs are atomic. 
- var mu sync.Mutex - cnt := 0 - - eps := endpointsFromFlag(cmd) - - for i := range rcs { - var ( - s *concurrency.Session - err error - ) - - rcs[i].c = newClient(eps, dialTimeout) - - for { - s, err = concurrency.NewSession(rcs[i].c) - if err == nil { - break - } - } - m := concurrency.NewMutex(s, racers) - rcs[i].acquire = func() error { return m.Lock(ctx) } - rcs[i].validate = func() error { - mu.Lock() - defer mu.Unlock() - if cnt++; cnt != 1 { - return fmt.Errorf("bad lock; count: %d", cnt) - } - return nil - } - rcs[i].release = func() error { - mu.Lock() - defer mu.Unlock() - if err := m.Unlock(ctx); err != nil { - return err - } - cnt = 0 - return nil - } - } - // each client creates 1 key from NewMutex() and delete it from Unlock() - // a round involves in 2*len(rcs) requests. - doRounds(rcs, rounds, 2*len(rcs)) -} diff --git a/tests/functional/runner/root.go b/tests/functional/runner/root.go deleted file mode 100644 index abd74af1bc9..00000000000 --- a/tests/functional/runner/root.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package runner implements individual etcd-runner commands for the etcd-runner utility. -package runner - -import ( - "log" - "math/rand" - "time" - - "github.com/spf13/cobra" -) - -const ( - cliName = "etcd-runner" - cliDescription = "Stress tests using clientv3 functionality.." 
- - defaultDialTimeout = 2 * time.Second -) - -var ( - rootCmd = &cobra.Command{ - Use: cliName, - Short: cliDescription, - SuggestFor: []string{"etcd-runner"}, - } -) - -func init() { - cobra.EnablePrefixMatching = true - - rand.Seed(time.Now().UnixNano()) - - log.SetFlags(log.Lmicroseconds) - - rootCmd.PersistentFlags().StringSliceVar(&endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC endpoints") - rootCmd.PersistentFlags().DurationVar(&dialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections") - rootCmd.PersistentFlags().IntVar(&reqRate, "req-rate", 30, "maximum number of requests per second") - rootCmd.PersistentFlags().IntVar(&rounds, "rounds", 100, "number of rounds to run; 0 to run forever") - - rootCmd.AddCommand( - NewElectionCommand(), - NewLeaseRenewerCommand(), - NewLockRacerCommand(), - NewWatchCommand(), - ) -} - -func Start() { - rootCmd.SetUsageFunc(usageFunc) - - // Make help just show the usage - rootCmd.SetHelpTemplate(`{{.UsageString}}`) - - if err := rootCmd.Execute(); err != nil { - ExitWithError(ExitError, err) - } -} diff --git a/tests/functional/runner/watch_command.go b/tests/functional/runner/watch_command.go deleted file mode 100644 index 96a52ddcb53..00000000000 --- a/tests/functional/runner/watch_command.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runner - -import ( - "context" - "errors" - "fmt" - "log" - "sync" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/stringutil" - - "github.com/spf13/cobra" - "golang.org/x/time/rate" -) - -var ( - runningTime time.Duration // time for which operation should be performed - noOfPrefixes int // total number of prefixes which will be watched upon - watchPerPrefix int // number of watchers per prefix - watchPrefix string // prefix append to keys in watcher - totalKeys int // total number of keys for operation -) - -// NewWatchCommand returns the cobra command for "watcher runner". -func NewWatchCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "watcher", - Short: "Performs watch operation", - Run: runWatcherFunc, - } - cmd.Flags().DurationVar(&runningTime, "running-time", 60, "number of seconds to run") - cmd.Flags().StringVar(&watchPrefix, "prefix", "", "the prefix to append on all keys") - cmd.Flags().IntVar(&noOfPrefixes, "total-prefixes", 10, "total no of prefixes to use") - cmd.Flags().IntVar(&watchPerPrefix, "watch-per-prefix", 10, "number of watchers per prefix") - cmd.Flags().IntVar(&totalKeys, "total-keys", 1000, "total number of keys to watch") - - return cmd -} - -func runWatcherFunc(cmd *cobra.Command, args []string) { - if len(args) > 0 { - ExitWithError(ExitBadArgs, errors.New("watcher does not take any argument")) - } - - ctx := context.Background() - for round := 0; round < rounds || rounds <= 0; round++ { - fmt.Println("round", round) - performWatchOnPrefixes(ctx, cmd, round) - } -} - -func performWatchOnPrefixes(ctx context.Context, cmd *cobra.Command, round int) { - keyPerPrefix := totalKeys / noOfPrefixes - prefixes := stringutil.UniqueStrings(5, noOfPrefixes) - keys := stringutil.RandomStrings(10, keyPerPrefix) - - roundPrefix := fmt.Sprintf("%16x", round) - - eps := endpointsFromFlag(cmd) - - var ( - revision int64 - wg sync.WaitGroup - gr *clientv3.GetResponse - err error - ) - - client := 
newClient(eps, dialTimeout) - defer client.Close() - - gr, err = getKey(ctx, client, "non-existent") - if err != nil { - log.Fatalf("failed to get the initial revision: %v", err) - } - revision = gr.Header.Revision - - ctxt, cancel := context.WithDeadline(ctx, time.Now().Add(runningTime*time.Second)) - defer cancel() - - // generate and put keys in cluster - limiter := rate.NewLimiter(rate.Limit(reqRate), reqRate) - - go func() { - for _, key := range keys { - for _, prefix := range prefixes { - if err = limiter.Wait(ctxt); err != nil { - return - } - if err = putKeyAtMostOnce(ctxt, client, watchPrefix+"-"+roundPrefix+"-"+prefix+"-"+key); err != nil { - log.Fatalf("failed to put key: %v", err) - return - } - } - } - }() - - ctxc, cancelc := context.WithCancel(ctx) - - wcs := make([]clientv3.WatchChan, 0) - rcs := make([]*clientv3.Client, 0) - - for _, prefix := range prefixes { - for j := 0; j < watchPerPrefix; j++ { - rc := newClient(eps, dialTimeout) - rcs = append(rcs, rc) - - wprefix := watchPrefix + "-" + roundPrefix + "-" + prefix - - wc := rc.Watch(ctxc, wprefix, clientv3.WithPrefix(), clientv3.WithRev(revision)) - wcs = append(wcs, wc) - - wg.Add(1) - go func() { - defer wg.Done() - checkWatchResponse(wc, wprefix, keys) - }() - } - } - wg.Wait() - - cancelc() - - // verify all watch channels are closed - for e, wc := range wcs { - if _, ok := <-wc; ok { - log.Fatalf("expected wc to be closed, but received %v", e) - } - } - - for _, rc := range rcs { - rc.Close() - } - - if err = deletePrefix(ctx, client, watchPrefix); err != nil { - log.Fatalf("failed to clean up keys after test: %v", err) - } -} - -func checkWatchResponse(wc clientv3.WatchChan, prefix string, keys []string) { - for n := 0; n < len(keys); { - wr, more := <-wc - if !more { - log.Fatalf("expect more keys (received %d/%d) for %s", n, len(keys), prefix) - } - for _, event := range wr.Events { - expectedKey := prefix + "-" + keys[n] - receivedKey := string(event.Kv.Key) - if expectedKey != 
receivedKey { - log.Fatalf("expected key %q, got %q for prefix : %q\n", expectedKey, receivedKey, prefix) - } - n++ - } - } -} - -func putKeyAtMostOnce(ctx context.Context, client *clientv3.Client, key string) error { - gr, err := getKey(ctx, client, key) - if err != nil { - return err - } - - var modrev int64 - if len(gr.Kvs) > 0 { - modrev = gr.Kvs[0].ModRevision - } - - for ctx.Err() == nil { - _, err := client.Txn(ctx).If(clientv3.Compare(clientv3.ModRevision(key), "=", modrev)).Then(clientv3.OpPut(key, key)).Commit() - - if err == nil { - return nil - } - } - - return ctx.Err() -} - -func deletePrefix(ctx context.Context, client *clientv3.Client, key string) error { - for ctx.Err() == nil { - if _, err := client.Delete(ctx, key, clientv3.WithPrefix()); err == nil { - return nil - } - } - return ctx.Err() -} - -func getKey(ctx context.Context, client *clientv3.Client, key string) (*clientv3.GetResponse, error) { - for ctx.Err() == nil { - if gr, err := client.Get(ctx, key); err == nil { - return gr, nil - } - } - return nil, ctx.Err() -} diff --git a/tests/functional/scripts/docker-local-agent.sh b/tests/functional/scripts/docker-local-agent.sh deleted file mode 100755 index 81b8d97f714..00000000000 --- a/tests/functional/scripts/docker-local-agent.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -< snapshotCount { - clus.lg.Info( - "trigger snapshot PASS", - zap.Int("retries", i), - zap.String("desc", c.Desc()), - zap.Int64("committed-entries", diff), - zap.Int64("etcd-snapshot-count", snapshotCount), - zap.Int64("start-revision", startRev), - zap.Int64("last-revision", lastRev), - zap.Duration("took", time.Since(now)), - ) - return nil - } - - clus.lg.Info( - "trigger snapshot RETRY", - zap.Int("retries", i), - zap.Int64("committed-entries", diff), - zap.Int64("etcd-snapshot-count", snapshotCount), - zap.Int64("start-revision", startRev), - zap.Int64("last-revision", lastRev), - zap.Duration("took", time.Since(now)), - zap.Error(err), - ) - 
time.Sleep(time.Second) - if err != nil { - time.Sleep(2 * time.Second) - } - } - - return fmt.Errorf("cluster too slow: only %d commits in %d retries", lastRev-startRev, retries) -} - -func (c *caseUntilSnapshot) Desc() string { - if c.desc != "" { - return c.desc - } - if c.rpcpbCase.String() != "" { - return c.rpcpbCase.String() - } - return c.Case.Desc() -} - -func (c *caseUntilSnapshot) TestCase() rpcpb.Case { - return c.rpcpbCase -} diff --git a/tests/functional/tester/case_delay.go b/tests/functional/tester/case_delay.go deleted file mode 100644 index d06d1d65dc4..00000000000 --- a/tests/functional/tester/case_delay.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import ( - "time" - - "go.uber.org/zap" -) - -type caseDelay struct { - Case - delayDuration time.Duration -} - -func (c *caseDelay) Inject(clus *Cluster) error { - if err := c.Case.Inject(clus); err != nil { - return err - } - if c.delayDuration > 0 { - clus.lg.Info( - "wait after inject", - zap.Duration("delay", c.delayDuration), - zap.String("desc", c.Case.Desc()), - ) - time.Sleep(c.delayDuration) - } - return nil -} diff --git a/tests/functional/tester/case_external.go b/tests/functional/tester/case_external.go deleted file mode 100644 index 69cc07e01b1..00000000000 --- a/tests/functional/tester/case_external.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import ( - "fmt" - "os/exec" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" -) - -type caseExternal struct { - desc string - rpcpbCase rpcpb.Case - - scriptPath string -} - -func (c *caseExternal) Inject(clus *Cluster) error { - return exec.Command(c.scriptPath, "enable", fmt.Sprintf("%d", clus.rd)).Run() -} - -func (c *caseExternal) Recover(clus *Cluster) error { - return exec.Command(c.scriptPath, "disable", fmt.Sprintf("%d", clus.rd)).Run() -} - -func (c *caseExternal) Desc() string { - return c.desc -} - -func (c *caseExternal) TestCase() rpcpb.Case { - return c.rpcpbCase -} - -func new_Case_EXTERNAL(scriptPath string) Case { - return &caseExternal{ - desc: fmt.Sprintf("external fault injector (script: %q)", scriptPath), - rpcpbCase: rpcpb.Case_EXTERNAL, - scriptPath: scriptPath, - } -} diff --git a/tests/functional/tester/case_failpoints.go b/tests/functional/tester/case_failpoints.go deleted file mode 100644 index 9cc74f40ac3..00000000000 --- a/tests/functional/tester/case_failpoints.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import ( - "fmt" - "io" - "net/http" - "strings" - "sync" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" -) - -type failpointStats struct { - mu sync.Mutex - // crashes counts the number of crashes for a failpoint - crashes map[string]int -} - -var fpStats failpointStats - -func failpointFailures(clus *Cluster) (ret []Case, err error) { - var fps []string - fps, err = failpointPaths(clus.Members[0].FailpointHTTPAddr) - if err != nil { - return nil, err - } - // create failure objects for all failpoints - for _, fp := range fps { - if len(fp) == 0 { - continue - } - - fpFails := casesFromFailpoint(fp, clus.Tester.FailpointCommands) - - // wrap in delays so failpoint has time to trigger - for i, fpf := range fpFails { - if strings.Contains(fp, "Snap") { - // hack to trigger snapshot failpoints - fpFails[i] = &caseUntilSnapshot{ - desc: fpf.Desc(), - rpcpbCase: rpcpb.Case_FAILPOINTS, - Case: fpf, - } - } else { - fpFails[i] = &caseDelay{ - Case: fpf, - delayDuration: clus.GetCaseDelayDuration(), - } - } - } - ret = append(ret, fpFails...) - } - fpStats.crashes = make(map[string]int) - return ret, err -} - -func failpointPaths(endpoint string) ([]string, error) { - resp, err := http.Get(endpoint) - if err != nil { - return nil, err - } - defer resp.Body.Close() - body, rerr := io.ReadAll(resp.Body) - if rerr != nil { - return nil, rerr - } - var fps []string - for _, l := range strings.Split(string(body), "\n") { - fp := strings.Split(l, "=")[0] - fps = append(fps, fp) - } - return fps, nil -} - -// failpoints follows FreeBSD FAIL_POINT syntax. -// e.g. 
panic("etcd-tester"),1*sleep(1000)->panic("etcd-tester") -func casesFromFailpoint(fp string, failpointCommands []string) (fs []Case) { - recov := makeRecoverFailpoint(fp) - for _, fcmd := range failpointCommands { - inject := makeInjectFailpoint(fp, fcmd) - fs = append(fs, []Case{ - &caseFollower{ - caseByFunc: caseByFunc{ - desc: fmt.Sprintf("failpoint %q (one: %q)", fp, fcmd), - rpcpbCase: rpcpb.Case_FAILPOINTS, - injectMember: inject, - recoverMember: recov, - }, - last: -1, - lead: -1, - }, - &caseLeader{ - caseByFunc: caseByFunc{ - desc: fmt.Sprintf("failpoint %q (leader: %q)", fp, fcmd), - rpcpbCase: rpcpb.Case_FAILPOINTS, - injectMember: inject, - recoverMember: recov, - }, - last: -1, - lead: -1, - }, - &caseQuorum{ - caseByFunc: caseByFunc{ - desc: fmt.Sprintf("failpoint %q (quorum: %q)", fp, fcmd), - rpcpbCase: rpcpb.Case_FAILPOINTS, - injectMember: inject, - recoverMember: recov, - }, - injected: make(map[int]struct{}), - }, - &caseAll{ - desc: fmt.Sprintf("failpoint %q (all: %q)", fp, fcmd), - rpcpbCase: rpcpb.Case_FAILPOINTS, - injectMember: inject, - recoverMember: recov, - }, - }...) - } - return fs -} - -func makeInjectFailpoint(fp, val string) injectMemberFunc { - return func(clus *Cluster, idx int) (err error) { - // Add the failpoint into the member's list of failpoints so that if the member is restarted, the - // failpoint state is persisted (via the GOFAIL_FAILPOINTS environment variable) - addFailpointToMemberList(clus.Members[idx], idx, fp) - - // Enable the failpoint - return putFailpoint(clus.Members[idx].FailpointHTTPAddr, fp, val) - } -} - -func makeRecoverFailpoint(fp string) recoverMemberFunc { - return func(clus *Cluster, idx int) error { - // Remove the failpoint into the member's list of failpoints. 
- removeFailpointFromMemberList(clus.Members[idx], idx, fp) - - // Disable the failpoint - if err := delFailpoint(clus.Members[idx].FailpointHTTPAddr, fp); err == nil { - return nil - } - // node not responding, likely dead from fp panic; restart - fpStats.mu.Lock() - fpStats.crashes[fp]++ - fpStats.mu.Unlock() - return recover_SIGTERM_ETCD(clus, idx) - } -} - -func addFailpointToMemberList(member *rpcpb.Member, idx int, fp string) { - failpoints := strings.Split(member.Failpoints, ";") - failpoints = append(failpoints, fp) - member.Failpoints = strings.Join(failpoints, ";") -} - -func removeFailpointFromMemberList(member *rpcpb.Member, idx int, fp string) { - failpoints := strings.Split(member.Failpoints, ";") - for i, f := range failpoints { - if f == fp { - failpoints = append(failpoints[:i], failpoints[i+1:]...) - break - } - } - member.Failpoints = strings.Join(failpoints, ";") -} - -func putFailpoint(ep, fp, val string) error { - req, _ := http.NewRequest(http.MethodPut, ep+"/"+fp, strings.NewReader(val)) - c := http.Client{} - resp, err := c.Do(req) - if err != nil { - return err - } - resp.Body.Close() - if resp.StatusCode/100 != 2 { - return fmt.Errorf("failed to PUT %s=%s at %s (%v)", fp, val, ep, resp.Status) - } - return nil -} - -func delFailpoint(ep, fp string) error { - req, _ := http.NewRequest(http.MethodDelete, ep+"/"+fp, strings.NewReader("")) - c := http.Client{} - resp, err := c.Do(req) - if err != nil { - return err - } - resp.Body.Close() - if resp.StatusCode/100 != 2 { - return fmt.Errorf("failed to DELETE %s at %s (%v)", fp, ep, resp.Status) - } - return nil -} diff --git a/tests/functional/tester/case_failpoints_disk_io.go b/tests/functional/tester/case_failpoints_disk_io.go deleted file mode 100644 index 4cc2396b679..00000000000 --- a/tests/functional/tester/case_failpoints_disk_io.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not 
use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "fmt" - "strings" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" -) - -const ( - diskIOFailpoint = "raftAfterSave" -) - -func failpointDiskIOFailures(clus *Cluster) (ret []Case, err error) { - fps, err := failpointPaths(clus.Members[0].FailpointHTTPAddr) - if err != nil { - return nil, err - } - var detailDiskIOLatencyFailpointPath string - for i := 0; i < len(fps); i++ { - if strings.HasSuffix(fps[i], diskIOFailpoint) { - detailDiskIOLatencyFailpointPath = fps[i] - break - } - } - // create failure objects for diskIOFailpoint - fpFails := casesFromDiskIOFailpoint(detailDiskIOLatencyFailpointPath, clus.Tester.FailpointCommands) - // wrap in delays so failpoint has time to trigger - for i, fpf := range fpFails { - fpFails[i] = &caseDelay{ - Case: fpf, - delayDuration: clus.GetCaseDelayDuration(), - } - } - ret = append(ret, fpFails...) - return ret, nil -} - -func casesFromDiskIOFailpoint(fp string, failpointCommands []string) (fs []Case) { - recov := makeRecoverFailpoint(fp) - for _, fcmd := range failpointCommands { - inject := makeInjectFailpoint(fp, fcmd) - fs = append(fs, []Case{ - &caseLeader{ - caseByFunc: caseByFunc{ - desc: fmt.Sprintf("failpoint %q (leader: %q)", fp, fcmd), - rpcpbCase: rpcpb.Case_FAILPOINTS, - injectMember: inject, - recoverMember: recov, - }, - last: -1, - lead: -1, - }, - }...) 
- } - return fs -} diff --git a/tests/functional/tester/case_network_blackhole.go b/tests/functional/tester/case_network_blackhole.go deleted file mode 100644 index 3b0602050a4..00000000000 --- a/tests/functional/tester/case_network_blackhole.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import "go.etcd.io/etcd/tests/v3/functional/rpcpb" - -func inject_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error { - return clus.sendOp(idx, rpcpb.Operation_BLACKHOLE_PEER_PORT_TX_RX) -} - -func recover_BLACKHOLE_PEER_PORT_TX_RX(clus *Cluster, idx int) error { - return clus.sendOp(idx, rpcpb.Operation_UNBLACKHOLE_PEER_PORT_TX_RX) -} - -func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER, - injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX, - recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX, - } - c := &caseFollower{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT() Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT, - injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX, - recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX, - } - c := &caseFollower{cc, -1, -1} - return &caseUntilSnapshot{ - 
rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT, - Case: c, - } -} - -func new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus *Cluster) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER, - injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX, - recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX, - } - c := &caseLeader{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT() Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT, - injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX, - recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX, - } - c := &caseLeader{cc, -1, -1} - return &caseUntilSnapshot{ - rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT, - Case: c, - } -} - -func new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus *Cluster) Case { - c := &caseQuorum{ - caseByFunc: caseByFunc{ - rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM, - injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX, - recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX, - }, - injected: make(map[int]struct{}), - } - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus *Cluster) Case { - c := &caseAll{ - rpcpbCase: rpcpb.Case_BLACKHOLE_PEER_PORT_TX_RX_ALL, - injectMember: inject_BLACKHOLE_PEER_PORT_TX_RX, - recoverMember: recover_BLACKHOLE_PEER_PORT_TX_RX, - } - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} diff --git a/tests/functional/tester/case_network_delay.go b/tests/functional/tester/case_network_delay.go deleted file mode 100644 index bedcd9e4fc0..00000000000 --- a/tests/functional/tester/case_network_delay.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 
2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "time" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -const ( - // Wait more when it recovers from slow network, because network layer - // needs extra time to propagate traffic control (tc command) change. - // Otherwise, we get different hash values from the previous revision. - // For more detail, please see https://github.com/etcd-io/etcd/issues/5121. - waitRecover = 5 * time.Second -) - -func inject_DELAY_PEER_PORT_TX_RX(clus *Cluster, idx int) error { - clus.lg.Info( - "injecting delay latency", - zap.Duration("latency", time.Duration(clus.Tester.UpdatedDelayLatencyMs)*time.Millisecond), - zap.Duration("latency-rv", time.Duration(clus.Tester.DelayLatencyMsRv)*time.Millisecond), - zap.String("endpoint", clus.Members[idx].EtcdClientEndpoint), - ) - return clus.sendOp(idx, rpcpb.Operation_DELAY_PEER_PORT_TX_RX) -} - -func recover_DELAY_PEER_PORT_TX_RX(clus *Cluster, idx int) error { - err := clus.sendOp(idx, rpcpb.Operation_UNDELAY_PEER_PORT_TX_RX) - time.Sleep(waitRecover) - return err -} - -func new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus *Cluster, random bool) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER, - injectMember: inject_DELAY_PEER_PORT_TX_RX, - recoverMember: recover_DELAY_PEER_PORT_TX_RX, - } - clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs - if random { - clus.UpdateDelayLatencyMs() - cc.rpcpbCase = 
rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER - } - c := &caseFollower{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT, - injectMember: inject_DELAY_PEER_PORT_TX_RX, - recoverMember: recover_DELAY_PEER_PORT_TX_RX, - } - clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs - if random { - clus.UpdateDelayLatencyMs() - cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT - } - c := &caseFollower{cc, -1, -1} - return &caseUntilSnapshot{ - rpcpbCase: cc.rpcpbCase, - Case: c, - } -} - -func new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus *Cluster, random bool) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_LEADER, - injectMember: inject_DELAY_PEER_PORT_TX_RX, - recoverMember: recover_DELAY_PEER_PORT_TX_RX, - } - clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs - if random { - clus.UpdateDelayLatencyMs() - cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER - } - c := &caseLeader{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster, random bool) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT, - injectMember: inject_DELAY_PEER_PORT_TX_RX, - recoverMember: recover_DELAY_PEER_PORT_TX_RX, - } - clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs - if random { - clus.UpdateDelayLatencyMs() - cc.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT - } - c := &caseLeader{cc, -1, -1} - return &caseUntilSnapshot{ - rpcpbCase: cc.rpcpbCase, - Case: c, - } -} - -func new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus 
*Cluster, random bool) Case { - c := &caseQuorum{ - caseByFunc: caseByFunc{ - rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_QUORUM, - injectMember: inject_DELAY_PEER_PORT_TX_RX, - recoverMember: recover_DELAY_PEER_PORT_TX_RX, - }, - injected: make(map[int]struct{}), - } - clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs - if random { - clus.UpdateDelayLatencyMs() - c.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM - } - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus *Cluster, random bool) Case { - c := &caseAll{ - rpcpbCase: rpcpb.Case_DELAY_PEER_PORT_TX_RX_ALL, - injectMember: inject_DELAY_PEER_PORT_TX_RX, - recoverMember: recover_DELAY_PEER_PORT_TX_RX, - } - clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs - if random { - clus.UpdateDelayLatencyMs() - c.rpcpbCase = rpcpb.Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL - } - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} diff --git a/tests/functional/tester/case_no_fail.go b/tests/functional/tester/case_no_fail.go deleted file mode 100644 index 16c6371d711..00000000000 --- a/tests/functional/tester/case_no_fail.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import ( - "time" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -type caseNoFailWithStress caseByFunc - -func (c *caseNoFailWithStress) Inject(clus *Cluster) error { - return nil -} - -func (c *caseNoFailWithStress) Recover(clus *Cluster) error { - return nil -} - -func (c *caseNoFailWithStress) Desc() string { - if c.desc != "" { - return c.desc - } - return c.rpcpbCase.String() -} - -func (c *caseNoFailWithStress) TestCase() rpcpb.Case { - return c.rpcpbCase -} - -func new_Case_NO_FAIL_WITH_STRESS(clus *Cluster) Case { - c := &caseNoFailWithStress{ - rpcpbCase: rpcpb.Case_NO_FAIL_WITH_STRESS, - } - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -type caseNoFailWithNoStressForLiveness caseByFunc - -func (c *caseNoFailWithNoStressForLiveness) Inject(clus *Cluster) error { - clus.lg.Info( - "extra delay for liveness mode with no stresser", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.String("desc", c.Desc()), - ) - time.Sleep(clus.GetCaseDelayDuration()) - - clus.lg.Info( - "wait health in liveness mode", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.String("desc", c.Desc()), - ) - return clus.WaitHealth() -} - -func (c *caseNoFailWithNoStressForLiveness) Recover(clus *Cluster) error { - return nil -} - -func (c *caseNoFailWithNoStressForLiveness) Desc() string { - if c.desc != "" { - return c.desc - } - return c.rpcpbCase.String() -} - -func (c *caseNoFailWithNoStressForLiveness) TestCase() rpcpb.Case { - return c.rpcpbCase -} - -func new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus *Cluster) Case { - c := &caseNoFailWithNoStressForLiveness{ - rpcpbCase: rpcpb.Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS, - } - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} diff --git a/tests/functional/tester/case_sigquit_remove.go b/tests/functional/tester/case_sigquit_remove.go deleted file mode 100644 index 
a9396987d5c..00000000000 --- a/tests/functional/tester/case_sigquit_remove.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "context" - "fmt" - "sort" - "strings" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -func inject_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error { - cli1, err := clus.Members[idx1].CreateEtcdClient() - if err != nil { - return err - } - defer cli1.Close() - - var mresp *clientv3.MemberListResponse - mresp, err = cli1.MemberList(context.Background()) - var mss []string - if err == nil && mresp != nil { - mss = describeMembers(mresp) - } - clus.lg.Info( - "member list before disastrous machine failure", - zap.String("request-to", clus.Members[idx1].EtcdClientEndpoint), - zap.Strings("members", mss), - zap.Error(err), - ) - if err != nil { - return err - } - - sresp, serr := cli1.Status(context.Background(), clus.Members[idx1].EtcdClientEndpoint) - if serr != nil { - return serr - } - id1 := sresp.Header.MemberId - is1 := fmt.Sprintf("%016x", id1) - - clus.lg.Info( - "disastrous machine failure START", - zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint), - zap.String("target-member-id", is1), - zap.Error(err), - ) - err = clus.sendOp(idx1, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA) - clus.lg.Info( - "disastrous machine failure 
END", - zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint), - zap.String("target-member-id", is1), - zap.Error(err), - ) - if err != nil { - return err - } - - time.Sleep(2 * time.Second) - - idx2 := (idx1 + 1) % len(clus.Members) - var cli2 *clientv3.Client - cli2, err = clus.Members[idx2].CreateEtcdClient() - if err != nil { - return err - } - defer cli2.Close() - - // FIXME(bug): this may block forever during - // "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT" - // is the new leader too busy with snapshotting? - // is raft proposal dropped? - // enable client keepalive for failover? - clus.lg.Info( - "member remove after disaster START", - zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint), - zap.String("target-member-id", is1), - zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint), - ) - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - _, err = cli2.MemberRemove(ctx, id1) - cancel() - clus.lg.Info( - "member remove after disaster END", - zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint), - zap.String("target-member-id", is1), - zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint), - zap.Error(err), - ) - if err != nil { - return err - } - - time.Sleep(2 * time.Second) - - mresp, err = cli2.MemberList(context.Background()) - mss = []string{} - if err == nil && mresp != nil { - mss = describeMembers(mresp) - } - clus.lg.Info( - "member list after member remove", - zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint), - zap.Strings("members", mss), - zap.Error(err), - ) - return err -} - -func recover_SIGQUIT_ETCD_AND_REMOVE_DATA(clus *Cluster, idx1 int) error { - idx2 := (idx1 + 1) % len(clus.Members) - cli2, err := clus.Members[idx2].CreateEtcdClient() - if err != nil { - return err - } - defer cli2.Close() - - _, err = cli2.MemberAdd(context.Background(), clus.Members[idx1].Etcd.AdvertisePeerURLs) - clus.lg.Info( - "member add before fresh 
restart", - zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint), - zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint), - zap.Error(err), - ) - if err != nil { - return err - } - - time.Sleep(2 * time.Second) - - clus.Members[idx1].Etcd.InitialClusterState = "existing" - err = clus.sendOp(idx1, rpcpb.Operation_RESTART_ETCD) - clus.lg.Info( - "fresh restart after member add", - zap.String("target-endpoint", clus.Members[idx1].EtcdClientEndpoint), - zap.Error(err), - ) - if err != nil { - return err - } - - time.Sleep(2 * time.Second) - - var mresp *clientv3.MemberListResponse - mresp, err = cli2.MemberList(context.Background()) - var mss []string - if err == nil && mresp != nil { - mss = describeMembers(mresp) - } - clus.lg.Info( - "member list after member add", - zap.String("request-to", clus.Members[idx2].EtcdClientEndpoint), - zap.Strings("members", mss), - zap.Error(err), - ) - return err -} - -func new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus *Cluster) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER, - injectMember: inject_SIGQUIT_ETCD_AND_REMOVE_DATA, - recoverMember: recover_SIGQUIT_ETCD_AND_REMOVE_DATA, - } - c := &caseFollower{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case { - return &caseUntilSnapshot{ - rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT, - Case: new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus), - } -} - -func new_Case_SIGQUIT_AND_REMOVE_LEADER(clus *Cluster) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_LEADER, - injectMember: inject_SIGQUIT_ETCD_AND_REMOVE_DATA, - recoverMember: recover_SIGQUIT_ETCD_AND_REMOVE_DATA, - } - c := &caseLeader{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func 
new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case { - return &caseUntilSnapshot{ - rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT, - Case: new_Case_SIGQUIT_AND_REMOVE_LEADER(clus), - } -} - -func describeMembers(mresp *clientv3.MemberListResponse) (ss []string) { - ss = make([]string, len(mresp.Members)) - for i, m := range mresp.Members { - ss[i] = fmt.Sprintf("Name %s / ID %016x / ClientURLs %s / PeerURLs %s", - m.Name, - m.ID, - strings.Join(m.ClientURLs, ","), - strings.Join(m.PeerURLs, ","), - ) - } - sort.Strings(ss) - return ss -} diff --git a/tests/functional/tester/case_sigquit_remove_quorum.go b/tests/functional/tester/case_sigquit_remove_quorum.go deleted file mode 100644 index 4570ba39bc7..00000000000 --- a/tests/functional/tester/case_sigquit_remove_quorum.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "context" - "fmt" - "strings" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -type fetchSnapshotCaseQuorum struct { - desc string - rpcpbCase rpcpb.Case - injected map[int]struct{} - snapshotted int -} - -func (c *fetchSnapshotCaseQuorum) Inject(clus *Cluster) error { - // 1. Assume node C is the current leader with most up-to-date data. 
- lead, err := clus.GetLeader() - if err != nil { - return err - } - c.snapshotted = lead - - // 2. Download snapshot from node C, before destroying node A and B. - clus.lg.Info( - "save snapshot on leader node START", - zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint), - ) - var resp *rpcpb.Response - resp, err = clus.sendOpWithResp(lead, rpcpb.Operation_SAVE_SNAPSHOT) - if resp == nil || (resp != nil && !resp.Success) || err != nil { - clus.lg.Info( - "save snapshot on leader node FAIL", - zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint), - zap.Error(err), - ) - return err - } - clus.lg.Info( - "save snapshot on leader node SUCCESS", - zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint), - zap.String("member-name", resp.SnapshotInfo.MemberName), - zap.Strings("member-client-urls", resp.SnapshotInfo.MemberClientURLs), - zap.String("snapshot-path", resp.SnapshotInfo.SnapshotPath), - zap.String("snapshot-file-size", resp.SnapshotInfo.SnapshotFileSize), - zap.String("snapshot-total-size", resp.SnapshotInfo.SnapshotTotalSize), - zap.Int64("snapshot-total-key", resp.SnapshotInfo.SnapshotTotalKey), - zap.Int64("snapshot-hash", resp.SnapshotInfo.SnapshotHash), - zap.Int64("snapshot-revision", resp.SnapshotInfo.SnapshotRevision), - zap.String("took", resp.SnapshotInfo.Took), - zap.Error(err), - ) - if err != nil { - return err - } - clus.Members[lead].SnapshotInfo = resp.SnapshotInfo - - leaderc, err := clus.Members[lead].CreateEtcdClient() - if err != nil { - return err - } - defer leaderc.Close() - var mresp *clientv3.MemberListResponse - mresp, err = leaderc.MemberList(context.Background()) - var mss []string - if err == nil && mresp != nil { - mss = describeMembers(mresp) - } - clus.lg.Info( - "member list before disastrous machine failure", - zap.String("request-to", clus.Members[lead].EtcdClientEndpoint), - zap.Strings("members", mss), - zap.Error(err), - ) - if err != nil { - return err - } - - // simulate 
real life; machine failures may happen - // after some time since last snapshot save - time.Sleep(time.Second) - - // 3. Destroy node A and B, and make the whole cluster inoperable. - for { - c.injected = pickQuorum(len(clus.Members)) - if _, ok := c.injected[lead]; !ok { - break - } - } - for idx := range c.injected { - clus.lg.Info( - "disastrous machine failure to quorum START", - zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint), - ) - err = clus.sendOp(idx, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA) - clus.lg.Info( - "disastrous machine failure to quorum END", - zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint), - zap.Error(err), - ) - if err != nil { - return err - } - } - - // 4. Now node C cannot operate either. - // 5. SIGTERM node C and remove its data directories. - clus.lg.Info( - "disastrous machine failure to old leader START", - zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint), - ) - err = clus.sendOp(lead, rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA) - clus.lg.Info( - "disastrous machine failure to old leader END", - zap.String("target-endpoint", clus.Members[lead].EtcdClientEndpoint), - zap.Error(err), - ) - return err -} - -func (c *fetchSnapshotCaseQuorum) Recover(clus *Cluster) error { - // 6. Restore a new seed member from node C's latest snapshot file. 
- oldlead := c.snapshotted - - // configuration on restart from recovered snapshot - // seed member's configuration is all the same as previous one - // except initial cluster string is now a single-node cluster - clus.Members[oldlead].EtcdOnSnapshotRestore = clus.Members[oldlead].Etcd - clus.Members[oldlead].EtcdOnSnapshotRestore.InitialClusterState = "existing" - name := clus.Members[oldlead].Etcd.Name - var initClus []string - for _, u := range clus.Members[oldlead].Etcd.AdvertisePeerURLs { - initClus = append(initClus, fmt.Sprintf("%s=%s", name, u)) - } - clus.Members[oldlead].EtcdOnSnapshotRestore.InitialCluster = strings.Join(initClus, ",") - - clus.lg.Info( - "restore snapshot and restart from snapshot request START", - zap.String("target-endpoint", clus.Members[oldlead].EtcdClientEndpoint), - zap.Strings("initial-cluster", initClus), - ) - err := clus.sendOp(oldlead, rpcpb.Operation_RESTORE_RESTART_FROM_SNAPSHOT) - clus.lg.Info( - "restore snapshot and restart from snapshot request END", - zap.String("target-endpoint", clus.Members[oldlead].EtcdClientEndpoint), - zap.Strings("initial-cluster", initClus), - zap.Error(err), - ) - if err != nil { - return err - } - - leaderc, err := clus.Members[oldlead].CreateEtcdClient() - if err != nil { - return err - } - defer leaderc.Close() - - // 7. Add another member to establish 2-node cluster. - // 8. Add another member to establish 3-node cluster. - // 9. Add more if any. 
- idxs := make([]int, 0, len(c.injected)) - for idx := range c.injected { - idxs = append(idxs, idx) - } - clus.lg.Info("member add START", zap.Int("members-to-add", len(idxs))) - for i, idx := range idxs { - clus.lg.Info( - "member add request SENT", - zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint), - zap.Strings("peer-urls", clus.Members[idx].Etcd.AdvertisePeerURLs), - ) - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - _, err := leaderc.MemberAdd(ctx, clus.Members[idx].Etcd.AdvertisePeerURLs) - cancel() - clus.lg.Info( - "member add request DONE", - zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint), - zap.Strings("peer-urls", clus.Members[idx].Etcd.AdvertisePeerURLs), - zap.Error(err), - ) - if err != nil { - return err - } - - // start the added(new) member with fresh data - clus.Members[idx].EtcdOnSnapshotRestore = clus.Members[idx].Etcd - clus.Members[idx].EtcdOnSnapshotRestore.InitialClusterState = "existing" - name := clus.Members[idx].Etcd.Name - for _, u := range clus.Members[idx].Etcd.AdvertisePeerURLs { - initClus = append(initClus, fmt.Sprintf("%s=%s", name, u)) - } - clus.Members[idx].EtcdOnSnapshotRestore.InitialCluster = strings.Join(initClus, ",") - clus.lg.Info( - "restart from snapshot request SENT", - zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint), - zap.Strings("initial-cluster", initClus), - ) - err = clus.sendOp(idx, rpcpb.Operation_RESTART_FROM_SNAPSHOT) - clus.lg.Info( - "restart from snapshot request DONE", - zap.String("target-endpoint", clus.Members[idx].EtcdClientEndpoint), - zap.Strings("initial-cluster", initClus), - zap.Error(err), - ) - if err != nil { - return err - } - - if i != len(c.injected)-1 { - // wait until membership reconfiguration entry gets applied - // TODO: test concurrent member add - dur := 5 * clus.Members[idx].ElectionTimeout() - clus.lg.Info( - "waiting after restart from snapshot request", - zap.Int("i", i), - zap.Int("idx", 
idx), - zap.Duration("sleep", dur), - ) - time.Sleep(dur) - } else { - clus.lg.Info( - "restart from snapshot request ALL END", - zap.Int("i", i), - zap.Int("idx", idx), - ) - } - } - return nil -} - -func (c *fetchSnapshotCaseQuorum) Desc() string { - if c.desc != "" { - return c.desc - } - return c.rpcpbCase.String() -} - -func (c *fetchSnapshotCaseQuorum) TestCase() rpcpb.Case { - return c.rpcpbCase -} - -func new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus *Cluster) Case { - c := &fetchSnapshotCaseQuorum{ - rpcpbCase: rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH, - injected: make(map[int]struct{}), - snapshotted: -1, - } - // simulate real life; machine replacements may happen - // after some time since disaster - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} diff --git a/tests/functional/tester/case_sigterm.go b/tests/functional/tester/case_sigterm.go deleted file mode 100644 index 49b20a000af..00000000000 --- a/tests/functional/tester/case_sigterm.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import "go.etcd.io/etcd/tests/v3/functional/rpcpb" - -func inject_SIGTERM_ETCD(clus *Cluster, idx int) error { - return clus.sendOp(idx, rpcpb.Operation_SIGTERM_ETCD) -} - -func recover_SIGTERM_ETCD(clus *Cluster, idx int) error { - return clus.sendOp(idx, rpcpb.Operation_RESTART_ETCD) -} - -func new_Case_SIGTERM_ONE_FOLLOWER(clus *Cluster) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_SIGTERM_ONE_FOLLOWER, - injectMember: inject_SIGTERM_ETCD, - recoverMember: recover_SIGTERM_ETCD, - } - c := &caseFollower{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case { - return &caseUntilSnapshot{ - rpcpbCase: rpcpb.Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT, - Case: new_Case_SIGTERM_ONE_FOLLOWER(clus), - } -} - -func new_Case_SIGTERM_LEADER(clus *Cluster) Case { - cc := caseByFunc{ - rpcpbCase: rpcpb.Case_SIGTERM_LEADER, - injectMember: inject_SIGTERM_ETCD, - recoverMember: recover_SIGTERM_ETCD, - } - c := &caseLeader{cc, -1, -1} - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus *Cluster) Case { - return &caseUntilSnapshot{ - rpcpbCase: rpcpb.Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT, - Case: new_Case_SIGTERM_LEADER(clus), - } -} - -func new_Case_SIGTERM_QUORUM(clus *Cluster) Case { - c := &caseQuorum{ - caseByFunc: caseByFunc{ - rpcpbCase: rpcpb.Case_SIGTERM_QUORUM, - injectMember: inject_SIGTERM_ETCD, - recoverMember: recover_SIGTERM_ETCD, - }, - injected: make(map[int]struct{}), - } - return &caseDelay{ - Case: c, - delayDuration: clus.GetCaseDelayDuration(), - } -} - -func new_Case_SIGTERM_ALL(clus *Cluster) Case { - c := &caseAll{ - rpcpbCase: rpcpb.Case_SIGTERM_ALL, - injectMember: inject_SIGTERM_ETCD, - recoverMember: recover_SIGTERM_ETCD, - } - return &caseDelay{ - Case: c, - delayDuration: 
clus.GetCaseDelayDuration(), - } -} diff --git a/tests/functional/tester/checker.go b/tests/functional/tester/checker.go deleted file mode 100644 index f6f21761647..00000000000 --- a/tests/functional/tester/checker.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import "go.etcd.io/etcd/tests/v3/functional/rpcpb" - -// Checker checks cluster consistency. -type Checker interface { - // Type returns the checker type. - Type() rpcpb.Checker - // EtcdClientEndpoints returns the client endpoints of - // all checker target nodes.. - EtcdClientEndpoints() []string - // Check returns an error if the system fails a consistency check. - Check() error -} diff --git a/tests/functional/tester/checker_kv_hash.go b/tests/functional/tester/checker_kv_hash.go deleted file mode 100644 index cd42f727449..00000000000 --- a/tests/functional/tester/checker_kv_hash.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "fmt" - "time" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -const retries = 7 - -type kvHashChecker struct { - ctype rpcpb.Checker - clus *Cluster -} - -func newKVHashChecker(clus *Cluster) Checker { - return &kvHashChecker{ - ctype: rpcpb.Checker_KV_HASH, - clus: clus, - } -} - -func (hc *kvHashChecker) checkRevAndHashes() (err error) { - var ( - revs map[string]int64 - hashes map[string]int64 - ) - // retries in case of transient failure or etcd cluster has not stablized yet. - for i := 0; i < retries; i++ { - revs, hashes, err = hc.clus.getRevisionHash() - if err != nil { - hc.clus.lg.Warn( - "failed to get revision and hash", - zap.Int("retries", i), - zap.Error(err), - ) - } else { - sameRev := getSameValue(revs) - sameHashes := getSameValue(hashes) - if sameRev && sameHashes { - return nil - } - hc.clus.lg.Warn( - "retrying; etcd cluster is not stable", - zap.Int("retries", i), - zap.Bool("same-revisions", sameRev), - zap.Bool("same-hashes", sameHashes), - zap.String("revisions", fmt.Sprintf("%+v", revs)), - zap.String("hashes", fmt.Sprintf("%+v", hashes)), - ) - } - time.Sleep(time.Second) - } - - if err != nil { - return fmt.Errorf("failed revision and hash check (%v)", err) - } - - return fmt.Errorf("etcd cluster is not stable: [revisions: %v] and [hashes: %v]", revs, hashes) -} - -func (hc *kvHashChecker) Type() rpcpb.Checker { - return hc.ctype -} - -func (hc *kvHashChecker) EtcdClientEndpoints() []string { - return hc.clus.EtcdClientEndpoints() -} - -func (hc *kvHashChecker) Check() error { - return hc.checkRevAndHashes() -} diff --git a/tests/functional/tester/checker_lease_expire.go b/tests/functional/tester/checker_lease_expire.go deleted file mode 100644 index 84b7eb744cc..00000000000 --- a/tests/functional/tester/checker_lease_expire.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 
2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "context" - "fmt" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type leaseExpireChecker struct { - ctype rpcpb.Checker - lg *zap.Logger - m *rpcpb.Member - ls *leaseStresser - cli *clientv3.Client -} - -func newLeaseExpireChecker(ls *leaseStresser) Checker { - return &leaseExpireChecker{ - ctype: rpcpb.Checker_LEASE_EXPIRE, - lg: ls.lg, - m: ls.m, - ls: ls, - } -} - -func (lc *leaseExpireChecker) Type() rpcpb.Checker { - return lc.ctype -} - -func (lc *leaseExpireChecker) EtcdClientEndpoints() []string { - return []string{lc.m.EtcdClientEndpoint} -} - -func (lc *leaseExpireChecker) Check() error { - if lc.ls == nil { - return nil - } - if lc.ls != nil && - (lc.ls.revokedLeases == nil || - lc.ls.aliveLeases == nil || - lc.ls.shortLivedLeases == nil) { - return nil - } - - cli, err := lc.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(time.Second)) - if err != nil { - return fmt.Errorf("%v (%q)", err, lc.m.EtcdClientEndpoint) - } - defer func() { - if cli != nil { - cli.Close() - } - }() - lc.cli = cli - - if err := check(lc.lg, lc.cli, true, lc.ls.revokedLeases.leases); err != nil { - return err - } - if err := check(lc.lg, lc.cli, false, lc.ls.aliveLeases.leases); err != nil { - return err - } - - return 
lc.checkShortLivedLeases() -} - -const leaseExpireCheckerTimeout = 10 * time.Second - -// checkShortLivedLeases ensures leases expire. -func (lc *leaseExpireChecker) checkShortLivedLeases() error { - ctx, cancel := context.WithTimeout(context.Background(), leaseExpireCheckerTimeout) - errc := make(chan error) - defer cancel() - for leaseID := range lc.ls.shortLivedLeases.leases { - go func(id int64) { - errc <- lc.checkShortLivedLease(ctx, id) - }(leaseID) - } - - var errs []error - for range lc.ls.shortLivedLeases.leases { - if err := <-errc; err != nil { - errs = append(errs, err) - } - } - return errsToError(errs) -} - -func (lc *leaseExpireChecker) checkShortLivedLease(ctx context.Context, leaseID int64) (err error) { - // retry in case of transient failure or lease is expired but not yet revoked due to the fact that etcd cluster didn't have enought time to delete it. - var resp *clientv3.LeaseTimeToLiveResponse - for i := 0; i < retries; i++ { - resp, err = getLeaseByID(ctx, lc.cli, leaseID) - // lease not found, for ~v3.1 compatibilities, check ErrLeaseNotFound - if (err == nil && resp.TTL == -1) || (err != nil && rpctypes.Error(err) == rpctypes.ErrLeaseNotFound) { - return nil - } - if err != nil { - lc.lg.Debug( - "retrying; Lease TimeToLive failed", - zap.Int("retries", i), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Error(err), - ) - continue - } - if resp.TTL > 0 { - dur := time.Duration(resp.TTL) * time.Second - lc.lg.Debug( - "lease has not been expired, wait until expire", - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Int64("ttl", resp.TTL), - zap.Duration("wait-duration", dur), - ) - time.Sleep(dur) - } else { - lc.lg.Debug( - "lease expired but not yet revoked", - zap.Int("retries", i), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Int64("ttl", resp.TTL), - zap.Duration("wait-duration", time.Second), - ) - time.Sleep(time.Second) - } - if err = checkLease(ctx, lc.lg, lc.cli, false, leaseID); err 
!= nil { - continue - } - return nil - } - return err -} - -func checkLease(ctx context.Context, lg *zap.Logger, cli *clientv3.Client, expired bool, leaseID int64) error { - keysExpired, err := hasKeysAttachedToLeaseExpired(ctx, lg, cli, leaseID) - if err != nil { - lg.Warn( - "hasKeysAttachedToLeaseExpired failed", - zap.Any("endpoint", cli.Endpoints()), - zap.Error(err), - ) - return err - } - leaseExpired, err := hasLeaseExpired(ctx, lg, cli, leaseID) - if err != nil { - lg.Warn( - "hasLeaseExpired failed", - zap.Any("endpoint", cli.Endpoints()), - zap.Error(err), - ) - return err - } - if leaseExpired != keysExpired { - return fmt.Errorf("lease %v expiration mismatch (lease expired=%v, keys expired=%v)", leaseID, leaseExpired, keysExpired) - } - if leaseExpired != expired { - return fmt.Errorf("lease %v expected expired=%v, got %v", leaseID, expired, leaseExpired) - } - return nil -} - -func check(lg *zap.Logger, cli *clientv3.Client, expired bool, leases map[int64]time.Time) error { - ctx, cancel := context.WithTimeout(context.Background(), leaseExpireCheckerTimeout) - defer cancel() - for leaseID := range leases { - if err := checkLease(ctx, lg, cli, expired, leaseID); err != nil { - return err - } - } - return nil -} - -// TODO: handle failures from "grpc.WaitForReady(true)" -func getLeaseByID(ctx context.Context, cli *clientv3.Client, leaseID int64) (*clientv3.LeaseTimeToLiveResponse, error) { - return cli.TimeToLive( - ctx, - clientv3.LeaseID(leaseID), - clientv3.WithAttachedKeys(), - ) -} - -func hasLeaseExpired(ctx context.Context, lg *zap.Logger, cli *clientv3.Client, leaseID int64) (bool, error) { - // keep retrying until lease's state is known or ctx is being canceled - for ctx.Err() == nil { - resp, err := getLeaseByID(ctx, cli, leaseID) - if err != nil { - // for ~v3.1 compatibilities - if rpctypes.Error(err) == rpctypes.ErrLeaseNotFound { - return true, nil - } - } else { - return resp.TTL == -1, nil - } - lg.Warn( - "hasLeaseExpired getLeaseByID 
failed", - zap.Any("endpoint", cli.Endpoints()), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Error(err), - ) - } - return false, ctx.Err() -} - -// The keys attached to the lease has the format of "_" where idx is the ordering key creation -// Since the format of keys contains about leaseID, finding keys base on "" prefix -// determines whether the attached keys for a given leaseID has been deleted or not -func hasKeysAttachedToLeaseExpired(ctx context.Context, lg *zap.Logger, cli *clientv3.Client, leaseID int64) (bool, error) { - resp, err := cli.Get(ctx, fmt.Sprintf("%d", leaseID), clientv3.WithPrefix()) - if err != nil { - lg.Warn( - "hasKeysAttachedToLeaseExpired failed", - zap.Any("endpoint", cli.Endpoints()), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Error(err), - ) - return false, err - } - return len(resp.Kvs) == 0, nil -} diff --git a/tests/functional/tester/checker_no_check.go b/tests/functional/tester/checker_no_check.go deleted file mode 100644 index b0aef6e2168..00000000000 --- a/tests/functional/tester/checker_no_check.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import "go.etcd.io/etcd/tests/v3/functional/rpcpb" - -type noCheck struct{} - -func newNoChecker() Checker { return &noCheck{} } -func (nc *noCheck) Type() rpcpb.Checker { return rpcpb.Checker_NO_CHECK } -func (nc *noCheck) EtcdClientEndpoints() []string { return nil } -func (nc *noCheck) Check() error { return nil } diff --git a/tests/functional/tester/checker_runner.go b/tests/functional/tester/checker_runner.go deleted file mode 100644 index 944ecc6a397..00000000000 --- a/tests/functional/tester/checker_runner.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import "go.etcd.io/etcd/tests/v3/functional/rpcpb" - -type runnerChecker struct { - ctype rpcpb.Checker - etcdClientEndpoint string - errc chan error -} - -func newRunnerChecker(ep string, errc chan error) Checker { - return &runnerChecker{ - ctype: rpcpb.Checker_RUNNER, - etcdClientEndpoint: ep, - errc: errc, - } -} - -func (rc *runnerChecker) Type() rpcpb.Checker { - return rc.ctype -} - -func (rc *runnerChecker) EtcdClientEndpoints() []string { - return []string{rc.etcdClientEndpoint} -} - -func (rc *runnerChecker) Check() error { - select { - case err := <-rc.errc: - return err - default: - return nil - } -} diff --git a/tests/functional/tester/checker_short_ttl_lease_expire.go b/tests/functional/tester/checker_short_ttl_lease_expire.go deleted file mode 100644 index faa6137b29e..00000000000 --- a/tests/functional/tester/checker_short_ttl_lease_expire.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import ( - "fmt" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type shortTTLLeaseExpireChecker struct { - ctype rpcpb.Checker - lg *zap.Logger - m *rpcpb.Member - ls *leaseStresser - cli *clientv3.Client -} - -func newShortTTLLeaseExpireChecker(ls *leaseStresser) Checker { - return &shortTTLLeaseExpireChecker{ - ctype: rpcpb.Checker_SHORT_TTL_LEASE_EXPIRE, - lg: ls.lg, - m: ls.m, - ls: ls, - } -} - -func (lc *shortTTLLeaseExpireChecker) Type() rpcpb.Checker { - return lc.ctype -} - -func (lc *shortTTLLeaseExpireChecker) EtcdClientEndpoints() []string { - return []string{lc.m.EtcdClientEndpoint} -} - -func (lc *shortTTLLeaseExpireChecker) Check() error { - if lc.ls == nil { - return nil - } - if lc.ls != nil && lc.ls.alivedLeasesWithShortTTL == nil { - return nil - } - - cli, err := lc.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(time.Second)) - if err != nil { - return fmt.Errorf("%v (%q)", err, lc.m.EtcdClientEndpoint) - } - defer func() { - if cli != nil { - cli.Close() - } - }() - lc.cli = cli - if err := check(lc.lg, lc.cli, false, lc.ls.alivedLeasesWithShortTTL.leases); err != nil { - lc.lg.Error("failed to check alivedLeasesWithShortTTL", zap.Error(err)) - return err - } - lc.lg.Info("check alivedLeasesWithShortTTL succ", zap.Int("num", len(lc.ls.alivedLeasesWithShortTTL.leases))) - return nil -} diff --git a/tests/functional/tester/cluster.go b/tests/functional/tester/cluster.go deleted file mode 100644 index de63309924d..00000000000 --- a/tests/functional/tester/cluster.go +++ /dev/null @@ -1,737 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "context" - "errors" - "fmt" - "io" - "log" - "math/rand" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/debugutil" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" - "golang.org/x/time/rate" - "google.golang.org/grpc" -) - -// Cluster defines tester cluster. -type Cluster struct { - lg *zap.Logger - - agentConns []*grpc.ClientConn - agentClients []rpcpb.TransportClient - agentStreams []rpcpb.Transport_TransportClient - - testerHTTPServer *http.Server - - Members []*rpcpb.Member `yaml:"agent-configs"` - Tester *rpcpb.Tester `yaml:"tester-config"` - - cases []Case - - rateLimiter *rate.Limiter - stresser Stresser - checkers []Checker - - currentRevision int64 - rd int - cs int -} - -var dialOpts = []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithTimeout(5 * time.Second), - grpc.WithBlock(), -} - -// NewCluster creates a cluster from a tester configuration. 
-func NewCluster(lg *zap.Logger, fpath string) (*Cluster, error) { - clus, err := read(lg, fpath) - if err != nil { - return nil, err - } - - clus.agentConns = make([]*grpc.ClientConn, len(clus.Members)) - clus.agentClients = make([]rpcpb.TransportClient, len(clus.Members)) - clus.agentStreams = make([]rpcpb.Transport_TransportClient, len(clus.Members)) - clus.cases = make([]Case, 0) - - lg.Info("creating members") - for i, ap := range clus.Members { - var err error - clus.agentConns[i], err = grpc.Dial(ap.AgentAddr, dialOpts...) - if err != nil { - return nil, fmt.Errorf("cannot dial agent %v: %v", ap.AgentAddr, err) - } - clus.agentClients[i] = rpcpb.NewTransportClient(clus.agentConns[i]) - lg.Info("connected", zap.String("agent-address", ap.AgentAddr)) - - clus.agentStreams[i], err = clus.agentClients[i].Transport(context.Background()) - if err != nil { - return nil, err - } - lg.Info("created stream", zap.String("agent-address", ap.AgentAddr)) - } - - lg.Info("agents configured.") - - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.Handler()) - if clus.Tester.EnablePprof { - for p, h := range debugutil.PProfHandlers() { - mux.Handle(p, h) - } - } - clus.testerHTTPServer = &http.Server{ - Addr: clus.Tester.Addr, - Handler: mux, - ErrorLog: log.New(io.Discard, "net/http", 0), - } - go clus.serveTesterServer() - lg.Info("tester server started") - - clus.rateLimiter = rate.NewLimiter( - rate.Limit(int(clus.Tester.StressQPS)), - int(clus.Tester.StressQPS), - ) - - clus.setStresserChecker() - - return clus, nil -} - -// EtcdClientEndpoints returns all etcd client endpoints. 
-func (clus *Cluster) EtcdClientEndpoints() (css []string) { - css = make([]string, len(clus.Members)) - for i := range clus.Members { - css[i] = clus.Members[i].EtcdClientEndpoint - } - return css -} - -func (clus *Cluster) serveTesterServer() { - clus.lg.Info( - "started tester HTTP server", - zap.String("tester-address", clus.Tester.Addr), - ) - err := clus.testerHTTPServer.ListenAndServe() - clus.lg.Info( - "tester HTTP server returned", - zap.String("tester-address", clus.Tester.Addr), - zap.Error(err), - ) - if err != nil && err != http.ErrServerClosed { - clus.lg.Fatal("tester HTTP errored", zap.Error(err)) - } -} - -func (clus *Cluster) updateCases() { - for _, cs := range clus.Tester.Cases { - switch cs { - case "SIGTERM_ONE_FOLLOWER": - clus.cases = append(clus.cases, - new_Case_SIGTERM_ONE_FOLLOWER(clus)) - case "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus)) - case "SIGTERM_LEADER": - clus.cases = append(clus.cases, - new_Case_SIGTERM_LEADER(clus)) - case "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus)) - case "SIGTERM_QUORUM": - clus.cases = append(clus.cases, - new_Case_SIGTERM_QUORUM(clus)) - case "SIGTERM_ALL": - clus.cases = append(clus.cases, - new_Case_SIGTERM_ALL(clus)) - - case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER": - clus.cases = append(clus.cases, - new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER(clus)) - case "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus)) - case "SIGQUIT_AND_REMOVE_LEADER": - clus.cases = append(clus.cases, - new_Case_SIGQUIT_AND_REMOVE_LEADER(clus)) - case "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus)) - case 
"SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH": - clus.cases = append(clus.cases, - new_Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH(clus)) - - case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": - clus.cases = append(clus.cases, - new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER(clus)) - case "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT()) - case "BLACKHOLE_PEER_PORT_TX_RX_LEADER": - clus.cases = append(clus.cases, - new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER(clus)) - case "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT()) - case "BLACKHOLE_PEER_PORT_TX_RX_QUORUM": - clus.cases = append(clus.cases, - new_Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM(clus)) - case "BLACKHOLE_PEER_PORT_TX_RX_ALL": - clus.cases = append(clus.cases, - new_Case_BLACKHOLE_PEER_PORT_TX_RX_ALL(clus)) - - case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, false)) - case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER(clus, true)) - case "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, false)) - case "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT(clus, true)) - case "DELAY_PEER_PORT_TX_RX_LEADER": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, false)) - case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_LEADER(clus, true)) - case 
"DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, false)) - case "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT(clus, true)) - case "DELAY_PEER_PORT_TX_RX_QUORUM": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, false)) - case "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_QUORUM(clus, true)) - case "DELAY_PEER_PORT_TX_RX_ALL": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, false)) - case "RANDOM_DELAY_PEER_PORT_TX_RX_ALL": - clus.cases = append(clus.cases, - new_Case_DELAY_PEER_PORT_TX_RX_ALL(clus, true)) - - case "NO_FAIL_WITH_STRESS": - clus.cases = append(clus.cases, - new_Case_NO_FAIL_WITH_STRESS(clus)) - case "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS": - clus.cases = append(clus.cases, - new_Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS(clus)) - - case "EXTERNAL": - clus.cases = append(clus.cases, - new_Case_EXTERNAL(clus.Tester.ExternalExecPath)) - case "FAILPOINTS": - fpFailures, fperr := failpointFailures(clus) - if len(fpFailures) == 0 { - clus.lg.Info("no failpoints found!", zap.Error(fperr)) - } else { - clus.cases = append(clus.cases, fpFailures...) - } - case "FAILPOINTS_WITH_DISK_IO_LATENCY": - fpFailures, fperr := failpointDiskIOFailures(clus) - if len(fpFailures) == 0 { - clus.lg.Info("no failpoints found!", zap.Error(fperr)) - } else { - clus.cases = append(clus.cases, fpFailures...) - } - } - } -} - -func (clus *Cluster) listCases() (css []string) { - css = make([]string, len(clus.cases)) - for i := range clus.cases { - css[i] = clus.cases[i].Desc() - } - return css -} - -// UpdateDelayLatencyMs updates delay latency with random value -// within election timeout. 
-func (clus *Cluster) UpdateDelayLatencyMs() { - rand.Seed(time.Now().UnixNano()) - clus.Tester.UpdatedDelayLatencyMs = uint32(rand.Int63n(clus.Members[0].Etcd.ElectionTimeoutMs)) - - minLatRv := clus.Tester.DelayLatencyMsRv + clus.Tester.DelayLatencyMsRv/5 - if clus.Tester.UpdatedDelayLatencyMs <= minLatRv { - clus.Tester.UpdatedDelayLatencyMs += minLatRv - } -} - -func (clus *Cluster) setStresserChecker() { - css := &compositeStresser{} - var lss []*leaseStresser - var rss []*runnerStresser - for _, m := range clus.Members { - sss := newStresser(clus, m) - css.stressers = append(css.stressers, &compositeStresser{sss}) - for _, s := range sss { - if v, ok := s.(*leaseStresser); ok { - lss = append(lss, v) - clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint)) - } - if v, ok := s.(*runnerStresser); ok { - rss = append(rss, v) - clus.lg.Info("added lease stresser", zap.String("endpoint", m.EtcdClientEndpoint)) - } - } - } - clus.stresser = css - - for _, cs := range clus.Tester.Checkers { - switch cs { - case "KV_HASH": - clus.checkers = append(clus.checkers, newKVHashChecker(clus)) - - case "LEASE_EXPIRE": - for _, ls := range lss { - clus.checkers = append(clus.checkers, newLeaseExpireChecker(ls)) - } - - case "RUNNER": - for _, rs := range rss { - clus.checkers = append(clus.checkers, newRunnerChecker(rs.etcdClientEndpoint, rs.errc)) - } - - case "NO_CHECK": - clus.checkers = append(clus.checkers, newNoChecker()) - - case "SHORT_TTL_LEASE_EXPIRE": - for _, ls := range lss { - clus.checkers = append(clus.checkers, newShortTTLLeaseExpireChecker(ls)) - } - } - } - clus.lg.Info("updated stressers") -} - -func (clus *Cluster) runCheckers(exceptions ...rpcpb.Checker) (err error) { - defer func() { - if err != nil { - return - } - if err = clus.updateRevision(); err != nil { - clus.lg.Warn( - "updateRevision failed", - zap.Error(err), - ) - return - } - }() - - exs := make(map[rpcpb.Checker]struct{}) - for _, e := range exceptions { - 
exs[e] = struct{}{} - } - for _, chk := range clus.checkers { - clus.lg.Warn( - "consistency check START", - zap.String("checker", chk.Type().String()), - zap.Strings("client-endpoints", chk.EtcdClientEndpoints()), - ) - err = chk.Check() - clus.lg.Warn( - "consistency check END", - zap.String("checker", chk.Type().String()), - zap.Strings("client-endpoints", chk.EtcdClientEndpoints()), - zap.Error(err), - ) - if err != nil { - _, ok := exs[chk.Type()] - if !ok { - return err - } - clus.lg.Warn( - "consistency check SKIP FAIL", - zap.String("checker", chk.Type().String()), - zap.Strings("client-endpoints", chk.EtcdClientEndpoints()), - zap.Error(err), - ) - } - } - return nil -} - -// Send_INITIAL_START_ETCD bootstraps etcd cluster the very first time. -// After this, just continue to call kill/restart. -func (clus *Cluster) Send_INITIAL_START_ETCD() error { - // this is the only time that creates request from scratch - return clus.broadcast(rpcpb.Operation_INITIAL_START_ETCD) -} - -// send_SIGQUIT_ETCD_AND_ARCHIVE_DATA sends "Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA" operation. -func (clus *Cluster) send_SIGQUIT_ETCD_AND_ARCHIVE_DATA() error { - return clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA) -} - -// Send_SIGQUIT_ETCD_AND_REMOVE_DATA sends "Operation_SIGQUIT_ETCD_AND_REMOVE_DATA" operation. -func (clus *Cluster) Send_SIGQUIT_ETCD_AND_REMOVE_DATA() error { - return clus.broadcast(rpcpb.Operation_SIGQUIT_ETCD_AND_REMOVE_DATA) -} - -// send_RESTART_ETCD sends restart operation. 
-func (clus *Cluster) send_RESTART_ETCD() error { - return clus.broadcast(rpcpb.Operation_RESTART_ETCD) -} - -func (clus *Cluster) broadcast(op rpcpb.Operation) error { - var wg sync.WaitGroup - wg.Add(len(clus.agentStreams)) - - errc := make(chan error, len(clus.agentStreams)) - for i := range clus.agentStreams { - go func(idx int, o rpcpb.Operation) { - defer wg.Done() - errc <- clus.sendOp(idx, o) - }(i, op) - } - wg.Wait() - close(errc) - - var errs []string - for err := range errc { - if err == nil { - continue - } - errs = append(errs, err.Error()) - } - - if len(errs) == 0 { - return nil - } - return errors.New(strings.Join(errs, ", ")) -} - -func (clus *Cluster) sendOp(idx int, op rpcpb.Operation) error { - _, err := clus.sendOpWithResp(idx, op) - return err -} - -func (clus *Cluster) sendOpWithResp(idx int, op rpcpb.Operation) (*rpcpb.Response, error) { - // maintain the initial member object - // throughout the test time - req := &rpcpb.Request{ - Operation: op, - Member: clus.Members[idx], - Tester: clus.Tester, - } - - err := clus.agentStreams[idx].Send(req) - clus.lg.Info( - "sent request", - zap.String("operation", op.String()), - zap.String("to", clus.Members[idx].EtcdClientEndpoint), - zap.Error(err), - ) - if err != nil { - return nil, err - } - - resp, err := clus.agentStreams[idx].Recv() - if resp != nil { - clus.lg.Info( - "received response", - zap.String("operation", op.String()), - zap.String("from", clus.Members[idx].EtcdClientEndpoint), - zap.Bool("success", resp.Success), - zap.String("status", resp.Status), - zap.Error(err), - ) - } else { - clus.lg.Info( - "received empty response", - zap.String("operation", op.String()), - zap.String("from", clus.Members[idx].EtcdClientEndpoint), - zap.Error(err), - ) - } - if err != nil { - return nil, err - } - - if !resp.Success { - return nil, errors.New(resp.Status) - } - - m, secure := clus.Members[idx], false - for _, cu := range m.Etcd.AdvertiseClientURLs { - u, perr := url.Parse(cu) - if perr 
!= nil { - return nil, perr - } - if u.Scheme == "https" { // TODO: handle unix - secure = true - } - } - - // store TLS assets from agents/servers onto disk - if secure && (op == rpcpb.Operation_INITIAL_START_ETCD || op == rpcpb.Operation_RESTART_ETCD) { - dirClient := filepath.Join( - clus.Tester.DataDir, - clus.Members[idx].Etcd.Name, - "fixtures", - "client", - ) - if err = fileutil.TouchDirAll(clus.lg, dirClient); err != nil { - return nil, err - } - - clientCertData := []byte(resp.Member.ClientCertData) - if len(clientCertData) == 0 { - return nil, fmt.Errorf("got empty client cert from %q", m.EtcdClientEndpoint) - } - clientCertPath := filepath.Join(dirClient, "cert.pem") - if err = os.WriteFile(clientCertPath, clientCertData, 0644); err != nil { // overwrite if exists - return nil, err - } - resp.Member.ClientCertPath = clientCertPath - clus.lg.Info( - "saved client cert file", - zap.String("path", clientCertPath), - ) - - clientKeyData := []byte(resp.Member.ClientKeyData) - if len(clientKeyData) == 0 { - return nil, fmt.Errorf("got empty client key from %q", m.EtcdClientEndpoint) - } - clientKeyPath := filepath.Join(dirClient, "key.pem") - if err = os.WriteFile(clientKeyPath, clientKeyData, 0644); err != nil { // overwrite if exists - return nil, err - } - resp.Member.ClientKeyPath = clientKeyPath - clus.lg.Info( - "saved client key file", - zap.String("path", clientKeyPath), - ) - - clientTrustedCAData := []byte(resp.Member.ClientTrustedCAData) - if len(clientTrustedCAData) != 0 { - // TODO: disable this when auto TLS is deprecated - clientTrustedCAPath := filepath.Join(dirClient, "ca.pem") - if err = os.WriteFile(clientTrustedCAPath, clientTrustedCAData, 0644); err != nil { // overwrite if exists - return nil, err - } - resp.Member.ClientTrustedCAPath = clientTrustedCAPath - clus.lg.Info( - "saved client trusted CA file", - zap.String("path", clientTrustedCAPath), - ) - } - - // no need to store peer certs for tester clients - - clus.Members[idx] = 
resp.Member - } - - return resp, nil -} - -// WaitHealth ensures all members are healthy -// by writing a test key to etcd cluster. -func (clus *Cluster) WaitHealth() error { - var err error - // wait 60s to check cluster health. - // TODO: set it to a reasonable value. It is set that high because - // follower may use long time to catch up the leader when reboot under - // reasonable workload (https://github.com/etcd-io/etcd/issues/2698) - for i := 0; i < 60; i++ { - for _, m := range clus.Members { - if err = m.WriteHealthKey(); err != nil { - clus.lg.Warn( - "health check FAIL", - zap.Int("retries", i), - zap.String("endpoint", m.EtcdClientEndpoint), - zap.Error(err), - ) - break - } - clus.lg.Info( - "health check PASS", - zap.Int("retries", i), - zap.String("endpoint", m.EtcdClientEndpoint), - ) - } - if err == nil { - clus.lg.Info("health check ALL PASS") - return nil - } - time.Sleep(time.Second) - } - return err -} - -// GetLeader returns the index of leader and error if any. -func (clus *Cluster) GetLeader() (int, error) { - for i, m := range clus.Members { - isLeader, err := m.IsLeader() - if isLeader || err != nil { - return i, err - } - } - return 0, fmt.Errorf("no leader found") -} - -// maxRev returns the maximum revision found on the cluster. 
-func (clus *Cluster) maxRev() (rev int64, err error) { - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - defer cancel() - revc, errc := make(chan int64, len(clus.Members)), make(chan error, len(clus.Members)) - for i := range clus.Members { - go func(m *rpcpb.Member) { - mrev, merr := m.Rev(ctx) - revc <- mrev - errc <- merr - }(clus.Members[i]) - } - for i := 0; i < len(clus.Members); i++ { - if merr := <-errc; merr != nil { - err = merr - } - if mrev := <-revc; mrev > rev { - rev = mrev - } - } - return rev, err -} - -func (clus *Cluster) getRevisionHash() (map[string]int64, map[string]int64, error) { - revs := make(map[string]int64) - hashes := make(map[string]int64) - for _, m := range clus.Members { - rev, hash, err := m.RevHash() - if err != nil { - return nil, nil, err - } - revs[m.EtcdClientEndpoint] = rev - hashes[m.EtcdClientEndpoint] = hash - } - return revs, hashes, nil -} - -func (clus *Cluster) compactKV(rev int64, timeout time.Duration) (err error) { - if rev <= 0 { - return nil - } - - for i, m := range clus.Members { - clus.lg.Info( - "compact START", - zap.String("endpoint", m.EtcdClientEndpoint), - zap.Int64("compact-revision", rev), - zap.Duration("timeout", timeout), - ) - now := time.Now() - cerr := m.Compact(rev, timeout) - succeed := true - if cerr != nil { - if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 { - clus.lg.Info( - "compact error is ignored", - zap.String("endpoint", m.EtcdClientEndpoint), - zap.Int64("compact-revision", rev), - zap.String("expected-error-msg", cerr.Error()), - ) - } else { - clus.lg.Warn( - "compact FAIL", - zap.String("endpoint", m.EtcdClientEndpoint), - zap.Int64("compact-revision", rev), - zap.Error(cerr), - ) - err = cerr - succeed = false - } - } - - if succeed { - clus.lg.Info( - "compact PASS", - zap.String("endpoint", m.EtcdClientEndpoint), - zap.Int64("compact-revision", rev), - zap.Duration("timeout", timeout), - zap.Duration("took", 
time.Since(now)), - ) - } - } - return err -} - -func (clus *Cluster) checkCompact(rev int64) error { - if rev == 0 { - return nil - } - for _, m := range clus.Members { - if err := m.CheckCompact(rev); err != nil { - return err - } - } - return nil -} - -func (clus *Cluster) defrag() error { - for _, m := range clus.Members { - if err := m.Defrag(); err != nil { - clus.lg.Warn( - "defrag FAIL", - zap.String("endpoint", m.EtcdClientEndpoint), - zap.Error(err), - ) - return err - } - clus.lg.Info( - "defrag PASS", - zap.String("endpoint", m.EtcdClientEndpoint), - ) - } - clus.lg.Info( - "defrag ALL PASS", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - ) - return nil -} - -// GetCaseDelayDuration computes failure delay duration. -func (clus *Cluster) GetCaseDelayDuration() time.Duration { - return time.Duration(clus.Tester.CaseDelayMs) * time.Millisecond -} - -// Report reports the number of modified keys. -func (clus *Cluster) Report() int64 { - return clus.stresser.ModifiedKeys() -} diff --git a/tests/functional/tester/cluster_read_config.go b/tests/functional/tester/cluster_read_config.go deleted file mode 100644 index 874ee48af62..00000000000 --- a/tests/functional/tester/cluster_read_config.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import ( - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" - yaml "gopkg.in/yaml.v2" -) - -func read(lg *zap.Logger, fpath string) (*Cluster, error) { - bts, err := os.ReadFile(fpath) - if err != nil { - return nil, err - } - lg.Info("opened configuration file", zap.String("path", fpath)) - - clus := &Cluster{lg: lg} - if err = yaml.Unmarshal(bts, clus); err != nil { - return nil, err - } - - if len(clus.Members) < 3 { - return nil, fmt.Errorf("len(clus.Members) expects at least 3, got %d", len(clus.Members)) - } - - failpointsEnabled := false - for _, c := range clus.Tester.Cases { - if c == rpcpb.Case_FAILPOINTS.String() { - failpointsEnabled = true - break - } - } - - if len(clus.Tester.Cases) == 0 { - return nil, errors.New("cases not found") - } - if clus.Tester.DelayLatencyMs <= clus.Tester.DelayLatencyMsRv*5 { - return nil, fmt.Errorf("delay latency %d ms must be greater than 5x of delay latency random variable %d ms", clus.Tester.DelayLatencyMs, clus.Tester.DelayLatencyMsRv) - } - if clus.Tester.UpdatedDelayLatencyMs == 0 { - clus.Tester.UpdatedDelayLatencyMs = clus.Tester.DelayLatencyMs - } - - for _, v := range clus.Tester.Cases { - if _, ok := rpcpb.Case_value[v]; !ok { - return nil, fmt.Errorf("%q is not defined in 'rpcpb.Case_value'", v) - } - } - - for _, s := range clus.Tester.Stressers { - if _, ok := rpcpb.StresserType_value[s.Type]; !ok { - return nil, fmt.Errorf("unknown 'StresserType' %+v", s) - } - } - - for _, v := range clus.Tester.Checkers { - if _, ok := rpcpb.Checker_value[v]; !ok { - return nil, fmt.Errorf("Checker is unknown; got %q", v) - } - } - - if clus.Tester.StressKeySuffixRangeTxn > 100 { - return nil, fmt.Errorf("StressKeySuffixRangeTxn maximum value is 100, got %v", clus.Tester.StressKeySuffixRangeTxn) - } - if clus.Tester.StressKeyTxnOps > 64 { - return nil, fmt.Errorf("StressKeyTxnOps maximum value is 64, got %v", 
clus.Tester.StressKeyTxnOps) - } - - for i, mem := range clus.Members { - if mem.EtcdExec == "embed" && failpointsEnabled { - return nil, errors.New("EtcdExec 'embed' cannot be run with failpoints enabled") - } - if mem.BaseDir == "" { - return nil, fmt.Errorf("BaseDir cannot be empty (got %q)", mem.BaseDir) - } - if mem.Etcd.Name == "" { - return nil, fmt.Errorf("'--name' cannot be empty (got %+v)", mem) - } - if mem.Etcd.DataDir == "" { - return nil, fmt.Errorf("'--data-dir' cannot be empty (got %+v)", mem) - } - if mem.Etcd.SnapshotCount == 0 { - return nil, fmt.Errorf("'--snapshot-count' cannot be 0 (got %+v)", mem.Etcd.SnapshotCount) - } - if mem.Etcd.DataDir == "" { - return nil, fmt.Errorf("'--data-dir' cannot be empty (got %q)", mem.Etcd.DataDir) - } - if mem.Etcd.WALDir == "" { - clus.Members[i].Etcd.WALDir = filepath.Join(mem.Etcd.DataDir, "member", "wal") - } - - switch mem.Etcd.InitialClusterState { - case "new": - case "existing": - default: - return nil, fmt.Errorf("'--initial-cluster-state' got %q", mem.Etcd.InitialClusterState) - } - - if mem.Etcd.HeartbeatIntervalMs == 0 { - return nil, fmt.Errorf("'--heartbeat-interval' cannot be 0 (got %+v)", mem.Etcd) - } - if mem.Etcd.ElectionTimeoutMs == 0 { - return nil, fmt.Errorf("'--election-timeout' cannot be 0 (got %+v)", mem.Etcd) - } - if int64(clus.Tester.DelayLatencyMs) <= mem.Etcd.ElectionTimeoutMs { - return nil, fmt.Errorf("delay latency %d ms must be greater than election timeout %d ms", clus.Tester.DelayLatencyMs, mem.Etcd.ElectionTimeoutMs) - } - - port := "" - listenClientPorts := make([]string, len(clus.Members)) - for i, u := range mem.Etcd.ListenClientURLs { - if !isValidURL(u) { - return nil, fmt.Errorf("'--listen-client-urls' has valid URL %q", u) - } - listenClientPorts[i], err = getPort(u) - if err != nil { - return nil, fmt.Errorf("'--listen-client-urls' has no port %q", u) - } - } - for i, u := range mem.Etcd.AdvertiseClientURLs { - if !isValidURL(u) { - return nil, 
fmt.Errorf("'--advertise-client-urls' has valid URL %q", u) - } - port, err = getPort(u) - if err != nil { - return nil, fmt.Errorf("'--advertise-client-urls' has no port %q", u) - } - if mem.EtcdClientProxy && listenClientPorts[i] == port { - return nil, fmt.Errorf("clus.Members[%d] requires client port proxy, but advertise port %q conflicts with listener port %q", i, port, listenClientPorts[i]) - } - } - - listenPeerPorts := make([]string, len(clus.Members)) - for i, u := range mem.Etcd.ListenPeerURLs { - if !isValidURL(u) { - return nil, fmt.Errorf("'--listen-peer-urls' has valid URL %q", u) - } - listenPeerPorts[i], err = getPort(u) - if err != nil { - return nil, fmt.Errorf("'--listen-peer-urls' has no port %q", u) - } - } - for j, u := range mem.Etcd.AdvertisePeerURLs { - if !isValidURL(u) { - return nil, fmt.Errorf("'--initial-advertise-peer-urls' has valid URL %q", u) - } - port, err = getPort(u) - if err != nil { - return nil, fmt.Errorf("'--initial-advertise-peer-urls' has no port %q", u) - } - if mem.EtcdPeerProxy && listenPeerPorts[j] == port { - return nil, fmt.Errorf("clus.Members[%d] requires peer port proxy, but advertise port %q conflicts with listener port %q", i, port, listenPeerPorts[j]) - } - } - - if !strings.HasPrefix(mem.Etcd.DataDir, mem.BaseDir) { - return nil, fmt.Errorf("Etcd.DataDir must be prefixed with BaseDir (got %q)", mem.Etcd.DataDir) - } - - // TODO: support separate WALDir that can be handled via failure-archive - if !strings.HasPrefix(mem.Etcd.WALDir, mem.BaseDir) { - return nil, fmt.Errorf("Etcd.WALDir must be prefixed with BaseDir (got %q)", mem.Etcd.WALDir) - } - - // TODO: only support generated certs with TLS generator - // deprecate auto TLS - if mem.Etcd.PeerAutoTLS && mem.Etcd.PeerCertFile != "" { - return nil, fmt.Errorf("Etcd.PeerAutoTLS 'true', but Etcd.PeerCertFile is %q", mem.Etcd.PeerCertFile) - } - if mem.Etcd.PeerAutoTLS && mem.Etcd.PeerKeyFile != "" { - return nil, fmt.Errorf("Etcd.PeerAutoTLS 'true', but 
Etcd.PeerKeyFile is %q", mem.Etcd.PeerKeyFile) - } - if mem.Etcd.PeerAutoTLS && mem.Etcd.PeerTrustedCAFile != "" { - return nil, fmt.Errorf("Etcd.PeerAutoTLS 'true', but Etcd.PeerTrustedCAFile is %q", mem.Etcd.PeerTrustedCAFile) - } - if mem.Etcd.ClientAutoTLS && mem.Etcd.ClientCertFile != "" { - return nil, fmt.Errorf("Etcd.ClientAutoTLS 'true', but Etcd.ClientCertFile is %q", mem.Etcd.ClientCertFile) - } - if mem.Etcd.ClientAutoTLS && mem.Etcd.ClientKeyFile != "" { - return nil, fmt.Errorf("Etcd.ClientAutoTLS 'true', but Etcd.ClientKeyFile is %q", mem.Etcd.ClientKeyFile) - } - if mem.Etcd.ClientAutoTLS && mem.Etcd.ClientTrustedCAFile != "" { - return nil, fmt.Errorf("Etcd.ClientAutoTLS 'true', but Etcd.ClientTrustedCAFile is %q", mem.Etcd.ClientTrustedCAFile) - } - - if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerCertFile == "" { - return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'true', but Etcd.PeerCertFile is %q", mem.Etcd.PeerCertFile) - } - if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerKeyFile == "" { - return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'true', but Etcd.PeerKeyFile is %q", mem.Etcd.PeerCertFile) - } - // only support self-signed certs - if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerTrustedCAFile == "" { - return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'true', but Etcd.PeerTrustedCAFile is %q", mem.Etcd.PeerCertFile) - } - if !mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerCertFile != "" { - return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'false', but Etcd.PeerCertFile is %q", mem.Etcd.PeerCertFile) - } - if !mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerKeyFile != "" { - return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'false', but Etcd.PeerKeyFile is %q", mem.Etcd.PeerCertFile) - } - if !mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerTrustedCAFile != "" { - return nil, fmt.Errorf("Etcd.PeerClientCertAuth 'false', but Etcd.PeerTrustedCAFile is %q", mem.Etcd.PeerTrustedCAFile) - } - if mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerAutoTLS { - return nil, 
fmt.Errorf("Etcd.PeerClientCertAuth and Etcd.PeerAutoTLS cannot be both 'true'") - } - if (mem.Etcd.PeerCertFile == "") != (mem.Etcd.PeerKeyFile == "") { - return nil, fmt.Errorf("both Etcd.PeerCertFile %q and Etcd.PeerKeyFile %q must be either empty or non-empty", mem.Etcd.PeerCertFile, mem.Etcd.PeerKeyFile) - } - if mem.Etcd.ClientCertAuth && mem.Etcd.ClientAutoTLS { - return nil, fmt.Errorf("Etcd.ClientCertAuth and Etcd.ClientAutoTLS cannot be both 'true'") - } - if mem.Etcd.ClientCertAuth && mem.Etcd.ClientCertFile == "" { - return nil, fmt.Errorf("Etcd.ClientCertAuth 'true', but Etcd.ClientCertFile is %q", mem.Etcd.PeerCertFile) - } - if mem.Etcd.ClientCertAuth && mem.Etcd.ClientKeyFile == "" { - return nil, fmt.Errorf("Etcd.ClientCertAuth 'true', but Etcd.ClientKeyFile is %q", mem.Etcd.PeerCertFile) - } - if mem.Etcd.ClientCertAuth && mem.Etcd.ClientTrustedCAFile == "" { - return nil, fmt.Errorf("Etcd.ClientCertAuth 'true', but Etcd.ClientTrustedCAFile is %q", mem.Etcd.ClientTrustedCAFile) - } - if !mem.Etcd.ClientCertAuth && mem.Etcd.ClientCertFile != "" { - return nil, fmt.Errorf("Etcd.ClientCertAuth 'false', but Etcd.ClientCertFile is %q", mem.Etcd.PeerCertFile) - } - if !mem.Etcd.ClientCertAuth && mem.Etcd.ClientKeyFile != "" { - return nil, fmt.Errorf("Etcd.ClientCertAuth 'false', but Etcd.ClientKeyFile is %q", mem.Etcd.PeerCertFile) - } - if !mem.Etcd.ClientCertAuth && mem.Etcd.ClientTrustedCAFile != "" { - return nil, fmt.Errorf("Etcd.ClientCertAuth 'false', but Etcd.ClientTrustedCAFile is %q", mem.Etcd.PeerCertFile) - } - if (mem.Etcd.ClientCertFile == "") != (mem.Etcd.ClientKeyFile == "") { - return nil, fmt.Errorf("both Etcd.ClientCertFile %q and Etcd.ClientKeyFile %q must be either empty or non-empty", mem.Etcd.ClientCertFile, mem.Etcd.ClientKeyFile) - } - - peerTLS := mem.Etcd.PeerAutoTLS || - (mem.Etcd.PeerClientCertAuth && mem.Etcd.PeerCertFile != "" && mem.Etcd.PeerKeyFile != "" && mem.Etcd.PeerTrustedCAFile != "") - if peerTLS { - for _, cu := 
range mem.Etcd.ListenPeerURLs { - var u *url.URL - u, err = url.Parse(cu) - if err != nil { - return nil, err - } - if u.Scheme != "https" { // TODO: support unix - return nil, fmt.Errorf("peer TLS is enabled with wrong scheme %q", cu) - } - } - for _, cu := range mem.Etcd.AdvertisePeerURLs { - var u *url.URL - u, err = url.Parse(cu) - if err != nil { - return nil, err - } - if u.Scheme != "https" { // TODO: support unix - return nil, fmt.Errorf("peer TLS is enabled with wrong scheme %q", cu) - } - } - clus.Members[i].PeerCertPath = mem.Etcd.PeerCertFile - if mem.Etcd.PeerCertFile != "" { - var data []byte - data, err = os.ReadFile(mem.Etcd.PeerCertFile) - if err != nil { - return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.PeerCertFile, err) - } - clus.Members[i].PeerCertData = string(data) - } - clus.Members[i].PeerKeyPath = mem.Etcd.PeerKeyFile - if mem.Etcd.PeerKeyFile != "" { - var data []byte - data, err = os.ReadFile(mem.Etcd.PeerKeyFile) - if err != nil { - return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.PeerKeyFile, err) - } - clus.Members[i].PeerCertData = string(data) - } - clus.Members[i].PeerTrustedCAPath = mem.Etcd.PeerTrustedCAFile - if mem.Etcd.PeerTrustedCAFile != "" { - var data []byte - data, err = os.ReadFile(mem.Etcd.PeerTrustedCAFile) - if err != nil { - return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.PeerTrustedCAFile, err) - } - clus.Members[i].PeerCertData = string(data) - } - } - - clientTLS := mem.Etcd.ClientAutoTLS || - (mem.Etcd.ClientCertAuth && mem.Etcd.ClientCertFile != "" && mem.Etcd.ClientKeyFile != "" && mem.Etcd.ClientTrustedCAFile != "") - if clientTLS { - for _, cu := range mem.Etcd.ListenClientURLs { - var u *url.URL - u, err = url.Parse(cu) - if err != nil { - return nil, err - } - if u.Scheme != "https" { // TODO: support unix - return nil, fmt.Errorf("client TLS is enabled with wrong scheme %q", cu) - } - } - for _, cu := range mem.Etcd.AdvertiseClientURLs { - var u *url.URL - u, err = 
url.Parse(cu) - if err != nil { - return nil, err - } - if u.Scheme != "https" { // TODO: support unix - return nil, fmt.Errorf("client TLS is enabled with wrong scheme %q", cu) - } - } - clus.Members[i].ClientCertPath = mem.Etcd.ClientCertFile - if mem.Etcd.ClientCertFile != "" { - var data []byte - data, err = os.ReadFile(mem.Etcd.ClientCertFile) - if err != nil { - return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.ClientCertFile, err) - } - clus.Members[i].ClientCertData = string(data) - } - clus.Members[i].ClientKeyPath = mem.Etcd.ClientKeyFile - if mem.Etcd.ClientKeyFile != "" { - var data []byte - data, err = os.ReadFile(mem.Etcd.ClientKeyFile) - if err != nil { - return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.ClientKeyFile, err) - } - clus.Members[i].ClientCertData = string(data) - } - clus.Members[i].ClientTrustedCAPath = mem.Etcd.ClientTrustedCAFile - if mem.Etcd.ClientTrustedCAFile != "" { - var data []byte - data, err = os.ReadFile(mem.Etcd.ClientTrustedCAFile) - if err != nil { - return nil, fmt.Errorf("failed to read %q (%v)", mem.Etcd.ClientTrustedCAFile, err) - } - clus.Members[i].ClientCertData = string(data) - } - } - - if len(mem.Etcd.LogOutputs) == 0 { - return nil, fmt.Errorf("mem.Etcd.LogOutputs cannot be empty") - } - for _, v := range mem.Etcd.LogOutputs { - switch v { - case "stderr", "stdout", "/dev/null", "default": - default: - if !strings.HasPrefix(v, mem.BaseDir) { - return nil, fmt.Errorf("LogOutput %q must be prefixed with BaseDir %q", v, mem.BaseDir) - } - } - } - } - - return clus, err -} diff --git a/tests/functional/tester/cluster_run.go b/tests/functional/tester/cluster_run.go deleted file mode 100644 index 7b33c39e54b..00000000000 --- a/tests/functional/tester/cluster_run.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "fmt" - "os" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -// compactQPS is rough number of compact requests per second. -// Previous tests showed etcd can compact about 60,000 entries per second. -const compactQPS = 50000 - -// Run starts tester. -func (clus *Cluster) Run(t *testing.T) error { - defer printReport() - - // updateCases must be executed after etcd is started, because the FAILPOINTS case - // needs to obtain all the failpoints from the etcd member. 
- clus.updateCases() - - if err := fileutil.TouchDirAll(clus.lg, clus.Tester.DataDir); err != nil { - clus.lg.Panic( - "failed to create test data directory", - zap.String("dir", clus.Tester.DataDir), - zap.Error(err), - ) - } - - var ( - preModifiedKey int64 - err error - ) - for round := 0; round < int(clus.Tester.RoundLimit) || clus.Tester.RoundLimit == -1; round++ { - t.Run(fmt.Sprintf("round-%d", round), func(t *testing.T) { - preModifiedKey, err = clus.doRoundAndCompact(t, round, preModifiedKey) - }) - - if err != nil { - clus.failed(t, err) - return err - } - - if round > 0 && round%500 == 0 { // every 500 rounds - t.Logf("Defragmenting in round: %v", round) - if err := clus.defrag(); err != nil { - clus.failed(t, err) - return err - } - } - } - - clus.lg.Info( - "functional-tester PASS", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - ) - return nil -} - -func (clus *Cluster) doRoundAndCompact(t *testing.T, round int, preModifiedKey int64) (postModifiedKey int64, err error) { - roundTotalCounter.Inc() - clus.rd = round - - if err = clus.doRound(t); err != nil { - clus.failed(t, fmt.Errorf("doRound FAIL: %w", err)) - return - } - - // -1 so that logPrefix doesn't print out 'case' - clus.cs = -1 - - revToCompact := max(0, clus.currentRevision-10000) - currentModifiedKey := clus.stresser.ModifiedKeys() - modifiedKey := currentModifiedKey - preModifiedKey - timeout := 10 * time.Second - timeout += time.Duration(modifiedKey/compactQPS) * time.Second - clus.lg.Info( - "compact START", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.Duration("timeout", timeout), - ) - if err = clus.compact(revToCompact, timeout); err != nil { - clus.failed(t, fmt.Errorf("compact FAIL: %w", err)) - } else { - postModifiedKey = currentModifiedKey - } - return -} - -func (clus *Cluster) doRound(t *testing.T) error { - if clus.Tester.CaseShuffle { - clus.shuffleCases() - } - - 
roundNow := time.Now() - clus.lg.Info( - "round START", - zap.Int("round", clus.rd), - zap.Int("case-total", len(clus.cases)), - zap.Strings("cases", clus.listCases()), - ) - for i, fa := range clus.cases { - clus.cs = i - t.Run(fmt.Sprintf("%v_%s", i, fa.TestCase()), - func(t *testing.T) { - clus.doTestCase(t, fa) - }) - } - - clus.lg.Info( - "round ALL PASS", - zap.Int("round", clus.rd), - zap.Strings("cases", clus.listCases()), - zap.Int("case-total", len(clus.cases)), - zap.Duration("took", time.Since(roundNow)), - ) - return nil -} - -func (clus *Cluster) doTestCase(t *testing.T, fa Case) { - caseTotal[fa.Desc()]++ - caseTotalCounter.WithLabelValues(fa.Desc()).Inc() - - caseNow := time.Now() - clus.lg.Info( - "case START", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - ) - - clus.lg.Info("wait health before injecting failures") - if err := clus.WaitHealth(); err != nil { - clus.failed(t, fmt.Errorf("wait full health error before starting test case: %w", err)) - } - - stressStarted := false - fcase := fa.TestCase() - if fcase != rpcpb.Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS { - clus.lg.Info( - "stress START", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - ) - if err := clus.stresser.Stress(); err != nil { - clus.failed(t, fmt.Errorf("start stresser error: %w", err)) - } - stressStarted = true - } - - clus.lg.Info( - "inject START", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - ) - if err := fa.Inject(clus); err != nil { - clus.failed(t, fmt.Errorf("injection error: %w", err)) - } - - // if run local, recovering server may conflict - // with stressing client ports - // TODO: use unix for local tests - clus.lg.Info( - "recover START", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - 
zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - ) - if err := fa.Recover(clus); err != nil { - clus.failed(t, fmt.Errorf("recovery error: %w", err)) - } - - if stressStarted { - clus.lg.Info( - "stress PAUSE", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - ) - ems := clus.stresser.Pause() - if fcase == rpcpb.Case_NO_FAIL_WITH_STRESS && len(ems) > 0 { - ess := make([]string, 0, len(ems)) - cnt := 0 - for k, v := range ems { - ess = append(ess, fmt.Sprintf("%s (count: %d)", k, v)) - cnt += v - } - clus.lg.Warn( - "expected no errors", - zap.String("desc", fa.Desc()), - zap.Strings("errors", ess), - ) - - // with network delay, some ongoing requests may fail - // only return error, if more than 30% of QPS requests fail - if cnt > int(float64(clus.Tester.StressQPS)*0.3) { - clus.failed(t, fmt.Errorf("expected no error in %q, got %q", fcase.String(), ess)) - } - } - } - - clus.lg.Info( - "health check START", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - ) - if err := clus.WaitHealth(); err != nil { - clus.failed(t, fmt.Errorf("wait full health error after test finished: %w", err)) - } - - var checkerFailExceptions []rpcpb.Checker - switch fcase { - case rpcpb.Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH: - // TODO: restore from snapshot - checkerFailExceptions = append(checkerFailExceptions, rpcpb.Checker_LEASE_EXPIRE) - } - - clus.lg.Info( - "consistency check START", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - ) - if err := clus.runCheckers(checkerFailExceptions...); err != nil { - clus.failed(t, fmt.Errorf("consistency check error: %w", err)) - } - clus.lg.Info( - "consistency check PASS", - zap.Int("round", clus.rd), - zap.Int("case", clus.cs), - 
zap.Int("case-total", len(clus.cases)), - zap.String("desc", fa.Desc()), - zap.Duration("took", time.Since(caseNow)), - ) -} - -func (clus *Cluster) updateRevision() error { - revs, _, err := clus.getRevisionHash() - for _, rev := range revs { - clus.currentRevision = rev - break // just need get one of the current revisions - } - - clus.lg.Info( - "updated current revision", - zap.Int64("current-revision", clus.currentRevision), - ) - return err -} - -func (clus *Cluster) compact(rev int64, timeout time.Duration) (err error) { - if err = clus.compactKV(rev, timeout); err != nil { - clus.lg.Warn( - "compact FAIL", - zap.Int64("current-revision", clus.currentRevision), - zap.Int64("compact-revision", rev), - zap.Error(err), - ) - return err - } - clus.lg.Info( - "compact DONE", - zap.Int64("current-revision", clus.currentRevision), - zap.Int64("compact-revision", rev), - ) - - if err = clus.checkCompact(rev); err != nil { - clus.lg.Warn( - "check compact FAIL", - zap.Int64("current-revision", clus.currentRevision), - zap.Int64("compact-revision", rev), - zap.Error(err), - ) - return err - } - clus.lg.Info( - "check compact DONE", - zap.Int64("current-revision", clus.currentRevision), - zap.Int64("compact-revision", rev), - ) - - return nil -} - -func (clus *Cluster) failed(t *testing.T, err error) { - clus.lg.Error( - "functional-tester FAIL", - zap.Int("round", clus.rd), - zap.String("case-name", t.Name()), - zap.Int("case-number", clus.cs), - zap.Int("case-total", len(clus.cases)), - zap.Error(err), - ) - - os.Exit(2) -} diff --git a/tests/functional/tester/cluster_shuffle.go b/tests/functional/tester/cluster_shuffle.go deleted file mode 100644 index 25077202d9b..00000000000 --- a/tests/functional/tester/cluster_shuffle.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "math/rand" - "time" - - "go.uber.org/zap" -) - -func (clus *Cluster) shuffleCases() { - rand.Seed(time.Now().UnixNano()) - offset := rand.Intn(1000) - n := len(clus.cases) - cp := coprime(n) - - css := make([]Case, n) - for i := 0; i < n; i++ { - css[i] = clus.cases[(cp*i+offset)%n] - } - clus.cases = css - clus.lg.Info("shuffled test failure cases", zap.Int("total", n)) -} - -/* -x and y of GCD 1 are coprime to each other - -x1 = ( coprime of n * idx1 + offset ) % n -x2 = ( coprime of n * idx2 + offset ) % n -(x2 - x1) = coprime of n * (idx2 - idx1) % n - - = (idx2 - idx1) = 1 - -Consecutive x's are guaranteed to be distinct -*/ -func coprime(n int) int { - coprime := 1 - for i := n / 2; i < n; i++ { - if gcd(i, n) == 1 { - coprime = i - break - } - } - return coprime -} - -func gcd(x, y int) int { - if y == 0 { - return x - } - return gcd(y, x%y) -} diff --git a/tests/functional/tester/cluster_test.go b/tests/functional/tester/cluster_test.go deleted file mode 100644 index ec65b35079c..00000000000 --- a/tests/functional/tester/cluster_test.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "reflect" - "sort" - "testing" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" -) - -func Test_read(t *testing.T) { - exp := &Cluster{ - Members: []*rpcpb.Member{ - { - EtcdExec: "./bin/etcd", - AgentAddr: "127.0.0.1:19027", - FailpointHTTPAddr: "http://127.0.0.1:7381", - BaseDir: "/tmp/etcd-functional-1", - EtcdClientProxy: false, - EtcdPeerProxy: true, - EtcdClientEndpoint: "127.0.0.1:1379", - Etcd: &rpcpb.Etcd{ - Name: "s1", - DataDir: "/tmp/etcd-functional-1/etcd.data", - WALDir: "/tmp/etcd-functional-1/etcd.data/member/wal", - HeartbeatIntervalMs: 100, - ElectionTimeoutMs: 1000, - ListenClientURLs: []string{"https://127.0.0.1:1379"}, - AdvertiseClientURLs: []string{"https://127.0.0.1:1379"}, - ClientAutoTLS: true, - ClientCertAuth: false, - ClientCertFile: "", - ClientKeyFile: "", - ClientTrustedCAFile: "", - ListenPeerURLs: []string{"https://127.0.0.1:1380"}, - AdvertisePeerURLs: []string{"https://127.0.0.1:1381"}, - PeerAutoTLS: true, - PeerClientCertAuth: false, - PeerCertFile: "", - PeerKeyFile: "", - PeerTrustedCAFile: "", - InitialCluster: "s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381", - InitialClusterState: "new", - InitialClusterToken: "tkn", - SnapshotCount: 2000, - QuotaBackendBytes: 10740000000, - PreVote: true, - InitialCorruptCheck: true, - Logger: "zap", - LogOutputs: []string{"/tmp/etcd-functional-1/etcd.log"}, - LogLevel: "info", - SocketReuseAddress: true, - SocketReusePort: true, - }, - ClientCertData: "", - ClientCertPath: "", - 
ClientKeyData: "", - ClientKeyPath: "", - ClientTrustedCAData: "", - ClientTrustedCAPath: "", - PeerCertData: "", - PeerCertPath: "", - PeerKeyData: "", - PeerKeyPath: "", - PeerTrustedCAData: "", - PeerTrustedCAPath: "", - SnapshotPath: "/tmp/etcd-functional-1.snapshot.db", - }, - { - EtcdExec: "./bin/etcd", - AgentAddr: "127.0.0.1:29027", - FailpointHTTPAddr: "http://127.0.0.1:7382", - BaseDir: "/tmp/etcd-functional-2", - EtcdClientProxy: false, - EtcdPeerProxy: true, - EtcdClientEndpoint: "127.0.0.1:2379", - Etcd: &rpcpb.Etcd{ - Name: "s2", - DataDir: "/tmp/etcd-functional-2/etcd.data", - WALDir: "/tmp/etcd-functional-2/etcd.data/member/wal", - HeartbeatIntervalMs: 100, - ElectionTimeoutMs: 1000, - ListenClientURLs: []string{"https://127.0.0.1:2379"}, - AdvertiseClientURLs: []string{"https://127.0.0.1:2379"}, - ClientAutoTLS: true, - ClientCertAuth: false, - ClientCertFile: "", - ClientKeyFile: "", - ClientTrustedCAFile: "", - ListenPeerURLs: []string{"https://127.0.0.1:2380"}, - AdvertisePeerURLs: []string{"https://127.0.0.1:2381"}, - PeerAutoTLS: true, - PeerClientCertAuth: false, - PeerCertFile: "", - PeerKeyFile: "", - PeerTrustedCAFile: "", - InitialCluster: "s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381", - InitialClusterState: "new", - InitialClusterToken: "tkn", - SnapshotCount: 2000, - QuotaBackendBytes: 10740000000, - PreVote: true, - InitialCorruptCheck: true, - Logger: "zap", - LogOutputs: []string{"/tmp/etcd-functional-2/etcd.log"}, - LogLevel: "info", - SocketReuseAddress: true, - SocketReusePort: true, - }, - ClientCertData: "", - ClientCertPath: "", - ClientKeyData: "", - ClientKeyPath: "", - ClientTrustedCAData: "", - ClientTrustedCAPath: "", - PeerCertData: "", - PeerCertPath: "", - PeerKeyData: "", - PeerKeyPath: "", - PeerTrustedCAData: "", - PeerTrustedCAPath: "", - SnapshotPath: "/tmp/etcd-functional-2.snapshot.db", - }, - { - EtcdExec: "./bin/etcd", - AgentAddr: "127.0.0.1:39027", - FailpointHTTPAddr: 
"http://127.0.0.1:7383", - BaseDir: "/tmp/etcd-functional-3", - EtcdClientProxy: false, - EtcdPeerProxy: true, - EtcdClientEndpoint: "127.0.0.1:3379", - Etcd: &rpcpb.Etcd{ - Name: "s3", - DataDir: "/tmp/etcd-functional-3/etcd.data", - WALDir: "/tmp/etcd-functional-3/etcd.data/member/wal", - HeartbeatIntervalMs: 100, - ElectionTimeoutMs: 1000, - ListenClientURLs: []string{"https://127.0.0.1:3379"}, - AdvertiseClientURLs: []string{"https://127.0.0.1:3379"}, - ClientAutoTLS: true, - ClientCertAuth: false, - ClientCertFile: "", - ClientKeyFile: "", - ClientTrustedCAFile: "", - ListenPeerURLs: []string{"https://127.0.0.1:3380"}, - AdvertisePeerURLs: []string{"https://127.0.0.1:3381"}, - PeerAutoTLS: true, - PeerClientCertAuth: false, - PeerCertFile: "", - PeerKeyFile: "", - PeerTrustedCAFile: "", - InitialCluster: "s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381", - InitialClusterState: "new", - InitialClusterToken: "tkn", - SnapshotCount: 2000, - QuotaBackendBytes: 10740000000, - PreVote: true, - InitialCorruptCheck: true, - Logger: "zap", - LogOutputs: []string{"/tmp/etcd-functional-3/etcd.log"}, - LogLevel: "info", - SocketReuseAddress: true, - SocketReusePort: true, - }, - ClientCertData: "", - ClientCertPath: "", - ClientKeyData: "", - ClientKeyPath: "", - ClientTrustedCAData: "", - ClientTrustedCAPath: "", - PeerCertData: "", - PeerCertPath: "", - PeerKeyData: "", - PeerKeyPath: "", - PeerTrustedCAData: "", - PeerTrustedCAPath: "", - SnapshotPath: "/tmp/etcd-functional-3.snapshot.db", - }, - }, - Tester: &rpcpb.Tester{ - DataDir: "/tmp/etcd-tester-data", - Network: "tcp", - Addr: "127.0.0.1:9028", - DelayLatencyMs: 5000, - DelayLatencyMsRv: 500, - UpdatedDelayLatencyMs: 5000, - RoundLimit: 1, - ExitOnCaseFail: true, - EnablePprof: true, - CaseDelayMs: 7000, - CaseShuffle: true, - Cases: []string{ - "SIGTERM_ONE_FOLLOWER", - "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - "SIGTERM_LEADER", - "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT", - 
"SIGTERM_QUORUM", - "SIGTERM_ALL", - "SIGQUIT_AND_REMOVE_ONE_FOLLOWER", - "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - // "SIGQUIT_AND_REMOVE_LEADER", - // "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT", - // "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH", - // "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER", - // "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - "BLACKHOLE_PEER_PORT_TX_RX_LEADER", - "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT", - "BLACKHOLE_PEER_PORT_TX_RX_QUORUM", - "BLACKHOLE_PEER_PORT_TX_RX_ALL", - // "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER", - // "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER", - // "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - // "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", - "DELAY_PEER_PORT_TX_RX_LEADER", - "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER", - "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT", - "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT", - "DELAY_PEER_PORT_TX_RX_QUORUM", - "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM", - "DELAY_PEER_PORT_TX_RX_ALL", - "RANDOM_DELAY_PEER_PORT_TX_RX_ALL", - "NO_FAIL_WITH_STRESS", - "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS", - }, - FailpointCommands: []string{`panic("etcd-tester")`}, - RunnerExecPath: "./bin/etcd-runner", - ExternalExecPath: "", - Stressers: []*rpcpb.Stresser{ - {Type: "KV_WRITE_SMALL", Weight: 0.35}, - {Type: "KV_WRITE_LARGE", Weight: 0.002}, - {Type: "KV_READ_ONE_KEY", Weight: 0.07}, - {Type: "KV_READ_RANGE", Weight: 0.07}, - {Type: "KV_DELETE_ONE_KEY", Weight: 0.07}, - {Type: "KV_DELETE_RANGE", Weight: 0.07}, - {Type: "KV_TXN_WRITE_DELETE", Weight: 0.35}, - {Type: "LEASE", Weight: 0.0}, - }, - Checkers: []string{"KV_HASH", "LEASE_EXPIRE"}, - StressKeySize: 100, - StressKeySizeLarge: 32769, - StressKeySuffixRange: 250000, - StressKeySuffixRangeTxn: 100, - StressKeyTxnOps: 10, - StressClients: 100, - StressQPS: 2000, - }, - } - - logger := zaptest.NewLogger(t) - 
defer logger.Sync() - - cfg, err := read(logger, "../functional.yaml") - if err != nil { - t.Fatal(err) - } - cfg.lg = nil - - if !reflect.DeepEqual(exp, cfg) { - t.Fatalf(`exp != cfg: - expected %+v - got %+v`, exp, cfg) - } - - cfg.lg = logger - - cfg.updateCases() - fs1 := cfg.listCases() - - cfg.shuffleCases() - fs2 := cfg.listCases() - if reflect.DeepEqual(fs1, fs2) { - t.Fatalf("expected shuffled failure cases, got %q", fs2) - } - - cfg.shuffleCases() - fs3 := cfg.listCases() - if reflect.DeepEqual(fs2, fs3) { - t.Fatalf("expected reshuffled failure cases from %q, got %q", fs2, fs3) - } - - // shuffle ensures visit all exactly once - // so when sorted, failure cases must be equal - sort.Strings(fs1) - sort.Strings(fs2) - sort.Strings(fs3) - - if !reflect.DeepEqual(fs1, fs2) { - t.Fatalf("expected %q, got %q", fs1, fs2) - } - if !reflect.DeepEqual(fs2, fs3) { - t.Fatalf("expected %q, got %q", fs2, fs3) - } -} diff --git a/tests/functional/tester/doc.go b/tests/functional/tester/doc.go deleted file mode 100644 index d1e23e94134..00000000000 --- a/tests/functional/tester/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tester implements functional-tester tester server. 
-package tester diff --git a/tests/functional/tester/metrics_report.go b/tests/functional/tester/metrics_report.go deleted file mode 100644 index c82e58f5b64..00000000000 --- a/tests/functional/tester/metrics_report.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "fmt" - "sort" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - caseTotal = make(map[string]int) - - caseTotalCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "funcational_tester", - Name: "case_total", - Help: "Total number of finished test cases", - }, - []string{"desc"}, - ) - - caseFailedTotalCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "funcational_tester", - Name: "case_failed_total", - Help: "Total number of failed test cases", - }, - []string{"desc"}, - ) - - roundTotalCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "funcational_tester", - Name: "round_total", - Help: "Total number of finished test rounds.", - }) - - roundFailedTotalCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "funcational_tester", - Name: "round_failed_total", - Help: "Total number of failed test rounds.", - }) -) - -func init() { - prometheus.MustRegister(caseTotalCounter) - 
prometheus.MustRegister(caseFailedTotalCounter) - prometheus.MustRegister(roundTotalCounter) - prometheus.MustRegister(roundFailedTotalCounter) -} - -func printReport() { - rows := make([]string, 0, len(caseTotal)) - for k, v := range caseTotal { - rows = append(rows, fmt.Sprintf("%s: %d", k, v)) - } - sort.Strings(rows) - - println() - for _, row := range rows { - fmt.Println(row) - } - println() -} diff --git a/tests/functional/tester/stresser.go b/tests/functional/tester/stresser.go deleted file mode 100644 index f147c6cee20..00000000000 --- a/tests/functional/tester/stresser.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "fmt" - "time" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" -) - -// Stresser defines stressing client operations. -type Stresser interface { - // Stress starts to stress the etcd cluster - Stress() error - // Pause stops the stresser from sending requests to etcd. Resume by calling Stress. - Pause() map[string]int - // Close releases all of the Stresser's resources. - Close() map[string]int - // ModifiedKeys reports the number of keys created and deleted by stresser - ModifiedKeys() int64 -} - -// newStresser creates stresser from a comma separated list of stresser types. 
-func newStresser(clus *Cluster, m *rpcpb.Member) (stressers []Stresser) { - // TODO: Too intensive stressing clients can panic etcd member with - // 'out of memory' error. Put rate limits in server side. - ks := &keyStresser{ - lg: clus.lg, - m: m, - keySize: int(clus.Tester.StressKeySize), - keyLargeSize: int(clus.Tester.StressKeySizeLarge), - keySuffixRange: int(clus.Tester.StressKeySuffixRange), - keyTxnSuffixRange: int(clus.Tester.StressKeySuffixRangeTxn), - keyTxnOps: int(clus.Tester.StressKeyTxnOps), - clientsN: int(clus.Tester.StressClients), - rateLimiter: clus.rateLimiter, - } - ksExist := false - - for _, s := range clus.Tester.Stressers { - clus.lg.Info( - "creating stresser", - zap.String("type", s.Type), - zap.Float64("weight", s.Weight), - zap.String("endpoint", m.EtcdClientEndpoint), - ) - switch s.Type { - case "KV_WRITE_SMALL": - ksExist = true - ks.weightKVWriteSmall = s.Weight - case "KV_WRITE_LARGE": - ksExist = true - ks.weightKVWriteLarge = s.Weight - case "KV_READ_ONE_KEY": - ksExist = true - ks.weightKVReadOneKey = s.Weight - case "KV_READ_RANGE": - ksExist = true - ks.weightKVReadRange = s.Weight - case "KV_DELETE_ONE_KEY": - ksExist = true - ks.weightKVDeleteOneKey = s.Weight - case "KV_DELETE_RANGE": - ksExist = true - ks.weightKVDeleteRange = s.Weight - case "KV_TXN_WRITE_DELETE": - ksExist = true - ks.weightKVTxnWriteDelete = s.Weight - - case "LEASE": - stressers = append(stressers, &leaseStresser{ - stype: rpcpb.StresserType_LEASE, - lg: clus.lg, - m: m, - numLeases: 10, // TODO: configurable - keysPerLease: 10, // TODO: configurable - rateLimiter: clus.rateLimiter, - }) - - case "ELECTION_RUNNER": - reqRate := 100 - args := []string{ - "election", - fmt.Sprintf("%v", time.Now().UnixNano()), // election name as current nano time - "--dial-timeout=10s", - "--endpoints", m.EtcdClientEndpoint, - "--total-client-connections=10", - "--rounds=0", // runs forever - "--req-rate", fmt.Sprintf("%v", reqRate), - } - stressers = 
append(stressers, newRunnerStresser( - rpcpb.StresserType_ELECTION_RUNNER, - m.EtcdClientEndpoint, - clus.lg, - clus.Tester.RunnerExecPath, - args, - clus.rateLimiter, - reqRate, - )) - - case "WATCH_RUNNER": - reqRate := 100 - args := []string{ - "watcher", - "--prefix", fmt.Sprintf("%v", time.Now().UnixNano()), // prefix all keys with nano time - "--total-keys=1", - "--total-prefixes=1", - "--watch-per-prefix=1", - "--endpoints", m.EtcdClientEndpoint, - "--rounds=0", // runs forever - "--req-rate", fmt.Sprintf("%v", reqRate), - } - stressers = append(stressers, newRunnerStresser( - rpcpb.StresserType_WATCH_RUNNER, - m.EtcdClientEndpoint, - clus.lg, - clus.Tester.RunnerExecPath, - args, - clus.rateLimiter, - reqRate, - )) - - case "LOCK_RACER_RUNNER": - reqRate := 100 - args := []string{ - "lock-racer", - fmt.Sprintf("%v", time.Now().UnixNano()), // locker name as current nano time - "--endpoints", m.EtcdClientEndpoint, - "--total-client-connections=10", - "--rounds=0", // runs forever - "--req-rate", fmt.Sprintf("%v", reqRate), - } - stressers = append(stressers, newRunnerStresser( - rpcpb.StresserType_LOCK_RACER_RUNNER, - m.EtcdClientEndpoint, - clus.lg, - clus.Tester.RunnerExecPath, - args, - clus.rateLimiter, - reqRate, - )) - - case "LEASE_RUNNER": - args := []string{ - "lease-renewer", - "--ttl=30", - "--endpoints", m.EtcdClientEndpoint, - } - stressers = append(stressers, newRunnerStresser( - rpcpb.StresserType_LEASE_RUNNER, - m.EtcdClientEndpoint, - clus.lg, - clus.Tester.RunnerExecPath, - args, - clus.rateLimiter, - 0, - )) - } - } - - if ksExist { - return append(stressers, ks) - } - return stressers -} diff --git a/tests/functional/tester/stresser_composite.go b/tests/functional/tester/stresser_composite.go deleted file mode 100644 index 09dcb55ff63..00000000000 --- a/tests/functional/tester/stresser_composite.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// 
you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import "sync" - -// compositeStresser implements a Stresser that runs a slice of -// stressing clients concurrently. -type compositeStresser struct { - stressers []Stresser -} - -func (cs *compositeStresser) Stress() error { - for i, s := range cs.stressers { - if err := s.Stress(); err != nil { - for j := 0; j < i; j++ { - cs.stressers[j].Close() - } - return err - } - } - return nil -} - -func (cs *compositeStresser) Pause() (ems map[string]int) { - var emu sync.Mutex - ems = make(map[string]int) - var wg sync.WaitGroup - wg.Add(len(cs.stressers)) - for i := range cs.stressers { - go func(s Stresser) { - defer wg.Done() - errs := s.Pause() - for k, v := range errs { - emu.Lock() - ems[k] += v - emu.Unlock() - } - }(cs.stressers[i]) - } - wg.Wait() - return ems -} - -func (cs *compositeStresser) Close() (ems map[string]int) { - var emu sync.Mutex - ems = make(map[string]int) - var wg sync.WaitGroup - wg.Add(len(cs.stressers)) - for i := range cs.stressers { - go func(s Stresser) { - defer wg.Done() - errs := s.Close() - for k, v := range errs { - emu.Lock() - ems[k] += v - emu.Unlock() - } - }(cs.stressers[i]) - } - wg.Wait() - return ems -} - -func (cs *compositeStresser) ModifiedKeys() (modifiedKey int64) { - for _, stress := range cs.stressers { - modifiedKey += stress.ModifiedKeys() - } - return modifiedKey -} diff --git a/tests/functional/tester/stresser_key.go b/tests/functional/tester/stresser_key.go deleted file mode 100644 index 227b871c354..00000000000 
--- a/tests/functional/tester/stresser_key.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "context" - "fmt" - "math/rand" - "reflect" - "sync" - "sync/atomic" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/errors" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - "go.etcd.io/raft/v3" - - "go.uber.org/zap" - "golang.org/x/time/rate" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type keyStresser struct { - lg *zap.Logger - - m *rpcpb.Member - - weightKVWriteSmall float64 - weightKVWriteLarge float64 - weightKVReadOneKey float64 - weightKVReadRange float64 - weightKVDeleteOneKey float64 - weightKVDeleteRange float64 - weightKVTxnWriteDelete float64 - - keySize int - keyLargeSize int - keySuffixRange int - keyTxnSuffixRange int - keyTxnOps int - - rateLimiter *rate.Limiter - - wg sync.WaitGroup - clientsN int - - ctx context.Context - cancel func() - cli *clientv3.Client - - emu sync.RWMutex - ems map[string]int - paused bool - - // atomicModifiedKeys records the number of keys created and deleted by the stresser. 
- atomicModifiedKeys int64 - - stressTable *stressTable -} - -func (s *keyStresser) Stress() error { - var err error - s.cli, err = s.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(1 * time.Second)) - if err != nil { - return fmt.Errorf("%v (%q)", err, s.m.EtcdClientEndpoint) - } - s.ctx, s.cancel = context.WithCancel(context.Background()) - - s.wg.Add(s.clientsN) - - s.stressTable = createStressTable([]stressEntry{ - {weight: s.weightKVWriteSmall, f: newStressPut(s.cli, s.keySuffixRange, s.keySize)}, - {weight: s.weightKVWriteLarge, f: newStressPut(s.cli, s.keySuffixRange, s.keyLargeSize)}, - {weight: s.weightKVReadOneKey, f: newStressRange(s.cli, s.keySuffixRange)}, - {weight: s.weightKVReadRange, f: newStressRangeInterval(s.cli, s.keySuffixRange)}, - {weight: s.weightKVDeleteOneKey, f: newStressDelete(s.cli, s.keySuffixRange)}, - {weight: s.weightKVDeleteRange, f: newStressDeleteInterval(s.cli, s.keySuffixRange)}, - {weight: s.weightKVTxnWriteDelete, f: newStressTxn(s.cli, s.keyTxnSuffixRange, s.keyTxnOps)}, - }) - - s.emu.Lock() - s.paused = false - s.ems = make(map[string]int, 100) - s.emu.Unlock() - for i := 0; i < s.clientsN; i++ { - go s.run() - } - - s.lg.Info( - "stress START", - zap.String("stress-type", "KV"), - zap.String("endpoint", s.m.EtcdClientEndpoint), - ) - return nil -} - -func (s *keyStresser) run() { - defer s.wg.Done() - - for { - if err := s.rateLimiter.Wait(s.ctx); err == context.Canceled { - return - } - - // TODO: 10-second is enough timeout to cover leader failure - // and immediate leader election. Find out what other cases this - // could be timed out. 
- sctx, scancel := context.WithTimeout(s.ctx, 10*time.Second) - modifiedKeys, err := s.stressTable.choose()(sctx) - scancel() - if err == nil { - atomic.AddInt64(&s.atomicModifiedKeys, modifiedKeys) - continue - } - - if !s.isRetryableError(err) { - return - } - - // only record errors before pausing stressers - s.emu.Lock() - if !s.paused { - s.ems[err.Error()]++ - } - s.emu.Unlock() - } -} - -func (s *keyStresser) isRetryableError(err error) bool { - switch rpctypes.ErrorDesc(err) { - // retryable - case context.DeadlineExceeded.Error(): - // This retries when request is triggered at the same time as - // leader failure. When we terminate the leader, the request to - // that leader cannot be processed, and times out. Also requests - // to followers cannot be forwarded to the old leader, so timing out - // as well. We want to keep stressing until the cluster elects a - // new leader and start processing requests again. - return true - case errors.ErrTimeoutDueToLeaderFail.Error(), errors.ErrTimeout.Error(): - // This retries when request is triggered at the same time as - // leader failure and follower nodes receive time out errors - // from losing their leader. Followers should retry to connect - // to the new leader. - return true - case errors.ErrStopped.Error(): - // one of the etcd nodes stopped from failure injection - return true - case rpctypes.ErrNotCapable.Error(): - // capability check has not been done (in the beginning) - return true - case rpctypes.ErrTooManyRequests.Error(): - // hitting the recovering member. - return true - case raft.ErrProposalDropped.Error(): - // removed member, or leadership has changed (old leader got raftpb.MsgProp) - return true - - // not retryable. 
- case context.Canceled.Error(): - // from stresser.Cancel method: - return false - } - - if status.Convert(err).Code() == codes.Unavailable { - // gRPC connection errors are translated to status.Unavailable - return true - } - - s.lg.Warn( - "stress run exiting", - zap.String("stress-type", "KV"), - zap.String("endpoint", s.m.EtcdClientEndpoint), - zap.String("error-type", reflect.TypeOf(err).String()), - zap.String("error-desc", rpctypes.ErrorDesc(err)), - zap.Error(err), - ) - return false -} - -func (s *keyStresser) Pause() map[string]int { - return s.Close() -} - -func (s *keyStresser) Close() map[string]int { - s.cancel() - s.cli.Close() - s.wg.Wait() - - s.emu.Lock() - s.paused = true - ess := s.ems - s.ems = make(map[string]int, 100) - s.emu.Unlock() - - s.lg.Info( - "stress STOP", - zap.String("stress-type", "KV"), - zap.String("endpoint", s.m.EtcdClientEndpoint), - ) - return ess -} - -func (s *keyStresser) ModifiedKeys() int64 { - return atomic.LoadInt64(&s.atomicModifiedKeys) -} - -type stressFunc func(ctx context.Context) (modifiedKeys int64, err error) - -type stressEntry struct { - weight float64 - f stressFunc -} - -type stressTable struct { - entries []stressEntry - sumWeights float64 -} - -func createStressTable(entries []stressEntry) *stressTable { - st := stressTable{entries: entries} - for _, entry := range st.entries { - st.sumWeights += entry.weight - } - return &st -} - -func (st *stressTable) choose() stressFunc { - v := rand.Float64() * st.sumWeights - var sum float64 - var idx int - for i := range st.entries { - sum += st.entries[i].weight - if sum >= v { - idx = i - break - } - } - return st.entries[idx].f -} - -func newStressPut(cli *clientv3.Client, keySuffixRange, keySize int) stressFunc { - return func(ctx context.Context) (int64, error) { - _, err := cli.Put( - ctx, - fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)), - string(randBytes(keySize)), - ) - return 1, err - } -} - -func newStressTxn(cli *clientv3.Client, 
keyTxnSuffixRange, txnOps int) stressFunc { - keys := make([]string, keyTxnSuffixRange) - for i := range keys { - keys[i] = fmt.Sprintf("/k%03d", i) - } - return writeTxn(cli, keys, txnOps) -} - -func writeTxn(cli *clientv3.Client, keys []string, txnOps int) stressFunc { - return func(ctx context.Context) (int64, error) { - ks := make(map[string]struct{}, txnOps) - for len(ks) != txnOps { - ks[keys[rand.Intn(len(keys))]] = struct{}{} - } - selected := make([]string, 0, txnOps) - for k := range ks { - selected = append(selected, k) - } - com, delOp, putOp := getTxnOps(selected[0], "bar00") - thenOps := []clientv3.Op{delOp} - elseOps := []clientv3.Op{putOp} - for i := 1; i < txnOps; i++ { // nested txns - k, v := selected[i], fmt.Sprintf("bar%02d", i) - com, delOp, putOp = getTxnOps(k, v) - txnOp := clientv3.OpTxn( - []clientv3.Cmp{com}, - []clientv3.Op{delOp}, - []clientv3.Op{putOp}, - ) - thenOps = append(thenOps, txnOp) - elseOps = append(elseOps, txnOp) - } - _, err := cli.Txn(ctx). - If(com). - Then(thenOps...). - Else(elseOps...). 
- Commit() - return int64(txnOps), err - } -} - -func getTxnOps(k, v string) ( - cmp clientv3.Cmp, - dop clientv3.Op, - pop clientv3.Op) { - // if key exists (version > 0) - cmp = clientv3.Compare(clientv3.Version(k), ">", 0) - dop = clientv3.OpDelete(k) - pop = clientv3.OpPut(k, v) - return cmp, dop, pop -} - -func newStressRange(cli *clientv3.Client, keySuffixRange int) stressFunc { - return func(ctx context.Context) (int64, error) { - _, err := cli.Get(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))) - return 0, err - } -} - -func newStressRangeInterval(cli *clientv3.Client, keySuffixRange int) stressFunc { - return func(ctx context.Context) (int64, error) { - start := rand.Intn(keySuffixRange) - end := start + 500 - _, err := cli.Get( - ctx, - fmt.Sprintf("foo%016x", start), - clientv3.WithRange(fmt.Sprintf("foo%016x", end)), - ) - return 0, err - } -} - -func newStressDelete(cli *clientv3.Client, keySuffixRange int) stressFunc { - return func(ctx context.Context) (int64, error) { - _, err := cli.Delete(ctx, fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))) - return 1, err - } -} - -func newStressDeleteInterval(cli *clientv3.Client, keySuffixRange int) stressFunc { - return func(ctx context.Context) (int64, error) { - start := rand.Intn(keySuffixRange) - end := start + 500 - resp, err := cli.Delete(ctx, - fmt.Sprintf("foo%016x", start), - clientv3.WithRange(fmt.Sprintf("foo%016x", end)), - ) - if err == nil { - return resp.Deleted, nil - } - return 0, err - } -} diff --git a/tests/functional/tester/stresser_lease.go b/tests/functional/tester/stresser_lease.go deleted file mode 100644 index 8b1cfcb499b..00000000000 --- a/tests/functional/tester/stresser_lease.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "context" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" - "golang.org/x/time/rate" - "google.golang.org/grpc" -) - -const ( - // time to live for lease - defaultTTL = 120 - defaultTTLShort = 2 -) - -type leaseStresser struct { - stype rpcpb.StresserType - lg *zap.Logger - - m *rpcpb.Member - cli *clientv3.Client - ctx context.Context - cancel func() - - rateLimiter *rate.Limiter - // atomicModifiedKey records the number of keys created and deleted during a test case - atomicModifiedKey int64 - numLeases int - keysPerLease int - aliveLeases *atomicLeases - alivedLeasesWithShortTTL *atomicLeases - revokedLeases *atomicLeases - // The tester doesn't keep alive the shortLivedLeases, - // so they will expire after the TTL. - shortLivedLeases *atomicLeases - - runWg sync.WaitGroup - aliveWg sync.WaitGroup -} - -type atomicLeases struct { - // rwLock is used to protect read/write access of leases map - // which are accessed and modified by different goroutines. 
- rwLock sync.RWMutex - leases map[int64]time.Time -} - -func (al *atomicLeases) add(leaseID int64, t time.Time) { - al.rwLock.Lock() - al.leases[leaseID] = t - al.rwLock.Unlock() -} - -func (al *atomicLeases) update(leaseID int64, t time.Time) { - al.rwLock.Lock() - _, ok := al.leases[leaseID] - if ok { - al.leases[leaseID] = t - } - al.rwLock.Unlock() -} - -func (al *atomicLeases) read(leaseID int64) (rv time.Time, ok bool) { - al.rwLock.RLock() - rv, ok = al.leases[leaseID] - al.rwLock.RUnlock() - return rv, ok -} - -func (al *atomicLeases) remove(leaseID int64) { - al.rwLock.Lock() - delete(al.leases, leaseID) - al.rwLock.Unlock() -} - -func (al *atomicLeases) getLeasesMap() map[int64]time.Time { - leasesCopy := make(map[int64]time.Time) - al.rwLock.RLock() - for k, v := range al.leases { - leasesCopy[k] = v - } - al.rwLock.RUnlock() - return leasesCopy -} - -func (ls *leaseStresser) setupOnce() error { - if ls.aliveLeases != nil { - return nil - } - if ls.numLeases == 0 { - panic("expect numLeases to be set") - } - if ls.keysPerLease == 0 { - panic("expect keysPerLease to be set") - } - - ls.aliveLeases = &atomicLeases{leases: make(map[int64]time.Time)} - return nil -} - -func (ls *leaseStresser) Stress() error { - ls.lg.Info( - "stress START", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - ) - - if err := ls.setupOnce(); err != nil { - return err - } - - ctx, cancel := context.WithCancel(context.Background()) - ls.ctx = ctx - ls.cancel = cancel - - cli, err := ls.m.CreateEtcdClient(grpc.WithBackoffMaxDelay(1 * time.Second)) - if err != nil { - return fmt.Errorf("%v (%s)", err, ls.m.EtcdClientEndpoint) - } - ls.cli = cli - - ls.revokedLeases = &atomicLeases{leases: make(map[int64]time.Time)} - ls.shortLivedLeases = &atomicLeases{leases: make(map[int64]time.Time)} - ls.alivedLeasesWithShortTTL = &atomicLeases{leases: make(map[int64]time.Time)} - - ls.runWg.Add(1) - go ls.run() - return nil -} - -func (ls 
*leaseStresser) run() { - defer ls.runWg.Done() - ls.restartKeepAlives() - for { - // the number of keys created and deleted is roughly 2x the number of created keys for an iteration. - // the rateLimiter therefore consumes 2x ls.numLeases*ls.keysPerLease tokens where each token represents a create/delete operation for key. - err := ls.rateLimiter.WaitN(ls.ctx, 2*ls.numLeases*ls.keysPerLease) - if err == context.Canceled { - return - } - - ls.lg.Debug( - "stress creating leases", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - ) - ls.createLeases() - ls.lg.Debug( - "stress created leases", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - ) - - ls.lg.Debug( - "stress dropped leases", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - ) - ls.randomlyDropLeases() - ls.lg.Debug( - "stress dropped leases", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - ) - } -} - -func (ls *leaseStresser) restartKeepAlives() { - f := func(leases *atomicLeases) { - for leaseID := range leases.getLeasesMap() { - ls.aliveWg.Add(1) - go func(id int64) { - ls.keepLeaseAlive(id) - }(leaseID) - } - } - f(ls.aliveLeases) - f(ls.alivedLeasesWithShortTTL) -} - -func (ls *leaseStresser) createLeases() { - ls.createAliveLeasesWithShortTTL() - ls.createAliveLeases() - ls.createShortLivedLeases() -} - -func (ls *leaseStresser) createAliveLeases() { - neededLeases := ls.numLeases - len(ls.aliveLeases.getLeasesMap()) - var wg sync.WaitGroup - for i := 0; i < neededLeases; i++ { - wg.Add(1) - go func() { - defer wg.Done() - leaseID, err := ls.createLeaseWithKeys(defaultTTL) - if err != nil { - ls.lg.Debug( - "createLeaseWithKeys failed", - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.Error(err), - ) - return - } - ls.aliveLeases.add(leaseID, time.Now()) - // keep track of all the keep 
lease alive goroutines - ls.aliveWg.Add(1) - go ls.keepLeaseAlive(leaseID) - }() - } - wg.Wait() -} - -func (ls *leaseStresser) createAliveLeasesWithShortTTL() { - neededLeases := 2 - var wg sync.WaitGroup - for i := 0; i < neededLeases; i++ { - wg.Add(1) - go func() { - defer wg.Done() - leaseID, err := ls.createLeaseWithKeys(defaultTTLShort) - if err != nil { - ls.lg.Debug( - "createLeaseWithKeys failed", - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.Error(err), - ) - return - } - ls.lg.Debug("createAliveLeasesWithShortTTL", zap.Int64("lease-id", leaseID)) - ls.alivedLeasesWithShortTTL.add(leaseID, time.Now()) - // keep track of all the keep lease alive goroutines - ls.aliveWg.Add(1) - go ls.keepLeaseAlive(leaseID) - }() - } - wg.Wait() -} - -func (ls *leaseStresser) createShortLivedLeases() { - // one round of createLeases() might not create all the short lived leases we want due to failures. - // thus, we want to create remaining short lived leases in the future round. - neededLeases := ls.numLeases - len(ls.shortLivedLeases.getLeasesMap()) - var wg sync.WaitGroup - for i := 0; i < neededLeases; i++ { - wg.Add(1) - go func() { - defer wg.Done() - leaseID, err := ls.createLeaseWithKeys(defaultTTLShort) - if err != nil { - return - } - ls.shortLivedLeases.add(leaseID, time.Now()) - }() - } - wg.Wait() -} - -func (ls *leaseStresser) createLeaseWithKeys(ttl int64) (int64, error) { - leaseID, err := ls.createLease(ttl) - if err != nil { - ls.lg.Debug( - "createLease failed", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.Error(err), - ) - return -1, err - } - - ls.lg.Debug( - "createLease created lease", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - ) - if err := ls.attachKeysWithLease(leaseID); err != nil { - return -1, err - } - return leaseID, nil -} - -func (ls *leaseStresser) 
randomlyDropLeases() { - var wg sync.WaitGroup - for l := range ls.aliveLeases.getLeasesMap() { - wg.Add(1) - go func(leaseID int64) { - defer wg.Done() - dropped, err := ls.randomlyDropLease(leaseID) - // if randomlyDropLease encountered an error such as context is cancelled, remove the lease from aliveLeases - // because we can't tell whether the lease is dropped or not. - if err != nil { - ls.lg.Debug( - "randomlyDropLease failed", - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Error(err), - ) - ls.aliveLeases.remove(leaseID) - return - } - if !dropped { - return - } - ls.lg.Debug( - "randomlyDropLease dropped a lease", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - ) - ls.revokedLeases.add(leaseID, time.Now()) - ls.aliveLeases.remove(leaseID) - }(l) - } - wg.Wait() -} - -func (ls *leaseStresser) createLease(ttl int64) (int64, error) { - resp, err := ls.cli.Grant(ls.ctx, ttl) - if err != nil { - return -1, err - } - return int64(resp.ID), nil -} - -func (ls *leaseStresser) keepLeaseAlive(leaseID int64) { - defer ls.aliveWg.Done() - ctx, cancel := context.WithCancel(ls.ctx) - stream, err := ls.cli.KeepAlive(ctx, clientv3.LeaseID(leaseID)) - if err != nil { - ls.lg.Error( - "keepLeaseAlive lease creates stream error", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Error(err), - ) - } - defer cancel() - - for { - select { - case <-time.After(500 * time.Millisecond): - case <-ls.ctx.Done(): - ls.lg.Debug( - "keepLeaseAlive context canceled", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Error(ls.ctx.Err()), - ) - // It is possible that lease expires at invariant 
checking phase - // but not at keepLeaseAlive() phase. This scenario is possible - // when alive lease is just about to expire when keepLeaseAlive() - // exists and expires at invariant checking phase. To circumvent - // this scenario, we check each lease before keepalive loop exist - // to see if it has been renewed in last TTL/2 duration. If it is - // renewed, it means that invariant checking have at least ttl/2 - // time before lease expires which is long enough for the checking - // to finish. If it is not renewed, we remove the lease from the - // alive map so that the lease doesn't expire during invariant - // checking. - renewTime, ok := ls.aliveLeases.read(leaseID) - if ok && renewTime.Add(defaultTTL/2*time.Second).Before(time.Now()) { - ls.aliveLeases.remove(leaseID) - ls.lg.Debug( - "keepLeaseAlive lease has not been renewed, dropped it", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - ) - } - return - } - - ls.lg.Debug( - "keepLeaseAlive waiting on lease stream", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - ) - leaseRenewTime := time.Now() - respRC := <-stream - if respRC == nil { - ls.lg.Debug( - "keepLeaseAlive received nil lease keepalive response", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - ) - continue - } - - // lease expires after TTL become 0 - // don't send keepalive if the lease has expired - if respRC.TTL <= 0 { - ls.lg.Debug( - "keepLeaseAlive stream received lease keepalive response TTL <= 0", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Int64("ttl", respRC.TTL), - ) - 
ls.aliveLeases.remove(leaseID) - return - } - // renew lease timestamp only if lease is present - ls.lg.Debug( - "keepLeaseAlive renewed a lease", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - ) - ls.aliveLeases.update(leaseID, leaseRenewTime) - } -} - -// attachKeysWithLease function attaches keys to the lease. -// the format of key is the concat of leaseID + '_' + '' -// e.g 5186835655248304152_0 for first created key and 5186835655248304152_1 for second created key -func (ls *leaseStresser) attachKeysWithLease(leaseID int64) error { - var txnPuts []clientv3.Op - for j := 0; j < ls.keysPerLease; j++ { - txnput := clientv3.OpPut( - fmt.Sprintf("%d%s%d", leaseID, "_", j), - "bar", - clientv3.WithLease(clientv3.LeaseID(leaseID)), - ) - txnPuts = append(txnPuts, txnput) - } - // keep retrying until lease is not found or ctx is being canceled - for ls.ctx.Err() == nil { - _, err := ls.cli.Txn(ls.ctx).Then(txnPuts...).Commit() - if err == nil { - // since all created keys will be deleted too, the number of operations on keys will be roughly 2x the number of created keys - atomic.AddInt64(&ls.atomicModifiedKey, 2*int64(ls.keysPerLease)) - return nil - } - if rpctypes.Error(err) == rpctypes.ErrLeaseNotFound { - return err - } - } - return ls.ctx.Err() -} - -// randomlyDropLease drops the lease only when the rand.Int(2) returns 1. 
-// This creates a 50/50 percents chance of dropping a lease -func (ls *leaseStresser) randomlyDropLease(leaseID int64) (bool, error) { - if rand.Intn(2) != 0 { - return false, nil - } - - // keep retrying until a lease is dropped or ctx is being canceled - for ls.ctx.Err() == nil { - _, err := ls.cli.Revoke(ls.ctx, clientv3.LeaseID(leaseID)) - if err == nil || rpctypes.Error(err) == rpctypes.ErrLeaseNotFound { - return true, nil - } - } - - ls.lg.Debug( - "randomlyDropLease error", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - zap.String("lease-id", fmt.Sprintf("%016x", leaseID)), - zap.Error(ls.ctx.Err()), - ) - return false, ls.ctx.Err() -} - -func (ls *leaseStresser) Pause() map[string]int { - return ls.Close() -} - -func (ls *leaseStresser) Close() map[string]int { - ls.cancel() - ls.runWg.Wait() - ls.aliveWg.Wait() - ls.cli.Close() - ls.lg.Info( - "stress STOP", - zap.String("stress-type", ls.stype.String()), - zap.String("endpoint", ls.m.EtcdClientEndpoint), - ) - return nil -} - -func (ls *leaseStresser) ModifiedKeys() int64 { - return atomic.LoadInt64(&ls.atomicModifiedKey) -} diff --git a/tests/functional/tester/stresser_runner.go b/tests/functional/tester/stresser_runner.go deleted file mode 100644 index 24fb82641cd..00000000000 --- a/tests/functional/tester/stresser_runner.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tester - -import ( - "fmt" - "io" - "os/exec" - "syscall" - - "go.etcd.io/etcd/tests/v3/functional/rpcpb" - - "go.uber.org/zap" - "golang.org/x/time/rate" -) - -type runnerStresser struct { - stype rpcpb.StresserType - etcdClientEndpoint string - lg *zap.Logger - - cmd *exec.Cmd - cmdStr string - args []string - rl *rate.Limiter - reqRate int - - errc chan error - donec chan struct{} -} - -func newRunnerStresser( - stype rpcpb.StresserType, - ep string, - lg *zap.Logger, - cmdStr string, - args []string, - rl *rate.Limiter, - reqRate int, -) *runnerStresser { - rl.SetLimit(rl.Limit() - rate.Limit(reqRate)) - return &runnerStresser{ - stype: stype, - etcdClientEndpoint: ep, - lg: lg, - cmdStr: cmdStr, - args: args, - rl: rl, - reqRate: reqRate, - errc: make(chan error, 1), - donec: make(chan struct{}), - } -} - -func (rs *runnerStresser) setupOnce() (err error) { - if rs.cmd != nil { - return nil - } - - rs.cmd = exec.Command(rs.cmdStr, rs.args...) - stderr, err := rs.cmd.StderrPipe() - if err != nil { - return err - } - - go func() { - defer close(rs.donec) - out, err := io.ReadAll(stderr) - if err != nil { - rs.errc <- err - } else { - rs.errc <- fmt.Errorf("(%v %v) stderr %v", rs.cmdStr, rs.args, string(out)) - } - }() - - return rs.cmd.Start() -} - -func (rs *runnerStresser) Stress() (err error) { - rs.lg.Info( - "stress START", - zap.String("stress-type", rs.stype.String()), - ) - if err = rs.setupOnce(); err != nil { - return err - } - return syscall.Kill(rs.cmd.Process.Pid, syscall.SIGCONT) -} - -func (rs *runnerStresser) Pause() map[string]int { - rs.lg.Info( - "stress STOP", - zap.String("stress-type", rs.stype.String()), - ) - syscall.Kill(rs.cmd.Process.Pid, syscall.SIGSTOP) - return nil -} - -func (rs *runnerStresser) Close() map[string]int { - syscall.Kill(rs.cmd.Process.Pid, syscall.SIGINT) - rs.cmd.Wait() - <-rs.donec - rs.rl.SetLimit(rs.rl.Limit() + rate.Limit(rs.reqRate)) - return nil -} - -func (rs *runnerStresser) ModifiedKeys() int64 { 
- return 1 -} diff --git a/tests/functional/tester/utils.go b/tests/functional/tester/utils.go deleted file mode 100644 index 4403ff34663..00000000000 --- a/tests/functional/tester/utils.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tester - -import ( - "errors" - "math/rand" - "net" - "net/url" - "strings" -) - -func isValidURL(u string) bool { - _, err := url.Parse(u) - return err == nil -} - -func getPort(addr string) (port string, err error) { - urlAddr, err := url.Parse(addr) - if err != nil { - return "", err - } - _, port, err = net.SplitHostPort(urlAddr.Host) - if err != nil { - return "", err - } - return port, nil -} - -func getSameValue(vals map[string]int64) bool { - var rv int64 - for _, v := range vals { - if rv == 0 { - rv = v - } - if rv != v { - return false - } - } - return true -} - -func max(n1, n2 int64) int64 { - if n1 > n2 { - return n1 - } - return n2 -} - -func errsToError(errs []error) error { - if len(errs) == 0 { - return nil - } - stringArr := make([]string, len(errs)) - for i, err := range errs { - stringArr[i] = err.Error() - } - return errors.New(strings.Join(stringArr, ", ")) -} - -func randBytes(size int) []byte { - data := make([]byte, size) - for i := 0; i < size; i++ { - data[i] = byte(int('a') + rand.Intn(26)) - } - return data -} diff --git a/tests/go.mod b/tests/go.mod deleted file mode 100644 index 801e536614c..00000000000 --- 
a/tests/go.mod +++ /dev/null @@ -1,103 +0,0 @@ -module go.etcd.io/etcd/tests/v3 - -go 1.19 - -replace ( - go.etcd.io/etcd/api/v3 => ../api - go.etcd.io/etcd/client/pkg/v3 => ../client/pkg - go.etcd.io/etcd/client/v2 => ../client/v2 - go.etcd.io/etcd/client/v3 => ../client/v3 - go.etcd.io/etcd/etcdctl/v3 => ../etcdctl - go.etcd.io/etcd/etcdutl/v3 => ../etcdutl - go.etcd.io/etcd/pkg/v3 => ../pkg - go.etcd.io/etcd/server/v3 => ../server -) - -require ( - github.com/anishathalye/porcupine v0.1.4 - github.com/coreos/go-semver v0.3.1 - github.com/dustin/go-humanize v1.0.1 - github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.5.2 - github.com/google/go-cmp v0.5.9 - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/prometheus/client_golang v1.14.0 - github.com/soheilhy/cmux v0.1.5 - github.com/spf13/cobra v1.6.1 - github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.1 - go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 - go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0 - go.etcd.io/etcd/server/v3 v3.6.0-alpha.0 - go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 - go.opentelemetry.io/otel v1.11.2 - go.opentelemetry.io/otel/sdk v1.11.2 - go.opentelemetry.io/otel/trace v1.11.2 - go.opentelemetry.io/proto/otlp v0.19.0 - go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e - golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f - golang.org/x/time v0.0.0-20220609170525-579cf78fd858 - google.golang.org/grpc v1.51.0 - gopkg.in/yaml.v2 v2.4.0 -) - -require ( - github.com/VividCortex/ewma v1.1.1 // indirect - 
github.com/benbjohnson/clock v1.1.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cheggaaa/pb/v3 v3.1.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/creack/pty v1.1.18 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v4 v4.4.3 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/btree v1.1.2 // indirect - github.com/gorilla/websocket v1.4.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/jonboulle/clockwork v0.3.0 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/mattn/go-runewidth v0.0.12 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/rivo/uniseg v0.2.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect - github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect - github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect - go.etcd.io/bbolt v1.3.7 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 // indirect - 
go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect -) diff --git a/tests/go.sum b/tests/go.sum deleted file mode 100644 index 247b4d14088..00000000000 --- a/tests/go.sum +++ /dev/null @@ -1,684 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= -cloud.google.com/go 
v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= -github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/anishathalye/porcupine v0.1.4 h1:rRekB2jH1mbtLPEzuqyMHp4scU52Bcc1jgkPi1kWFQA= -github.com/anishathalye/porcupine v0.1.4/go.mod h1:/X9OQYnVb7DzfKCQVO4tI1Aq+o56UJW+RvN/5U4EuZA= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.2.0 
h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04= -github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 
h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod 
h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru 
v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= -github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod 
h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a h1:Znv2XJyAf/fsJsFNt9toO8uyXwwHQ44wxqsvdSxipj4= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a/go.mod h1:eMshmuwXLWZrjHXN8ZgYrOMQRSbHqi5M84DEZWhG+o4= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= -go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU= -go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 
h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod 
h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/tests/integration/clientv3/cluster_test.go b/tests/integration/clientv3/cluster_test.go deleted file mode 100644 index 822855f1365..00000000000 --- a/tests/integration/clientv3/cluster_test.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3test - -import ( - "context" - "fmt" - "math/rand" - "reflect" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/types" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestMemberList(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - capi := clus.RandClient() - - resp, err := capi.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - - if len(resp.Members) != 3 { - t.Errorf("number of members = %d, want %d", len(resp.Members), 3) - } -} - -func TestMemberAdd(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - capi := clus.RandClient() - - urls := []string{"http://127.0.0.1:1234"} - resp, err := capi.MemberAdd(context.Background(), urls) - if err != nil { - t.Fatalf("failed to add member %v", err) - } - - if !reflect.DeepEqual(resp.Member.PeerURLs, urls) { - t.Errorf("urls = %v, want %v", urls, resp.Member.PeerURLs) - } -} - -func TestMemberAddWithExistingURLs(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - capi := clus.RandClient() - - resp, err := capi.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - - existingURL := 
resp.Members[0].PeerURLs[0] - _, err = capi.MemberAdd(context.Background(), []string{existingURL}) - expectedErrKeywords := "Peer URLs already exists" - if err == nil { - t.Fatalf("expecting add member to fail, got no error") - } - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Errorf("expecting error to contain %s, got %s", expectedErrKeywords, err.Error()) - } -} - -func TestMemberRemove(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - capi := clus.Client(1) - resp, err := capi.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - - rmvID := resp.Members[0].ID - // indexes in capi member list don't necessarily match cluster member list; - // find member that is not the client to remove - for _, m := range resp.Members { - mURLs, _ := types.NewURLs(m.PeerURLs) - if !reflect.DeepEqual(mURLs, clus.Members[1].ServerConfig.PeerURLs) { - rmvID = m.ID - break - } - } - - _, err = capi.MemberRemove(context.Background(), rmvID) - if err != nil { - t.Fatalf("failed to remove member %v", err) - } - - resp, err = capi.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - - if len(resp.Members) != 2 { - t.Errorf("number of members = %d, want %d", len(resp.Members), 2) - } -} - -func TestMemberUpdate(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - capi := clus.RandClient() - resp, err := capi.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - - urls := []string{"http://127.0.0.1:1234"} - _, err = capi.MemberUpdate(context.Background(), resp.Members[0].ID, urls) - if err != nil { - t.Fatalf("failed to update member %v", err) - } - - resp, err = capi.MemberList(context.Background()) - if 
err != nil { - t.Fatalf("failed to list member %v", err) - } - - if !reflect.DeepEqual(resp.Members[0].PeerURLs, urls) { - t.Errorf("urls = %v, want %v", urls, resp.Members[0].PeerURLs) - } -} - -func TestMemberAddUpdateWrongURLs(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - capi := clus.RandClient() - tt := [][]string{ - // missing protocol scheme - {"://127.0.0.1:2379"}, - // unsupported scheme - {"mailto://127.0.0.1:2379"}, - // not conform to host:port - {"http://127.0.0.1"}, - // contain a path - {"http://127.0.0.1:2379/path"}, - // first path segment in URL cannot contain colon - {"127.0.0.1:1234"}, - // URL scheme must be http, https, unix, or unixs - {"localhost:1234"}, - } - for i := range tt { - _, err := capi.MemberAdd(context.Background(), tt[i]) - if err == nil { - t.Errorf("#%d: MemberAdd err = nil, but error", i) - } - _, err = capi.MemberUpdate(context.Background(), 0, tt[i]) - if err == nil { - t.Errorf("#%d: MemberUpdate err = nil, but error", i) - } - } -} - -func TestMemberAddForLearner(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - capi := clus.RandClient() - - urls := []string{"http://127.0.0.1:1234"} - resp, err := capi.MemberAddAsLearner(context.Background(), urls) - if err != nil { - t.Fatalf("failed to add member %v", err) - } - - if !resp.Member.IsLearner { - t.Errorf("Added a member as learner, got resp.Member.IsLearner = %v", resp.Member.IsLearner) - } - - numberOfLearners := 0 - for _, m := range resp.Members { - if m.IsLearner { - numberOfLearners++ - } - } - if numberOfLearners != 1 { - t.Errorf("Added 1 learner node to cluster, got %d", numberOfLearners) - } -} - -func TestMemberPromote(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, 
&integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - // member promote request can be sent to any server in cluster, - // the request will be auto-forwarded to leader on server-side. - // This test explicitly includes the server-side forwarding by - // sending the request to follower. - leaderIdx := clus.WaitLeader(t) - followerIdx := (leaderIdx + 1) % 3 - capi := clus.Client(followerIdx) - - urls := []string{"http://127.0.0.1:1234"} - memberAddResp, err := capi.MemberAddAsLearner(context.Background(), urls) - if err != nil { - t.Fatalf("failed to add member %v", err) - } - - if !memberAddResp.Member.IsLearner { - t.Fatalf("Added a member as learner, got resp.Member.IsLearner = %v", memberAddResp.Member.IsLearner) - } - learnerID := memberAddResp.Member.ID - - numberOfLearners := 0 - for _, m := range memberAddResp.Members { - if m.IsLearner { - numberOfLearners++ - } - } - if numberOfLearners != 1 { - t.Fatalf("Added 1 learner node to cluster, got %d", numberOfLearners) - } - - // learner is not started yet. Expect learner progress check to fail. - // As the result, member promote request will fail. - _, err = capi.MemberPromote(context.Background(), learnerID) - expectedErrKeywords := "can only promote a learner member which is in sync with leader" - if err == nil { - t.Fatalf("expecting promote not ready learner to fail, got no error") - } - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Fatalf("expecting error to contain %s, got %s", expectedErrKeywords, err.Error()) - } - - // create and launch learner member based on the response of V3 Member Add API. 
- // (the response has information on peer urls of the existing members in cluster) - learnerMember := clus.MustNewMember(t, memberAddResp) - - if err := learnerMember.Launch(); err != nil { - t.Fatal(err) - } - - // retry until promote succeed or timeout - timeout := time.After(5 * time.Second) - for { - select { - case <-time.After(500 * time.Millisecond): - case <-timeout: - t.Fatalf("failed all attempts to promote learner member, last error: %v", err) - } - - _, err = capi.MemberPromote(context.Background(), learnerID) - // successfully promoted learner - if err == nil { - break - } - // if member promote fails due to learner not ready, retry. - // otherwise fails the test. - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Fatalf("unexpected error when promoting learner member: %v", err) - } - } -} - -// TestMemberPromoteMemberNotLearner ensures that promoting a voting member fails. -func TestMemberPromoteMemberNotLearner(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // member promote request can be sent to any server in cluster, - // the request will be auto-forwarded to leader on server-side. - // This test explicitly includes the server-side forwarding by - // sending the request to follower. 
- leaderIdx := clus.WaitLeader(t) - followerIdx := (leaderIdx + 1) % 3 - cli := clus.Client(followerIdx) - - resp, err := cli.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - if len(resp.Members) != 3 { - t.Fatalf("number of members = %d, want %d", len(resp.Members), 3) - } - - // promoting any of the voting members in cluster should fail - expectedErrKeywords := "can only promote a learner member" - for _, m := range resp.Members { - _, err = cli.MemberPromote(context.Background(), m.ID) - if err == nil { - t.Fatalf("expect promoting voting member to fail, got no error") - } - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Fatalf("expect error to contain %s, got %s", expectedErrKeywords, err.Error()) - } - } -} - -// TestMemberPromoteMemberNotExist ensures that promoting a member that does not exist in cluster fails. -func TestMemberPromoteMemberNotExist(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // member promote request can be sent to any server in cluster, - // the request will be auto-forwarded to leader on server-side. - // This test explicitly includes the server-side forwarding by - // sending the request to follower. 
- leaderIdx := clus.WaitLeader(t) - followerIdx := (leaderIdx + 1) % 3 - cli := clus.Client(followerIdx) - - resp, err := cli.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to list member %v", err) - } - if len(resp.Members) != 3 { - t.Fatalf("number of members = %d, want %d", len(resp.Members), 3) - } - - // generate an random ID that does not exist in cluster - var randID uint64 - for { - randID = rand.Uint64() - notExist := true - for _, m := range resp.Members { - if m.ID == randID { - notExist = false - break - } - } - if notExist { - break - } - } - - expectedErrKeywords := "member not found" - _, err = cli.MemberPromote(context.Background(), randID) - if err == nil { - t.Fatalf("expect promoting voting member to fail, got no error") - } - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Errorf("expect error to contain %s, got %s", expectedErrKeywords, err.Error()) - } -} - -// TestMaxLearnerInCluster verifies that the maximum number of learners allowed in a cluster -func TestMaxLearnerInCluster(t *testing.T) { - integration2.BeforeTest(t) - - // 1. start with a cluster with 3 voting member and max learner 2 - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, ExperimentalMaxLearners: 2, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - // 2. 
adding 2 learner members should succeed - for i := 0; i < 2; i++ { - _, err := clus.Client(0).MemberAddAsLearner(context.Background(), []string{fmt.Sprintf("http://127.0.0.1:123%d", i)}) - if err != nil { - t.Fatalf("failed to add learner member %v", err) - } - } - - // ensure client endpoint is voting member - leaderIdx := clus.WaitLeader(t) - capi := clus.Client(leaderIdx) - resp1, err := capi.MemberList(context.Background()) - if err != nil { - t.Fatalf("failed to get member list") - } - numberOfLearners := 0 - for _, m := range resp1.Members { - if m.IsLearner { - numberOfLearners++ - } - } - if numberOfLearners != 2 { - t.Fatalf("added 2 learner node to cluster, got %d", numberOfLearners) - } - - // 3. cluster has 3 voting member and 2 learner, adding another learner should fail - _, err = clus.Client(0).MemberAddAsLearner(context.Background(), []string{"http://127.0.0.1:2342"}) - if err == nil { - t.Fatalf("expect member add to fail, got no error") - } - expectedErrKeywords := "too many learner members in cluster" - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Fatalf("expecting error to contain %s, got %s", expectedErrKeywords, err.Error()) - } - - // 4. cluster has 3 voting member and 1 learner, adding a voting member should succeed - _, err = clus.Client(0).MemberAdd(context.Background(), []string{"http://127.0.0.1:3453"}) - if err != nil { - t.Errorf("failed to add member %v", err) - } -} diff --git a/tests/integration/clientv3/concurrency/election_test.go b/tests/integration/clientv3/concurrency/election_test.go deleted file mode 100644 index 951c6a91fbf..00000000000 --- a/tests/integration/clientv3/concurrency/election_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency_test - -import ( - "context" - "log" - "strings" - "testing" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestResumeElection(t *testing.T) { - const prefix = "/resume-election/" - - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - var s *concurrency.Session - s, err = concurrency.NewSession(cli) - if err != nil { - log.Fatal(err) - } - defer s.Close() - - e := concurrency.NewElection(s, prefix) - - // entire test should never take more than 10 seconds - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - // become leader - if err = e.Campaign(ctx, "candidate1"); err != nil { - t.Fatalf("Campaign() returned non nil err: %s", err) - } - - // get the leadership details of the current election - var leader *clientv3.GetResponse - leader, err = e.Leader(ctx) - if err != nil { - t.Fatalf("Leader() returned non nil err: %s", err) - } - - // Recreate the election - e = concurrency.ResumeElection(s, prefix, - string(leader.Kvs[0].Key), leader.Kvs[0].CreateRevision) - - respChan := make(chan *clientv3.GetResponse) - go func() { - defer close(respChan) - o := e.Observe(ctx) - respChan <- nil - for resp := range o { - // Ignore any observations that candidate1 was elected - if string(resp.Kvs[0].Value) == "candidate1" { - continue - } - respChan <- &resp - return - } - 
t.Error("Observe() channel closed prematurely") - }() - - // wait until observe goroutine is running - <-respChan - - // put some random data to generate a change event, this put should be - // ignored by Observe() because it is not under the election prefix. - _, err = cli.Put(ctx, "foo", "bar") - if err != nil { - t.Fatalf("Put('foo') returned non nil err: %s", err) - } - - // resign as leader - if err := e.Resign(ctx); err != nil { - t.Fatalf("Resign() returned non nil err: %s", err) - } - - // elect a different candidate - if err := e.Campaign(ctx, "candidate2"); err != nil { - t.Fatalf("Campaign() returned non nil err: %s", err) - } - - // wait for observed leader change - resp := <-respChan - - kv := resp.Kvs[0] - if !strings.HasPrefix(string(kv.Key), prefix) { - t.Errorf("expected observed election to have prefix '%s' got %q", prefix, string(kv.Key)) - } - if string(kv.Value) != "candidate2" { - t.Errorf("expected new leader to be 'candidate1' got %q", string(kv.Value)) - } -} diff --git a/tests/integration/clientv3/concurrency/example_election_test.go b/tests/integration/clientv3/concurrency/example_election_test.go deleted file mode 100644 index 7ac8e436257..00000000000 --- a/tests/integration/clientv3/concurrency/example_election_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package concurrency_test - -import ( - "context" - "fmt" - "log" - "sync" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" -) - -func mockElection_Campaign() { - fmt.Println("completed first election with e2") - fmt.Println("completed second election with e1") -} - -func ExampleElection_Campaign() { - forUnitTestsRunInMockedContext( - mockElection_Campaign, - func() { - cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // create two separate sessions for election competition - s1, err := concurrency.NewSession(cli) - if err != nil { - log.Fatal(err) - } - defer s1.Close() - e1 := concurrency.NewElection(s1, "/my-election/") - - s2, err := concurrency.NewSession(cli) - if err != nil { - log.Fatal(err) - } - defer s2.Close() - e2 := concurrency.NewElection(s2, "/my-election/") - - // create competing candidates, with e1 initially losing to e2 - var wg sync.WaitGroup - wg.Add(2) - electc := make(chan *concurrency.Election, 2) - go func() { - defer wg.Done() - // delay candidacy so e2 wins first - time.Sleep(3 * time.Second) - if err := e1.Campaign(context.Background(), "e1"); err != nil { - log.Fatal(err) - } - electc <- e1 - }() - go func() { - defer wg.Done() - if err := e2.Campaign(context.Background(), "e2"); err != nil { - log.Fatal(err) - } - electc <- e2 - }() - - cctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - e := <-electc - fmt.Println("completed first election with", string((<-e.Observe(cctx)).Kvs[0].Value)) - - // resign so next candidate can be elected - if err := e.Resign(context.TODO()); err != nil { - log.Fatal(err) - } - - e = <-electc - fmt.Println("completed second election with", string((<-e.Observe(cctx)).Kvs[0].Value)) - - wg.Wait() - }) - - // Output: - // completed first election with e2 - // completed second election with e1 -} diff --git 
a/tests/integration/clientv3/concurrency/example_mutex_test.go b/tests/integration/clientv3/concurrency/example_mutex_test.go deleted file mode 100644 index b33ba66b46c..00000000000 --- a/tests/integration/clientv3/concurrency/example_mutex_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency_test - -import ( - "context" - "fmt" - "log" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" -) - -func mockMutex_TryLock() { - fmt.Println("acquired lock for s1") - fmt.Println("cannot acquire lock for s2, as already locked in another session") - fmt.Println("released lock for s1") - fmt.Println("acquired lock for s2") -} - -func ExampleMutex_TryLock() { - forUnitTestsRunInMockedContext( - mockMutex_TryLock, - func() { - cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // create two separate sessions for lock competition - s1, err := concurrency.NewSession(cli) - if err != nil { - log.Fatal(err) - } - defer s1.Close() - m1 := concurrency.NewMutex(s1, "/my-lock") - - s2, err := concurrency.NewSession(cli) - if err != nil { - log.Fatal(err) - } - defer s2.Close() - m2 := concurrency.NewMutex(s2, "/my-lock") - - // acquire lock for s1 - if err = m1.Lock(context.TODO()); err != nil { - log.Fatal(err) - } - fmt.Println("acquired lock for s1") - - if err = 
m2.TryLock(context.TODO()); err == nil { - log.Fatal("should not acquire lock") - } - if err == concurrency.ErrLocked { - fmt.Println("cannot acquire lock for s2, as already locked in another session") - } - - if err = m1.Unlock(context.TODO()); err != nil { - log.Fatal(err) - } - fmt.Println("released lock for s1") - if err = m2.TryLock(context.TODO()); err != nil { - log.Fatal(err) - } - fmt.Println("acquired lock for s2") - }) - - // Output: - // acquired lock for s1 - // cannot acquire lock for s2, as already locked in another session - // released lock for s1 - // acquired lock for s2 -} - -func mockMutex_Lock() { - fmt.Println("acquired lock for s1") - fmt.Println("released lock for s1") - fmt.Println("acquired lock for s2") -} - -func ExampleMutex_Lock() { - forUnitTestsRunInMockedContext( - mockMutex_Lock, - func() { - cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // create two separate sessions for lock competition - s1, err := concurrency.NewSession(cli) - if err != nil { - log.Fatal(err) - } - defer s1.Close() - m1 := concurrency.NewMutex(s1, "/my-lock/") - - s2, err := concurrency.NewSession(cli) - if err != nil { - log.Fatal(err) - } - defer s2.Close() - m2 := concurrency.NewMutex(s2, "/my-lock/") - - // acquire lock for s1 - if err := m1.Lock(context.TODO()); err != nil { - log.Fatal(err) - } - fmt.Println("acquired lock for s1") - - m2Locked := make(chan struct{}) - go func() { - defer close(m2Locked) - // wait until s1 is locks /my-lock/ - if err := m2.Lock(context.TODO()); err != nil { - log.Fatal(err) - } - }() - - if err := m1.Unlock(context.TODO()); err != nil { - log.Fatal(err) - } - fmt.Println("released lock for s1") - - <-m2Locked - fmt.Println("acquired lock for s2") - }) - - // Output: - // acquired lock for s1 - // released lock for s1 - // acquired lock for s2 -} diff --git a/tests/integration/clientv3/concurrency/example_stm_test.go 
b/tests/integration/clientv3/concurrency/example_stm_test.go deleted file mode 100644 index 1f39e5c278f..00000000000 --- a/tests/integration/clientv3/concurrency/example_stm_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency_test - -import ( - "context" - "fmt" - "log" - "math/rand" - "sync" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" -) - -func mockSTM_apply() { - fmt.Println("account sum is 500") -} - -// ExampleSTM_apply shows how to use STM with a transactional -// transfer between balances. 
-func ExampleSTM_apply() { - forUnitTestsRunInMockedContext( - mockSTM_apply, - func() { - cli, err := clientv3.New(clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // set up "accounts" - totalAccounts := 5 - for i := 0; i < totalAccounts; i++ { - k := fmt.Sprintf("accts/%d", i) - if _, err = cli.Put(context.TODO(), k, "100"); err != nil { - log.Fatal(err) - } - } - - exchange := func(stm concurrency.STM) error { - from, to := rand.Intn(totalAccounts), rand.Intn(totalAccounts) - if from == to { - // nothing to do - return nil - } - // read values - fromK, toK := fmt.Sprintf("accts/%d", from), fmt.Sprintf("accts/%d", to) - fromV, toV := stm.Get(fromK), stm.Get(toK) - fromInt, toInt := 0, 0 - fmt.Sscanf(fromV, "%d", &fromInt) - fmt.Sscanf(toV, "%d", &toInt) - - // transfer amount - xfer := fromInt / 2 - fromInt, toInt = fromInt-xfer, toInt+xfer - - // write back - stm.Put(fromK, fmt.Sprintf("%d", fromInt)) - stm.Put(toK, fmt.Sprintf("%d", toInt)) - return nil - } - - // concurrently exchange values between accounts - var wg sync.WaitGroup - wg.Add(10) - for i := 0; i < 10; i++ { - go func() { - defer wg.Done() - if _, serr := concurrency.NewSTM(cli, exchange); serr != nil { - log.Fatal(serr) - } - }() - } - wg.Wait() - - // confirm account sum matches sum from beginning. 
- sum := 0 - accts, err := cli.Get(context.TODO(), "accts/", clientv3.WithPrefix()) - if err != nil { - log.Fatal(err) - } - for _, kv := range accts.Kvs { - v := 0 - fmt.Sscanf(string(kv.Value), "%d", &v) - sum += v - } - - fmt.Println("account sum is", sum) - }) - // Output: - // account sum is 500 -} diff --git a/tests/integration/clientv3/concurrency/main_test.go b/tests/integration/clientv3/concurrency/main_test.go deleted file mode 100644 index 62a6e73a03e..00000000000 --- a/tests/integration/clientv3/concurrency/main_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency_test - -import ( - "os" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/tests/v3/integration" -) - -var lazyCluster = integration.NewLazyCluster() - -func exampleEndpoints() []string { return lazyCluster.EndpointsV3() } - -func forUnitTestsRunInMockedContext(mocking func(), example func()) { - // For integration tests runs in the provided environment - example() -} - -// TestMain sets up an etcd cluster if running the examples. 
-func TestMain(m *testing.M) { - cleanup := testutil.BeforeIntegrationExamples(m) - - v := m.Run() - lazyCluster.Terminate() - if v == 0 { - testutil.MustCheckLeakedGoroutine() - } - cleanup() - os.Exit(v) -} diff --git a/tests/integration/clientv3/concurrency/mutex_test.go b/tests/integration/clientv3/concurrency/mutex_test.go deleted file mode 100644 index 9313926fa8d..00000000000 --- a/tests/integration/clientv3/concurrency/mutex_test.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2019 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package concurrency_test - -import ( - "context" - "errors" - "testing" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestMutexLockSessionExpired(t *testing.T) { - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - // create two separate sessions for lock competition - s1, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } - defer s1.Close() - m1 := concurrency.NewMutex(s1, "/my-lock/") - - s2, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } - m2 := concurrency.NewMutex(s2, "/my-lock/") - - // acquire lock for s1 - if err := m1.Lock(context.TODO()); err != nil { - t.Fatal(err) - } - - m2Locked := make(chan struct{}) - var err2 error - go func() { - defer close(m2Locked) - // m2 blocks since m1 already acquired lock /my-lock/ - if err2 = m2.Lock(context.TODO()); err2 == nil { - t.Error("expect session expired error") - } - }() - - // revoke the session of m2 before unlock m1 - err = s2.Close() - if err != nil { - t.Fatal(err) - } - if err := m1.Unlock(context.TODO()); err != nil { - t.Fatal(err) - } - - <-m2Locked -} - -func TestMutexUnlock(t *testing.T) { - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - s1, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } - defer s1.Close() - - m1 := concurrency.NewMutex(s1, "/my-lock/") - err = m1.Unlock(context.TODO()) - if err == nil { - t.Fatal("expect lock released error") - } - if !errors.Is(err, concurrency.ErrLockReleased) { - t.Fatal(err) - } - - if err := m1.Lock(context.TODO()); err != nil { - t.Fatal(err) - } - - if err := m1.Unlock(context.TODO()); err != nil { - t.Fatal(err) - } - - err = m1.Unlock(context.TODO()) - if err == nil { - 
t.Fatal("expect lock released error") - } - if !errors.Is(err, concurrency.ErrLockReleased) { - t.Fatal(err) - } -} diff --git a/tests/integration/clientv3/concurrency/session_test.go b/tests/integration/clientv3/concurrency/session_test.go deleted file mode 100644 index d1ca413200d..00000000000 --- a/tests/integration/clientv3/concurrency/session_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package concurrency_test - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestSessionOptions(t *testing.T) { - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - lease, err := cli.Grant(context.Background(), 100) - if err != nil { - t.Fatal(err) - } - s, err := concurrency.NewSession(cli, concurrency.WithLease(lease.ID)) - if err != nil { - t.Fatal(err) - } - defer s.Close() - assert.Equal(t, s.Lease(), lease.ID) - - go s.Orphan() - select { - case <-s.Done(): - case <-time.After(time.Millisecond * 100): - t.Fatal("session did not get orphaned as expected") - } - -} -func TestSessionTTLOptions(t *testing.T) { - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: exampleEndpoints()}) - if err != 
nil { - t.Fatal(err) - } - defer cli.Close() - - var setTTL int = 90 - s, err := concurrency.NewSession(cli, concurrency.WithTTL(setTTL)) - if err != nil { - t.Fatal(err) - } - defer s.Close() - - leaseId := s.Lease() - // TTL retrieved should be less than the set TTL, but not equal to default:60 or exprired:-1 - resp, err := cli.Lease.TimeToLive(context.Background(), leaseId) - if err != nil { - t.Log(err) - } - if resp.TTL == -1 { - t.Errorf("client lease should not be expired: %d", resp.TTL) - - } - if resp.TTL == 60 { - t.Errorf("default TTL value is used in the session, instead of set TTL: %d", setTTL) - } - if resp.TTL >= int64(setTTL) || resp.TTL < int64(setTTL)-20 { - t.Errorf("Session TTL from lease should be less, but close to set TTL %d, have: %d", setTTL, resp.TTL) - } - -} diff --git a/tests/integration/clientv3/connectivity/black_hole_test.go b/tests/integration/clientv3/connectivity/black_hole_test.go deleted file mode 100644 index 0a9ad3318aa..00000000000 --- a/tests/integration/clientv3/connectivity/black_hole_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !cluster_proxy - -package connectivity_test - -import ( - "context" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3" - "google.golang.org/grpc" -) - -// TestBalancerUnderBlackholeKeepAliveWatch tests when watch discovers it cannot talk to -// blackholed endpoint, client balancer switches to healthy one. -// TODO: test server-to-client keepalive ping -func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 2, - GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings - UseBridge: true, - }) - defer clus.Terminate(t) - - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()} - - ccfg := clientv3.Config{ - Endpoints: []string{eps[0]}, - DialTimeout: time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - DialKeepAliveTime: time.Second, - DialKeepAliveTimeout: 500 * time.Millisecond, - } - - // gRPC internal implementation related. - pingInterval := ccfg.DialKeepAliveTime + ccfg.DialKeepAliveTimeout - // 3s for slow machine to process watch and reset connections - // TODO: only send healthy endpoint to gRPC so gRPC wont waste time to - // dial for unhealthy endpoint. - // then we can reduce 3s to 1s. - timeout := pingInterval + integration2.RequestWaitTimeout - - cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify()) - if _, ok := <-wch; !ok { - t.Fatalf("watch failed on creation") - } - - // endpoint can switch to eps[1] when it detects the failure of eps[0] - cli.SetEndpoints(eps...) 
- - // give enough time for balancer resolution - time.Sleep(5 * time.Second) - - clus.Members[0].Bridge().Blackhole() - - if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } - select { - case <-wch: - case <-time.After(timeout): - t.Error("took too long to receive watch events") - } - - clus.Members[0].Bridge().Unblackhole() - - // waiting for moving eps[0] out of unhealthy, so that it can be re-pined. - time.Sleep(ccfg.DialTimeout) - - clus.Members[1].Bridge().Blackhole() - - // make sure client[0] can connect to eps[0] after remove the blackhole. - if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil { - t.Fatal(err) - } - - select { - case <-wch: - case <-time.After(timeout): - t.Error("took too long to receive watch events") - } -} - -func TestBalancerUnderBlackholeNoKeepAlivePut(t *testing.T) { - testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Put(ctx, "foo", "bar") - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout { - return errExpected - } - return err - }) -} - -func TestBalancerUnderBlackholeNoKeepAliveDelete(t *testing.T) { - testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Delete(ctx, "foo") - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout { - return errExpected - } - return err - }) -} - -func TestBalancerUnderBlackholeNoKeepAliveTxn(t *testing.T) { - testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Txn(ctx). - If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). - Then(clientv3.OpPut("foo", "bar")). 
- Else(clientv3.OpPut("foo", "baz")).Commit() - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout { - return errExpected - } - return err - }) -} - -func TestBalancerUnderBlackholeNoKeepAliveLinearizableGet(t *testing.T) { - testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Get(ctx, "a") - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout { - return errExpected - } - return err - }) -} - -func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) { - testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Get(ctx, "a", clientv3.WithSerializable()) - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) { - return errExpected - } - return err - }) -} - -// testBalancerUnderBlackholeNoKeepAlive ensures that first request to blackholed endpoint -// fails due to context timeout, but succeeds on next try, with endpoint switch. -func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 2, - UseBridge: true, - }) - defer clus.Terminate(t) - - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()} - - ccfg := clientv3.Config{ - Endpoints: []string{eps[0]}, - DialTimeout: 1 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - // wait for eps[0] to be pinned - clientv3test.MustWaitPinReady(t, cli) - - // add all eps to list, so that when the original pined one fails - // the client can switch to other available eps - cli.SetEndpoints(eps...) 
- - // blackhole eps[0] - clus.Members[0].Bridge().Blackhole() - - // With round robin balancer, client will make a request to a healthy endpoint - // within a few requests. - // TODO: first operation can succeed - // when gRPC supports better retry on non-delivered request - for i := 0; i < 5; i++ { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - err = op(cli, ctx) - cancel() - if err == nil { - break - } else if err == errExpected { - t.Logf("#%d: current error %v", i, err) - } else { - t.Errorf("#%d: failed with error %v", i, err) - } - } - if err != nil { - t.Fatal(err) - } -} diff --git a/tests/integration/clientv3/connectivity/dial_test.go b/tests/integration/clientv3/connectivity/dial_test.go deleted file mode 100644 index 4f9f7784f45..00000000000 --- a/tests/integration/clientv3/connectivity/dial_test.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package connectivity_test - -import ( - "context" - "math/rand" - "strings" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - "go.etcd.io/etcd/tests/v3/framework/testutils" - clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3" - "google.golang.org/grpc" -) - -var ( - testTLSInfo = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("../../../fixtures/server.key.insecure"), - CertFile: testutils.MustAbsPath("../../../fixtures/server.crt"), - TrustedCAFile: testutils.MustAbsPath("../../../fixtures/ca.crt"), - ClientCertAuth: true, - } - - testTLSInfoExpired = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("../../fixtures-expired/server.key.insecure"), - CertFile: testutils.MustAbsPath("../../fixtures-expired/server.crt"), - TrustedCAFile: testutils.MustAbsPath("../../fixtures-expired/ca.crt"), - ClientCertAuth: true, - } -) - -// TestDialTLSExpired tests client with expired certs fails to dial. -func TestDialTLSExpired(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo}) - defer clus.Terminate(t) - - tls, err := testTLSInfoExpired.ClientConfig() - if err != nil { - t.Fatal(err) - } - // expect remote errors "tls: bad certificate" - _, err = integration2.NewClient(t, clientv3.Config{ - Endpoints: []string{clus.Members[0].GRPCURL()}, - DialTimeout: 3 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - TLS: tls, - }) - if !clientv3test.IsClientTimeout(err) { - t.Fatalf("expected dial timeout error, got %v", err) - } -} - -// TestDialTLSNoConfig ensures the client fails to dial / times out -// when TLS endpoints (https, unixs) are given but no tls config. 
-func TestDialTLSNoConfig(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo}) - defer clus.Terminate(t) - // expect "signed by unknown authority" - c, err := integration2.NewClient(t, clientv3.Config{ - Endpoints: []string{clus.Members[0].GRPCURL()}, - DialTimeout: time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - }) - defer func() { - if c != nil { - c.Close() - } - }() - if !clientv3test.IsClientTimeout(err) { - t.Fatalf("expected dial timeout error, got %v", err) - } -} - -// TestDialSetEndpointsBeforeFail ensures SetEndpoints can replace unavailable -// endpoints with available ones. -func TestDialSetEndpointsBeforeFail(t *testing.T) { - testDialSetEndpoints(t, true) -} - -func TestDialSetEndpointsAfterFail(t *testing.T) { - testDialSetEndpoints(t, false) -} - -// testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones. -func testDialSetEndpoints(t *testing.T, setBefore bool) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // get endpoint list - eps := make([]string, 3) - for i := range eps { - eps[i] = clus.Members[i].GRPCURL() - } - toKill := rand.Intn(len(eps)) - - cfg := clientv3.Config{ - Endpoints: []string{eps[toKill]}, - DialTimeout: 1 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - if setBefore { - cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3]) - } - // make a dead node - clus.Members[toKill].Stop(t) - clus.WaitLeader(t) - - if !setBefore { - cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3]) - } - time.Sleep(time.Second * 2) - ctx, cancel := context.WithTimeout(context.Background(), integration2.RequestWaitTimeout) - if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != 
nil { - t.Fatal(err) - } - cancel() -} - -// TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint -// with a new one that doesn't include original endpoint. -func TestSwitchSetEndpoints(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // get non partitioned members endpoints - eps := []string{clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} - - cli := clus.Client(0) - clus.Members[0].InjectPartition(t, clus.Members[1:]...) - - cli.SetEndpoints(eps...) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if _, err := cli.Get(ctx, "foo"); err != nil { - t.Fatal(err) - } -} - -func TestRejectOldCluster(t *testing.T) { - integration2.BeforeTest(t) - // 2 endpoints to test multi-endpoint Status - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2}) - defer clus.Terminate(t) - - cfg := clientv3.Config{ - Endpoints: []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}, - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - RejectOldCluster: true, - } - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - cli.Close() -} - -// TestDialForeignEndpoint checks an endpoint that is not registered -// with the balancer can be dialed. -func TestDialForeignEndpoint(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2}) - defer clus.Terminate(t) - - conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0]) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - - // grpc can return a lazy connection that's not connected yet; confirm - // that it can communicate with the cluster. 
- kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn), clus.Client(0)) - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - defer cancel() - if _, gerr := kvc.Get(ctx, "abc"); gerr != nil { - t.Fatal(err) - } -} - -// TestSetEndpointAndPut checks that a Put following a SetEndpoints -// to a working endpoint will always succeed. -func TestSetEndpointAndPut(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2}) - defer clus.Terminate(t) - - clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL()) - _, err := clus.Client(1).Put(context.TODO(), "foo", "bar") - if err != nil && !strings.Contains(err.Error(), "closing") { - t.Fatal(err) - } -} diff --git a/tests/integration/clientv3/connectivity/doc.go b/tests/integration/clientv3/connectivity/doc.go deleted file mode 100644 index c90413b44d9..00000000000 --- a/tests/integration/clientv3/connectivity/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package connectivity diff --git a/tests/integration/clientv3/connectivity/main_test.go b/tests/integration/clientv3/connectivity/main_test.go deleted file mode 100644 index 39a188823da..00000000000 --- a/tests/integration/clientv3/connectivity/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package connectivity - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/tests/integration/clientv3/connectivity/network_partition_test.go b/tests/integration/clientv3/connectivity/network_partition_test.go deleted file mode 100644 index 5b9cc602e15..00000000000 --- a/tests/integration/clientv3/connectivity/network_partition_test.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !cluster_proxy - -package connectivity_test - -import ( - "context" - "errors" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3" - "google.golang.org/grpc" -) - -var errExpected = errors.New("expected error") - -func isErrorExpected(err error) bool { - return clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || - err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail -} - -// TestBalancerUnderNetworkPartitionPut tests when one member becomes isolated, -// first Put request fails, and following retry succeeds with client balancer -// switching to others. -func TestBalancerUnderNetworkPartitionPut(t *testing.T) { - testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Put(ctx, "a", "b") - if isErrorExpected(err) { - return errExpected - } - return err - }, time.Second) -} - -func TestBalancerUnderNetworkPartitionDelete(t *testing.T) { - testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Delete(ctx, "a") - if isErrorExpected(err) { - return errExpected - } - return err - }, time.Second) -} - -func TestBalancerUnderNetworkPartitionTxn(t *testing.T) { - testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Txn(ctx). - If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). - Then(clientv3.OpPut("foo", "bar")). 
- Else(clientv3.OpPut("foo", "baz")).Commit() - if isErrorExpected(err) { - return errExpected - } - return err - }, time.Second) -} - -// TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout tests -// when one member becomes isolated, first quorum Get request succeeds -// by switching endpoints within the timeout (long enough to cover endpoint switch). -func TestBalancerUnderNetworkPartitionLinearizableGetWithLongTimeout(t *testing.T) { - testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Get(ctx, "a") - if isErrorExpected(err) { - return errExpected - } - return err - }, 7*time.Second) -} - -// TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout tests -// when one member becomes isolated, first quorum Get request fails, -// and following retry succeeds with client balancer switching to others. -func TestBalancerUnderNetworkPartitionLinearizableGetWithShortTimeout(t *testing.T) { - testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Get(ctx, "a") - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) { - return errExpected - } - return err - }, time.Second) -} - -func TestBalancerUnderNetworkPartitionSerializableGet(t *testing.T) { - testBalancerUnderNetworkPartition(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Get(ctx, "a", clientv3.WithSerializable()) - return err - }, time.Second) -} - -func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 3, - }) - defer clus.Terminate(t) - - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} - - // expect pin eps[0] - ccfg := clientv3.Config{ - Endpoints: []string{eps[0]}, - DialTimeout: 3 * time.Second, - 
DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - // wait for eps[0] to be pinned - clientv3test.MustWaitPinReady(t, cli) - - // add other endpoints for later endpoint switch - cli.SetEndpoints(eps...) - time.Sleep(time.Second * 2) - clus.Members[0].InjectPartition(t, clus.Members[1:]...) - - for i := 0; i < 5; i++ { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - err = op(cli, ctx) - t.Logf("Op returned error: %v", err) - t.Log("Cancelling...") - cancel() - if err == nil { - break - } - if err != errExpected { - t.Errorf("#%d: expected '%v', got '%v'", i, errExpected, err) - } - // give enough time for endpoint switch - // TODO: remove random sleep by syncing directly with balancer - if i == 0 { - time.Sleep(5 * time.Second) - } - } - if err != nil { - t.Errorf("balancer did not switch in time (%v)", err) - } -} - -// TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection ensures balancer -// switches endpoint when leader fails and linearizable get requests returns -// "etcdserver: request timed out". 
-func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 3, - }) - defer clus.Terminate(t) - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} - - lead := clus.WaitLeader(t) - - timeout := 3 * clus.Members[(lead+1)%2].ServerConfig.ReqTimeout() - - cli, err := integration2.NewClient(t, clientv3.Config{ - Endpoints: []string{eps[(lead+1)%2]}, - DialTimeout: 2 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - }) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - // add all eps to list, so that when the original pined one fails - // the client can switch to other available eps - cli.SetEndpoints(eps[lead], eps[(lead+1)%2]) - - // isolate leader - clus.Members[lead].InjectPartition(t, clus.Members[(lead+1)%3], clus.Members[(lead+2)%3]) - - // expects balancer to round robin to leader within two attempts - for i := 0; i < 2; i++ { - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - _, err = cli.Get(ctx, "a") - cancel() - if err == nil { - break - } - } - if err != nil { - t.Fatal(err) - } -} - -func TestBalancerUnderNetworkPartitionWatchLeader(t *testing.T) { - testBalancerUnderNetworkPartitionWatch(t, true) -} - -func TestBalancerUnderNetworkPartitionWatchFollower(t *testing.T) { - testBalancerUnderNetworkPartitionWatch(t, false) -} - -// testBalancerUnderNetworkPartitionWatch ensures watch stream -// to a partitioned node be closed when context requires leader. 
-func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 3, - }) - defer clus.Terminate(t) - - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} - - target := clus.WaitLeader(t) - if !isolateLeader { - target = (target + 1) % 3 - } - - // pin eps[target] - watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) - if err != nil { - t.Fatal(err) - } - t.Logf("watchCli created to: %v", target) - defer watchCli.Close() - - // wait for eps[target] to be connected - clientv3test.MustWaitPinReady(t, watchCli) - t.Logf("successful connection with server: %v", target) - - // We stick to the original endpoint, so when the one fails we don't switch - // under the cover to other available eps, but expose the failure to the - // caller (test assertion). - - wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify()) - select { - case <-wch: - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("took too long to create watch") - } - - t.Logf("watch established") - - // isolate eps[target] - clus.Members[target].InjectPartition(t, - clus.Members[(target+1)%3], - clus.Members[(target+2)%3], - ) - - select { - case ev := <-wch: - if len(ev.Events) != 0 { - t.Fatal("expected no event") - } - if err = ev.Err(); err != rpctypes.ErrNoLeader { - t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err) - } - case <-time.After(integration2.RequestWaitTimeout): // enough time to detect leader lost - t.Fatal("took too long to detect leader lost") - } -} - -func TestDropReadUnderNetworkPartition(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 3, - }) - defer clus.Terminate(t) - leaderIndex := clus.WaitLeader(t) - // get a follower endpoint - eps := 
[]string{clus.Members[(leaderIndex+1)%3].GRPCURL()} - ccfg := clientv3.Config{ - Endpoints: eps, - DialTimeout: 10 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - // wait for eps[0] to be pinned - clientv3test.MustWaitPinReady(t, cli) - - // add other endpoints for later endpoint switch - cli.SetEndpoints(eps...) - time.Sleep(time.Second * 2) - conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL()) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - - clus.Members[leaderIndex].InjectPartition(t, clus.Members[(leaderIndex+1)%3], clus.Members[(leaderIndex+2)%3]) - kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn), nil) - ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) - _, err = kvc.Get(ctx, "a") - cancel() - if err != rpctypes.ErrLeaderChanged { - t.Fatalf("expected %v, got %v", rpctypes.ErrLeaderChanged, err) - } - - for i := 0; i < 5; i++ { - ctx, cancel = context.WithTimeout(context.TODO(), 10*time.Second) - _, err = kvc.Get(ctx, "a") - cancel() - if err != nil { - if err == rpctypes.ErrTimeout { - <-time.After(time.Second) - i++ - continue - } - t.Fatalf("expected nil or timeout, got %v", err) - } - // No error returned and no retry required - break - } -} diff --git a/tests/integration/clientv3/connectivity/server_shutdown_test.go b/tests/integration/clientv3/connectivity/server_shutdown_test.go deleted file mode 100644 index 0da7b9d1682..00000000000 --- a/tests/integration/clientv3/connectivity/server_shutdown_test.go +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package connectivity_test - -import ( - "bytes" - "context" - "fmt" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3" -) - -// TestBalancerUnderServerShutdownWatch expects that watch client -// switch its endpoints when the member of the pinned endpoint fails. -func TestBalancerUnderServerShutdownWatch(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 3, - UseBridge: true, - }) - defer clus.Terminate(t) - - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} - - lead := clus.WaitLeader(t) - - // pin eps[lead] - watchCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[lead]}}) - if err != nil { - t.Fatal(err) - } - defer watchCli.Close() - - // wait for eps[lead] to be pinned - clientv3test.MustWaitPinReady(t, watchCli) - - // add all eps to list, so that when the original pined one fails - // the client can switch to other available eps - watchCli.SetEndpoints(eps...) 
- - key, val := "foo", "bar" - wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify()) - select { - case <-wch: - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("took too long to create watch") - } - - donec := make(chan struct{}) - go func() { - defer close(donec) - - // switch to others when eps[lead] is shut down - select { - case ev := <-wch: - if werr := ev.Err(); werr != nil { - t.Error(werr) - } - if len(ev.Events) != 1 { - t.Errorf("expected one event, got %+v", ev) - } - if !bytes.Equal(ev.Events[0].Kv.Value, []byte(val)) { - t.Errorf("expected %q, got %+v", val, ev.Events[0].Kv) - } - case <-time.After(7 * time.Second): - t.Error("took too long to receive events") - } - }() - - // shut down eps[lead] - clus.Members[lead].Terminate(t) - - // writes to eps[lead+1] - putCli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[(lead+1)%3]}}) - if err != nil { - t.Fatal(err) - } - defer putCli.Close() - for { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - _, err = putCli.Put(ctx, key, val) - cancel() - if err == nil { - break - } - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail { - continue - } - t.Fatal(err) - } - - select { - case <-donec: - case <-time.After(5 * time.Second): // enough time for balancer switch - t.Fatal("took too long to receive events") - } -} - -func TestBalancerUnderServerShutdownPut(t *testing.T) { - testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Put(ctx, "foo", "bar") - return err - }) -} - -func TestBalancerUnderServerShutdownDelete(t *testing.T) { - testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Delete(ctx, "foo") - return err - }) -} - -func TestBalancerUnderServerShutdownTxn(t *testing.T) { - 
testBalancerUnderServerShutdownMutable(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Txn(ctx). - If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). - Then(clientv3.OpPut("foo", "bar")). - Else(clientv3.OpPut("foo", "baz")).Commit() - return err - }) -} - -// testBalancerUnderServerShutdownMutable expects that when the member of -// the pinned endpoint is shut down, the balancer switches its endpoints -// and all subsequent put/delete/txn requests succeed with new endpoints. -func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Client, context.Context) error) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 3, - }) - defer clus.Terminate(t) - - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} - - // pin eps[0] - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - // wait for eps[0] to be pinned - clientv3test.MustWaitPinReady(t, cli) - - // add all eps to list, so that when the original pined one fails - // the client can switch to other available eps - cli.SetEndpoints(eps...) - - // shut down eps[0] - clus.Members[0].Terminate(t) - - // switched to others when eps[0] was explicitly shut down - // and following request should succeed - // TODO: remove this (expose client connection state?) 
- time.Sleep(time.Second) - - cctx, ccancel := context.WithTimeout(context.Background(), time.Second) - err = op(cli, cctx) - ccancel() - if err != nil { - t.Fatal(err) - } -} - -func TestBalancerUnderServerShutdownGetLinearizable(t *testing.T) { - testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Get(ctx, "foo") - return err - }, 7*time.Second) // give enough time for leader election, balancer switch -} - -func TestBalancerUnderServerShutdownGetSerializable(t *testing.T) { - testBalancerUnderServerShutdownImmutable(t, func(cli *clientv3.Client, ctx context.Context) error { - _, err := cli.Get(ctx, "foo", clientv3.WithSerializable()) - return err - }, 2*time.Second) -} - -// testBalancerUnderServerShutdownImmutable expects that when the member of -// the pinned endpoint is shut down, the balancer switches its endpoints -// and all subsequent range requests succeed with new endpoints. -func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Client, context.Context) error, timeout time.Duration) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{ - Size: 3, - }) - defer clus.Terminate(t) - - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()} - - // pin eps[0] - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}}) - if err != nil { - t.Errorf("failed to create client: %v", err) - } - defer cli.Close() - - // wait for eps[0] to be pinned - clientv3test.MustWaitPinReady(t, cli) - - // add all eps to list, so that when the original pined one fails - // the client can switch to other available eps - cli.SetEndpoints(eps...) 
- - // shut down eps[0] - clus.Members[0].Terminate(t) - - // switched to others when eps[0] was explicitly shut down - // and following request should succeed - cctx, ccancel := context.WithTimeout(context.Background(), timeout) - err = op(cli, cctx) - ccancel() - if err != nil { - t.Errorf("failed to finish range request in time %v (timeout %v)", err, timeout) - } -} - -func TestBalancerUnderServerStopInflightLinearizableGetOnRestart(t *testing.T) { - tt := []pinTestOpt{ - {pinLeader: true, stopPinFirst: true}, - {pinLeader: true, stopPinFirst: false}, - {pinLeader: false, stopPinFirst: true}, - {pinLeader: false, stopPinFirst: false}, - } - for _, w := range tt { - t.Run(fmt.Sprintf("%#v", w), func(t *testing.T) { - testBalancerUnderServerStopInflightRangeOnRestart(t, true, w) - }) - } -} - -func TestBalancerUnderServerStopInflightSerializableGetOnRestart(t *testing.T) { - tt := []pinTestOpt{ - {pinLeader: true, stopPinFirst: true}, - {pinLeader: true, stopPinFirst: false}, - {pinLeader: false, stopPinFirst: true}, - {pinLeader: false, stopPinFirst: false}, - } - for _, w := range tt { - t.Run(fmt.Sprintf("%#v", w), func(t *testing.T) { - testBalancerUnderServerStopInflightRangeOnRestart(t, false, w) - }) - } -} - -type pinTestOpt struct { - pinLeader bool - stopPinFirst bool -} - -// testBalancerUnderServerStopInflightRangeOnRestart expects -// inflight range request reconnects on server restart. 
-func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizable bool, opt pinTestOpt) { - integration2.BeforeTest(t) - - cfg := &integration2.ClusterConfig{ - Size: 2, - UseBridge: true, - } - if linearizable { - cfg.Size = 3 - } - - clus := integration2.NewCluster(t, cfg) - defer clus.Terminate(t) - eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()} - if linearizable { - eps = append(eps, clus.Members[2].GRPCURL()) - } - - lead := clus.WaitLeader(t) - - target := lead - if !opt.pinLeader { - target = (target + 1) % 2 - } - - // pin eps[target] - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{eps[target]}}) - if err != nil { - t.Errorf("failed to create client: %v", err) - } - defer cli.Close() - - // wait for eps[target] to be pinned - clientv3test.MustWaitPinReady(t, cli) - - // add all eps to list, so that when the original pined one fails - // the client can switch to other available eps - cli.SetEndpoints(eps...) - - if opt.stopPinFirst { - clus.Members[target].Stop(t) - // give some time for balancer switch before stopping the other - time.Sleep(time.Second) - clus.Members[(target+1)%2].Stop(t) - } else { - clus.Members[(target+1)%2].Stop(t) - // balancer cannot pin other member since it's already stopped - clus.Members[target].Stop(t) - } - - // 3-second is the minimum interval between endpoint being marked - // as unhealthy and being removed from unhealthy, so possibly - // takes >5-second to unpin and repin an endpoint - // TODO: decrease timeout when balancer switch rewrite - clientTimeout := 7 * time.Second - - var gops []clientv3.OpOption - if !linearizable { - gops = append(gops, clientv3.WithSerializable()) - } - - donec, readyc := make(chan struct{}), make(chan struct{}, 1) - go func() { - defer close(donec) - ctx, cancel := context.WithTimeout(context.TODO(), clientTimeout) - readyc <- struct{}{} - - // TODO: The new grpc load balancer will not pin to an endpoint - // as intended by 
this test. But it will round robin member within - // two attempts. - // Remove retry loop once the new grpc load balancer provides retry. - for i := 0; i < 2; i++ { - _, err = cli.Get(ctx, "abc", gops...) - if err == nil { - break - } - } - cancel() - if err != nil { - t.Errorf("unexpected error: %v", err) - } - }() - - <-readyc - clus.Members[target].Restart(t) - - select { - case <-time.After(clientTimeout + integration2.RequestWaitTimeout): - t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt) - case <-donec: - } -} diff --git a/tests/integration/clientv3/doc.go b/tests/integration/clientv3/doc.go deleted file mode 100644 index 3ebfdc95882..00000000000 --- a/tests/integration/clientv3/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clientv3test implements tests built upon embedded etcd, and focuses on -// correctness of etcd client. -package clientv3test diff --git a/tests/integration/clientv3/examples/example_auth_test.go b/tests/integration/clientv3/examples/example_auth_test.go deleted file mode 100644 index b062d799cc0..00000000000 --- a/tests/integration/clientv3/examples/example_auth_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "context" - "fmt" - "log" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -func mockAuth() { - fmt.Println(`etcdserver: permission denied`) - fmt.Println(`user u permission: key "foo", range end "zoo"`) -} - -func ExampleAuth() { - forUnitTestsRunInMockedContext( - mockAuth, - func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - if _, err = cli.RoleAdd(context.TODO(), "root"); err != nil { - log.Fatal(err) - } - if _, err = cli.UserAdd(context.TODO(), "root", "123"); err != nil { - log.Fatal(err) - } - if _, err = cli.UserGrantRole(context.TODO(), "root", "root"); err != nil { - log.Fatal(err) - } - - if _, err = cli.RoleAdd(context.TODO(), "r"); err != nil { - log.Fatal(err) - } - - if _, err = cli.RoleGrantPermission( - context.TODO(), - "r", // role name - "foo", // key - "zoo", // range end - clientv3.PermissionType(clientv3.PermReadWrite), - ); err != nil { - log.Fatal(err) - } - if _, err = cli.UserAdd(context.TODO(), "u", "123"); err != nil { - log.Fatal(err) - } - if _, err = cli.UserGrantRole(context.TODO(), "u", "r"); err != nil { - log.Fatal(err) - } - if _, err = cli.AuthEnable(context.TODO()); err != nil { - log.Fatal(err) - } - - cliAuth, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - Username: "u", - Password: "123", - }) - if err != nil { - log.Fatal(err) - } - defer cliAuth.Close() - - if _, err = 
cliAuth.Put(context.TODO(), "foo1", "bar"); err != nil { - log.Fatal(err) - } - - _, err = cliAuth.Txn(context.TODO()). - If(clientv3.Compare(clientv3.Value("zoo1"), ">", "abc")). - Then(clientv3.OpPut("zoo1", "XYZ")). - Else(clientv3.OpPut("zoo1", "ABC")). - Commit() - fmt.Println(err) - - // now check the permission with the root account - rootCli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - Username: "root", - Password: "123", - }) - if err != nil { - log.Fatal(err) - } - defer rootCli.Close() - - resp, err := rootCli.RoleGet(context.TODO(), "r") - if err != nil { - log.Fatal(err) - } - fmt.Printf("user u permission: key %q, range end %q\n", resp.Perm[0].Key, resp.Perm[0].RangeEnd) - - if _, err = rootCli.AuthDisable(context.TODO()); err != nil { - log.Fatal(err) - } - }) - // Output: etcdserver: permission denied - // user u permission: key "foo", range end "zoo" -} diff --git a/tests/integration/clientv3/examples/example_cluster_test.go b/tests/integration/clientv3/examples/example_cluster_test.go deleted file mode 100644 index 1d2da78c777..00000000000 --- a/tests/integration/clientv3/examples/example_cluster_test.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3_test - -import ( - "context" - "fmt" - "log" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -func mockCluster_memberList() { - fmt.Println("members: 3") -} - -func ExampleCluster_memberList() { - forUnitTestsRunInMockedContext(mockCluster_memberList, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - resp, err := cli.MemberList(context.Background()) - if err != nil { - log.Fatal(err) - } - fmt.Println("members:", len(resp.Members)) - }) - // Output: members: 3 -} - -func mockCluster_memberAdd() { - fmt.Println("added member.PeerURLs: [http://localhost:32380]") - fmt.Println("members count: 4") -} - -func ExampleCluster_memberAdd() { - forUnitTestsRunInMockedContext(mockCluster_memberAdd, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // Add member 1: - mresp, err := cli.MemberAdd(context.Background(), []string{"http://localhost:32380"}) - if err != nil { - log.Fatal(err) - } - fmt.Println("added member.PeerURLs:", mresp.Member.PeerURLs) - fmt.Println("members count:", len(mresp.Members)) - - // Restore original cluster state - _, err = cli.MemberRemove(context.Background(), mresp.Member.ID) - if err != nil { - log.Fatal(err) - } - }) - // Output: - // added member.PeerURLs: [http://localhost:32380] - // members count: 4 -} - -func mockCluster_memberAddAsLearner() { - fmt.Println("members count: 4") - fmt.Println("added member.IsLearner: true") -} - -func ExampleCluster_memberAddAsLearner() { - forUnitTestsRunInMockedContext(mockCluster_memberAddAsLearner, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - mresp, err := 
cli.MemberAddAsLearner(context.Background(), []string{"http://localhost:32381"}) - if err != nil { - log.Fatal(err) - } - - // Restore original cluster state - _, err = cli.MemberRemove(context.Background(), mresp.Member.ID) - if err != nil { - log.Fatal(err) - } - - fmt.Println("members count:", len(mresp.Members)) - fmt.Println("added member.IsLearner:", mresp.Member.IsLearner) - }) - // Output: - // members count: 4 - // added member.IsLearner: true -} - -func mockCluster_memberRemove() {} - -func ExampleCluster_memberRemove() { - forUnitTestsRunInMockedContext(mockCluster_memberRemove, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - resp, err := cli.MemberList(context.Background()) - if err != nil { - log.Fatal(err) - } - - _, err = cli.MemberRemove(context.Background(), resp.Members[0].ID) - if err != nil { - log.Fatal(err) - } - - // Restore original cluster: - _, err = cli.MemberAdd(context.Background(), resp.Members[0].PeerURLs) - if err != nil { - log.Fatal(err) - } - }) -} - -func mockCluster_memberUpdate() {} - -func ExampleCluster_memberUpdate() { - forUnitTestsRunInMockedContext(mockCluster_memberUpdate, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - resp, err := cli.MemberList(context.Background()) - if err != nil { - log.Fatal(err) - } - - peerURLs := []string{"http://localhost:12380"} - _, err = cli.MemberUpdate(context.Background(), resp.Members[0].ID, peerURLs) - if err != nil { - log.Fatal(err) - } - - // Restore to mitigate impact on other tests: - _, err = cli.MemberUpdate(context.Background(), resp.Members[0].ID, resp.Members[0].PeerURLs) - if err != nil { - log.Fatal(err) - } - }) - // Output: -} diff --git a/tests/integration/clientv3/examples/example_kv_test.go 
b/tests/integration/clientv3/examples/example_kv_test.go deleted file mode 100644 index 6bfb2428055..00000000000 --- a/tests/integration/clientv3/examples/example_kv_test.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "context" - "fmt" - "log" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" -) - -func mockKV_put() {} - -func ExampleKV_put() { - forUnitTestsRunInMockedContext(mockKV_put, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - _, err = cli.Put(ctx, "sample_key", "sample_value") - cancel() - if err != nil { - log.Fatal(err) - } - }) - // Output: -} - -func mockKV_putErrorHandling() { - fmt.Println("client-side error: etcdserver: key is not provided") -} - -func ExampleKV_putErrorHandling() { - forUnitTestsRunInMockedContext(mockKV_putErrorHandling, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - _, err = cli.Put(ctx, "", "sample_value") - cancel() - if err != nil { - switch err { - case 
context.Canceled: - fmt.Printf("ctx is canceled by another routine: %v\n", err) - case context.DeadlineExceeded: - fmt.Printf("ctx is attached with a deadline is exceeded: %v\n", err) - case rpctypes.ErrEmptyKey: - fmt.Printf("client-side error: %v\n", err) - default: - fmt.Printf("bad cluster endpoints, which are not etcd servers: %v\n", err) - } - } - }) - // Output: client-side error: etcdserver: key is not provided -} - -func mockKV_get() { - fmt.Println("foo : bar") -} - -func ExampleKV_get() { - forUnitTestsRunInMockedContext(mockKV_get, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - _, err = cli.Put(context.TODO(), "foo", "bar") - if err != nil { - log.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - resp, err := cli.Get(ctx, "foo") - cancel() - if err != nil { - log.Fatal(err) - } - for _, ev := range resp.Kvs { - fmt.Printf("%s : %s\n", ev.Key, ev.Value) - } - }) - // Output: foo : bar -} - -func mockKV_getWithRev() { - fmt.Println("foo : bar1") -} - -func ExampleKV_getWithRev() { - forUnitTestsRunInMockedContext(mockKV_getWithRev, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - presp, err := cli.Put(context.TODO(), "foo", "bar1") - if err != nil { - log.Fatal(err) - } - _, err = cli.Put(context.TODO(), "foo", "bar2") - if err != nil { - log.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - resp, err := cli.Get(ctx, "foo", clientv3.WithRev(presp.Header.Revision)) - cancel() - if err != nil { - log.Fatal(err) - } - for _, ev := range resp.Kvs { - fmt.Printf("%s : %s\n", ev.Key, ev.Value) - } - }) - // Output: foo : bar1 -} - -func mockKV_getSortedPrefix() { - fmt.Println(`key_2 : value`) - 
fmt.Println(`key_1 : value`) - fmt.Println(`key_0 : value`) -} - -func ExampleKV_getSortedPrefix() { - forUnitTestsRunInMockedContext(mockKV_getSortedPrefix, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - for i := range make([]int, 3) { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - _, err = cli.Put(ctx, fmt.Sprintf("key_%d", i), "value") - cancel() - if err != nil { - log.Fatal(err) - } - } - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - resp, err := cli.Get(ctx, "key", clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend)) - cancel() - if err != nil { - log.Fatal(err) - } - for _, ev := range resp.Kvs { - fmt.Printf("%s : %s\n", ev.Key, ev.Value) - } - }) - // Output: - // key_2 : value - // key_1 : value - // key_0 : value -} - -func mockKV_delete() { - fmt.Println("Deleted all keys: true") -} - -func ExampleKV_delete() { - forUnitTestsRunInMockedContext(mockKV_delete, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - defer cancel() - - // count keys about to be deleted - gresp, err := cli.Get(ctx, "key", clientv3.WithPrefix()) - if err != nil { - log.Fatal(err) - } - - // delete the keys - dresp, err := cli.Delete(ctx, "key", clientv3.WithPrefix()) - if err != nil { - log.Fatal(err) - } - - fmt.Println("Deleted all keys:", int64(len(gresp.Kvs)) == dresp.Deleted) - }) - // Output: - // Deleted all keys: true -} - -func mockKV_compact() {} - -func ExampleKV_compact() { - forUnitTestsRunInMockedContext(mockKV_compact, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if 
err != nil { - log.Fatal(err) - } - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - resp, err := cli.Get(ctx, "foo") - cancel() - if err != nil { - log.Fatal(err) - } - compRev := resp.Header.Revision // specify compact revision of your choice - - ctx, cancel = context.WithTimeout(context.Background(), requestTimeout) - _, err = cli.Compact(ctx, compRev) - cancel() - if err != nil { - log.Fatal(err) - } - }) - // Output: -} - -func mockKV_txn() { - fmt.Println("key : XYZ") -} - -func ExampleKV_txn() { - forUnitTestsRunInMockedContext(mockKV_txn, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - kvc := clientv3.NewKV(cli) - - _, err = kvc.Put(context.TODO(), "key", "xyz") - if err != nil { - log.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - _, err = kvc.Txn(ctx). - // txn value comparisons are lexical - If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). - // the "Then" runs, since "xyz" > "abc" - Then(clientv3.OpPut("key", "XYZ")). - // the "Else" does not run - Else(clientv3.OpPut("key", "ABC")). 
- Commit() - cancel() - if err != nil { - log.Fatal(err) - } - - gresp, err := kvc.Get(context.TODO(), "key") - if err != nil { - log.Fatal(err) - } - for _, ev := range gresp.Kvs { - fmt.Printf("%s : %s\n", ev.Key, ev.Value) - } - }) - // Output: key : XYZ -} - -func mockKV_do() {} - -func ExampleKV_do() { - forUnitTestsRunInMockedContext(mockKV_do, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - ops := []clientv3.Op{ - clientv3.OpPut("put-key", "123"), - clientv3.OpGet("put-key"), - clientv3.OpPut("put-key", "456")} - - for _, op := range ops { - if _, err := cli.Do(context.TODO(), op); err != nil { - log.Fatal(err) - } - } - }) - // Output: -} diff --git a/tests/integration/clientv3/examples/example_lease_test.go b/tests/integration/clientv3/examples/example_lease_test.go deleted file mode 100644 index beca3692bfa..00000000000 --- a/tests/integration/clientv3/examples/example_lease_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3_test - -import ( - "context" - "fmt" - "log" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -func mockLease_grant() { -} - -func ExampleLease_grant() { - forUnitTestsRunInMockedContext(mockLease_grant, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // minimum lease TTL is 5-second - resp, err := cli.Grant(context.TODO(), 5) - if err != nil { - log.Fatal(err) - } - - // after 5 seconds, the key 'foo' will be removed - _, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID)) - if err != nil { - log.Fatal(err) - } - }) - //Output: -} - -func mockLease_revoke() { - fmt.Println("number of keys: 0") -} - -func ExampleLease_revoke() { - forUnitTestsRunInMockedContext(mockLease_revoke, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - resp, err := cli.Grant(context.TODO(), 5) - if err != nil { - log.Fatal(err) - } - - _, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID)) - if err != nil { - log.Fatal(err) - } - - // revoking lease expires the key attached to its lease ID - _, err = cli.Revoke(context.TODO(), resp.ID) - if err != nil { - log.Fatal(err) - } - - gresp, err := cli.Get(context.TODO(), "foo") - if err != nil { - log.Fatal(err) - } - fmt.Println("number of keys:", len(gresp.Kvs)) - }) - // Output: number of keys: 0 -} - -func mockLease_keepAlive() { - fmt.Println("ttl: 5") -} - -func ExampleLease_keepAlive() { - forUnitTestsRunInMockedContext(mockLease_keepAlive, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - resp, err := cli.Grant(context.TODO(), 5) - if err != nil { - log.Fatal(err) - } - - _, err = 
cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID)) - if err != nil { - log.Fatal(err) - } - - // the key 'foo' will be kept forever - ch, kaerr := cli.KeepAlive(context.TODO(), resp.ID) - if kaerr != nil { - log.Fatal(kaerr) - } - - ka := <-ch - if ka != nil { - fmt.Println("ttl:", ka.TTL) - } else { - fmt.Println("Unexpected NULL") - } - }) - // Output: ttl: 5 -} - -func mockLease_keepAliveOnce() { - fmt.Println("ttl: 5") -} - -func ExampleLease_keepAliveOnce() { - forUnitTestsRunInMockedContext(mockLease_keepAliveOnce, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - resp, err := cli.Grant(context.TODO(), 5) - if err != nil { - log.Fatal(err) - } - - _, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID)) - if err != nil { - log.Fatal(err) - } - - // to renew the lease only once - ka, kaerr := cli.KeepAliveOnce(context.TODO(), resp.ID) - if kaerr != nil { - log.Fatal(kaerr) - } - - fmt.Println("ttl:", ka.TTL) - }) - // Output: ttl: 5 -} diff --git a/tests/integration/clientv3/examples/example_maintenance_test.go b/tests/integration/clientv3/examples/example_maintenance_test.go deleted file mode 100644 index ff545e8de7d..00000000000 --- a/tests/integration/clientv3/examples/example_maintenance_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "context" - "log" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -func mockMaintenance_status() {} - -func ExampleMaintenance_status() { - forUnitTestsRunInMockedContext(mockMaintenance_status, func() { - for _, ep := range exampleEndpoints() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{ep}, - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - _, err = cli.Status(context.Background(), ep) - if err != nil { - log.Fatal(err) - } - } - }) - // Output: -} - -func mockMaintenance_defragment() {} - -func ExampleMaintenance_defragment() { - forUnitTestsRunInMockedContext(mockMaintenance_defragment, func() { - for _, ep := range exampleEndpoints() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{ep}, - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - if _, err = cli.Defragment(context.TODO(), ep); err != nil { - log.Fatal(err) - } - } - }) - // Output: -} diff --git a/tests/integration/clientv3/examples/example_metrics_test.go b/tests/integration/clientv3/examples/example_metrics_test.go deleted file mode 100644 index d21c6d393e2..00000000000 --- a/tests/integration/clientv3/examples/example_metrics_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "context" - "fmt" - "io" - "log" - "net" - "net/http" - "strings" - - grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - clientv3 "go.etcd.io/etcd/client/v3" - "google.golang.org/grpc" -) - -func mockClient_metrics() { - fmt.Println(`grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1`) -} - -func ExampleClient_metrics() { - forUnitTestsRunInMockedContext(mockClient_metrics, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialOptions: []grpc.DialOption{ - grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor), - grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor), - }, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // get a key so it shows up in the metrics as a range RPC - cli.Get(context.TODO(), "test_key") - - // listen for all Prometheus metrics - ln, err := net.Listen("tcp", ":0") - if err != nil { - log.Fatal(err) - } - donec := make(chan struct{}) - go func() { - defer close(donec) - http.Serve(ln, promhttp.Handler()) - }() - defer func() { - ln.Close() - <-donec - }() - - // make an http request to fetch all Prometheus metrics - url := "http://" + ln.Addr().String() + "/metrics" - resp, err := http.Get(url) - if err != nil { - log.Fatalf("fetch error: %v", err) - } - b, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - log.Fatalf("fetch error: reading %s: %v", url, err) - } - - // confirm range request in metrics - for _, l := range strings.Split(string(b), "\n") { - if strings.Contains(l, `grpc_client_started_total{grpc_method="Range"`) { - fmt.Println(l) - break - } - } - }) - // Output: - // grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1 -} diff --git 
a/tests/integration/clientv3/examples/example_test.go b/tests/integration/clientv3/examples/example_test.go deleted file mode 100644 index b9b8be461e7..00000000000 --- a/tests/integration/clientv3/examples/example_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "context" - "log" - - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" -) - -func mockConfig_insecure() {} - -func ExampleConfig_insecure() { - forUnitTestsRunInMockedContext(mockConfig_insecure, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() // make sure to close the client - - _, err = cli.Put(context.TODO(), "foo", "bar") - if err != nil { - log.Fatal(err) - } - }) - - // Without the line below the test is not being executed - - // Output: -} - -func mockConfig_withTLS() {} - -func ExampleConfig_withTLS() { - forUnitTestsRunInMockedContext(mockConfig_withTLS, func() { - tlsInfo := transport.TLSInfo{ - CertFile: "/tmp/test-certs/test-name-1.pem", - KeyFile: "/tmp/test-certs/test-name-1-key.pem", - TrustedCAFile: "/tmp/test-certs/trusted-ca.pem", - } - tlsConfig, err := tlsInfo.ClientConfig() - if err != nil { - log.Fatal(err) - } - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: 
dialTimeout, - TLS: tlsConfig, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() // make sure to close the client - - _, err = cli.Put(context.TODO(), "foo", "bar") - if err != nil { - log.Fatal(err) - } - }) - // Without the line below the test is not being executed - // Output: -} diff --git a/tests/integration/clientv3/examples/example_watch_test.go b/tests/integration/clientv3/examples/example_watch_test.go deleted file mode 100644 index ac44f8ca38d..00000000000 --- a/tests/integration/clientv3/examples/example_watch_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3_test - -import ( - "context" - "fmt" - "log" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" -) - -func mockWatcher_watch() { - fmt.Println(`PUT "foo" : "bar"`) -} - -func ExampleWatcher_watch() { - forUnitTestsRunInMockedContext(mockWatcher_watch, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - rch := cli.Watch(context.Background(), "foo") - for wresp := range rch { - for _, ev := range wresp.Events { - fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value) - } - } - }) - // PUT "foo" : "bar" -} - -func mockWatcher_watchWithPrefix() { - fmt.Println(`PUT "foo1" : "bar"`) -} - -func ExampleWatcher_watchWithPrefix() { - forUnitTestsRunInMockedContext(mockWatcher_watchWithPrefix, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - rch := cli.Watch(context.Background(), "foo", clientv3.WithPrefix()) - for wresp := range rch { - for _, ev := range wresp.Events { - fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value) - } - } - }) - // PUT "foo1" : "bar" -} - -func mockWatcher_watchWithRange() { - fmt.Println(`PUT "foo1" : "bar1"`) - fmt.Println(`PUT "foo2" : "bar2"`) - fmt.Println(`PUT "foo3" : "bar3"`) -} - -func ExampleWatcher_watchWithRange() { - forUnitTestsRunInMockedContext(mockWatcher_watchWithRange, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - defer cli.Close() - - // watches within ['foo1', 'foo4'), in lexicographical order - rch := cli.Watch(context.Background(), "foo1", clientv3.WithRange("foo4")) - - go func() { - cli.Put(context.Background(), "foo1", "bar1") - cli.Put(context.Background(), "foo5", "bar5") - cli.Put(context.Background(), "foo2", "bar2") 
- cli.Put(context.Background(), "foo3", "bar3") - }() - - i := 0 - for wresp := range rch { - for _, ev := range wresp.Events { - fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value) - i++ - if i == 3 { - // After 3 messages we are done. - cli.Delete(context.Background(), "foo", clientv3.WithPrefix()) - cli.Close() - return - } - } - } - }) - - // Output: - // PUT "foo1" : "bar1" - // PUT "foo2" : "bar2" - // PUT "foo3" : "bar3" -} - -func mockWatcher_watchWithProgressNotify() { - fmt.Println(`wresp.IsProgressNotify: true`) -} - -func ExampleWatcher_watchWithProgressNotify() { - forUnitTestsRunInMockedContext(mockWatcher_watchWithProgressNotify, func() { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: exampleEndpoints(), - DialTimeout: dialTimeout, - }) - if err != nil { - log.Fatal(err) - } - - rch := cli.Watch(context.Background(), "foo", clientv3.WithProgressNotify()) - closedch := make(chan bool) - go func() { - // This assumes that cluster is configured with frequent WatchProgressNotifyInterval - // e.g. WatchProgressNotifyInterval: 200 * time.Millisecond. - time.Sleep(time.Second) - err := cli.Close() - if err != nil { - log.Fatal(err) - } - close(closedch) - }() - wresp := <-rch - fmt.Println("wresp.IsProgressNotify:", wresp.IsProgressNotify()) - <-closedch - }) - - // TODO: Rather wresp.IsProgressNotify: true should be expected - - // Output: - // wresp.IsProgressNotify: true -} diff --git a/tests/integration/clientv3/examples/main_test.go b/tests/integration/clientv3/examples/main_test.go deleted file mode 100644 index b88b15723a6..00000000000 --- a/tests/integration/clientv3/examples/main_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3_test - -import ( - "log" - "os" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - "go.etcd.io/etcd/tests/v3/integration" -) - -const ( - dialTimeout = 5 * time.Second - requestTimeout = 10 * time.Second -) - -var lazyCluster = integration.NewLazyClusterWithConfig( - integration2.ClusterConfig{ - Size: 3, - WatchProgressNotifyInterval: 200 * time.Millisecond, - DisableStrictReconfigCheck: true}) - -func exampleEndpoints() []string { return lazyCluster.EndpointsV3() } - -func forUnitTestsRunInMockedContext(_ func(), example func()) { - // For integration tests runs in the provided environment - example() -} - -// TestMain sets up an etcd cluster if running the examples. 
-func TestMain(m *testing.M) { - testutil.ExitInShortMode("Skipping: the tests require real cluster") - - tempDir, err := os.MkdirTemp(os.TempDir(), "etcd-integration") - if err != nil { - log.Printf("Failed to obtain tempDir: %v", tempDir) - os.Exit(1) - } - defer os.RemoveAll(tempDir) - - err = os.Chdir(tempDir) - if err != nil { - log.Printf("Failed to change working dir to: %s: %v", tempDir, err) - os.Exit(1) - } - log.Printf("Running tests (examples) in dir(%v): ...", tempDir) - v := m.Run() - lazyCluster.Terminate() - - if v == 0 { - testutil.MustCheckLeakedGoroutine() - } - os.Exit(v) -} diff --git a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go deleted file mode 100644 index 36ed38e55cc..00000000000 --- a/tests/integration/clientv3/experimental/recipes/v3_barrier_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package recipes_test - -import ( - "testing" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestBarrierSingleNode(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - testBarrier(t, 5, func() *clientv3.Client { return clus.Client(0) }) -} - -func TestBarrierMultiNode(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() }) -} - -func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client) { - b := recipe.NewBarrier(chooseClient(), "test-barrier") - if err := b.Hold(); err != nil { - t.Fatalf("could not hold barrier (%v)", err) - } - if err := b.Hold(); err == nil { - t.Fatalf("able to double-hold barrier") - } - - donec := make(chan struct{}) - stopc := make(chan struct{}) - defer close(stopc) - - for i := 0; i < waiters; i++ { - go func() { - br := recipe.NewBarrier(chooseClient(), "test-barrier") - if err := br.Wait(); err != nil { - t.Errorf("could not wait on barrier (%v)", err) - } - select { - case donec <- struct{}{}: - case <-stopc: - } - - }() - } - - select { - case <-donec: - t.Fatalf("barrier did not wait") - default: - } - - if err := b.Release(); err != nil { - t.Fatalf("could not release barrier (%v)", err) - } - - timerC := time.After(time.Duration(waiters*100) * time.Millisecond) - for i := 0; i < waiters; i++ { - select { - case <-timerC: - t.Fatalf("barrier timed out") - case <-donec: - } - } -} diff --git a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go b/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go deleted file mode 100644 index 92ef058c5ff..00000000000 --- 
a/tests/integration/clientv3/experimental/recipes/v3_double_barrier_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package recipes_test - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestDoubleBarrier(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - waiters := 10 - session, err := concurrency.NewSession(clus.RandClient()) - if err != nil { - t.Error(err) - } - defer session.Orphan() - - b := recipe.NewDoubleBarrier(session, "test-barrier", waiters) - donec := make(chan struct{}) - defer close(donec) - for i := 0; i < waiters-1; i++ { - go func() { - session, err := concurrency.NewSession(clus.RandClient()) - if err != nil { - t.Error(err) - } - defer session.Orphan() - - bb := recipe.NewDoubleBarrier(session, "test-barrier", waiters) - if err := bb.Enter(); err != nil { - t.Errorf("could not enter on barrier (%v)", err) - } - <-donec - if err := bb.Leave(); err != nil { - t.Errorf("could not leave on barrier (%v)", err) - } - <-donec - }() - } - - time.Sleep(10 * time.Millisecond) - select { - case donec <- struct{}{}: 
- t.Fatalf("barrier did not enter-wait") - default: - } - - if err := b.Enter(); err != nil { - t.Fatalf("could not enter last barrier (%v)", err) - } - - timerC := time.After(time.Duration(waiters*100) * time.Millisecond) - for i := 0; i < waiters-1; i++ { - select { - case <-timerC: - t.Fatalf("barrier enter timed out") - case donec <- struct{}{}: - } - } - - time.Sleep(10 * time.Millisecond) - select { - case donec <- struct{}{}: - t.Fatalf("barrier did not leave-wait") - default: - } - - b.Leave() - timerC = time.After(time.Duration(waiters*100) * time.Millisecond) - for i := 0; i < waiters-1; i++ { - select { - case <-timerC: - t.Fatalf("barrier leave timed out") - case donec <- struct{}{}: - } - } -} - -func TestDoubleBarrierTooManyClients(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - waiters := 10 - session, err := concurrency.NewSession(clus.RandClient()) - if err != nil { - t.Error(err) - } - defer session.Orphan() - - b := recipe.NewDoubleBarrier(session, "test-barrier", waiters) - donec := make(chan struct{}) - var ( - wgDone sync.WaitGroup // make sure all clients have finished the tasks - wgEntered sync.WaitGroup // make sure all clients have entered the double barrier - ) - wgDone.Add(waiters) - wgEntered.Add(waiters) - for i := 0; i < waiters; i++ { - go func() { - defer wgDone.Done() - session, err := concurrency.NewSession(clus.RandClient()) - if err != nil { - t.Error(err) - } - defer session.Orphan() - - bb := recipe.NewDoubleBarrier(session, "test-barrier", waiters) - if err := bb.Enter(); err != nil { - t.Errorf("could not enter on barrier (%v)", err) - } - wgEntered.Done() - <-donec - if err := bb.Leave(); err != nil { - t.Errorf("could not leave on barrier (%v)", err) - } - }() - } - - // Wait until all clients have already entered the double barrier, so - // no any other client can enter the barrier. 
- wgEntered.Wait() - t.Log("Try to enter into double barrier") - if err := b.Enter(); err != recipe.ErrTooManyClients { - t.Errorf("Unexcepted error, expected: ErrTooManyClients, got: %v", err) - } - - resp, err := clus.RandClient().Get(context.TODO(), "test-barrier/waiters", clientv3.WithPrefix()) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - // Make sure the extra `b.Enter()` did not create a new ephemeral key - assert.Equal(t, waiters, len(resp.Kvs)) - close(donec) - - wgDone.Wait() -} - -func TestDoubleBarrierFailover(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - waiters := 10 - donec := make(chan struct{}) - defer close(donec) - - s0, err := concurrency.NewSession(clus.Client(0)) - if err != nil { - t.Error(err) - } - defer s0.Orphan() - s1, err := concurrency.NewSession(clus.Client(0)) - if err != nil { - t.Error(err) - } - defer s1.Orphan() - - // sacrificial barrier holder; lease will be revoked - go func() { - b := recipe.NewDoubleBarrier(s0, "test-barrier", waiters) - if berr := b.Enter(); berr != nil { - t.Errorf("could not enter on barrier (%v)", berr) - } - <-donec - }() - - for i := 0; i < waiters-1; i++ { - go func() { - b := recipe.NewDoubleBarrier(s1, "test-barrier", waiters) - if berr := b.Enter(); berr != nil { - t.Errorf("could not enter on barrier (%v)", berr) - } - <-donec - b.Leave() - <-donec - }() - } - - // wait for barrier enter to unblock - for i := 0; i < waiters; i++ { - select { - case donec <- struct{}{}: - case <-time.After(10 * time.Second): - t.Fatalf("timed out waiting for enter, %d", i) - } - } - - if err = s0.Close(); err != nil { - t.Fatal(err) - } - // join on rest of waiters - for i := 0; i < waiters-1; i++ { - select { - case donec <- struct{}{}: - case <-time.After(10 * time.Second): - t.Fatalf("timed out waiting for leave, %d", i) - } - } -} diff --git 
a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go b/tests/integration/clientv3/experimental/recipes/v3_lock_test.go deleted file mode 100644 index 79f61662032..00000000000 --- a/tests/integration/clientv3/experimental/recipes/v3_lock_test.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package recipes_test - -import ( - "context" - "fmt" - "math/rand" - "sync" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestMutexLockSingleNode(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - var clients []*clientv3.Client - testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients)) - integration2.CloseClients(t, clients) -} - -func TestMutexLockMultiNode(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - var clients []*clientv3.Client - testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients)) - integration2.CloseClients(t, clients) -} - -func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) { - // 
stream lock acquisitions - lockedC := make(chan *concurrency.Mutex, waiters) - errC := make(chan error, waiters) - - var wg sync.WaitGroup - wg.Add(waiters) - - for i := 0; i < waiters; i++ { - go func(i int) { - defer wg.Done() - session, err := concurrency.NewSession(chooseClient()) - if err != nil { - errC <- fmt.Errorf("#%d: failed to create new session: %w", i, err) - return - } - m := concurrency.NewMutex(session, "test-mutex") - if err := m.Lock(context.TODO()); err != nil { - errC <- fmt.Errorf("#%d: failed to wait on lock: %w", i, err) - return - } - lockedC <- m - }(i) - } - // unlock locked mutexes - timerC := time.After(time.Duration(waiters) * time.Second) - for i := 0; i < waiters; i++ { - select { - case <-timerC: - t.Fatalf("timed out waiting for lock %d", i) - case err := <-errC: - t.Fatalf("Unexpected error: %v", err) - case m := <-lockedC: - // lock acquired with m - select { - case <-lockedC: - t.Fatalf("lock %d followers did not wait", i) - default: - } - if err := m.Unlock(context.TODO()); err != nil { - t.Fatalf("could not release lock (%v)", err) - } - } - } - wg.Wait() -} - -func TestMutexTryLockSingleNode(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - t.Logf("3 nodes cluster created...") - var clients []*clientv3.Client - testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients)) - integration2.CloseClients(t, clients) -} - -func TestMutexTryLockMultiNode(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - var clients []*clientv3.Client - testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients)) - integration2.CloseClients(t, clients) -} - -func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - 
defer cancel() - - lockedC := make(chan *concurrency.Mutex) - notlockedC := make(chan *concurrency.Mutex) - - for i := 0; i < lockers; i++ { - go func(i int) { - session, err := concurrency.NewSession(chooseClient()) - if err != nil { - t.Error(err) - } - m := concurrency.NewMutex(session, "test-mutex-try-lock") - err = m.TryLock(ctx) - if err == nil { - select { - case lockedC <- m: - case <-ctx.Done(): - t.Errorf("Thread: %v, Context failed: %v", i, err) - } - } else if err == concurrency.ErrLocked { - select { - case notlockedC <- m: - case <-ctx.Done(): - t.Errorf("Thread: %v, Context failed: %v", i, err) - } - } else { - t.Errorf("Thread: %v; Unexpected Error %v", i, err) - } - }(i) - } - - timerC := time.After(30 * time.Second) - select { - case <-lockedC: - for i := 0; i < lockers-1; i++ { - select { - case <-lockedC: - t.Fatalf("Multiple Mutes locked on same key") - case <-notlockedC: - case <-timerC: - t.Errorf("timed out waiting for lock") - } - } - case <-timerC: - t.Errorf("timed out waiting for lock (30s)") - } -} - -// TestMutexSessionRelock ensures that acquiring the same lock with the same -// session will not result in deadlock. -func TestMutexSessionRelock(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - session, err := concurrency.NewSession(clus.RandClient()) - if err != nil { - t.Error(err) - } - - m := concurrency.NewMutex(session, "test-mutex") - if err := m.Lock(context.TODO()); err != nil { - t.Fatal(err) - } - - m2 := concurrency.NewMutex(session, "test-mutex") - if err := m2.Lock(context.TODO()); err != nil { - t.Fatal(err) - } -} - -// TestMutexWaitsOnCurrentHolder ensures a mutex is only acquired once all -// waiters older than the new owner are gone by testing the case where -// the waiter prior to the acquirer expires before the current holder. 
-func TestMutexWaitsOnCurrentHolder(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cctx := context.Background() - - cli := clus.Client(0) - - firstOwnerSession, err := concurrency.NewSession(cli) - if err != nil { - t.Error(err) - } - defer firstOwnerSession.Close() - firstOwnerMutex := concurrency.NewMutex(firstOwnerSession, "test-mutex") - if err = firstOwnerMutex.Lock(cctx); err != nil { - t.Fatal(err) - } - - victimSession, err := concurrency.NewSession(cli) - if err != nil { - t.Error(err) - } - defer victimSession.Close() - victimDonec := make(chan struct{}) - go func() { - defer close(victimDonec) - concurrency.NewMutex(victimSession, "test-mutex").Lock(cctx) - }() - - // ensure mutexes associated with firstOwnerSession and victimSession waits before new owner - wch := cli.Watch(cctx, "test-mutex", clientv3.WithPrefix(), clientv3.WithRev(1)) - putCounts := 0 - for putCounts < 2 { - select { - case wrp := <-wch: - putCounts += len(wrp.Events) - case <-time.After(time.Second): - t.Fatal("failed to receive watch response") - } - } - if putCounts != 2 { - t.Fatalf("expect 2 put events, but got %v", putCounts) - } - - newOwnerSession, err := concurrency.NewSession(cli) - if err != nil { - t.Error(err) - } - defer newOwnerSession.Close() - newOwnerDonec := make(chan struct{}) - go func() { - defer close(newOwnerDonec) - concurrency.NewMutex(newOwnerSession, "test-mutex").Lock(cctx) - }() - - select { - case wrp := <-wch: - if len(wrp.Events) != 1 { - t.Fatalf("expect a event, but got %v events", len(wrp.Events)) - } - if e := wrp.Events[0]; e.Type != mvccpb.PUT { - t.Fatalf("expect a put event on prefix test-mutex, but got event type %v", e.Type) - } - case <-time.After(time.Second): - t.Fatalf("failed to receive a watch response") - } - - // simulate losing the client that's next in line to acquire the lock - victimSession.Close() - - // ensures the deletion 
of victim waiter from server side. - select { - case wrp := <-wch: - if len(wrp.Events) != 1 { - t.Fatalf("expect a event, but got %v events", len(wrp.Events)) - } - if e := wrp.Events[0]; e.Type != mvccpb.DELETE { - t.Fatalf("expect a delete event on prefix test-mutex, but got event type %v", e.Type) - } - case <-time.After(time.Second): - t.Fatal("failed to receive a watch response") - } - - select { - case <-newOwnerDonec: - t.Fatal("new owner obtained lock before first owner unlocked") - default: - } - - if err := firstOwnerMutex.Unlock(cctx); err != nil { - t.Fatal(err) - } - - select { - case <-newOwnerDonec: - case <-time.After(time.Second): - t.Fatal("new owner failed to obtain lock") - } - - select { - case <-victimDonec: - case <-time.After(time.Second): - t.Fatal("victim mutex failed to exit after first owner releases lock") - } -} - -func BenchmarkMutex4Waiters(b *testing.B) { - integration2.BeforeTest(b) - // XXX switch tests to use TB interface - clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(nil) - for i := 0; i < b.N; i++ { - testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() }) - } -} - -func TestRWMutexSingleNode(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) }) -} - -func TestRWMutexMultiNode(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() }) -} - -func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) { - // stream rwlock acquistions - rlockedC := make(chan *recipe.RWMutex, 1) - wlockedC := make(chan *recipe.RWMutex, 1) - for i := 0; i < waiters; i++ { - go func() { - session, err := 
concurrency.NewSession(chooseClient()) - if err != nil { - t.Error(err) - } - rwm := recipe.NewRWMutex(session, "test-rwmutex") - if rand.Intn(2) == 0 { - if err := rwm.RLock(); err != nil { - t.Errorf("could not rlock (%v)", err) - } - rlockedC <- rwm - } else { - if err := rwm.Lock(); err != nil { - t.Errorf("could not lock (%v)", err) - } - wlockedC <- rwm - } - }() - } - // unlock locked rwmutexes - timerC := time.After(time.Duration(waiters) * time.Second) - for i := 0; i < waiters; i++ { - select { - case <-timerC: - t.Fatalf("timed out waiting for lock %d", i) - case wl := <-wlockedC: - select { - case <-rlockedC: - t.Fatalf("rlock %d readers did not wait", i) - default: - } - if err := wl.Unlock(); err != nil { - t.Fatalf("could not release lock (%v)", err) - } - case rl := <-rlockedC: - select { - case <-wlockedC: - t.Fatalf("rlock %d writers did not wait", i) - default: - } - if err := rl.RUnlock(); err != nil { - t.Fatalf("could not release rlock (%v)", err) - } - } - } -} diff --git a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go b/tests/integration/clientv3/experimental/recipes/v3_queue_test.go deleted file mode 100644 index 7ace22eb8f6..00000000000 --- a/tests/integration/clientv3/experimental/recipes/v3_queue_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package recipes_test - -import ( - "fmt" - "math/rand" - "sync/atomic" - "testing" - - recipe "go.etcd.io/etcd/client/v3/experimental/recipes" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -const ( - manyQueueClients = 3 - queueItemsPerClient = 2 -) - -// TestQueueOneReaderOneWriter confirms the queue is FIFO -func TestQueueOneReaderOneWriter(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - done := make(chan struct{}) - defer func() { - <-done - }() - go func() { - defer func() { - done <- struct{}{} - }() - etcdc := clus.RandClient() - q := recipe.NewQueue(etcdc, "testq") - for i := 0; i < 5; i++ { - if err := q.Enqueue(fmt.Sprintf("%d", i)); err != nil { - t.Errorf("error enqueuing (%v)", err) - } - } - }() - - etcdc := clus.RandClient() - q := recipe.NewQueue(etcdc, "testq") - for i := 0; i < 5; i++ { - s, err := q.Dequeue() - if err != nil { - t.Fatalf("error dequeueing (%v)", err) - } - if s != fmt.Sprintf("%d", i) { - t.Fatalf("expected dequeue value %v, got %v", s, i) - } - } -} - -func TestQueueManyReaderOneWriter(t *testing.T) { - testQueueNReaderMWriter(t, manyQueueClients, 1) -} - -func TestQueueOneReaderManyWriter(t *testing.T) { - testQueueNReaderMWriter(t, 1, manyQueueClients) -} - -func TestQueueManyReaderManyWriter(t *testing.T) { - testQueueNReaderMWriter(t, manyQueueClients, manyQueueClients) -} - -// BenchmarkQueue benchmarks Queues using many/many readers/writers -func BenchmarkQueue(b *testing.B) { - integration2.BeforeTest(b) - - // XXX switch tests to use TB interface - clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(nil) - for i := 0; i < b.N; i++ { - testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients) - } -} - -// TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities. 
-func TestPrQueueOneReaderOneWriter(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - // write out five items with random priority - etcdc := clus.RandClient() - q := recipe.NewPriorityQueue(etcdc, "testprq") - for i := 0; i < 5; i++ { - // [0, 2] priority for priority collision to test seq keys - pr := uint16(rand.Intn(3)) - if err := q.Enqueue(fmt.Sprintf("%d", pr), pr); err != nil { - t.Fatalf("error enqueuing (%v)", err) - } - } - - // read back items; confirm priority order is respected - lastPr := -1 - for i := 0; i < 5; i++ { - s, err := q.Dequeue() - if err != nil { - t.Fatalf("error dequeueing (%v)", err) - } - curPr := 0 - if _, err := fmt.Sscanf(s, "%d", &curPr); err != nil { - t.Fatalf(`error parsing item "%s" (%v)`, s, err) - } - if lastPr > curPr { - t.Fatalf("expected priority %v > %v", curPr, lastPr) - } - } -} - -func TestPrQueueManyReaderManyWriter(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - rqs := newPriorityQueues(clus, manyQueueClients) - wqs := newPriorityQueues(clus, manyQueueClients) - testReadersWriters(t, rqs, wqs) -} - -// BenchmarkPrQueueOneReaderOneWriter benchmarks Queues using n/n readers/writers -func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) { - integration2.BeforeTest(b) - - // XXX switch tests to use TB interface - clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(nil) - rqs := newPriorityQueues(clus, 1) - wqs := newPriorityQueues(clus, 1) - for i := 0; i < b.N; i++ { - testReadersWriters(nil, rqs, wqs) - } -} - -func testQueueNReaderMWriter(t *testing.T, n int, m int) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - testReadersWriters(t, newQueues(clus, n), newQueues(clus, m)) -} - -func 
newQueues(clus *integration2.Cluster, n int) (qs []testQueue) { - for i := 0; i < n; i++ { - etcdc := clus.RandClient() - qs = append(qs, recipe.NewQueue(etcdc, "q")) - } - return qs -} - -func newPriorityQueues(clus *integration2.Cluster, n int) (qs []testQueue) { - for i := 0; i < n; i++ { - etcdc := clus.RandClient() - q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")} - qs = append(qs, q) - } - return qs -} - -func testReadersWriters(t *testing.T, rqs []testQueue, wqs []testQueue) { - rerrc := make(chan error) - werrc := make(chan error) - manyWriters(wqs, queueItemsPerClient, werrc) - manyReaders(rqs, len(wqs)*queueItemsPerClient, rerrc) - for range wqs { - if err := <-werrc; err != nil { - t.Errorf("error writing (%v)", err) - } - } - for range rqs { - if err := <-rerrc; err != nil { - t.Errorf("error reading (%v)", err) - } - } -} - -func manyReaders(qs []testQueue, totalReads int, errc chan<- error) { - var rxReads int32 - for _, q := range qs { - go func(q testQueue) { - for { - total := atomic.AddInt32(&rxReads, 1) - if int(total) > totalReads { - break - } - if _, err := q.Dequeue(); err != nil { - errc <- err - return - } - } - errc <- nil - }(q) - } -} - -func manyWriters(qs []testQueue, writesEach int, errc chan<- error) { - for _, q := range qs { - go func(q testQueue) { - for j := 0; j < writesEach; j++ { - if err := q.Enqueue("foo"); err != nil { - errc <- err - return - } - } - errc <- nil - }(q) - } -} - -type testQueue interface { - Enqueue(val string) error - Dequeue() (string, error) -} - -type flatPriorityQueue struct{ *recipe.PriorityQueue } - -func (q *flatPriorityQueue) Enqueue(val string) error { - // randomized to stress dequeuing logic; order isn't important - return q.PriorityQueue.Enqueue(val, uint16(rand.Intn(2))) -} -func (q *flatPriorityQueue) Dequeue() (string, error) { - return q.PriorityQueue.Dequeue() -} diff --git a/tests/integration/clientv3/kv_test.go b/tests/integration/clientv3/kv_test.go deleted file mode 
100644 index 3442f5285f4..00000000000 --- a/tests/integration/clientv3/kv_test.go +++ /dev/null @@ -1,898 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3test - -import ( - "bytes" - "context" - "fmt" - "os" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/api/v3/version" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestKVPutError(t *testing.T) { - integration2.BeforeTest(t) - - var ( - maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go - quota = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486. 
- ) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024}) - defer clus.Terminate(t) - - kv := clus.RandClient() - ctx := context.TODO() - - _, err := kv.Put(ctx, "", "bar") - if err != rpctypes.ErrEmptyKey { - t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err) - } - - _, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100))) - if err != rpctypes.ErrRequestTooLarge { - t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err) - } - - _, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50))) - if err != nil { // below quota - t.Fatal(err) - } - - time.Sleep(1 * time.Second) // give enough time for commit - - _, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50))) - if err != rpctypes.ErrNoSpace { // over quota - t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err) - } -} - -func TestKVPutWithLease(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - lapi := clus.RandClient() - - kv := clus.RandClient() - ctx := context.TODO() - - lease, err := lapi.Grant(context.Background(), 10) - if err != nil { - t.Fatalf("failed to create lease %v", err) - } - - key := "hello" - val := "world" - if _, err := kv.Put(ctx, key, val, clientv3.WithLease(lease.ID)); err != nil { - t.Fatalf("couldn't put %q (%v)", key, err) - } - resp, err := kv.Get(ctx, key) - if err != nil { - t.Fatalf("couldn't get key (%v)", err) - } - if len(resp.Kvs) != 1 { - t.Fatalf("expected 1 key, got %d", len(resp.Kvs)) - } - if !bytes.Equal([]byte(val), resp.Kvs[0].Value) { - t.Errorf("val = %s, want %s", val, resp.Kvs[0].Value) - } - if lease.ID != clientv3.LeaseID(resp.Kvs[0].Lease) { - t.Errorf("val = %d, want %d", lease.ID, resp.Kvs[0].Lease) - } -} - -// TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value. 
-func TestKVPutWithIgnoreValue(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kv := clus.RandClient() - - _, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()) - if err != rpctypes.ErrKeyNotFound { - t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err) - } - - if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } - - if _, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()); err != nil { - t.Fatal(err) - } - rr, rerr := kv.Get(context.TODO(), "foo") - if rerr != nil { - t.Fatal(rerr) - } - if len(rr.Kvs) != 1 { - t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs)) - } - if !bytes.Equal(rr.Kvs[0].Value, []byte("bar")) { - t.Fatalf("value expected 'bar', got %q", rr.Kvs[0].Value) - } -} - -// TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key. -func TestKVPutWithIgnoreLease(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kv := clus.RandClient() - - lapi := clus.RandClient() - - resp, err := lapi.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - - if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()); err != rpctypes.ErrKeyNotFound { - t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err) - } - - if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)); err != nil { - t.Fatal(err) - } - - if _, err := kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()); err != nil { - t.Fatal(err) - } - - rr, rerr := kv.Get(context.TODO(), "zoo") - if rerr != nil { - t.Fatal(rerr) - } - if len(rr.Kvs) != 1 { - t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs)) - } - if rr.Kvs[0].Lease != int64(resp.ID) { - 
t.Fatalf("lease expected %v, got %v", resp.ID, rr.Kvs[0].Lease) - } -} - -func TestKVPutWithRequireLeader(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - - // wait for election timeout, then member[0] will not have a leader. - var ( - electionTicks = 10 - tickDuration = 10 * time.Millisecond - ) - time.Sleep(time.Duration(3*electionTicks) * tickDuration) - - kv := clus.Client(0) - _, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar") - if err != rpctypes.ErrNoLeader { - t.Fatal(err) - } - - cnt, err := clus.Members[0].Metric( - "etcd_server_client_requests_total", - `type="unary"`, - fmt.Sprintf(`client_api_version="%v"`, version.APIVersion), - ) - if err != nil { - t.Fatal(err) - } - cv, err := strconv.ParseInt(cnt, 10, 32) - if err != nil { - t.Fatal(err) - } - if cv < 1 { // >1 when retried - t.Fatalf("expected at least 1, got %q", cnt) - } - - // clients may give timeout errors since the members are stopped; take - // the clients so that terminating the cluster won't complain - clus.Client(1).Close() - clus.Client(2).Close() - clus.TakeClient(1) - clus.TakeClient(2) -} - -func TestKVRange(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kv := clus.RandClient() - ctx := context.TODO() - - keySet := []string{"a", "b", "c", "c", "c", "foo", "foo/abc", "fop"} - for i, key := range keySet { - if _, err := kv.Put(ctx, key, ""); err != nil { - t.Fatalf("#%d: couldn't put %q (%v)", i, key, err) - } - } - resp, err := kv.Get(ctx, keySet[0]) - if err != nil { - t.Fatalf("couldn't get key (%v)", err) - } - wheader := resp.Header - - tests := []struct { - begin, end string - rev int64 - opts []clientv3.OpOption - - wantSet []*mvccpb.KeyValue - }{ - // fetch entire keyspace using WithFromKey - 
{ - "\x00", "", - 0, - []clientv3.OpOption{clientv3.WithFromKey(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)}, - - []*mvccpb.KeyValue{ - {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1}, - {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1}, - {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3}, - {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1}, - {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1}, - {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1}, - }, - }, - } - - for i, tt := range tests { - opts := []clientv3.OpOption{clientv3.WithRange(tt.end), clientv3.WithRev(tt.rev)} - opts = append(opts, tt.opts...) - resp, err := kv.Get(ctx, tt.begin, opts...) - if err != nil { - t.Fatalf("#%d: couldn't range (%v)", i, err) - } - if !reflect.DeepEqual(wheader, resp.Header) { - t.Fatalf("#%d: wheader expected %+v, got %+v", i, wheader, resp.Header) - } - if !reflect.DeepEqual(tt.wantSet, resp.Kvs) { - t.Fatalf("#%d: resp.Kvs expected %+v, got %+v", i, tt.wantSet, resp.Kvs) - } - } -} - -func TestKVGetErrConnClosed(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - - donec := make(chan struct{}) - if err := cli.Close(); err != nil { - t.Fatal(err) - } - clus.TakeClient(0) - - go func() { - defer close(donec) - _, err := cli.Get(context.TODO(), "foo") - if !clientv3.IsConnCanceled(err) { - t.Errorf("expected %v, got %v", context.Canceled, err) - } - }() - - select { - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("kv.Get took too long") - case <-donec: - } -} - -func TestKVNewAfterClose(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := 
clus.Client(0) - clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } - - donec := make(chan struct{}) - go func() { - _, err := cli.Get(context.TODO(), "foo") - if !clientv3.IsConnCanceled(err) { - t.Errorf("expected %v, got %v", context.Canceled, err) - } - close(donec) - }() - select { - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("kv.Get took too long") - case <-donec: - } -} - -func TestKVDeleteRange(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kv := clus.RandClient() - ctx := context.TODO() - - tests := []struct { - key string - opts []clientv3.OpOption - wkeys []string - }{ - // * - { - key: "\x00", - opts: []clientv3.OpOption{clientv3.WithFromKey()}, - wkeys: nil, - }, - } - - for i, tt := range tests { - keySet := []string{"a", "b", "c", "c/abc", "d"} - for j, key := range keySet { - if _, err := kv.Put(ctx, key, ""); err != nil { - t.Fatalf("#%d: couldn't put %q (%v)", j, key, err) - } - } - - _, err := kv.Delete(ctx, tt.key, tt.opts...) 
- if err != nil { - t.Fatalf("#%d: couldn't delete range (%v)", i, err) - } - - resp, err := kv.Get(ctx, "a", clientv3.WithFromKey()) - if err != nil { - t.Fatalf("#%d: couldn't get keys (%v)", i, err) - } - var keys []string - for _, kv := range resp.Kvs { - keys = append(keys, string(kv.Key)) - } - if !reflect.DeepEqual(tt.wkeys, keys) { - t.Errorf("#%d: resp.Kvs got %v, expected %v", i, keys, tt.wkeys) - } - } -} - -func TestKVCompactError(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kv := clus.RandClient() - ctx := context.TODO() - - for i := 0; i < 5; i++ { - if _, err := kv.Put(ctx, "foo", "bar"); err != nil { - t.Fatalf("couldn't put 'foo' (%v)", err) - } - } - _, err := kv.Compact(ctx, 6) - if err != nil { - t.Fatalf("couldn't compact 6 (%v)", err) - } - - _, err = kv.Compact(ctx, 6) - if err != rpctypes.ErrCompacted { - t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err) - } - - _, err = kv.Compact(ctx, 100) - if err != rpctypes.ErrFutureRev { - t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err) - } -} - -func TestKVCompact(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kv := clus.RandClient() - ctx := context.TODO() - - for i := 0; i < 10; i++ { - if _, err := kv.Put(ctx, "foo", "bar"); err != nil { - t.Fatalf("couldn't put 'foo' (%v)", err) - } - } - - _, err := kv.Compact(ctx, 7) - if err != nil { - t.Fatalf("couldn't compact kv space (%v)", err) - } - _, err = kv.Compact(ctx, 7) - if err == nil || err != rpctypes.ErrCompacted { - t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted) - } - - wcli := clus.RandClient() - // new watcher could precede receiving the compaction without quorum first - wcli.Get(ctx, "quorum-get") - - wchan := wcli.Watch(ctx, "foo", clientv3.WithRev(3)) - - wr := <-wchan - if wr.CompactRevision 
!= 7 { - t.Fatalf("wchan CompactRevision got %v, want 7", wr.CompactRevision) - } - if !wr.Canceled { - t.Fatalf("expected canceled watcher on compacted revision, got %v", wr.Canceled) - } - if wr.Err() != rpctypes.ErrCompacted { - t.Fatalf("watch response error expected %v, got %v", rpctypes.ErrCompacted, wr.Err()) - } - wr, ok := <-wchan - if ok { - t.Fatalf("wchan got %v, expected closed", wr) - } - if wr.Err() != nil { - t.Fatalf("watch response error expected nil, got %v", wr.Err()) - } - - _, err = kv.Compact(ctx, 1000) - if err == nil || err != rpctypes.ErrFutureRev { - t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev) - } -} - -// TestKVGetRetry ensures get will retry on disconnect. -func TestKVGetRetry(t *testing.T) { - integration2.BeforeTest(t) - - clusterSize := 3 - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: clusterSize, UseBridge: true}) - defer clus.Terminate(t) - - // because killing leader and following election - // could give no other endpoints for client reconnection - fIdx := (clus.WaitLeader(t) + 1) % clusterSize - - kv := clus.Client(fIdx) - ctx := context.TODO() - - if _, err := kv.Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } - - clus.Members[fIdx].Stop(t) - - donec := make(chan struct{}, 1) - go func() { - // Get will fail, but reconnect will trigger - gresp, gerr := kv.Get(ctx, "foo") - if gerr != nil { - t.Error(gerr) - } - wkvs := []*mvccpb.KeyValue{ - { - Key: []byte("foo"), - Value: []byte("bar"), - CreateRevision: 2, - ModRevision: 2, - Version: 1, - }, - } - if !reflect.DeepEqual(gresp.Kvs, wkvs) { - t.Errorf("bad get: got %v, want %v", gresp.Kvs, wkvs) - } - donec <- struct{}{} - }() - - time.Sleep(100 * time.Millisecond) - clus.Members[fIdx].Restart(t) - clus.Members[fIdx].WaitOK(t) - - select { - case <-time.After(20 * time.Second): - t.Fatalf("timed out waiting for get") - case <-donec: - } -} - -// TestKVPutFailGetRetry ensures a get will retry following a failed put. 
-func TestKVPutFailGetRetry(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - kv := clus.Client(0) - clus.Members[0].Stop(t) - - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - defer cancel() - _, err := kv.Put(ctx, "foo", "bar") - if err == nil { - t.Fatalf("got success on disconnected put, wanted error") - } - - donec := make(chan struct{}, 1) - go func() { - // Get will fail, but reconnect will trigger - gresp, gerr := kv.Get(context.TODO(), "foo") - if gerr != nil { - t.Error(gerr) - } - if len(gresp.Kvs) != 0 { - t.Errorf("bad get kvs: got %+v, want empty", gresp.Kvs) - } - donec <- struct{}{} - }() - - time.Sleep(100 * time.Millisecond) - clus.Members[0].Restart(t) - - select { - case <-time.After(20 * time.Second): - t.Fatalf("timed out waiting for get") - case <-donec: - } -} - -// TestKVGetCancel tests that a context cancel on a Get terminates as expected. -func TestKVGetCancel(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - oldconn := clus.Client(0).ActiveConnection() - kv := clus.Client(0) - - ctx, cancel := context.WithCancel(context.TODO()) - cancel() - - resp, err := kv.Get(ctx, "abc") - if err == nil { - t.Fatalf("cancel on get response %v, expected context error", resp) - } - newconn := clus.Client(0).ActiveConnection() - if oldconn != newconn { - t.Fatalf("cancel on get broke client connection") - } -} - -// TestKVGetStoppedServerAndClose ensures closing after a failed Get works. 
-func TestKVGetStoppedServerAndClose(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - clus.Members[0].Stop(t) - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - // this Get fails and triggers an asynchronous connection retry - _, err := cli.Get(ctx, "abc") - cancel() - if err != nil && !(IsCanceled(err) || IsClientTimeout(err)) { - t.Fatal(err) - } -} - -// TestKVPutStoppedServerAndClose ensures closing after a failed Put works. -func TestKVPutStoppedServerAndClose(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - clus.Members[0].Stop(t) - - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - // get retries on all errors. - // so here we use it to eat the potential broken pipe error for the next put. - // grpc client might see a broken pipe error when we issue the get request before - // grpc finds out the original connection is down due to the member shutdown. - _, err := cli.Get(ctx, "abc") - cancel() - if err != nil && !(IsCanceled(err) || IsClientTimeout(err)) { - t.Fatal(err) - } - - ctx, cancel = context.WithTimeout(context.TODO(), time.Second) - // this Put fails and triggers an asynchronous connection retry - _, err = cli.Put(ctx, "abc", "123") - cancel() - if err != nil && !(IsCanceled(err) || IsClientTimeout(err) || IsUnavailable(err)) { - t.Fatal(err) - } -} - -// TestKVPutAtMostOnce ensures that a Put will only occur at most once -// in the presence of network errors. 
-func TestKVPutAtMostOnce(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - clus.Members[0].Bridge().DropConnections() - donec := make(chan struct{}) - go func() { - defer close(donec) - for i := 0; i < 10; i++ { - clus.Members[0].Bridge().DropConnections() - time.Sleep(5 * time.Millisecond) - } - }() - _, err := clus.Client(0).Put(context.TODO(), "k", "v") - <-donec - if err != nil { - break - } - } - - resp, err := clus.Client(0).Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - if resp.Kvs[0].Version > 11 { - t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0]) - } -} - -// TestKVLargeRequests tests various client/server side request limits. -func TestKVLargeRequests(t *testing.T) { - integration2.BeforeTest(t) - tests := []struct { - // make sure that "MaxCallSendMsgSize" < server-side default send/recv limit - maxRequestBytesServer uint - maxCallSendBytesClient int - maxCallRecvBytesClient int - - valueSize int - expectError error - }{ - { - maxRequestBytesServer: 256, - maxCallSendBytesClient: 0, - maxCallRecvBytesClient: 0, - valueSize: 1024, - expectError: rpctypes.ErrRequestTooLarge, - }, - - // without proper client-side receive size limit - // "code = ResourceExhausted desc = grpc: received message larger than max (5242929 vs. 
4194304)" - { - - maxRequestBytesServer: 7*1024*1024 + 512*1024, - maxCallSendBytesClient: 7 * 1024 * 1024, - maxCallRecvBytesClient: 0, - valueSize: 5 * 1024 * 1024, - expectError: nil, - }, - - { - maxRequestBytesServer: 10 * 1024 * 1024, - maxCallSendBytesClient: 100 * 1024 * 1024, - maxCallRecvBytesClient: 0, - valueSize: 10 * 1024 * 1024, - expectError: rpctypes.ErrRequestTooLarge, - }, - { - maxRequestBytesServer: 10 * 1024 * 1024, - maxCallSendBytesClient: 10 * 1024 * 1024, - maxCallRecvBytesClient: 0, - valueSize: 10 * 1024 * 1024, - expectError: status.Errorf(codes.ResourceExhausted, "trying to send message larger than max "), - }, - { - maxRequestBytesServer: 10 * 1024 * 1024, - maxCallSendBytesClient: 100 * 1024 * 1024, - maxCallRecvBytesClient: 0, - valueSize: 10*1024*1024 + 5, - expectError: rpctypes.ErrRequestTooLarge, - }, - { - maxRequestBytesServer: 10 * 1024 * 1024, - maxCallSendBytesClient: 10 * 1024 * 1024, - maxCallRecvBytesClient: 0, - valueSize: 10*1024*1024 + 5, - expectError: status.Errorf(codes.ResourceExhausted, "trying to send message larger than max "), - }, - } - for i, test := range tests { - clus := integration2.NewCluster(t, - &integration2.ClusterConfig{ - Size: 1, - MaxRequestBytes: test.maxRequestBytesServer, - ClientMaxCallSendMsgSize: test.maxCallSendBytesClient, - ClientMaxCallRecvMsgSize: test.maxCallRecvBytesClient, - }, - ) - cli := clus.Client(0) - _, err := cli.Put(context.TODO(), "foo", strings.Repeat("a", test.valueSize)) - - if _, ok := err.(rpctypes.EtcdError); ok { - if err != test.expectError { - t.Errorf("#%d: expected %v, got %v", i, test.expectError, err) - } - } else if err != nil && !strings.HasPrefix(err.Error(), test.expectError.Error()) { - t.Errorf("#%d: expected error starting with '%s', got '%s'", i, test.expectError.Error(), err.Error()) - } - - // put request went through, now expects large response back - if err == nil { - _, err = cli.Get(context.TODO(), "foo") - if err != nil { - t.Errorf("#%d: get 
expected no error, got %v", i, err) - } - } - - clus.Terminate(t) - } -} - -// TestKVForLearner ensures learner member only accepts serializable read request. -func TestKVForLearner(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - // we have to add and launch learner member after initial cluster was created, because - // bootstrapping a cluster with learner member is not supported. - clus.AddAndLaunchLearnerMember(t) - - learners, err := clus.GetLearnerMembers() - if err != nil { - t.Fatalf("failed to get the learner members in cluster: %v", err) - } - if len(learners) != 1 { - t.Fatalf("added 1 learner to cluster, got %d", len(learners)) - } - - if len(clus.Members) != 4 { - t.Fatalf("expecting 4 members in cluster after adding the learner member, got %d", len(clus.Members)) - } - // note: - // 1. clus.Members[3] is the newly added learner member, which was appended to clus.Members - // 2. we are using member's grpcAddr instead of clientURLs as the endpoint for clientv3.Config, - // because the implementation of integration test has diverged from embed/etcd.go. 
- learnerEp := clus.Members[3].GRPCURL() - cfg := clientv3.Config{ - Endpoints: []string{learnerEp}, - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - // this client only has endpoint of the learner member - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatalf("failed to create clientv3: %v", err) - } - defer cli.Close() - - // wait until learner member is ready - <-clus.Members[3].ReadyNotify() - - tests := []struct { - op clientv3.Op - wErr bool - }{ - { - op: clientv3.OpGet("foo", clientv3.WithSerializable()), - wErr: false, - }, - { - op: clientv3.OpGet("foo"), - wErr: true, - }, - { - op: clientv3.OpPut("foo", "bar"), - wErr: true, - }, - { - op: clientv3.OpDelete("foo"), - wErr: true, - }, - { - op: clientv3.OpTxn([]clientv3.Cmp{clientv3.Compare(clientv3.CreateRevision("foo"), "=", 0)}, nil, nil), - wErr: true, - }, - } - - for idx, test := range tests { - _, err := cli.Do(context.TODO(), test.op) - if err != nil && !test.wErr { - t.Errorf("%d: expect no error, got %v", idx, err) - } - if err == nil && test.wErr { - t.Errorf("%d: expect error, got nil", idx) - } - } -} - -// TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member -func TestBalancerSupportLearner(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - // we have to add and launch learner member after initial cluster was created, because - // bootstrapping a cluster with learner member is not supported. 
- clus.AddAndLaunchLearnerMember(t) - - learners, err := clus.GetLearnerMembers() - if err != nil { - t.Fatalf("failed to get the learner members in cluster: %v", err) - } - if len(learners) != 1 { - t.Fatalf("added 1 learner to cluster, got %d", len(learners)) - } - - // clus.Members[3] is the newly added learner member, which was appended to clus.Members - learnerEp := clus.Members[3].GRPCURL() - cfg := clientv3.Config{ - Endpoints: []string{learnerEp}, - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatalf("failed to create clientv3: %v", err) - } - defer cli.Close() - - // wait until learner member is ready - <-clus.Members[3].ReadyNotify() - - if _, err := cli.Get(context.Background(), "foo"); err == nil { - t.Fatalf("expect Get request to learner to fail, got no error") - } - t.Logf("Expected: Read from learner error: %v", err) - - eps := []string{learnerEp, clus.Members[0].GRPCURL()} - cli.SetEndpoints(eps...) - if _, err := cli.Get(context.Background(), "foo"); err != nil { - t.Errorf("expect no error (balancer should retry when request to learner fails), got error: %v", err) - } -} diff --git a/tests/integration/clientv3/lease/doc.go b/tests/integration/clientv3/lease/doc.go deleted file mode 100644 index 0061520d3fa..00000000000 --- a/tests/integration/clientv3/lease/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package lease diff --git a/tests/integration/clientv3/lease/lease_test.go b/tests/integration/clientv3/lease/lease_test.go deleted file mode 100644 index f23ed672865..00000000000 --- a/tests/integration/clientv3/lease/lease_test.go +++ /dev/null @@ -1,847 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease_test - -import ( - "context" - "fmt" - "reflect" - "sort" - "sync" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestLeaseNotFoundError(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kv := clus.RandClient() - - _, err := kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(500))) - if err != rpctypes.ErrLeaseNotFound { - t.Fatalf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err) - } -} - -func TestLeaseGrant(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - lapi := clus.RandClient() - - kv := clus.RandClient() - - _, merr := lapi.Grant(context.Background(), clientv3.MaxLeaseTTL+1) - if merr 
!= rpctypes.ErrLeaseTTLTooLarge { - t.Fatalf("err = %v, want %v", merr, rpctypes.ErrLeaseTTLTooLarge) - } - - resp, err := lapi.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - - _, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID)) - if err != nil { - t.Fatalf("failed to create key with lease %v", err) - } -} - -func TestLeaseRevoke(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - lapi := clus.RandClient() - - kv := clus.RandClient() - - resp, err := lapi.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - - _, err = lapi.Revoke(context.Background(), resp.ID) - if err != nil { - t.Errorf("failed to revoke lease %v", err) - } - - _, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID)) - if err != rpctypes.ErrLeaseNotFound { - t.Fatalf("err = %v, want %v", err, rpctypes.ErrLeaseNotFound) - } -} - -func TestLeaseKeepAliveOnce(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - lapi := clus.RandClient() - - resp, err := lapi.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - - _, err = lapi.KeepAliveOnce(context.Background(), resp.ID) - if err != nil { - t.Errorf("failed to keepalive lease %v", err) - } - - _, err = lapi.KeepAliveOnce(context.Background(), clientv3.LeaseID(0)) - if err != rpctypes.ErrLeaseNotFound { - t.Errorf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err) - } -} - -func TestLeaseKeepAlive(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - lapi := clus.Client(0) - clus.TakeClient(0) - - resp, err := lapi.Grant(context.Background(), 10) - if err != nil { - 
t.Errorf("failed to create lease %v", err) - } - - rc, kerr := lapi.KeepAlive(context.Background(), resp.ID) - if kerr != nil { - t.Errorf("failed to keepalive lease %v", kerr) - } - - kresp, ok := <-rc - if !ok { - t.Errorf("chan is closed, want not closed") - } - - if kresp == nil { - t.Fatalf("unexpected null response") - } - - if kresp.ID != resp.ID { - t.Errorf("ID = %x, want %x", kresp.ID, resp.ID) - } - - lapi.Close() - - _, ok = <-rc - if ok { - t.Errorf("chan is not closed, want lease Close() closes chan") - } -} - -func TestLeaseKeepAliveOneSecond(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - - resp, err := cli.Grant(context.Background(), 1) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - rc, kerr := cli.KeepAlive(context.Background(), resp.ID) - if kerr != nil { - t.Errorf("failed to keepalive lease %v", kerr) - } - - for i := 0; i < 3; i++ { - if _, ok := <-rc; !ok { - t.Errorf("chan is closed, want not closed") - } - } -} - -// TestLeaseKeepAliveHandleFailure tests lease keep alive handling faillure -// TODO: add a client that can connect to all the members of cluster via unix sock. -// TODO: test handle more complicated failures. 
-func TestLeaseKeepAliveHandleFailure(t *testing.T) { - t.Skip("test it when we have a cluster client") - - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - // TODO: change this line to get a cluster client - lapi := clus.RandClient() - - resp, err := lapi.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - - rc, kerr := lapi.KeepAlive(context.Background(), resp.ID) - if kerr != nil { - t.Errorf("failed to keepalive lease %v", kerr) - } - - kresp := <-rc - if kresp.ID != resp.ID { - t.Errorf("ID = %x, want %x", kresp.ID, resp.ID) - } - - // restart the connected member. - clus.Members[0].Stop(t) - - select { - case <-rc: - t.Fatalf("unexpected keepalive") - case <-time.After(10*time.Second/3 + 1): - } - - // recover the member. - clus.Members[0].Restart(t) - - kresp = <-rc - if kresp.ID != resp.ID { - t.Errorf("ID = %x, want %x", kresp.ID, resp.ID) - } - - lapi.Close() - - _, ok := <-rc - if ok { - t.Errorf("chan is not closed, want lease Close() closes chan") - } -} - -type leaseCh struct { - lid clientv3.LeaseID - ch <-chan *clientv3.LeaseKeepAliveResponse -} - -// TestLeaseKeepAliveNotFound ensures a revoked lease won't halt other leases. 
-func TestLeaseKeepAliveNotFound(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.RandClient() - var lchs []leaseCh - for i := 0; i < 3; i++ { - resp, rerr := cli.Grant(context.TODO(), 5) - if rerr != nil { - t.Fatal(rerr) - } - kach, kaerr := cli.KeepAlive(context.Background(), resp.ID) - if kaerr != nil { - t.Fatal(kaerr) - } - lchs = append(lchs, leaseCh{resp.ID, kach}) - } - - if _, err := cli.Revoke(context.TODO(), lchs[1].lid); err != nil { - t.Fatal(err) - } - - <-lchs[0].ch - if _, ok := <-lchs[0].ch; !ok { - t.Fatal("closed keepalive on wrong lease") - } - if _, ok := <-lchs[1].ch; ok { - t.Fatal("expected closed keepalive") - } -} - -func TestLeaseGrantErrConnClosed(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } - - donec := make(chan struct{}) - go func() { - defer close(donec) - _, err := cli.Grant(context.TODO(), 5) - if !clientv3.IsConnCanceled(err) { - // context.Canceled if grpc-go balancer calls 'Get' with an inflight client.Close. - t.Errorf("expected %v, or server unavailable, got %v", context.Canceled, err) - } - }() - - select { - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("le.Grant took too long") - case <-donec: - } -} - -// TestLeaseKeepAliveFullResponseQueue ensures when response -// queue is full thus dropping keepalive response sends, -// keepalive request is sent with the same rate of TTL / 3. 
-func TestLeaseKeepAliveFullResponseQueue(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lapi := clus.Client(0) - - // expect lease keepalive every 10-second - lresp, err := lapi.Grant(context.Background(), 30) - if err != nil { - t.Fatalf("failed to create lease %v", err) - } - id := lresp.ID - - old := clientv3.LeaseResponseChSize - defer func() { - clientv3.LeaseResponseChSize = old - }() - clientv3.LeaseResponseChSize = 0 - - // never fetch from response queue, and let it become full - _, err = lapi.KeepAlive(context.Background(), id) - if err != nil { - t.Fatalf("failed to keepalive lease %v", err) - } - - // TTL should not be refreshed after 3 seconds - // expect keepalive to be triggered after TTL/3 - time.Sleep(3 * time.Second) - - tr, terr := lapi.TimeToLive(context.Background(), id) - if terr != nil { - t.Fatalf("failed to get lease information %v", terr) - } - if tr.TTL >= 29 { - t.Errorf("unexpected kept-alive lease TTL %d", tr.TTL) - } -} - -func TestLeaseGrantNewAfterClose(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } - - donec := make(chan struct{}) - go func() { - _, err := cli.Grant(context.TODO(), 5) - if !clientv3.IsConnCanceled(err) { - t.Errorf("expected %v or server unavailable, got %v", context.Canceled, err) - } - close(donec) - }() - select { - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("le.Grant took too long") - case <-donec: - } -} - -func TestLeaseRevokeNewAfterClose(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - resp, err := cli.Grant(context.TODO(), 5) - if err != nil { - t.Fatal(err) 
- } - leaseID := resp.ID - - clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } - - errMsgCh := make(chan string, 1) - go func() { - _, err := cli.Revoke(context.TODO(), leaseID) - if !clientv3.IsConnCanceled(err) { - errMsgCh <- fmt.Sprintf("expected %v or server unavailable, got %v", context.Canceled, err) - } else { - errMsgCh <- "" - } - }() - select { - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("le.Revoke took too long") - case errMsg := <-errMsgCh: - if errMsg != "" { - t.Fatalf(errMsg) - } - } -} - -// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed -// following a disconnection, lease revoke, then reconnect. -func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - cli := clus.Client(0) - - // setup lease and do a keepalive - resp, err := cli.Grant(context.Background(), 10) - if err != nil { - t.Fatal(err) - } - rc, kerr := cli.KeepAlive(context.Background(), resp.ID) - if kerr != nil { - t.Fatal(kerr) - } - kresp := <-rc - if kresp.ID != resp.ID { - t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID) - } - - // keep client disconnected - clus.Members[0].Stop(t) - time.Sleep(time.Second) - clus.WaitLeader(t) - - if _, err := clus.Client(1).Revoke(context.TODO(), resp.ID); err != nil { - t.Fatal(err) - } - - clus.Members[0].Restart(t) - - // some responses may still be buffered; drain until close - timer := time.After(time.Duration(kresp.TTL) * time.Second) - for kresp != nil { - select { - case kresp = <-rc: - case <-timer: - t.Fatalf("keepalive channel did not close") - } - } -} - -// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if -// the initial keep alive request never gets a response. 
-func TestLeaseKeepAliveInitTimeout(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - cli := clus.Client(0) - - // setup lease and do a keepalive - resp, err := cli.Grant(context.Background(), 5) - if err != nil { - t.Fatal(err) - } - // keep client disconnected - clus.Members[0].Stop(t) - rc, kerr := cli.KeepAlive(context.Background(), resp.ID) - if kerr != nil { - t.Fatal(kerr) - } - select { - case ka, ok := <-rc: - if ok { - t.Fatalf("unexpected keepalive %v, expected closed channel", ka) - } - case <-time.After(10 * time.Second): - t.Fatalf("keepalive channel did not close") - } - - clus.Members[0].Restart(t) -} - -// TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if -// a keep alive request after the first never gets a response. -func TestLeaseKeepAliveTTLTimeout(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - cli := clus.Client(0) - - // setup lease and do a keepalive - resp, err := cli.Grant(context.Background(), 5) - if err != nil { - t.Fatal(err) - } - rc, kerr := cli.KeepAlive(context.Background(), resp.ID) - if kerr != nil { - t.Fatal(kerr) - } - if kresp := <-rc; kresp.ID != resp.ID { - t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID) - } - - // keep client disconnected - clus.Members[0].Stop(t) - select { - case ka, ok := <-rc: - if ok { - t.Fatalf("unexpected keepalive %v, expected closed channel", ka) - } - case <-time.After(10 * time.Second): - t.Fatalf("keepalive channel did not close") - } - - clus.Members[0].Restart(t) -} - -func TestLeaseTimeToLive(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - c := clus.RandClient() - lapi := c - - resp, err := 
lapi.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - - kv := clus.RandClient() - keys := []string{"foo1", "foo2"} - for i := range keys { - if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil { - t.Fatal(err) - } - } - - // linearized read to ensure Puts propagated to server backing lapi - if _, err := c.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - - lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys()) - if lerr != nil { - t.Fatal(lerr) - } - if lresp.ID != resp.ID { - t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID) - } - if lresp.GrantedTTL != int64(10) { - t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL) - } - if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL { - t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL) - } - ks := make([]string, len(lresp.Keys)) - for i := range lresp.Keys { - ks[i] = string(lresp.Keys[i]) - } - sort.Strings(ks) - if !reflect.DeepEqual(ks, keys) { - t.Fatalf("keys expected %v, got %v", keys, ks) - } - - lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID) - if lerr != nil { - t.Fatal(lerr) - } - if len(lresp.Keys) != 0 { - t.Fatalf("unexpected keys %+v", lresp.Keys) - } -} - -func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.RandClient() - resp, err := cli.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - _, err = cli.Revoke(context.Background(), resp.ID) - if err != nil { - t.Errorf("failed to Revoke lease %v", err) - } - - lresp, err := cli.TimeToLive(context.Background(), resp.ID) - // TimeToLive() should return a response with TTL=-1. 
- if err != nil { - t.Fatalf("expected err to be nil") - } - if lresp == nil { - t.Fatalf("expected lresp not to be nil") - } - if lresp.ResponseHeader == nil { - t.Fatalf("expected ResponseHeader not to be nil") - } - if lresp.ID != resp.ID { - t.Fatalf("expected Lease ID %v, but got %v", resp.ID, lresp.ID) - } - if lresp.TTL != -1 { - t.Fatalf("expected TTL %v, but got %v", lresp.TTL, lresp.TTL) - } -} - -func TestLeaseLeases(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.RandClient() - - var ids []clientv3.LeaseID - for i := 0; i < 5; i++ { - resp, err := cli.Grant(context.Background(), 10) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - ids = append(ids, resp.ID) - } - - resp, err := cli.Leases(context.Background()) - if err != nil { - t.Fatal(err) - } - if len(resp.Leases) != 5 { - t.Fatalf("len(resp.Leases) expected 5, got %d", len(resp.Leases)) - } - for i := range resp.Leases { - if ids[i] != resp.Leases[i].ID { - t.Fatalf("#%d: lease ID expected %d, got %d", i, ids[i], resp.Leases[i].ID) - } - } -} - -// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum -// for a while. 
-func TestLeaseRenewLostQuorum(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - cli := clus.Client(0) - r, err := cli.Grant(context.TODO(), 4) - if err != nil { - t.Fatal(err) - } - - kctx, kcancel := context.WithCancel(context.Background()) - defer kcancel() - ka, err := cli.KeepAlive(kctx, r.ID) - if err != nil { - t.Fatal(err) - } - // consume first keepalive so next message sends when cluster is down - <-ka - lastKa := time.Now() - - // force keepalive stream message to timeout - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - // Use TTL-2 since the client closes the keepalive channel if no - // keepalive arrives before the lease deadline; the client will - // try to resend a keepalive after TTL/3 seconds, so for a TTL of 4, - // sleeping for 2s should be sufficient time for issuing a retry. - // The cluster has two seconds to recover and reply to the keepalive. 
- time.Sleep(time.Duration(r.TTL-2) * time.Second) - clus.Members[1].Restart(t) - clus.Members[2].Restart(t) - - if time.Since(lastKa) > time.Duration(r.TTL)*time.Second { - t.Skip("waited too long for server stop and restart") - } - - select { - case _, ok := <-ka: - if !ok { - t.Fatalf("keepalive closed") - } - case <-time.After(time.Duration(r.TTL) * time.Second): - t.Fatalf("timed out waiting for keepalive") - } -} - -func TestLeaseKeepAliveLoopExit(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx := context.Background() - cli := clus.Client(0) - clus.TakeClient(0) - - resp, err := cli.Grant(ctx, 5) - if err != nil { - t.Fatal(err) - } - cli.Close() - - _, err = cli.KeepAlive(ctx, resp.ID) - if _, ok := err.(clientv3.ErrKeepAliveHalted); !ok { - t.Fatalf("expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err) - } -} - -// TestV3LeaseFailureOverlap issues Grant and KeepAlive requests to a cluster -// before, during, and after quorum loss to confirm Grant/KeepAlive tolerates -// transient cluster failure. 
-func TestV3LeaseFailureOverlap(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) - defer clus.Terminate(t) - - numReqs := 5 - cli := clus.Client(0) - - // bring up a session, tear it down - updown := func(i int) error { - sess, err := concurrency.NewSession(cli) - if err != nil { - return err - } - ch := make(chan struct{}) - go func() { - defer close(ch) - sess.Close() - }() - select { - case <-ch: - case <-time.After(time.Minute / 4): - t.Fatalf("timeout %d", i) - } - return nil - } - - var wg sync.WaitGroup - mkReqs := func(n int) { - wg.Add(numReqs) - for i := 0; i < numReqs; i++ { - go func() { - defer wg.Done() - err := updown(n) - if err == nil || err == rpctypes.ErrTimeoutDueToConnectionLost { - return - } - t.Error(err) - }() - } - } - - mkReqs(1) - clus.Members[1].Stop(t) - mkReqs(2) - time.Sleep(time.Second) - mkReqs(3) - clus.Members[1].Restart(t) - mkReqs(4) - wg.Wait() -} - -// TestLeaseWithRequireLeader checks keep-alive channel close when no leader. 
-func TestLeaseWithRequireLeader(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) - defer clus.Terminate(t) - - c := clus.Client(0) - lid1, err1 := c.Grant(context.TODO(), 60) - if err1 != nil { - t.Fatal(err1) - } - lid2, err2 := c.Grant(context.TODO(), 60) - if err2 != nil { - t.Fatal(err2) - } - // kaReqLeader close if the leader is lost - kaReqLeader, kerr1 := c.KeepAlive(clientv3.WithRequireLeader(context.TODO()), lid1.ID) - if kerr1 != nil { - t.Fatal(kerr1) - } - // kaWait will wait even if the leader is lost - kaWait, kerr2 := c.KeepAlive(context.TODO(), lid2.ID) - if kerr2 != nil { - t.Fatal(kerr2) - } - - select { - case <-kaReqLeader: - case <-time.After(5 * time.Second): - t.Fatalf("require leader first keep-alive timed out") - } - select { - case <-kaWait: - case <-time.After(5 * time.Second): - t.Fatalf("leader not required first keep-alive timed out") - } - - clus.Members[1].Stop(t) - // kaReqLeader may issue multiple requests while waiting for the first - // response from proxy server; drain any stray keepalive responses - time.Sleep(100 * time.Millisecond) - for { - <-kaReqLeader - if len(kaReqLeader) == 0 { - break - } - } - - select { - case resp, ok := <-kaReqLeader: - if ok { - t.Fatalf("expected closed require leader, got response %+v", resp) - } - case <-time.After(5 * time.Second): - t.Fatal("keepalive with require leader took too long to close") - } - select { - case _, ok := <-kaWait: - if !ok { - t.Fatalf("got closed channel with no require leader, expected non-closed") - } - case <-time.After(10 * time.Millisecond): - // wait some to detect any closes happening soon after kaReqLeader closing - } -} diff --git a/tests/integration/clientv3/lease/leasing_test.go b/tests/integration/clientv3/lease/leasing_test.go deleted file mode 100644 index cc9f7056736..00000000000 --- a/tests/integration/clientv3/lease/leasing_test.go +++ /dev/null @@ -1,2053 +0,0 
@@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease_test - -import ( - "context" - "fmt" - "math/rand" - "reflect" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/client/v3/leasing" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestLeasingPutGet(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - lKV1, closeLKV1, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV1() - - lKV2, closeLKV2, err := leasing.NewKV(clus.Client(1), "foo/") - testutil.AssertNil(t, err) - defer closeLKV2() - - lKV3, closeLKV3, err := leasing.NewKV(clus.Client(2), "foo/") - testutil.AssertNil(t, err) - defer closeLKV3() - - resp, err := lKV1.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 0 { - t.Errorf("expected nil, got %q", resp.Kvs[0].Key) - } - - if _, err = lKV1.Put(context.TODO(), "abc", "def"); err != nil { - t.Fatal(err) - } - if resp, err = lKV2.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - if string(resp.Kvs[0].Key) != "abc" { - t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) - } - if string(resp.Kvs[0].Value) != "def" { - 
t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value) - } - - if _, err = lKV3.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - if _, err = lKV2.Put(context.TODO(), "abc", "ghi"); err != nil { - t.Fatal(err) - } - - if resp, err = lKV3.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - if string(resp.Kvs[0].Key) != "abc" { - t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) - } - - if string(resp.Kvs[0].Value) != "ghi" { - t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value) - } -} - -// TestLeasingInterval checks the leasing KV fetches key intervals. -func TestLeasingInterval(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - keys := []string{"abc/a", "abc/b", "abc/a/a"} - for _, k := range keys { - if _, err = clus.Client(0).Put(context.TODO(), k, "v"); err != nil { - t.Fatal(err) - } - } - - resp, err := lkv.Get(context.TODO(), "abc/", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 3 { - t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs) - } - - // load into cache - if _, err = lkv.Get(context.TODO(), "abc/a"); err != nil { - t.Fatal(err) - } - - // get when prefix is also a cached key - if resp, err = lkv.Get(context.TODO(), "abc/a", clientv3.WithPrefix()); err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 2 { - t.Fatalf("expected keys %+v, got response keys %+v", keys, resp.Kvs) - } -} - -// TestLeasingPutInvalidateNew checks the leasing KV updates its cache on a Put to a new key. 
-func TestLeasingPutInvalidateNew(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Put(context.TODO(), "k", "v"); err != nil { - t.Fatal(err) - } - - lkvResp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - cResp, cerr := clus.Client(0).Get(context.TODO(), "k") - if cerr != nil { - t.Fatal(cerr) - } - if !reflect.DeepEqual(lkvResp, cResp) { - t.Fatalf(`expected %+v, got response %+v`, cResp, lkvResp) - } -} - -// TestLeasingPutInvalidateExisting checks the leasing KV updates its cache on a Put to an existing key. -func TestLeasingPutInvalidateExisting(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - if _, err := clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Put(context.TODO(), "k", "v"); err != nil { - t.Fatal(err) - } - - lkvResp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - cResp, cerr := clus.Client(0).Get(context.TODO(), "k") - if cerr != nil { - t.Fatal(cerr) - } - if !reflect.DeepEqual(lkvResp, cResp) { - t.Fatalf(`expected %+v, got response %+v`, cResp, lkvResp) - } -} - -// TestLeasingGetNoLeaseTTL checks a key with a TTL is not leased. 
-func TestLeasingGetNoLeaseTTL(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - lresp, err := clus.Client(0).Grant(context.TODO(), 60) - testutil.AssertNil(t, err) - - _, err = clus.Client(0).Put(context.TODO(), "k", "v", clientv3.WithLease(lresp.ID)) - testutil.AssertNil(t, err) - - gresp, err := lkv.Get(context.TODO(), "k") - testutil.AssertNil(t, err) - assert.Equal(t, len(gresp.Kvs), 1) - - clus.Members[0].Stop(t) - - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - _, err = lkv.Get(ctx, "k") - cancel() - assert.Equal(t, err, ctx.Err()) -} - -// TestLeasingGetSerializable checks the leasing KV can make serialized requests -// when the etcd cluster is partitioned. -func TestLeasingGetSerializable(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "cached"); err != nil { - t.Fatal(err) - } - - clus.Members[1].Stop(t) - - // don't necessarily try to acquire leasing key ownership for new key - resp, err := lkv.Get(context.TODO(), "uncached", clientv3.WithSerializable()) - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 0 { - t.Fatalf(`expected no keys, got response %+v`, resp) - } - - clus.Members[0].Stop(t) - - // leasing key ownership should have "cached" locally served - cachedResp, err := lkv.Get(context.TODO(), "cached", clientv3.WithSerializable()) - if err != nil { - t.Fatal(err) - } - if len(cachedResp.Kvs) != 1 || string(cachedResp.Kvs[0].Value) != 
"abc" { - t.Fatalf(`expected "cached"->"abc", got response %+v`, cachedResp) - } -} - -// TestLeasingPrevKey checks the cache respects WithPrevKV on puts. -func TestLeasingPrevKey(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - // acquire leasing key - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - resp, err := lkv.Put(context.TODO(), "k", "def", clientv3.WithPrevKV()) - if err != nil { - t.Fatal(err) - } - if resp.PrevKv == nil || string(resp.PrevKv.Value) != "abc" { - t.Fatalf(`expected PrevKV.Value="abc", got response %+v`, resp) - } -} - -// TestLeasingRevGet checks the cache respects Get by Revision. -func TestLeasingRevGet(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - putResp, err := clus.Client(0).Put(context.TODO(), "k", "abc") - if err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "k", "def"); err != nil { - t.Fatal(err) - } - - // check historic revision - getResp, gerr := lkv.Get(context.TODO(), "k", clientv3.WithRev(putResp.Header.Revision)) - if gerr != nil { - t.Fatal(gerr) - } - if len(getResp.Kvs) != 1 || string(getResp.Kvs[0].Value) != "abc" { - t.Fatalf(`expected "k"->"abc" at rev=%d, got response %+v`, putResp.Header.Revision, getResp) - } - // check current revision - getResp, gerr = lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(gerr) - } - if len(getResp.Kvs) != 1 || string(getResp.Kvs[0].Value) != "def" { - t.Fatalf(`expected "k"->"def" at rev=%d, got 
response %+v`, putResp.Header.Revision, getResp) - } -} - -// TestLeasingGetWithOpts checks options that can be served through the cache do not depend on the server. -func TestLeasingGetWithOpts(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - // in cache - if _, err = lkv.Get(context.TODO(), "k", clientv3.WithKeysOnly()); err != nil { - t.Fatal(err) - } - - clus.Members[0].Stop(t) - - opts := []clientv3.OpOption{ - clientv3.WithKeysOnly(), - clientv3.WithLimit(1), - clientv3.WithMinCreateRev(1), - clientv3.WithMinModRev(1), - clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend), - clientv3.WithSerializable(), - } - for _, opt := range opts { - if _, err := lkv.Get(context.TODO(), "k", opt); err != nil { - t.Fatal(err) - } - } - - var getOpts []clientv3.OpOption - for i := 0; i < len(opts); i++ { - getOpts = append(getOpts, opts[rand.Intn(len(opts))]) - } - getOpts = getOpts[:rand.Intn(len(opts))] - if _, err := lkv.Get(context.TODO(), "k", getOpts...); err != nil { - t.Fatal(err) - } -} - -// TestLeasingConcurrentPut ensures that a get after concurrent puts returns -// the recently put data. 
-func TestLeasingConcurrentPut(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - // force key into leasing key cache - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - // concurrently put through leasing client - numPuts := 16 - putc := make(chan *clientv3.PutResponse, numPuts) - for i := 0; i < numPuts; i++ { - go func() { - resp, perr := lkv.Put(context.TODO(), "k", "abc") - if perr != nil { - t.Error(perr) - } - putc <- resp - }() - } - // record maximum revision from puts - maxRev := int64(0) - for i := 0; i < numPuts; i++ { - if resp := <-putc; resp.Header.Revision > maxRev { - maxRev = resp.Header.Revision - } - } - - // confirm Get gives most recently put revisions - getResp, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(err) - } - if mr := getResp.Kvs[0].ModRevision; mr != maxRev { - t.Errorf("expected ModRevision %d, got %d", maxRev, mr) - } - if ver := getResp.Kvs[0].Version; ver != int64(numPuts) { - t.Errorf("expected Version %d, got %d", numPuts, ver) - } -} - -func TestLeasingDisconnectedGet(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "cached", "abc"); err != nil { - t.Fatal(err) - } - // get key so it's cached - if _, err = lkv.Get(context.TODO(), "cached"); err != nil { - t.Fatal(err) - } - - clus.Members[0].Stop(t) - - // leasing key ownership should have "cached" locally served - cachedResp, err := lkv.Get(context.TODO(), "cached") - if err != nil { - t.Fatal(err) - } - if len(cachedResp.Kvs) != 1 || 
string(cachedResp.Kvs[0].Value) != "abc" { - t.Fatalf(`expected "cached"->"abc", got response %+v`, cachedResp) - } -} - -func TestLeasingDeleteOwner(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - - // get+own / delete / get - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - - if len(resp.Kvs) != 0 { - t.Fatalf(`expected "k" to be deleted, got response %+v`, resp) - } - // try to double delete - if _, err = lkv.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } -} - -func TestLeasingDeleteNonOwner(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV1() - - lkv2, closeLKV2, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV2() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - // acquire ownership - if _, err = lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - // delete via non-owner - if _, err = lkv2.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - // key should be removed from lkv1 - resp, err := lkv1.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 0 { - t.Fatalf(`expected "k" to be deleted, got response %+v`, resp) - } -} - -func TestLeasingOverwriteResponse(t *testing.T) { - integration2.BeforeTest(t) - clus := 
integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - - resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - - resp.Kvs[0].Key[0] = 'z' - resp.Kvs[0].Value[0] = 'z' - - resp, err = lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - - if string(resp.Kvs[0].Key) != "k" { - t.Errorf(`expected key "k", got %q`, string(resp.Kvs[0].Key)) - } - if string(resp.Kvs[0].Value) != "abc" { - t.Errorf(`expected value "abc", got %q`, string(resp.Kvs[0].Value)) - } -} - -func TestLeasingOwnerPutResponse(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - _, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(gerr) - } - presp, err := lkv.Put(context.TODO(), "k", "def") - if err != nil { - t.Fatal(err) - } - if presp == nil { - t.Fatal("expected put response, got nil") - } - - clus.Members[0].Stop(t) - - gresp, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(gerr) - } - if gresp.Kvs[0].ModRevision != presp.Header.Revision { - t.Errorf("expected mod revision %d, got %d", presp.Header.Revision, gresp.Kvs[0].ModRevision) - } - if gresp.Kvs[0].Version != 2 { - t.Errorf("expected version 2, got version %d", gresp.Kvs[0].Version) - } -} - -func TestLeasingTxnOwnerGetRange(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := 
leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - keyCount := rand.Intn(10) + 1 - for i := 0; i < keyCount; i++ { - k := fmt.Sprintf("k-%d", i) - if _, err := clus.Client(0).Put(context.TODO(), k, k+k); err != nil { - t.Fatal(err) - } - } - if _, err := lkv.Get(context.TODO(), "k-"); err != nil { - t.Fatal(err) - } - - tresp, terr := lkv.Txn(context.TODO()).Then(clientv3.OpGet("k-", clientv3.WithPrefix())).Commit() - if terr != nil { - t.Fatal(terr) - } - if resp := tresp.Responses[0].GetResponseRange(); len(resp.Kvs) != keyCount { - t.Fatalf("expected %d keys, got response %+v", keyCount, resp.Kvs) - } -} - -func TestLeasingTxnOwnerGet(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - client := clus.Client(0) - - lkv, closeLKV, err := leasing.NewKV(client, "pfx/") - testutil.AssertNil(t, err) - - defer func() { - // In '--tags cluster_proxy' mode the client need to be closed before - // closeLKV(). This interrupts all outstanding watches. Closing by closeLKV() - // is not sufficient as (unfortunately) context close does not interrupts Watches. - // See ./clientv3/watch.go: - // >> Currently, client contexts are overwritten with "valCtx" that never closes. << - clus.TakeClient(0) // avoid double Close() of the client. - client.Close() - closeLKV() - }() - - // TODO: Randomization in tests is a bad practice (except explicitly exploratory). 
- keyCount := rand.Intn(10) + 1 - var ops []clientv3.Op - presps := make([]*clientv3.PutResponse, keyCount) - for i := range presps { - k := fmt.Sprintf("k-%d", i) - presp, err := client.Put(context.TODO(), k, k+k) - if err != nil { - t.Fatal(err) - } - presps[i] = presp - - if _, err = lkv.Get(context.TODO(), k); err != nil { - t.Fatal(err) - } - ops = append(ops, clientv3.OpGet(k)) - } - - // TODO: Randomization in unit tests is a bad practice (except explicitly exploratory). - ops = ops[:rand.Intn(len(ops))] - - // served through cache - clus.Members[0].Stop(t) - - var thenOps, elseOps []clientv3.Op - cmps, useThen := randCmps("k-", presps) - - if useThen { - thenOps = ops - elseOps = []clientv3.Op{clientv3.OpPut("k", "1")} - } else { - thenOps = []clientv3.Op{clientv3.OpPut("k", "1")} - elseOps = ops - } - - tresp, terr := lkv.Txn(context.TODO()). - If(cmps...). - Then(thenOps...). - Else(elseOps...).Commit() - - if terr != nil { - t.Fatal(terr) - } - if tresp.Succeeded != useThen { - t.Fatalf("expected succeeded=%v, got tresp=%+v", useThen, tresp) - } - if len(tresp.Responses) != len(ops) { - t.Fatalf("expected %d responses, got %d", len(ops), len(tresp.Responses)) - } - wrev := presps[len(presps)-1].Header.Revision - if tresp.Header.Revision < wrev { - t.Fatalf("expected header revision >= %d, got %d", wrev, tresp.Header.Revision) - } - for i := range ops { - k := fmt.Sprintf("k-%d", i) - rr := tresp.Responses[i].GetResponseRange() - if rr == nil { - t.Errorf("expected get response, got %+v", tresp.Responses[i]) - } - if string(rr.Kvs[0].Key) != k || string(rr.Kvs[0].Value) != k+k { - t.Errorf(`expected key for %q, got %+v`, k, rr.Kvs) - } - } -} - -func TestLeasingTxnOwnerDeleteRange(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - keyCount := 
rand.Intn(10) + 1 - for i := 0; i < keyCount; i++ { - k := fmt.Sprintf("k-%d", i) - if _, perr := clus.Client(0).Put(context.TODO(), k, k+k); perr != nil { - t.Fatal(perr) - } - } - - // cache in lkv - resp, err := lkv.Get(context.TODO(), "k-", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != keyCount { - t.Fatalf("expected %d keys, got %d", keyCount, len(resp.Kvs)) - } - - if _, terr := lkv.Txn(context.TODO()).Then(clientv3.OpDelete("k-", clientv3.WithPrefix())).Commit(); terr != nil { - t.Fatal(terr) - } - - resp, err = lkv.Get(context.TODO(), "k-", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 0 { - t.Fatalf("expected no keys, got %d", len(resp.Kvs)) - } -} - -func TestLeasingTxnOwnerDelete(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - - // cache in lkv - if _, gerr := lkv.Get(context.TODO(), "k"); gerr != nil { - t.Fatal(gerr) - } - - if _, terr := lkv.Txn(context.TODO()).Then(clientv3.OpDelete("k")).Commit(); terr != nil { - t.Fatal(terr) - } - - resp, err := lkv.Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 0 { - t.Fatalf("expected no keys, got %d", len(resp.Kvs)) - } -} - -func TestLeasingTxnOwnerIf(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - // 
served through cache
	clus.Members[0].Stop(t)

	tests := []struct {
		cmps       []clientv3.Cmp
		wSucceeded bool
		wResponses int
	}{
		// success
		{
			cmps:       []clientv3.Cmp{clientv3.Compare(clientv3.Value("k"), "=", "abc")},
			wSucceeded: true,
			wResponses: 1,
		},
		{
			cmps:       []clientv3.Cmp{clientv3.Compare(clientv3.CreateRevision("k"), "=", 2)},
			wSucceeded: true,
			wResponses: 1,
		},
		{
			cmps:       []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision("k"), "=", 2)},
			wSucceeded: true,
			wResponses: 1,
		},
		{
			cmps:       []clientv3.Cmp{clientv3.Compare(clientv3.Version("k"), "=", 1)},
			wSucceeded: true,
			wResponses: 1,
		},
		// failure
		{
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.Value("k"), ">", "abc")},
		},
		{
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.CreateRevision("k"), ">", 2)},
		},
		{
			// FIX: this entry duplicated the ModRevision "=" success case above
			// (with wSucceeded: true) inside the failure section; the intended
			// failure case is the strictly-greater comparison, matching the
			// Value/CreateRevision/Version ">" entries around it.
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision("k"), ">", 2)},
		},
		{
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.Version("k"), ">", 1)},
		},
		{
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.Value("k"), "<", "abc")},
		},
		{
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.CreateRevision("k"), "<", 2)},
		},
		{
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision("k"), "<", 2)},
		},
		{
			cmps: []clientv3.Cmp{clientv3.Compare(clientv3.Version("k"), "<", 1)},
		},
		{
			cmps: []clientv3.Cmp{
				clientv3.Compare(clientv3.Version("k"), "=", 1),
				clientv3.Compare(clientv3.Version("k"), "<", 1),
			},
		},
	}

	for i, tt := range tests {
		tresp, terr := lkv.Txn(context.TODO()).If(tt.cmps...).Then(clientv3.OpGet("k")).Commit()
		if terr != nil {
			t.Fatal(terr)
		}
		if tresp.Succeeded != tt.wSucceeded {
			t.Errorf("#%d: expected succeeded %v, got %v", i, tt.wSucceeded, tresp.Succeeded)
		}
		if len(tresp.Responses) != tt.wResponses {
			t.Errorf("#%d: expected %d responses, got %d", i, tt.wResponses, len(tresp.Responses))
		}
	}
}

func TestLeasingTxnCancel(t *testing.T) {
integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV1() - - lkv2, closeLKV2, err := leasing.NewKV(clus.Client(1), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV2() - - // acquire lease but disconnect so no revoke in time - if _, err = lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - clus.Members[0].Stop(t) - - // wait for leader election, if any - if _, err = clus.Client(1).Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithCancel(context.TODO()) - go func() { - time.Sleep(100 * time.Millisecond) - cancel() - }() - if _, err := lkv2.Txn(ctx).Then(clientv3.OpPut("k", "v")).Commit(); err != context.Canceled { - t.Fatalf("expected %v, got %v", context.Canceled, err) - } -} - -func TestLeasingTxnNonOwnerPut(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - lkv2, closeLKV2, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV2() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "k2", "123"); err != nil { - t.Fatal(err) - } - // cache in lkv - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "k2"); err != nil { - t.Fatal(err) - } - // invalidate via lkv2 txn - opArray := make([]clientv3.Op, 0) - opArray = append(opArray, clientv3.OpPut("k2", "456")) - tresp, terr := lkv2.Txn(context.TODO()).Then( - clientv3.OpTxn(nil, opArray, nil), - clientv3.OpPut("k", "def"), - clientv3.OpPut("k3", "999"), // + a key not 
in any cache - ).Commit() - if terr != nil { - t.Fatal(terr) - } - if !tresp.Succeeded || len(tresp.Responses) != 3 { - t.Fatalf("expected txn success, got %+v", tresp) - } - // check cache was invalidated - gresp, gerr := lkv.Get(context.TODO(), "k") - if gerr != nil { - t.Fatal(err) - } - if len(gresp.Kvs) != 1 || string(gresp.Kvs[0].Value) != "def" { - t.Errorf(`expected value "def", got %+v`, gresp) - } - gresp, gerr = lkv.Get(context.TODO(), "k2") - if gerr != nil { - t.Fatal(gerr) - } - if len(gresp.Kvs) != 1 || string(gresp.Kvs[0].Value) != "456" { - t.Errorf(`expected value "def", got %+v`, gresp) - } - - // check puts were applied and are all in the same revision - w := clus.Client(0).Watch( - clus.Client(0).Ctx(), - "k", - clientv3.WithRev(tresp.Header.Revision), - clientv3.WithPrefix()) - wresp := <-w - c := 0 - var evs []clientv3.Event - for _, ev := range wresp.Events { - evs = append(evs, *ev) - if ev.Kv.ModRevision == tresp.Header.Revision { - c++ - } - } - if c != 3 { - t.Fatalf("expected 3 put events, got %+v", evs) - } -} - -// TestLeasingTxnRandIfThenOrElse randomly leases keys two separate clients, then -// issues a random If/{Then,Else} transaction on those keys to one client. 
-func TestLeasingTxnRandIfThenOrElse(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err1) - defer closeLKV1() - - lkv2, closeLKV2, err2 := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err2) - defer closeLKV2() - - keyCount := 16 - dat := make([]*clientv3.PutResponse, keyCount) - for i := 0; i < keyCount; i++ { - k, v := fmt.Sprintf("k-%d", i), fmt.Sprintf("%d", i) - dat[i], err1 = clus.Client(0).Put(context.TODO(), k, v) - if err1 != nil { - t.Fatal(err1) - } - } - - // nondeterministically populate leasing caches - var wg sync.WaitGroup - getc := make(chan struct{}, keyCount) - getRandom := func(kv clientv3.KV) { - defer wg.Done() - for i := 0; i < keyCount/2; i++ { - k := fmt.Sprintf("k-%d", rand.Intn(keyCount)) - if _, err := kv.Get(context.TODO(), k); err != nil { - t.Error(err) - } - getc <- struct{}{} - } - } - wg.Add(2) - defer wg.Wait() - go getRandom(lkv1) - go getRandom(lkv2) - - // random list of comparisons, all true - cmps, useThen := randCmps("k-", dat) - // random list of puts/gets; unique keys - var ops []clientv3.Op - usedIdx := make(map[int]struct{}) - for i := 0; i < keyCount; i++ { - idx := rand.Intn(keyCount) - if _, ok := usedIdx[idx]; ok { - continue - } - usedIdx[idx] = struct{}{} - k := fmt.Sprintf("k-%d", idx) - switch rand.Intn(2) { - case 0: - ops = append(ops, clientv3.OpGet(k)) - case 1: - ops = append(ops, clientv3.OpPut(k, "a")) - // TODO: add delete - } - } - // random lengths - ops = ops[:rand.Intn(len(ops))] - - // wait for some gets to populate the leasing caches before committing - for i := 0; i < keyCount/2; i++ { - <-getc - } - - // randomly choose between then and else blocks - var thenOps, elseOps []clientv3.Op - if useThen { - thenOps = ops - } else { - // force failure - elseOps = ops - } - - tresp, terr := 
lkv1.Txn(context.TODO()).If(cmps...).Then(thenOps...).Else(elseOps...).Commit() - if terr != nil { - t.Fatal(terr) - } - // cmps always succeed - if tresp.Succeeded != useThen { - t.Fatalf("expected succeeded=%v, got tresp=%+v", useThen, tresp) - } - // get should match what was put - checkPuts := func(s string, kv clientv3.KV) { - for _, op := range ops { - if !op.IsPut() { - continue - } - resp, rerr := kv.Get(context.TODO(), string(op.KeyBytes())) - if rerr != nil { - t.Fatal(rerr) - } - if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "a" { - t.Fatalf(`%s: expected value="a", got %+v`, s, resp.Kvs) - } - } - } - checkPuts("client(0)", clus.Client(0)) - checkPuts("lkv1", lkv1) - checkPuts("lkv2", lkv2) -} - -func TestLeasingOwnerPutError(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - clus.Members[0].Stop(t) - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - if resp, err := lkv.Put(ctx, "k", "v"); err == nil { - t.Fatalf("expected error, got response %+v", resp) - } -} - -func TestLeasingOwnerDeleteError(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - clus.Members[0].Stop(t) - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - if resp, err := lkv.Delete(ctx, "k"); err == nil { - t.Fatalf("expected error, got response %+v", resp) - } -} - -func 
TestLeasingNonOwnerPutError(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - - clus.Members[0].Stop(t) - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - if resp, err := lkv.Put(ctx, "k", "v"); err == nil { - t.Fatalf("expected error, got response %+v", resp) - } -} - -func TestLeasingOwnerDeletePrefix(t *testing.T) { - testLeasingOwnerDelete(t, clientv3.OpDelete("key/", clientv3.WithPrefix())) -} - -func TestLeasingOwnerDeleteFrom(t *testing.T) { - testLeasingOwnerDelete(t, clientv3.OpDelete("kd", clientv3.WithFromKey())) -} - -func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "0/") - testutil.AssertNil(t, err) - defer closeLKV() - - for i := 0; i < 8; i++ { - if _, err = clus.Client(0).Put(context.TODO(), fmt.Sprintf("key/%d", i), "123"); err != nil { - t.Fatal(err) - } - } - - if _, err = lkv.Get(context.TODO(), "key/1"); err != nil { - t.Fatal(err) - } - - opResp, delErr := lkv.Do(context.TODO(), del) - if delErr != nil { - t.Fatal(delErr) - } - delResp := opResp.Del() - - // confirm keys are invalidated from cache and deleted on etcd - for i := 0; i < 8; i++ { - resp, err := lkv.Get(context.TODO(), fmt.Sprintf("key/%d", i)) - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 0 { - t.Fatalf("expected no keys on key/%d, got %+v", i, resp) - } - } - - // confirm keys were deleted atomically - - w := clus.Client(0).Watch( - clus.Client(0).Ctx(), - "key/", - clientv3.WithRev(delResp.Header.Revision), - clientv3.WithPrefix()) - - if wresp := <-w; len(wresp.Events) != 8 { - t.Fatalf("expected %d 
delete events,got %d", 8, len(wresp.Events)) - } -} - -func TestLeasingDeleteRangeBounds(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/") - testutil.AssertNil(t, err) - defer closeDelKV() - - getkv, closeGetKv, err := leasing.NewKV(clus.Client(0), "0/") - testutil.AssertNil(t, err) - defer closeGetKv() - - for _, k := range []string{"j", "m"} { - if _, err = clus.Client(0).Put(context.TODO(), k, "123"); err != nil { - t.Fatal(err) - } - if _, err = getkv.Get(context.TODO(), k); err != nil { - t.Fatal(err) - } - } - - if _, err = delkv.Delete(context.TODO(), "k", clientv3.WithPrefix()); err != nil { - t.Fatal(err) - } - - // leases still on server? - for _, k := range []string{"j", "m"} { - resp, geterr := clus.Client(0).Get(context.TODO(), "0/"+k, clientv3.WithPrefix()) - if geterr != nil { - t.Fatal(geterr) - } - if len(resp.Kvs) != 1 { - t.Fatalf("expected leasing key, got %+v", resp) - } - } - - // j and m should still have leases registered since not under k* - clus.Members[0].Stop(t) - - if _, err = getkv.Get(context.TODO(), "j"); err != nil { - t.Fatal(err) - } - if _, err = getkv.Get(context.TODO(), "m"); err != nil { - t.Fatal(err) - } -} - -func TestLeasingDeleteRangeContendTxn(t *testing.T) { - then := []clientv3.Op{clientv3.OpDelete("key/", clientv3.WithPrefix())} - testLeasingDeleteRangeContend(t, clientv3.OpTxn(nil, then, nil)) -} - -func TestLeaseDeleteRangeContendDel(t *testing.T) { - op := clientv3.OpDelete("key/", clientv3.WithPrefix()) - testLeasingDeleteRangeContend(t, op) -} - -func testLeasingDeleteRangeContend(t *testing.T, op clientv3.Op) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/") - testutil.AssertNil(t, err) - 
defer closeDelKV() - - putkv, closePutKV, err := leasing.NewKV(clus.Client(0), "0/") - testutil.AssertNil(t, err) - defer closePutKV() - - for i := 0; i < 8; i++ { - key := fmt.Sprintf("key/%d", i) - if _, err = clus.Client(0).Put(context.TODO(), key, "123"); err != nil { - t.Fatal(err) - } - if _, err = putkv.Get(context.TODO(), key); err != nil { - t.Fatal(err) - } - } - - ctx, cancel := context.WithCancel(context.TODO()) - donec := make(chan struct{}) - go func() { - defer close(donec) - for i := 0; ctx.Err() == nil; i++ { - key := fmt.Sprintf("key/%d", i%8) - putkv.Put(ctx, key, "123") - putkv.Get(ctx, key) - } - }() - - _, delErr := delkv.Do(context.TODO(), op) - cancel() - <-donec - if delErr != nil { - t.Fatal(delErr) - } - - // confirm keys on non-deleter match etcd - for i := 0; i < 8; i++ { - key := fmt.Sprintf("key/%d", i) - resp, err := putkv.Get(context.TODO(), key) - if err != nil { - t.Fatal(err) - } - servResp, err := clus.Client(0).Get(context.TODO(), key) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(resp.Kvs, servResp.Kvs) { - t.Errorf("#%d: expected %+v, got %+v", i, servResp.Kvs, resp.Kvs) - } - } -} - -func TestLeasingPutGetDeleteConcurrent(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkvs := make([]clientv3.KV, 16) - for i := range lkvs { - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") - testutil.AssertNil(t, err) - defer closeLKV() - lkvs[i] = lkv - } - - getdel := func(kv clientv3.KV) { - if _, err := kv.Put(context.TODO(), "k", "abc"); err != nil { - t.Fatal(err) - } - time.Sleep(time.Millisecond) - if _, err := kv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err := kv.Delete(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - time.Sleep(2 * time.Millisecond) - } - - var wg sync.WaitGroup - wg.Add(16) - for i := 0; i < 16; i++ { - go func() { - defer wg.Done() - for _, kv := range 
lkvs { - getdel(kv) - } - }() - } - wg.Wait() - - resp, err := lkvs[0].Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - - if len(resp.Kvs) > 0 { - t.Fatalf("expected no kvs, got %+v", resp.Kvs) - } - - resp, err = clus.Client(0).Get(context.TODO(), "k") - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) > 0 { - t.Fatalf("expected no kvs, got %+v", resp.Kvs) - } -} - -// TestLeasingReconnectOwnerRevoke checks that revocation works if -// disconnected when trying to submit revoke txn. -func TestLeasingReconnectOwnerRevoke(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err1) - defer closeLKV1() - - lkv2, closeLKV2, err2 := leasing.NewKV(clus.Client(1), "foo/") - testutil.AssertNil(t, err2) - defer closeLKV2() - - if _, err := lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - // force leader away from member 0 - clus.Members[0].Stop(t) - clus.WaitLeader(t) - clus.Members[0].Restart(t) - - cctx, cancel := context.WithCancel(context.TODO()) - sdonec, pdonec := make(chan struct{}), make(chan struct{}) - // make lkv1 connection choppy so Txn fails - go func() { - defer close(sdonec) - for i := 0; i < 3 && cctx.Err() == nil; i++ { - clus.Members[0].Stop(t) - time.Sleep(10 * time.Millisecond) - clus.Members[0].Restart(t) - } - }() - go func() { - defer close(pdonec) - if _, err := lkv2.Put(cctx, "k", "v"); err != nil { - t.Log(err) - } - // blocks until lkv1 connection comes back - resp, err := lkv1.Get(cctx, "k") - if err != nil { - t.Error(err) - } - if string(resp.Kvs[0].Value) != "v" { - t.Errorf(`expected "v" value, got %+v`, resp) - } - }() - select { - case <-pdonec: - cancel() - <-sdonec - case <-time.After(15 * time.Second): - cancel() - <-sdonec - <-pdonec - t.Fatal("took too long to revoke and put") - } -} - -// 
TestLeasingReconnectOwnerRevokeCompact checks that revocation works if -// disconnected and the watch is compacted. -func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err1) - defer closeLKV1() - - lkv2, closeLKV2, err2 := leasing.NewKV(clus.Client(1), "foo/") - testutil.AssertNil(t, err2) - defer closeLKV2() - - if _, err := lkv1.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - clus.Members[0].Stop(t) - clus.WaitLeader(t) - - // put some more revisions for compaction - _, err := clus.Client(1).Put(context.TODO(), "a", "123") - if err != nil { - t.Fatal(err) - } - presp, err := clus.Client(1).Put(context.TODO(), "a", "123") - if err != nil { - t.Fatal(err) - } - // compact while lkv1 is disconnected - rev := presp.Header.Revision - if _, err = clus.Client(1).Compact(context.TODO(), rev); err != nil { - t.Fatal(err) - } - - clus.Members[0].Restart(t) - - cctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - defer cancel() - if _, err = lkv2.Put(cctx, "k", "v"); err != nil { - t.Fatal(err) - } - resp, err := lkv1.Get(cctx, "k") - if err != nil { - t.Fatal(err) - } - if string(resp.Kvs[0].Value) != "v" { - t.Fatalf(`expected "v" value, got %+v`, resp) - } -} - -// TestLeasingReconnectOwnerConsistency checks a write error on an owner will -// not cause inconsistency between the server and the client. 
-func TestLeasingReconnectOwnerConsistency(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") - defer closeLKV() - testutil.AssertNil(t, err) - - if _, err = lkv.Put(context.TODO(), "k", "x"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Put(context.TODO(), "kk", "y"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - v := fmt.Sprintf("%d", i) - donec := make(chan struct{}) - clus.Members[0].Bridge().DropConnections() - go func() { - defer close(donec) - for i := 0; i < 20; i++ { - clus.Members[0].Bridge().DropConnections() - time.Sleep(time.Millisecond) - } - }() - switch rand.Intn(7) { - case 0: - _, err = lkv.Put(context.TODO(), "k", v) - case 1: - _, err = lkv.Delete(context.TODO(), "k") - case 2: - txn := lkv.Txn(context.TODO()).Then( - clientv3.OpGet("k"), - clientv3.OpDelete("k"), - ) - _, err = txn.Commit() - case 3: - txn := lkv.Txn(context.TODO()).Then( - clientv3.OpGet("k"), - clientv3.OpPut("k", v), - ) - _, err = txn.Commit() - case 4: - _, err = lkv.Do(context.TODO(), clientv3.OpPut("k", v)) - case 5: - _, err = lkv.Do(context.TODO(), clientv3.OpDelete("k")) - case 6: - _, err = lkv.Delete(context.TODO(), "k", clientv3.WithPrefix()) - } - <-donec - if err != nil { - // TODO wrap input client to generate errors - break - } - } - - lresp, lerr := lkv.Get(context.TODO(), "k") - if lerr != nil { - t.Fatal(lerr) - } - cresp, cerr := clus.Client(0).Get(context.TODO(), "k") - if cerr != nil { - t.Fatal(cerr) - } - if !reflect.DeepEqual(lresp.Kvs, cresp.Kvs) { - t.Fatalf("expected %+v, got %+v", cresp, lresp) - } -} - -func TestLeasingTxnAtomicCache(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) 
- - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV() - - puts, gets := make([]clientv3.Op, 16), make([]clientv3.Op, 16) - for i := range puts { - k := fmt.Sprintf("k-%d", i) - puts[i], gets[i] = clientv3.OpPut(k, k), clientv3.OpGet(k) - } - if _, err = clus.Client(0).Txn(context.TODO()).Then(puts...).Commit(); err != nil { - t.Fatal(err) - } - for i := range gets { - if _, err = lkv.Do(context.TODO(), gets[i]); err != nil { - t.Fatal(err) - } - } - - numPutters, numGetters := 16, 16 - - var wgPutters, wgGetters sync.WaitGroup - wgPutters.Add(numPutters) - wgGetters.Add(numGetters) - txnerrCh := make(chan error, 1) - - f := func() { - defer wgPutters.Done() - for i := 0; i < 10; i++ { - if _, txnerr := lkv.Txn(context.TODO()).Then(puts...).Commit(); txnerr != nil { - select { - case txnerrCh <- txnerr: - default: - } - } - } - } - - donec := make(chan struct{}, numPutters) - g := func() { - defer wgGetters.Done() - for { - select { - case <-donec: - return - default: - } - tresp, err := lkv.Txn(context.TODO()).Then(gets...).Commit() - if err != nil { - t.Error(err) - } - revs := make([]int64, len(gets)) - for i, resp := range tresp.Responses { - rr := resp.GetResponseRange() - revs[i] = rr.Kvs[0].ModRevision - } - for i := 1; i < len(revs); i++ { - if revs[i] != revs[i-1] { - t.Errorf("expected matching revisions, got %+v", revs) - } - } - } - } - - for i := 0; i < numGetters; i++ { - go g() - } - for i := 0; i < numPutters; i++ { - go f() - } - - wgPutters.Wait() - select { - case txnerr := <-txnerrCh: - t.Fatal(txnerr) - default: - } - close(donec) - wgGetters.Wait() -} - -// TestLeasingReconnectTxn checks that Txn is resilient to disconnects. 
-func TestLeasingReconnectTxn(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - donec := make(chan struct{}) - go func() { - defer close(donec) - clus.Members[0].Bridge().DropConnections() - for i := 0; i < 10; i++ { - clus.Members[0].Bridge().DropConnections() - time.Sleep(time.Millisecond) - } - time.Sleep(10 * time.Millisecond) - }() - - _, lerr := lkv.Txn(context.TODO()). - If(clientv3.Compare(clientv3.Version("k"), "=", 0)). - Then(clientv3.OpGet("k")). - Commit() - <-donec - if lerr != nil { - t.Fatal(lerr) - } -} - -// TestLeasingReconnectNonOwnerGet checks a get error on an owner will -// not cause inconsistency between the server and the client. -func TestLeasingReconnectNonOwnerGet(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV() - - // populate a few keys so some leasing gets have keys - for i := 0; i < 4; i++ { - k := fmt.Sprintf("k-%d", i*2) - if _, err = lkv.Put(context.TODO(), k, k[2:]); err != nil { - t.Fatal(err) - } - } - - n := 0 - for i := 0; i < 10; i++ { - donec := make(chan struct{}) - clus.Members[0].Bridge().DropConnections() - go func() { - defer close(donec) - for j := 0; j < 10; j++ { - clus.Members[0].Bridge().DropConnections() - time.Sleep(time.Millisecond) - } - }() - _, err = lkv.Get(context.TODO(), fmt.Sprintf("k-%d", i)) - <-donec - n++ - if err != nil { - break - } - } - for i := 0; i < n; i++ { - k := fmt.Sprintf("k-%d", i) - lresp, lerr := lkv.Get(context.TODO(), k) - if lerr != nil { - t.Fatal(lerr) - } - 
cresp, cerr := clus.Client(0).Get(context.TODO(), k) - if cerr != nil { - t.Fatal(cerr) - } - if !reflect.DeepEqual(lresp.Kvs, cresp.Kvs) { - t.Fatalf("expected %+v, got %+v", cresp, lresp) - } - } -} - -func TestLeasingTxnRangeCmp(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = clus.Client(0).Put(context.TODO(), "k", "a"); err != nil { - t.Fatal(err) - } - // k2 version = 2 - if _, err = clus.Client(0).Put(context.TODO(), "k2", "a"); err != nil { - t.Fatal(err) - } - if _, err = clus.Client(0).Put(context.TODO(), "k2", "a"); err != nil { - t.Fatal(err) - } - - // cache k - if _, err = lkv.Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - - cmp := clientv3.Compare(clientv3.Version("k").WithPrefix(), "=", 1) - tresp, terr := lkv.Txn(context.TODO()).If(cmp).Commit() - if terr != nil { - t.Fatal(terr) - } - if tresp.Succeeded { - t.Fatalf("expected Succeeded=false, got %+v", tresp) - } -} - -func TestLeasingDo(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV() - - ops := []clientv3.Op{ - clientv3.OpTxn(nil, nil, nil), - clientv3.OpGet("a"), - clientv3.OpPut("a/abc", "v"), - clientv3.OpDelete("a", clientv3.WithPrefix()), - clientv3.OpTxn(nil, nil, nil), - } - for i, op := range ops { - resp, resperr := lkv.Do(context.TODO(), op) - if resperr != nil { - t.Errorf("#%d: failed (%v)", i, resperr) - } - switch { - case op.IsGet() && resp.Get() == nil: - t.Errorf("#%d: get but nil get response", i) - case op.IsPut() && resp.Put() == nil: - t.Errorf("#%d: put op but nil get response", i) - case op.IsDelete() && resp.Del() == nil: - 
t.Errorf("#%d: delete op but nil delete response", i) - case op.IsTxn() && resp.Txn() == nil: - t.Errorf("#%d: txn op but nil txn response", i) - } - } - - gresp, err := clus.Client(0).Get(context.TODO(), "a", clientv3.WithPrefix()) - if err != nil { - t.Fatal(err) - } - if len(gresp.Kvs) != 0 { - t.Fatalf("expected no keys, got %+v", gresp.Kvs) - } -} - -func TestLeasingTxnOwnerPutBranch(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV() - - n := 0 - treeOp := makePutTreeOp("tree", &n, 4) - for i := 0; i < n; i++ { - k := fmt.Sprintf("tree/%d", i) - if _, err = clus.Client(0).Put(context.TODO(), k, "a"); err != nil { - t.Fatal(err) - } - if _, err = lkv.Get(context.TODO(), k); err != nil { - t.Fatal(err) - } - } - - if _, err = lkv.Do(context.TODO(), treeOp); err != nil { - t.Fatal(err) - } - - // lkv shouldn't need to call out to server for updated leased keys - clus.Members[0].Stop(t) - - for i := 0; i < n; i++ { - k := fmt.Sprintf("tree/%d", i) - lkvResp, err := lkv.Get(context.TODO(), k) - if err != nil { - t.Fatal(err) - } - clusResp, err := clus.Client(1).Get(context.TODO(), k) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(clusResp.Kvs, lkvResp.Kvs) { - t.Fatalf("expected %+v, got %+v", clusResp.Kvs, lkvResp.Kvs) - } - } -} - -func makePutTreeOp(pfx string, v *int, depth int) clientv3.Op { - key := fmt.Sprintf("%s/%d", pfx, *v) - *v = *v + 1 - if depth == 0 { - return clientv3.OpPut(key, "leaf") - } - - t, e := makePutTreeOp(pfx, v, depth-1), makePutTreeOp(pfx, v, depth-1) - tPut, ePut := clientv3.OpPut(key, "then"), clientv3.OpPut(key, "else") - - cmps := make([]clientv3.Cmp, 1) - if rand.Intn(2) == 0 { - // follow then path - cmps[0] = clientv3.Compare(clientv3.Version("nokey"), "=", 0) - } else { - // follow else path - 
cmps[0] = clientv3.Compare(clientv3.Version("nokey"), ">", 0) - } - - return clientv3.OpTxn(cmps, []clientv3.Op{t, tPut}, []clientv3.Op{e, ePut}) -} - -func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, then bool) { - for i := 0; i < len(dat); i++ { - idx := rand.Intn(len(dat)) - k := fmt.Sprintf("%s%d", pfx, idx) - rev := dat[idx].Header.Revision - var cmp clientv3.Cmp - switch rand.Intn(4) { - case 0: - cmp = clientv3.Compare(clientv3.CreateRevision(k), ">", rev-1) - case 1: - cmp = clientv3.Compare(clientv3.Version(k), "=", 1) - case 2: - cmp = clientv3.Compare(clientv3.CreateRevision(k), "=", rev) - case 3: - cmp = clientv3.Compare(clientv3.CreateRevision(k), "!=", rev+1) - - } - cmps = append(cmps, cmp) - } - cmps = cmps[:rand.Intn(len(dat))] - if rand.Intn(2) == 0 { - return cmps, true - } - i := rand.Intn(len(dat)) - cmps = append(cmps, clientv3.Compare(clientv3.Version(fmt.Sprintf("k-%d", i)), "=", 0)) - return cmps, false -} - -func TestLeasingSessionExpire(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1)) - testutil.AssertNil(t, err) - defer closeLKV() - - lkv2, closeLKV2, err := leasing.NewKV(clus.Client(0), "foo/") - testutil.AssertNil(t, err) - defer closeLKV2() - - // acquire lease on abc - if _, err = lkv.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - - // down endpoint lkv uses for keepalives - clus.Members[0].Stop(t) - if err = waitForLeasingExpire(clus.Client(1), "foo/abc"); err != nil { - t.Fatal(err) - } - waitForExpireAck(t, lkv) - clus.Members[0].Restart(t) - integration2.WaitClientV3(t, lkv2) - if _, err = lkv2.Put(context.TODO(), "abc", "def"); err != nil { - t.Fatal(err) - } - - resp, err := lkv.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } - if v := string(resp.Kvs[0].Value); v != 
"def" { - t.Fatalf("expected %q, got %q", "v", v) - } -} - -func TestLeasingSessionExpireCancel(t *testing.T) { - tests := []func(context.Context, clientv3.KV) error{ - func(ctx context.Context, kv clientv3.KV) error { - _, err := kv.Get(ctx, "abc") - return err - }, - func(ctx context.Context, kv clientv3.KV) error { - _, err := kv.Delete(ctx, "abc") - return err - }, - func(ctx context.Context, kv clientv3.KV) error { - _, err := kv.Put(ctx, "abc", "v") - return err - }, - func(ctx context.Context, kv clientv3.KV) error { - _, err := kv.Txn(ctx).Then(clientv3.OpGet("abc")).Commit() - return err - }, - func(ctx context.Context, kv clientv3.KV) error { - _, err := kv.Do(ctx, clientv3.OpPut("abc", "v")) - return err - }, - func(ctx context.Context, kv clientv3.KV) error { - _, err := kv.Do(ctx, clientv3.OpDelete("abc")) - return err - }, - func(ctx context.Context, kv clientv3.KV) error { - _, err := kv.Do(ctx, clientv3.OpGet("abc")) - return err - }, - func(ctx context.Context, kv clientv3.KV) error { - op := clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpGet("abc")}, nil) - _, err := kv.Do(ctx, op) - return err - }, - } - for i := range tests { - t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1)) - testutil.AssertNil(t, err) - defer closeLKV() - - if _, err = lkv.Get(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - - // down endpoint lkv uses for keepalives - clus.Members[0].Stop(t) - if err := waitForLeasingExpire(clus.Client(1), "foo/abc"); err != nil { - t.Fatal(err) - } - waitForExpireAck(t, lkv) - - ctx, cancel := context.WithCancel(context.TODO()) - errc := make(chan error, 1) - go func() { errc <- tests[i](ctx, lkv) }() - // some delay to get past for ctx.Err() != nil {} loops - time.Sleep(100 * time.Millisecond) - 
cancel() - - select { - case err := <-errc: - if err != ctx.Err() { - t.Errorf("#%d: expected %v of server unavailable, got %v", i, ctx.Err(), err) - } - case <-time.After(5 * time.Second): - t.Errorf("#%d: timed out waiting for cancel", i) - } - clus.Members[0].Restart(t) - }) - } -} - -func waitForLeasingExpire(kv clientv3.KV, lkey string) error { - for { - time.Sleep(1 * time.Second) - resp, err := kv.Get(context.TODO(), lkey, clientv3.WithPrefix()) - if err != nil { - return err - } - if len(resp.Kvs) == 0 { - // server expired the leasing key - return nil - } - } -} - -func waitForExpireAck(t *testing.T, kv clientv3.KV) { - // wait for leasing client to acknowledge lost lease - for i := 0; i < 10; i++ { - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - _, err := kv.Get(ctx, "abc") - cancel() - if err == ctx.Err() { - return - } else if err != nil { - t.Logf("current error: %v", err) - } - time.Sleep(time.Second) - } - t.Fatalf("waited too long to acknlowedge lease expiration") -} diff --git a/tests/integration/clientv3/lease/main_test.go b/tests/integration/clientv3/lease/main_test.go deleted file mode 100644 index 1057c4ab86e..00000000000 --- a/tests/integration/clientv3/lease/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lease_test - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/tests/integration/clientv3/main_test.go b/tests/integration/clientv3/main_test.go deleted file mode 100644 index 9481beebcb5..00000000000 --- a/tests/integration/clientv3/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package clientv3test - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/tests/integration/clientv3/maintenance_test.go b/tests/integration/clientv3/maintenance_test.go deleted file mode 100644 index d338a1bdca6..00000000000 --- a/tests/integration/clientv3/maintenance_test.go +++ /dev/null @@ -1,437 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3test - -import ( - "bytes" - "context" - "crypto/sha256" - "fmt" - "io" - "math" - "os" - "path/filepath" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/api/v3/version" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/etcd/server/v3/storage/mvcc/testutil" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" - "google.golang.org/grpc" -) - -func TestMaintenanceHashKV(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - for i := 0; i < 3; i++ { - if _, err := clus.RandClient().Put(context.Background(), "foo", "bar"); err != nil { - t.Fatal(err) - } - } - - var hv uint32 - for i := 0; i < 3; i++ { - cli := clus.Client(i) - // ensure writes are replicated - if _, err := cli.Get(context.TODO(), "foo"); err != nil { - t.Fatal(err) - } - hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCURL(), 0) - if err != nil { - t.Fatal(err) - } - if hv == 0 { - hv = hresp.Hash - continue - } - if hv != hresp.Hash { - t.Fatalf("#%d: hash expected %d, got %d", i, hv, hresp.Hash) - } - } -} - -// TestCompactionHash tests compaction hash -// TODO: Change this to fuzz test -func TestCompactionHash(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cc, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } - - testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL()}, 1000) -} - -type hashTestCase struct { - *clientv3.Client - url string -} - -func (tc hashTestCase) Put(ctx context.Context, key, value string) error { - _, err := tc.Client.Put(ctx, key, value) - 
return err -} - -func (tc hashTestCase) Delete(ctx context.Context, key string) error { - _, err := tc.Client.Delete(ctx, key) - return err -} - -func (tc hashTestCase) HashByRev(ctx context.Context, rev int64) (testutil.KeyValueHash, error) { - resp, err := tc.Client.HashKV(ctx, tc.url, rev) - return testutil.KeyValueHash{Hash: resp.Hash, CompactRevision: resp.CompactRevision, Revision: resp.Header.Revision}, err -} - -func (tc hashTestCase) Defrag(ctx context.Context) error { - _, err := tc.Client.Defragment(ctx, tc.url) - return err -} - -func (tc hashTestCase) Compact(ctx context.Context, rev int64) error { - _, err := tc.Client.Compact(ctx, rev) - // Wait for compaction to be compacted - time.Sleep(50 * time.Millisecond) - return err -} - -func TestMaintenanceMoveLeader(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - oldLeadIdx := clus.WaitLeader(t) - targetIdx := (oldLeadIdx + 1) % 3 - target := uint64(clus.Members[targetIdx].ID()) - - cli := clus.Client(targetIdx) - _, err := cli.MoveLeader(context.Background(), target) - if err != rpctypes.ErrNotLeader { - t.Fatalf("error expected %v, got %v", rpctypes.ErrNotLeader, err) - } - - cli = clus.Client(oldLeadIdx) - _, err = cli.MoveLeader(context.Background(), target) - if err != nil { - t.Fatal(err) - } - - leadIdx := clus.WaitLeader(t) - lead := uint64(clus.Members[leadIdx].ID()) - if target != lead { - t.Fatalf("new leader expected %d, got %d", target, lead) - } -} - -// TestMaintenanceSnapshotCancel ensures that context cancel -// before snapshot reading returns corresponding context errors. 
-func TestMaintenanceSnapshotCancel(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - // reading snapshot with canceled context should error out - ctx, cancel := context.WithCancel(context.Background()) - - // Since http2 spec defines the receive windows's size and max size of - // frame in the stream, the underlayer - gRPC client can pre-read data - // from server even if the application layer hasn't read it yet. - // - // And the initialized cluster has 20KiB snapshot, which can be - // pre-read by underlayer. We should increase the snapshot's size here, - // just in case that io.Copy won't return the canceled error. - populateDataIntoCluster(t, clus, 3, 1024*1024) - - rc1, err := clus.RandClient().Snapshot(ctx) - if err != nil { - t.Fatal(err) - } - defer rc1.Close() - - cancel() - _, err = io.Copy(io.Discard, rc1) - if err != context.Canceled { - t.Errorf("expected %v, got %v", context.Canceled, err) - } -} - -// TestMaintenanceSnapshotWithVersionTimeout ensures that SnapshotWithVersion function -// returns corresponding context errors when context timeout happened before snapshot reading -func TestMaintenanceSnapshotWithVersionTimeout(t *testing.T) { - testMaintenanceSnapshotTimeout(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) { - resp, err := client.SnapshotWithVersion(ctx) - if err != nil { - return nil, err - } - return resp.Snapshot, nil - }) -} - -// TestMaintenanceSnapshotTimeout ensures that Snapshot function -// returns corresponding context errors when context timeout happened before snapshot reading -func TestMaintenanceSnapshotTimeout(t *testing.T) { - testMaintenanceSnapshotTimeout(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) { - return client.Snapshot(ctx) - }) -} - -// testMaintenanceSnapshotTimeout given snapshot function ensures that it -// returns corresponding context errors when 
context timeout happened before snapshot reading -func testMaintenanceSnapshotTimeout(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - // reading snapshot with deadline exceeded should error out - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - // Since http2 spec defines the receive windows's size and max size of - // frame in the stream, the underlayer - gRPC client can pre-read data - // from server even if the application layer hasn't read it yet. - // - // And the initialized cluster has 20KiB snapshot, which can be - // pre-read by underlayer. We should increase the snapshot's size here, - // just in case that io.Copy won't return the timeout error. - populateDataIntoCluster(t, clus, 3, 1024*1024) - - rc2, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } - defer rc2.Close() - - time.Sleep(2 * time.Second) - - _, err = io.Copy(io.Discard, rc2) - if err != nil && !IsClientTimeout(err) { - t.Errorf("expected client timeout, got %v", err) - } -} - -// TestMaintenanceSnapshotWithVersionErrorInflight ensures that ReaderCloser returned by SnapshotWithVersion function -// will fail to read with corresponding context errors on inflight context cancel timeout. -func TestMaintenanceSnapshotWithVersionErrorInflight(t *testing.T) { - testMaintenanceSnapshotErrorInflight(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) { - resp, err := client.SnapshotWithVersion(ctx) - if err != nil { - return nil, err - } - return resp.Snapshot, nil - }) -} - -// TestMaintenanceSnapshotErrorInflight ensures that ReaderCloser returned by Snapshot function -// will fail to read with corresponding context errors on inflight context cancel timeout. 
-func TestMaintenanceSnapshotErrorInflight(t *testing.T) { - testMaintenanceSnapshotErrorInflight(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) { - return client.Snapshot(ctx) - }) -} - -// testMaintenanceSnapshotErrorInflight given snapshot function ensures that ReaderCloser returned by it -// will fail to read with corresponding context errors on inflight context cancel timeout. -func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) { - integration2.BeforeTest(t) - lg := zaptest.NewLogger(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - // take about 1-second to read snapshot - clus.Members[0].Stop(t) - dpath := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db") - b := backend.NewDefaultBackend(lg, dpath) - s := mvcc.NewStore(lg, b, &lease.FakeLessor{}, mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32}) - rev := 100000 - for i := 2; i <= rev; i++ { - s.Put([]byte(fmt.Sprintf("%10d", i)), bytes.Repeat([]byte("a"), 1024), lease.NoLease) - } - s.Close() - b.Close() - clus.Members[0].Restart(t) - - // reading snapshot with canceled context should error out - ctx, cancel := context.WithCancel(context.Background()) - rc1, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } - defer rc1.Close() - - donec := make(chan struct{}) - go func() { - time.Sleep(300 * time.Millisecond) - cancel() - close(donec) - }() - _, err = io.Copy(io.Discard, rc1) - if err != nil && err != context.Canceled { - t.Errorf("expected %v, got %v", context.Canceled, err) - } - <-donec - - // reading snapshot with deadline exceeded should error out - ctx, cancel = context.WithTimeout(context.Background(), time.Second) - defer cancel() - rc2, err := snapshot(ctx, clus.RandClient()) - if err != nil { - t.Fatal(err) - } - defer rc2.Close() - - // 300ms left and expect timeout while 
snapshot reading is in progress - time.Sleep(700 * time.Millisecond) - _, err = io.Copy(io.Discard, rc2) - if err != nil && !IsClientTimeout(err) { - t.Errorf("expected client timeout, got %v", err) - } -} - -// TestMaintenanceSnapshotWithVersionVersion ensures that SnapshotWithVersion returns correct version value. -func TestMaintenanceSnapshotWithVersionVersion(t *testing.T) { - integration2.BeforeTest(t) - - // Set SnapshotCount to 1 to force raft snapshot to ensure that storage version is set - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, SnapshotCount: 1}) - defer clus.Terminate(t) - - // Put some keys to ensure that wal snapshot is triggered - for i := 0; i < 10; i++ { - clus.RandClient().Put(context.Background(), fmt.Sprintf("%d", i), "1") - } - - // reading snapshot with canceled context should error out - resp, err := clus.RandClient().SnapshotWithVersion(context.Background()) - if err != nil { - t.Fatal(err) - } - defer resp.Snapshot.Close() - if resp.Version != "3.6.0" { - t.Errorf("unexpected version, expected %q, got %q", version.Version, resp.Version) - } -} - -func TestMaintenanceSnapshotContentDigest(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - populateDataIntoCluster(t, clus, 3, 1024*1024) - - // reading snapshot with canceled context should error out - resp, err := clus.RandClient().SnapshotWithVersion(context.Background()) - require.NoError(t, err) - defer resp.Snapshot.Close() - - tmpDir := t.TempDir() - snapFile, err := os.Create(filepath.Join(tmpDir, t.Name())) - require.NoError(t, err) - defer snapFile.Close() - - snapSize, err := io.Copy(snapFile, resp.Snapshot) - require.NoError(t, err) - - // read the checksum - checksumSize := int64(sha256.Size) - _, err = snapFile.Seek(-checksumSize, io.SeekEnd) - require.NoError(t, err) - - checksumInBytes, err := io.ReadAll(snapFile) - require.NoError(t, err) - 
require.Equal(t, int(checksumSize), len(checksumInBytes)) - - // remove the checksum part and rehash - err = snapFile.Truncate(snapSize - checksumSize) - require.NoError(t, err) - - _, err = snapFile.Seek(0, io.SeekStart) - require.NoError(t, err) - - hashWriter := sha256.New() - _, err = io.Copy(hashWriter, snapFile) - require.NoError(t, err) - - // compare the checksum - actualChecksum := hashWriter.Sum(nil) - require.Equal(t, checksumInBytes, actualChecksum) -} - -func TestMaintenanceStatus(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - t.Logf("Waiting for leader...") - clus.WaitLeader(t) - t.Logf("Leader established.") - - eps := make([]string, 3) - for i := 0; i < 3; i++ { - eps[i] = clus.Members[i].GRPCURL() - } - - t.Logf("Creating client...") - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - t.Logf("Creating client [DONE]") - - prevID, leaderFound := uint64(0), false - for i := 0; i < 3; i++ { - resp, err := cli.Status(context.TODO(), eps[i]) - if err != nil { - t.Fatal(err) - } - t.Logf("Response from %v: %v", i, resp) - if prevID == 0 { - prevID, leaderFound = resp.Header.MemberId, resp.Header.MemberId == resp.Leader - continue - } - if prevID == resp.Header.MemberId { - t.Errorf("#%d: status returned duplicate member ID with %016x", i, prevID) - } - if leaderFound && resp.Header.MemberId == resp.Leader { - t.Errorf("#%d: leader already found, but found another %016x", i, resp.Header.MemberId) - } - if !leaderFound { - leaderFound = resp.Header.MemberId == resp.Leader - } - } - if !leaderFound { - t.Fatal("no leader found") - } -} diff --git a/tests/integration/clientv3/metrics_test.go b/tests/integration/clientv3/metrics_test.go deleted file mode 100644 index a0231946d71..00000000000 --- 
a/tests/integration/clientv3/metrics_test.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3test - -import ( - "bufio" - "context" - "io" - "net" - "net/http" - "strconv" - "strings" - "testing" - "time" - - grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "google.golang.org/grpc" - - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestV3ClientMetrics(t *testing.T) { - integration2.BeforeTest(t) - - var ( - addr = "localhost:27989" - ln net.Listener - ) - - srv := &http.Server{Handler: promhttp.Handler()} - srv.SetKeepAlivesEnabled(false) - - ln, err := transport.NewUnixListener(addr) - if err != nil { - t.Errorf("Error: %v occurred while listening on addr: %v", err, addr) - } - - donec := make(chan struct{}) - defer func() { - ln.Close() - <-donec - }() - - // listen for all Prometheus metrics - - go func() { - var err error - - defer close(donec) - - err = srv.Serve(ln) - if err != nil && !transport.IsClosedConnError(err) { - t.Errorf("Err serving http requests: %v", err) - } - }() - - url := "unix://" + addr + "/metrics" - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cfg := clientv3.Config{ - Endpoints: 
[]string{clus.Members[0].GRPCURL()}, - DialOptions: []grpc.DialOption{ - grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor), - grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor), - }, - } - cli, cerr := integration2.NewClient(t, cfg) - if cerr != nil { - t.Fatal(cerr) - } - defer cli.Close() - - wc := cli.Watch(context.Background(), "foo") - - wBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream") - - pBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary") - - _, err = cli.Put(context.Background(), "foo", "bar") - if err != nil { - t.Errorf("Error putting value in key store") - } - - pAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary") - if pBefore+1 != pAfter { - t.Errorf("grpc_client_started_total expected %d, got %d", 1, pAfter-pBefore) - } - - // consume watch response - select { - case <-wc: - case <-time.After(10 * time.Second): - t.Error("Timeout occurred for getting watch response") - } - - wAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream") - if wBefore+1 != wAfter { - t.Errorf("grpc_client_msg_received_total expected %d, got %d", 1, wAfter-wBefore) - } -} - -func sumCountersForMetricAndLabels(t *testing.T, url string, metricName string, matchingLabelValues ...string) int { - count := 0 - for _, line := range getHTTPBodyAsLines(t, url) { - ok := true - if !strings.HasPrefix(line, metricName) { - continue - } - - for _, labelValue := range matchingLabelValues { - if !strings.Contains(line, `"`+labelValue+`"`) { - ok = false - break - } - } - - if !ok { - continue - } - - valueString := line[strings.LastIndex(line, " ")+1 : len(line)-1] - valueFloat, err := strconv.ParseFloat(valueString, 32) - if err != nil { - t.Fatalf("failed parsing value for line: %v and matchingLabelValues: %v", line, matchingLabelValues) - } - count += int(valueFloat) - } - return count 
-} - -func getHTTPBodyAsLines(t *testing.T, url string) []string { - cfgtls := transport.TLSInfo{} - tr, err := transport.NewTransport(cfgtls, time.Second) - if err != nil { - t.Fatalf("Error getting transport: %v", err) - } - - tr.MaxIdleConns = -1 - tr.DisableKeepAlives = true - - cli := &http.Client{Transport: tr} - - resp, err := cli.Get(url) - if err != nil { - t.Fatalf("Error fetching: %v", err) - } - - reader := bufio.NewReader(resp.Body) - var lines []string - for { - line, err := reader.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } else { - t.Fatalf("error reading: %v", err) - } - } - lines = append(lines, line) - } - resp.Body.Close() - return lines -} diff --git a/tests/integration/clientv3/mirror_auth_test.go b/tests/integration/clientv3/mirror_auth_test.go deleted file mode 100644 index 8dedd4e94d2..00000000000 --- a/tests/integration/clientv3/mirror_auth_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !cluster_proxy - -package clientv3test - -import ( - "context" - "reflect" - "testing" - "time" - - "google.golang.org/grpc" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/mirror" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestMirrorSync_Authenticated(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - initialClient := clus.Client(0) - - // Create a user to run the mirror process that only has access to /syncpath - initialClient.RoleAdd(context.Background(), "syncer") - initialClient.RoleGrantPermission(context.Background(), "syncer", "/syncpath", clientv3.GetPrefixRangeEnd("/syncpath"), clientv3.PermissionType(clientv3.PermReadWrite)) - initialClient.UserAdd(context.Background(), "syncer", "syncfoo") - initialClient.UserGrantRole(context.Background(), "syncer", "syncer") - - // Seed /syncpath with some initial data - _, err := initialClient.KV.Put(context.TODO(), "/syncpath/foo", "bar") - if err != nil { - t.Fatal(err) - } - - // Require authentication - authSetupRoot(t, initialClient.Auth) - - // Create a client as the `syncer` user. 
- cfg := clientv3.Config{ - Endpoints: initialClient.Endpoints(), - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - Username: "syncer", - Password: "syncfoo", - } - syncClient, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - defer syncClient.Close() - - // Now run the sync process, create changes, and get the initial sync state - syncer := mirror.NewSyncer(syncClient, "/syncpath", 0) - gch, ech := syncer.SyncBase(context.TODO()) - wkvs := []*mvccpb.KeyValue{{Key: []byte("/syncpath/foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}} - - for g := range gch { - if !reflect.DeepEqual(g.Kvs, wkvs) { - t.Fatalf("kv = %v, want %v", g.Kvs, wkvs) - } - } - - for e := range ech { - t.Fatalf("unexpected error %v", e) - } - - // Start a continuous sync - wch := syncer.SyncUpdates(context.TODO()) - - // Update state - _, err = syncClient.KV.Put(context.TODO(), "/syncpath/foo", "baz") - if err != nil { - t.Fatal(err) - } - - // Wait for the updated state to sync - select { - case r := <-wch: - wkv := &mvccpb.KeyValue{Key: []byte("/syncpath/foo"), Value: []byte("baz"), CreateRevision: 2, ModRevision: 3, Version: 2} - if !reflect.DeepEqual(r.Events[0].Kv, wkv) { - t.Fatalf("kv = %v, want %v", r.Events[0].Kv, wkv) - } - case <-time.After(time.Second): - t.Fatal("failed to receive update in one second") - } -} diff --git a/tests/integration/clientv3/mirror_test.go b/tests/integration/clientv3/mirror_test.go deleted file mode 100644 index f21551bbdf0..00000000000 --- a/tests/integration/clientv3/mirror_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3test - -import ( - "context" - "fmt" - "reflect" - "sync" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/v3/mirror" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestMirrorSync(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - c := clus.Client(0) - _, err := c.KV.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } - - syncer := mirror.NewSyncer(c, "", 0) - gch, ech := syncer.SyncBase(context.TODO()) - wkvs := []*mvccpb.KeyValue{{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}} - - for g := range gch { - if !reflect.DeepEqual(g.Kvs, wkvs) { - t.Fatalf("kv = %v, want %v", g.Kvs, wkvs) - } - } - - for e := range ech { - t.Fatalf("unexpected error %v", e) - } - - wch := syncer.SyncUpdates(context.TODO()) - - _, err = c.KV.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } - - select { - case r := <-wch: - wkv := &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2} - if !reflect.DeepEqual(r.Events[0].Kv, wkv) { - t.Fatalf("kv = %v, want %v", r.Events[0].Kv, wkv) - } - case <-time.After(time.Second): - t.Fatal("failed to receive update in one second") - } -} - -func TestMirrorSyncBase(t *testing.T) { - integration2.BeforeTest(t) - - cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer cluster.Terminate(t) - - 
cli := cluster.Client(0) - ctx := context.TODO() - - keyCh := make(chan string) - var wg sync.WaitGroup - - for i := 0; i < 50; i++ { - wg.Add(1) - - go func() { - defer wg.Done() - - for key := range keyCh { - if _, err := cli.Put(ctx, key, "test"); err != nil { - t.Error(err) - } - } - }() - } - - for i := 0; i < 2000; i++ { - keyCh <- fmt.Sprintf("test%d", i) - } - - close(keyCh) - wg.Wait() - - syncer := mirror.NewSyncer(cli, "test", 0) - respCh, errCh := syncer.SyncBase(ctx) - - count := 0 - - for resp := range respCh { - count = count + len(resp.Kvs) - if !resp.More { - break - } - } - - for err := range errCh { - t.Fatalf("unexpected error %v", err) - } - - if count != 2000 { - t.Errorf("unexpected kv count: %d", count) - } -} diff --git a/tests/integration/clientv3/namespace_test.go b/tests/integration/clientv3/namespace_test.go deleted file mode 100644 index 2aad010f987..00000000000 --- a/tests/integration/clientv3/namespace_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3test - -import ( - "context" - "reflect" - "testing" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/namespace" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestNamespacePutGet(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - c := clus.Client(0) - nsKV := namespace.NewKV(c.KV, "foo/") - - if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil { - t.Fatal(err) - } - resp, err := nsKV.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } - if string(resp.Kvs[0].Key) != "abc" { - t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key) - } - - resp, err = c.Get(context.TODO(), "foo/abc") - if err != nil { - t.Fatal(err) - } - if string(resp.Kvs[0].Value) != "bar" { - t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value) - } -} - -func TestNamespaceWatch(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - c := clus.Client(0) - nsKV := namespace.NewKV(c.KV, "foo/") - nsWatcher := namespace.NewWatcher(c.Watcher, "foo/") - - if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil { - t.Fatal(err) - } - - nsWch := nsWatcher.Watch(context.TODO(), "abc", clientv3.WithRev(1)) - wkv := &mvccpb.KeyValue{Key: []byte("abc"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1} - if wr := <-nsWch; len(wr.Events) != 1 || !reflect.DeepEqual(wr.Events[0].Kv, wkv) { - t.Errorf("expected namespaced event %+v, got %+v", wkv, wr.Events[0].Kv) - } - - wch := c.Watch(context.TODO(), "foo/abc", clientv3.WithRev(1)) - wkv = &mvccpb.KeyValue{Key: []byte("foo/abc"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1} - if wr := <-wch; len(wr.Events) != 1 || !reflect.DeepEqual(wr.Events[0].Kv, wkv) { - 
t.Errorf("expected unnamespaced event %+v, got %+v", wkv, wr) - } - - // let client close teardown namespace watch - c.Watcher = nsWatcher -} diff --git a/tests/integration/clientv3/naming/endpoints_test.go b/tests/integration/clientv3/naming/endpoints_test.go deleted file mode 100644 index 2225a804beb..00000000000 --- a/tests/integration/clientv3/naming/endpoints_test.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package naming_test - -import ( - "context" - "reflect" - "testing" - - etcd "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/naming/endpoints" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestEndpointManager(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - em, err := endpoints.NewManager(clus.RandClient(), "foo") - if err != nil { - t.Fatal("failed to create EndpointManager", err) - } - ctx, watchCancel := context.WithCancel(context.Background()) - defer watchCancel() - w, err := em.NewWatchChannel(ctx) - if err != nil { - t.Fatal("failed to establish watch", err) - } - - e1 := endpoints.Endpoint{Addr: "127.0.0.1", Metadata: "metadata"} - err = em.AddEndpoint(context.TODO(), "foo/a1", e1) - if err != nil { - t.Fatal("failed to add foo", err) - } - - us := <-w - - if us == nil { - t.Fatal("failed to get update") - } - - wu := &endpoints.Update{ - Op: endpoints.Add, - Key: "foo/a1", - Endpoint: e1, - } - - if !reflect.DeepEqual(us[0], wu) { - t.Fatalf("up = %#v, want %#v", us[0], wu) - } - - err = em.DeleteEndpoint(context.TODO(), "foo/a1") - if err != nil { - t.Fatalf("failed to udpate %v", err) - } - - us = <-w - if us == nil { - t.Fatal("failed to get udpate") - } - - wu = &endpoints.Update{ - Op: endpoints.Delete, - Key: "foo/a1", - } - - if !reflect.DeepEqual(us[0], wu) { - t.Fatalf("up = %#v, want %#v", us[1], wu) - } -} - -// TestEndpointManagerAtomicity ensures the resolver will initialize -// correctly with multiple hosts and correctly receive multiple -// updates in a single revision. 
-func TestEndpointManagerAtomicity(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - c := clus.RandClient() - em, err := endpoints.NewManager(c, "foo") - if err != nil { - t.Fatal("failed to create EndpointManager", err) - } - - err = em.Update(context.TODO(), []*endpoints.UpdateWithOpts{ - endpoints.NewAddUpdateOpts("foo/host", endpoints.Endpoint{Addr: "127.0.0.1:2000"}), - endpoints.NewAddUpdateOpts("foo/host2", endpoints.Endpoint{Addr: "127.0.0.1:2001"})}) - if err != nil { - t.Fatal(err) - } - - ctx, watchCancel := context.WithCancel(context.Background()) - defer watchCancel() - w, err := em.NewWatchChannel(ctx) - if err != nil { - t.Fatal(err) - } - - updates := <-w - if len(updates) != 2 { - t.Fatalf("expected two updates, got %+v", updates) - } - - _, err = c.Txn(context.TODO()).Then(etcd.OpDelete("foo/host"), etcd.OpDelete("foo/host2")).Commit() - if err != nil { - t.Fatal(err) - } - - updates = <-w - if len(updates) != 2 || (updates[0].Op != endpoints.Delete && updates[1].Op != endpoints.Delete) { - t.Fatalf("expected two delete updates, got %+v", updates) - } -} - -func TestEndpointManagerCRUD(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - em, err := endpoints.NewManager(clus.RandClient(), "foo") - if err != nil { - t.Fatal("failed to create EndpointManager", err) - } - - // Add - k1 := "foo/a1" - e1 := endpoints.Endpoint{Addr: "127.0.0.1", Metadata: "metadata1"} - err = em.AddEndpoint(context.TODO(), k1, e1) - if err != nil { - t.Fatal("failed to add", k1, err) - } - - k2 := "foo/a2" - e2 := endpoints.Endpoint{Addr: "127.0.0.2", Metadata: "metadata2"} - err = em.AddEndpoint(context.TODO(), k2, e2) - if err != nil { - t.Fatal("failed to add", k2, err) - } - - eps, err := em.List(context.TODO()) - if err != nil { - t.Fatal("failed to list foo") - } - if 
len(eps) != 2 { - t.Fatalf("unexpected the number of endpoints: %d", len(eps)) - } - if !reflect.DeepEqual(eps[k1], e1) { - t.Fatalf("unexpected endpoints: %s", k1) - } - if !reflect.DeepEqual(eps[k2], e2) { - t.Fatalf("unexpected endpoints: %s", k2) - } - - // Delete - err = em.DeleteEndpoint(context.TODO(), k1) - if err != nil { - t.Fatal("failed to delete", k2, err) - } - - eps, err = em.List(context.TODO()) - if err != nil { - t.Fatal("failed to list foo") - } - if len(eps) != 1 { - t.Fatalf("unexpected the number of endpoints: %d", len(eps)) - } - if !reflect.DeepEqual(eps[k2], e2) { - t.Fatalf("unexpected endpoints: %s", k2) - } - - // Update - k3 := "foo/a3" - e3 := endpoints.Endpoint{Addr: "127.0.0.3", Metadata: "metadata3"} - updates := []*endpoints.UpdateWithOpts{ - {Update: endpoints.Update{Op: endpoints.Add, Key: k3, Endpoint: e3}}, - {Update: endpoints.Update{Op: endpoints.Delete, Key: k2}}, - } - err = em.Update(context.TODO(), updates) - if err != nil { - t.Fatal("failed to update", err) - } - - eps, err = em.List(context.TODO()) - if err != nil { - t.Fatal("failed to list foo") - } - if len(eps) != 1 { - t.Fatalf("unexpected the number of endpoints: %d", len(eps)) - } - if !reflect.DeepEqual(eps[k3], e3) { - t.Fatalf("unexpected endpoints: %s", k3) - } -} diff --git a/tests/integration/clientv3/naming/main_test.go b/tests/integration/clientv3/naming/main_test.go deleted file mode 100644 index 92a58a54c7e..00000000000 --- a/tests/integration/clientv3/naming/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package naming_test - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/tests/integration/clientv3/naming/resolver_test.go b/tests/integration/clientv3/naming/resolver_test.go deleted file mode 100644 index 14c3ad72374..00000000000 --- a/tests/integration/clientv3/naming/resolver_test.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package naming_test - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.etcd.io/etcd/client/v3/naming/endpoints" - "go.etcd.io/etcd/client/v3/naming/resolver" - "go.etcd.io/etcd/pkg/v3/grpc_testing" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - "google.golang.org/grpc" - testpb "google.golang.org/grpc/test/grpc_testing" -) - -// This test mimics scenario described in grpc_naming.md doc. 
- -func TestEtcdGrpcResolver(t *testing.T) { - integration2.BeforeTest(t) - - s1PayloadBody := []byte{'1'} - s1 := grpc_testing.NewDummyStubServer(s1PayloadBody) - if err := s1.Start(nil); err != nil { - t.Fatal("failed to start dummy grpc server (s1)", err) - } - defer s1.Stop() - - s2PayloadBody := []byte{'2'} - s2 := grpc_testing.NewDummyStubServer(s2PayloadBody) - if err := s2.Start(nil); err != nil { - t.Fatal("failed to start dummy grpc server (s2)", err) - } - defer s2.Stop() - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - em, err := endpoints.NewManager(clus.Client(0), "foo") - if err != nil { - t.Fatal("failed to create EndpointManager", err) - } - - e1 := endpoints.Endpoint{Addr: s1.Addr()} - e2 := endpoints.Endpoint{Addr: s2.Addr()} - - err = em.AddEndpoint(context.TODO(), "foo/e1", e1) - if err != nil { - t.Fatal("failed to add foo", err) - } - - b, err := resolver.NewBuilder(clus.Client(1)) - if err != nil { - t.Fatal("failed to new resolver builder", err) - } - - conn, err := grpc.Dial("etcd:///foo", grpc.WithInsecure(), grpc.WithResolvers(b)) - if err != nil { - t.Fatal("failed to connect to foo", err) - } - defer conn.Close() - - c := testpb.NewTestServiceClient(conn) - resp, err := c.UnaryCall(context.TODO(), &testpb.SimpleRequest{}, grpc.WaitForReady(true)) - if err != nil { - t.Fatal("failed to invoke rpc to foo (e1)", err) - } - if resp.GetPayload() == nil || !bytes.Equal(resp.GetPayload().GetBody(), s1PayloadBody) { - t.Fatalf("unexpected response from foo (e1): %s", resp.GetPayload().GetBody()) - } - - em.DeleteEndpoint(context.TODO(), "foo/e1") - em.AddEndpoint(context.TODO(), "foo/e2", e2) - - // We use a loop with deadline of 30s to avoid test getting flake - // as it's asynchronous for gRPC Client to update underlying connections. 
- maxRetries := 300 - retryPeriod := 100 * time.Millisecond - retries := 0 - for { - time.Sleep(retryPeriod) - retries++ - - resp, err = c.UnaryCall(context.TODO(), &testpb.SimpleRequest{}) - if err != nil { - if retries < maxRetries { - continue - } - t.Fatal("failed to invoke rpc to foo (e2)", err) - } - if resp.GetPayload() == nil || !bytes.Equal(resp.GetPayload().GetBody(), s2PayloadBody) { - if retries < maxRetries { - continue - } - t.Fatalf("unexpected response from foo (e2): %s", resp.GetPayload().GetBody()) - } - break - } -} - -func TestEtcdEndpointManager(t *testing.T) { - integration2.BeforeTest(t) - - s1PayloadBody := []byte{'1'} - s1 := grpc_testing.NewDummyStubServer(s1PayloadBody) - err := s1.Start(nil) - assert.NoError(t, err) - defer s1.Stop() - - s2PayloadBody := []byte{'2'} - s2 := grpc_testing.NewDummyStubServer(s2PayloadBody) - err = s2.Start(nil) - assert.NoError(t, err) - defer s2.Stop() - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // Check if any endpoint with the same prefix "foo" will not break the logic with multiple endpoints - em, err := endpoints.NewManager(clus.Client(0), "foo") - assert.NoError(t, err) - emOther, err := endpoints.NewManager(clus.Client(1), "foo_other") - assert.NoError(t, err) - - e1 := endpoints.Endpoint{Addr: s1.Addr()} - e2 := endpoints.Endpoint{Addr: s2.Addr()} - - em.AddEndpoint(context.Background(), "foo/e1", e1) - emOther.AddEndpoint(context.Background(), "foo_other/e2", e2) - - epts, err := em.List(context.Background()) - assert.NoError(t, err) - eptsOther, err := emOther.List(context.Background()) - assert.NoError(t, err) - assert.Equal(t, len(epts), 1) - assert.Equal(t, len(eptsOther), 1) -} diff --git a/tests/integration/clientv3/ordering_kv_test.go b/tests/integration/clientv3/ordering_kv_test.go deleted file mode 100644 index 11cb9080a07..00000000000 --- a/tests/integration/clientv3/ordering_kv_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// 
Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/ordering" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestDetectKvOrderViolation(t *testing.T) { - var errOrderViolation = errors.New("DetectedOrderViolation") - - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - cfg := clientv3.Config{ - Endpoints: []string{ - clus.Members[0].GRPCURL(), - clus.Members[1].GRPCURL(), - clus.Members[2].GRPCURL(), - }, - } - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - defer func() { assert.NoError(t, cli.Close()) }() - ctx := context.TODO() - - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } - // ensure that the second member has the current revision for the key foo - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } - - // stop third member in order to force the member to have an outdated revision - clus.Members[2].Stop(t) - time.Sleep(1 * time.Second) // give enough time for operation - _, err = cli.Put(ctx, "foo", "buzz") - if err != nil { - t.Fatal(err) - } - - // perform get request against the first member, in order to - // set 
up kvOrdering to expect "foo" revisions greater than that of - // the third member. - orderingKv := ordering.NewKV(cli.KV, - func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error { - return errOrderViolation - }) - v, err := orderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } - t.Logf("Read from the first member: v:%v err:%v", v, err) - assert.Equal(t, []byte("buzz"), v.Kvs[0].Value) - - // ensure that only the third member is queried during requests - clus.Members[0].Stop(t) - clus.Members[1].Stop(t) - assert.NoError(t, clus.Members[2].Restart(t)) - // force OrderingKv to query the third member - cli.SetEndpoints(clus.Members[2].GRPCURL()) - time.Sleep(2 * time.Second) // FIXME: Figure out how pause SetEndpoints sufficiently that this is not needed - - t.Logf("Quering m2 after restart") - v, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable()) - t.Logf("Quering m2 returned: v:%v erro:%v ", v, err) - if err != errOrderViolation { - t.Fatalf("expected %v, got err:%v v:%v", errOrderViolation, err, v) - } -} - -func TestDetectTxnOrderViolation(t *testing.T) { - var errOrderViolation = errors.New("DetectedOrderViolation") - - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - cfg := clientv3.Config{ - Endpoints: []string{ - clus.Members[0].GRPCURL(), - clus.Members[1].GRPCURL(), - clus.Members[2].GRPCURL(), - }, - } - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - defer func() { assert.NoError(t, cli.Close()) }() - ctx := context.TODO() - - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } - // ensure that the second member has the current revision for the key foo - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } - - // stop third member in order to force the member to have an outdated revision - clus.Members[2].Stop(t) - time.Sleep(1 * 
time.Second) // give enough time for operation - if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil { - t.Fatal(err) - } - - // perform get request against the first member, in order to - // set up kvOrdering to expect "foo" revisions greater than that of - // the third member. - orderingKv := ordering.NewKV(cli.KV, - func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error { - return errOrderViolation - }) - orderingTxn := orderingKv.Txn(ctx) - _, err = orderingTxn.If( - clientv3.Compare(clientv3.Value("b"), ">", "a"), - ).Then( - clientv3.OpGet("foo"), - ).Commit() - if err != nil { - t.Fatal(err) - } - - // ensure that only the third member is queried during requests - clus.Members[0].Stop(t) - clus.Members[1].Stop(t) - assert.NoError(t, clus.Members[2].Restart(t)) - // force OrderingKv to query the third member - cli.SetEndpoints(clus.Members[2].GRPCURL()) - time.Sleep(2 * time.Second) // FIXME: Figure out how pause SetEndpoints sufficiently that this is not needed - _, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable()) - if err != errOrderViolation { - t.Fatalf("expected %v, got %v", errOrderViolation, err) - } - orderingTxn = orderingKv.Txn(ctx) - _, err = orderingTxn.If( - clientv3.Compare(clientv3.Value("b"), ">", "a"), - ).Then( - clientv3.OpGet("foo", clientv3.WithSerializable()), - ).Commit() - if err != errOrderViolation { - t.Fatalf("expected %v, got %v", errOrderViolation, err) - } -} diff --git a/tests/integration/clientv3/ordering_util_test.go b/tests/integration/clientv3/ordering_util_test.go deleted file mode 100644 index 56e34762089..00000000000 --- a/tests/integration/clientv3/ordering_util_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3test - -import ( - "context" - "testing" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/ordering" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestEndpointSwitchResolvesViolation(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - eps := []string{ - clus.Members[0].GRPCURL(), - clus.Members[1].GRPCURL(), - clus.Members[2].GRPCURL(), - } - cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL()}} - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - ctx := context.TODO() - - if _, err = clus.Client(0).Put(ctx, "foo", "bar"); err != nil { - t.Fatal(err) - } - // ensure that the second member has current revision for key "foo" - if _, err = clus.Client(1).Get(ctx, "foo"); err != nil { - t.Fatal(err) - } - - // create partition between third members and the first two members - // in order to guarantee that the third member's revision of "foo" - // falls behind as updates to "foo" are issued to the first two members. - clus.Members[2].InjectPartition(t, clus.Members[:2]...) - time.Sleep(1 * time.Second) // give enough time for the operation - - // update to "foo" will not be replicated to the third member due to the partition - if _, err = clus.Client(1).Put(ctx, "foo", "buzz"); err != nil { - t.Fatal(err) - } - - cli.SetEndpoints(eps...) 
- time.Sleep(1 * time.Second) // give enough time for the operation - orderingKv := ordering.NewKV(cli.KV, ordering.NewOrderViolationSwitchEndpointClosure(cli)) - // set prevRev to the second member's revision of "foo" such that - // the revision is higher than the third member's revision of "foo" - _, err = orderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } - - t.Logf("Reconfigure client to speak only to the 'partitioned' member") - cli.SetEndpoints(clus.Members[2].GRPCURL()) - time.Sleep(1 * time.Second) // give enough time for the operation - _, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable()) - if err != ordering.ErrNoGreaterRev { - t.Fatal("While speaking to partitioned leader, we should get ErrNoGreaterRev error") - } -} - -func TestUnresolvableOrderViolation(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 5, UseBridge: true}) - defer clus.Terminate(t) - cfg := clientv3.Config{ - Endpoints: []string{ - clus.Members[0].GRPCURL(), - clus.Members[1].GRPCURL(), - clus.Members[2].GRPCURL(), - clus.Members[3].GRPCURL(), - clus.Members[4].GRPCURL(), - }, - } - cli, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - eps := cli.Endpoints() - ctx := context.TODO() - - cli.SetEndpoints(clus.Members[0].GRPCURL()) - time.Sleep(1 * time.Second) - _, err = cli.Put(ctx, "foo", "bar") - if err != nil { - t.Fatal(err) - } - - // stop fourth member in order to force the member to have an outdated revision - clus.Members[3].Stop(t) - time.Sleep(1 * time.Second) // give enough time for operation - // stop fifth member in order to force the member to have an outdated revision - clus.Members[4].Stop(t) - time.Sleep(1 * time.Second) // give enough time for operation - _, err = cli.Put(ctx, "foo", "buzz") - if err != nil { - t.Fatal(err) - } - - cli.SetEndpoints(eps...) 
- time.Sleep(1 * time.Second) // give enough time for operation - OrderingKv := ordering.NewKV(cli.KV, ordering.NewOrderViolationSwitchEndpointClosure(cli)) - // set prevRev to the first member's revision of "foo" such that - // the revision is higher than the fourth and fifth members' revision of "foo" - _, err = OrderingKv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } - - clus.Members[0].Stop(t) - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - err = clus.Members[3].Restart(t) - if err != nil { - t.Fatal(err) - } - err = clus.Members[4].Restart(t) - if err != nil { - t.Fatal(err) - } - clus.Members[3].WaitStarted(t) - cli.SetEndpoints(clus.Members[3].GRPCURL()) - time.Sleep(1 * time.Second) // give enough time for operation - - _, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable()) - if err != ordering.ErrNoGreaterRev { - t.Fatalf("expected %v, got %v", ordering.ErrNoGreaterRev, err) - } -} diff --git a/tests/integration/clientv3/snapshot/v3_snapshot_test.go b/tests/integration/clientv3/snapshot/v3_snapshot_test.go deleted file mode 100644 index 412efc7e441..00000000000 --- a/tests/integration/clientv3/snapshot/v3_snapshot_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package snapshot_test - -import ( - "context" - "fmt" - "math/rand" - "net/url" - "os" - "path/filepath" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/snapshot" - "go.etcd.io/etcd/server/v3/embed" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - "go.uber.org/zap/zaptest" -) - -// TestSaveSnapshotFilePermissions ensures that the snapshot is saved with -// the correct file permissions. -func TestSaveSnapshotFilePermissions(t *testing.T) { - expectedFileMode := os.FileMode(fileutil.PrivateFileMode) - kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} - _, dbPath := createSnapshotFile(t, newEmbedConfig(t), kvs) - defer os.RemoveAll(dbPath) - - dbInfo, err := os.Stat(dbPath) - if err != nil { - t.Fatalf("failed to get test snapshot file status: %v", err) - } - actualFileMode := dbInfo.Mode() - - if expectedFileMode != actualFileMode { - t.Fatalf("expected test snapshot file mode %s, got %s:", expectedFileMode, actualFileMode) - } -} - -// TestSaveSnapshotVersion ensures that the snapshot returns proper storage version. 
-func TestSaveSnapshotVersion(t *testing.T) { - // Put some keys to ensure that wal snapshot is triggered - var kvs []kv - for i := 0; i < 10; i++ { - kvs = append(kvs, kv{fmt.Sprintf("%d", i), "test"}) - } - cfg := newEmbedConfig(t) - // Force raft snapshot to ensure that storage version is set - cfg.SnapshotCount = 1 - ver, dbPath := createSnapshotFile(t, cfg, kvs) - defer os.RemoveAll(dbPath) - - if ver != "3.6.0" { - t.Fatalf("expected snapshot version %s, got %s:", "3.6.0", ver) - } -} - -type kv struct { - k, v string -} - -func newEmbedConfig(t *testing.T) *embed.Config { - clusterN := 1 - urls := newEmbedURLs(clusterN * 2) - cURLs, pURLs := urls[:clusterN], urls[clusterN:] - cfg := integration2.NewEmbedConfig(t, "default") - cfg.ClusterState = "new" - cfg.LCUrls, cfg.ACUrls = cURLs, cURLs - cfg.LPUrls, cfg.APUrls = pURLs, pURLs - cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String()) - return cfg -} - -// creates a snapshot file and returns the file path. -func createSnapshotFile(t *testing.T, cfg *embed.Config, kvs []kv) (version string, dbPath string) { - testutil.SkipTestIfShortMode(t, - "Snapshot creation tests are depending on embedded etcd server so are integration-level tests.") - - srv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - defer func() { - srv.Close() - }() - select { - case <-srv.Server.ReadyNotify(): - case <-time.After(3 * time.Second): - t.Fatalf("failed to start embed.Etcd for creating snapshots") - } - - ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - for i := range kvs { - ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout) - _, err = cli.Put(ctx, kvs[i].k, kvs[i].v) - cancel() - if err != nil { - t.Fatal(err) - } - } - - dbPath = filepath.Join(t.TempDir(), fmt.Sprintf("snapshot%d.db", time.Now().Nanosecond())) - version, err = 
snapshot.SaveWithVersion(context.Background(), zaptest.NewLogger(t), ccfg, dbPath) - if err != nil { - t.Fatal(err) - } - return version, dbPath -} - -func newEmbedURLs(n int) (urls []url.URL) { - urls = make([]url.URL, n) - for i := 0; i < n; i++ { - rand.Seed(int64(time.Now().Nanosecond())) - u, _ := url.Parse(fmt.Sprintf("unix://localhost:%d", rand.Intn(45000))) - urls[i] = *u - } - return urls -} diff --git a/tests/integration/clientv3/txn_test.go b/tests/integration/clientv3/txn_test.go deleted file mode 100644 index 84a56146d99..00000000000 --- a/tests/integration/clientv3/txn_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3test - -import ( - "context" - "fmt" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/embed" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestTxnError(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kv := clus.RandClient() - ctx := context.TODO() - - _, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar1"), clientv3.OpPut("foo", "bar2")).Commit() - if err != rpctypes.ErrDuplicateKey { - t.Fatalf("expected %v, got %v", rpctypes.ErrDuplicateKey, err) - } - - ops := make([]clientv3.Op, int(embed.DefaultMaxTxnOps+10)) - for i := range ops { - ops[i] = clientv3.OpPut(fmt.Sprintf("foo%d", i), "") - } - _, err = kv.Txn(ctx).Then(ops...).Commit() - if err != rpctypes.ErrTooManyOps { - t.Fatalf("expected %v, got %v", rpctypes.ErrTooManyOps, err) - } -} - -func TestTxnWriteFail(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - kv := clus.Client(0) - - clus.Members[0].Stop(t) - - txnc, getc := make(chan struct{}), make(chan struct{}) - go func() { - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - defer cancel() - resp, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit() - if err == nil { - t.Errorf("expected error, got response %v", resp) - } - close(txnc) - }() - - go func() { - defer close(getc) - select { - case <-time.After(5 * time.Second): - t.Errorf("timed out waiting for txn fail") - case <-txnc: - } - // and ensure the put didn't take - gresp, gerr := clus.Client(1).Get(context.TODO(), "foo") - if gerr != nil { - t.Error(gerr) - } - if len(gresp.Kvs) != 0 { - t.Errorf("expected no keys, got %v", gresp.Kvs) - } - }() - - select { - case <-time.After(5 * 
clus.Members[1].ServerConfig.ReqTimeout()): - t.Fatalf("timed out waiting for get") - case <-getc: - } - - // reconnect so terminate doesn't complain about double-close - clus.Members[0].Restart(t) -} - -func TestTxnReadRetry(t *testing.T) { - t.Skipf("skipping txn read retry test: re-enable after we do retry on txn read request") - - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - kv := clus.Client(0) - - thenOps := [][]clientv3.Op{ - {clientv3.OpGet("foo")}, - {clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpGet("foo")}, nil)}, - {clientv3.OpTxn(nil, nil, nil)}, - {}, - } - for i := range thenOps { - clus.Members[0].Stop(t) - <-clus.Members[0].StopNotify() - - donec := make(chan struct{}, 1) - go func() { - _, err := kv.Txn(context.TODO()).Then(thenOps[i]...).Commit() - if err != nil { - t.Errorf("expected response, got error %v", err) - } - donec <- struct{}{} - }() - // wait for txn to fail on disconnect - time.Sleep(100 * time.Millisecond) - - // restart node; client should resume - clus.Members[0].Restart(t) - select { - case <-donec: - case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()): - t.Fatalf("waited too long") - } - } -} - -func TestTxnSuccess(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kv := clus.Client(0) - ctx := context.TODO() - - _, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit() - if err != nil { - t.Fatal(err) - } - - resp, err := kv.Get(ctx, "foo") - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" { - t.Fatalf("unexpected Get response %v", resp) - } -} - -func TestTxnCompareRange(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kv := clus.Client(0) - fooResp, err 
:= kv.Put(context.TODO(), "foo/", "bar") - if err != nil { - t.Fatal(err) - } - if _, err = kv.Put(context.TODO(), "foo/a", "baz"); err != nil { - t.Fatal(err) - } - tresp, terr := kv.Txn(context.TODO()).If( - clientv3.Compare( - clientv3.CreateRevision("foo/"), "=", fooResp.Header.Revision). - WithPrefix(), - ).Commit() - if terr != nil { - t.Fatal(terr) - } - if tresp.Succeeded { - t.Fatal("expected prefix compare to false, got compares as true") - } -} - -func TestTxnNested(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kv := clus.Client(0) - - tresp, err := kv.Txn(context.TODO()). - If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). - Then( - clientv3.OpPut("foo", "bar"), - clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpPut("abc", "123")}, nil)). - Else(clientv3.OpPut("foo", "baz")).Commit() - if err != nil { - t.Fatal(err) - } - if len(tresp.Responses) != 2 { - t.Errorf("expected 2 top-level txn responses, got %+v", tresp.Responses) - } - - // check txn writes were applied - resp, err := kv.Get(context.TODO(), "foo") - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" { - t.Errorf("unexpected Get response %+v", resp) - } - resp, err = kv.Get(context.TODO(), "abc") - if err != nil { - t.Fatal(err) - } - if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "123" { - t.Errorf("unexpected Get response %+v", resp) - } -} diff --git a/tests/integration/clientv3/user_test.go b/tests/integration/clientv3/user_test.go deleted file mode 100644 index a6698d32eea..00000000000 --- a/tests/integration/clientv3/user_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3test - -import ( - "context" - "testing" - "time" - - "google.golang.org/grpc" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestUserError(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - authapi := clus.RandClient() - - _, err := authapi.UserAdd(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } - - _, err = authapi.UserAdd(context.TODO(), "foo", "bar") - if err != rpctypes.ErrUserAlreadyExist { - t.Fatalf("expected %v, got %v", rpctypes.ErrUserAlreadyExist, err) - } - - _, err = authapi.UserDelete(context.TODO(), "not-exist-user") - if err != rpctypes.ErrUserNotFound { - t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err) - } - - _, err = authapi.UserGrantRole(context.TODO(), "foo", "test-role-does-not-exist") - if err != rpctypes.ErrRoleNotFound { - t.Fatalf("expected %v, got %v", rpctypes.ErrRoleNotFound, err) - } -} - -func TestUserErrorAuth(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - authapi := clus.RandClient() - authSetupRoot(t, authapi.Auth) - - // unauthenticated client - if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserEmpty { - t.Fatalf("expected %v, got %v", rpctypes.ErrUserEmpty, err) - } - - // wrong id or password - cfg := 
clientv3.Config{ - Endpoints: authapi.Endpoints(), - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - cfg.Username, cfg.Password = "wrong-id", "123" - if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { - t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err) - } - cfg.Username, cfg.Password = "root", "wrong-pass" - if _, err := integration2.NewClient(t, cfg); err != rpctypes.ErrAuthFailed { - t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err) - } - - cfg.Username, cfg.Password = "root", "123" - authed, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - defer authed.Close() - - if _, err := authed.UserList(context.TODO()); err != nil { - t.Fatal(err) - } -} - -func authSetupRoot(t *testing.T, auth clientv3.Auth) { - if _, err := auth.UserAdd(context.TODO(), "root", "123"); err != nil { - t.Fatal(err) - } - if _, err := auth.RoleAdd(context.TODO(), "root"); err != nil { - t.Fatal(err) - } - if _, err := auth.UserGrantRole(context.TODO(), "root", "root"); err != nil { - t.Fatal(err) - } - if _, err := auth.AuthEnable(context.TODO()); err != nil { - t.Fatal(err) - } -} - -// TestGetTokenWithoutAuth is when Client can connect to etcd even if they -// supply credentials and the server is in AuthDisable mode. 
-func TestGetTokenWithoutAuth(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 2}) - defer clus.Terminate(t) - - authapi := clus.RandClient() - - var err error - var client *clientv3.Client - - // make sure "auth" was disabled - if _, err = authapi.AuthDisable(context.TODO()); err != nil { - t.Fatal(err) - } - - // "Username" and "Password" must be used - cfg := clientv3.Config{ - Endpoints: authapi.Endpoints(), - DialTimeout: 5 * time.Second, - Username: "root", - Password: "123", - } - - client, err = integration2.NewClient(t, cfg) - if err == nil { - defer client.Close() - } - - switch err { - case nil: - t.Log("passes as expected") - case context.DeadlineExceeded: - t.Errorf("not expected result:%v with endpoint:%s", err, authapi.Endpoints()) - default: - t.Errorf("other errors:%v", err) - } -} diff --git a/tests/integration/clientv3/util.go b/tests/integration/clientv3/util.go deleted file mode 100644 index 67d1fd59cc9..00000000000 --- a/tests/integration/clientv3/util.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3test - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// MustWaitPinReady waits up to 3-second until connection is up (pin endpoint). -// Fatal on time-out. -func MustWaitPinReady(t *testing.T, cli *clientv3.Client) { - // TODO: decrease timeout after balancer rewrite!!! - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - _, err := cli.Get(ctx, "foo") - cancel() - if err != nil { - t.Fatal(err) - } -} - -// IsServerCtxTimeout checks reason of the error. -// e.g. due to clock drifts in server-side, -// client context times out first in server-side -// while original client-side context is not timed out yet -func IsServerCtxTimeout(err error) bool { - if err == nil { - return false - } - ev, ok := status.FromError(err) - if !ok { - return false - } - code := ev.Code() - return (code == codes.DeadlineExceeded /*3.5+"*/ || code == codes.Unknown /*<=3.4*/) && - strings.Contains(err.Error(), "context deadline exceeded") -} - -// IsClientTimeout checks reason of the error. -// In grpc v1.11.3+ dial timeouts can error out with transport.ErrConnClosing. Previously dial timeouts -// would always error out with context.DeadlineExceeded. 
-func IsClientTimeout(err error) bool { - if err == nil { - return false - } - if err == context.DeadlineExceeded { - return true - } - ev, ok := status.FromError(err) - if !ok { - return false - } - code := ev.Code() - return code == codes.DeadlineExceeded -} - -func IsCanceled(err error) bool { - if err == nil { - return false - } - if err == context.Canceled { - return true - } - ev, ok := status.FromError(err) - if !ok { - return false - } - code := ev.Code() - return code == codes.Canceled -} - -func IsUnavailable(err error) bool { - if err == nil { - return false - } - if err == context.Canceled { - return true - } - ev, ok := status.FromError(err) - if !ok { - return false - } - code := ev.Code() - return code == codes.Unavailable -} - -// populateDataIntoCluster populates the key-value pairs into cluster and the -// key will be named by testing.T.Name()-index. -func populateDataIntoCluster(t *testing.T, cluster *integration2.Cluster, numKeys int, valueSize int) { - ctx := context.Background() - - for i := 0; i < numKeys; i++ { - _, err := cluster.RandClient().Put(ctx, - fmt.Sprintf("%s-%v", t.Name(), i), strings.Repeat("a", valueSize)) - - if err != nil { - t.Errorf("populating data expected no error, but got %v", err) - } - } -} diff --git a/tests/integration/clientv3/watch_fragment_test.go b/tests/integration/clientv3/watch_fragment_test.go deleted file mode 100644 index 81450f5f9aa..00000000000 --- a/tests/integration/clientv3/watch_fragment_test.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !cluster_proxy - -package clientv3test - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestWatchFragmentDisable ensures that large watch -// response exceeding server-side request limit can -// arrive even without watch response fragmentation. -func TestWatchFragmentDisable(t *testing.T) { - testWatchFragment(t, false, false) -} - -// TestWatchFragmentDisableWithGRPCLimit verifies -// large watch response exceeding server-side request -// limit and client-side gRPC response receive limit -// cannot arrive without watch events fragmentation, -// because multiple events exceed client-side gRPC -// response receive limit. -func TestWatchFragmentDisableWithGRPCLimit(t *testing.T) { - testWatchFragment(t, false, true) -} - -// TestWatchFragmentEnable ensures that large watch -// response exceeding server-side request limit arrive -// with watch response fragmentation. -func TestWatchFragmentEnable(t *testing.T) { - testWatchFragment(t, true, false) -} - -// TestWatchFragmentEnableWithGRPCLimit verifies -// large watch response exceeding server-side request -// limit and client-side gRPC response receive limit -// can arrive only when watch events are fragmented. 
-func TestWatchFragmentEnableWithGRPCLimit(t *testing.T) { - testWatchFragment(t, true, true) -} - -// testWatchFragment triggers watch response that spans over multiple -// revisions exceeding server request limits when combined. -func testWatchFragment(t *testing.T, fragment, exceedRecvLimit bool) { - integration2.BeforeTest(t) - - cfg := &integration2.ClusterConfig{ - Size: 1, - MaxRequestBytes: 1.5 * 1024 * 1024, - } - if exceedRecvLimit { - cfg.ClientMaxCallRecvMsgSize = 1.5 * 1024 * 1024 - } - clus := integration2.NewCluster(t, cfg) - defer clus.Terminate(t) - - cli := clus.Client(0) - errc := make(chan error) - for i := 0; i < 10; i++ { - go func(i int) { - _, err := cli.Put(context.TODO(), - fmt.Sprint("foo", i), - strings.Repeat("a", 1024*1024), - ) - errc <- err - }(i) - } - for i := 0; i < 10; i++ { - if err := <-errc; err != nil { - t.Fatalf("failed to put: %v", err) - } - } - - opts := []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithRev(1)} - if fragment { - opts = append(opts, clientv3.WithFragment()) - } - wch := cli.Watch(context.TODO(), "foo", opts...) 
- - // expect 10 MiB watch response - select { - case ws := <-wch: - // without fragment, should exceed gRPC client receive limit - if !fragment && exceedRecvLimit { - if len(ws.Events) != 0 { - t.Fatalf("expected 0 events with watch fragmentation, got %d", len(ws.Events)) - } - exp := "code = ResourceExhausted desc = grpc: received message larger than max (" - if !strings.Contains(ws.Err().Error(), exp) { - t.Fatalf("expected 'ResourceExhausted' error, got %v", ws.Err()) - } - return - } - - // still expect merged watch events - if len(ws.Events) != 10 { - t.Fatalf("expected 10 events with watch fragmentation, got %d", len(ws.Events)) - } - if ws.Err() != nil { - t.Fatalf("unexpected error %v", ws.Err()) - } - - case <-time.After(testutil.RequestTimeout): - t.Fatalf("took too long to receive events") - } -} diff --git a/tests/integration/clientv3/watch_test.go b/tests/integration/clientv3/watch_test.go deleted file mode 100644 index 28e0faa41f3..00000000000 --- a/tests/integration/clientv3/watch_test.go +++ /dev/null @@ -1,1217 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3test - -import ( - "context" - "fmt" - "math/rand" - "reflect" - "sort" - "strconv" - "testing" - "time" - - "google.golang.org/grpc/metadata" - - mvccpb "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/api/v3/version" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -type watcherTest func(*testing.T, *watchctx) - -type watchctx struct { - clus *integration2.Cluster - w clientv3.Watcher - kv clientv3.KV - wclientMember int - kvMember int - ch clientv3.WatchChan -} - -func runWatchTest(t *testing.T, f watcherTest) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - wclientMember := rand.Intn(3) - w := clus.Client(wclientMember).Watcher - // select a different client for KV operations so puts succeed if - // a test knocks out the watcher client. - kvMember := rand.Intn(3) - for kvMember == wclientMember { - kvMember = rand.Intn(3) - } - kv := clus.Client(kvMember).KV - - wctx := &watchctx{clus, w, kv, wclientMember, kvMember, nil} - f(t, wctx) -} - -// TestWatchMultiWatcher modifies multiple keys and observes the changes. 
-func TestWatchMultiWatcher(t *testing.T) { - runWatchTest(t, testWatchMultiWatcher) -} - -func testWatchMultiWatcher(t *testing.T, wctx *watchctx) { - numKeyUpdates := 4 - keys := []string{"foo", "bar", "baz"} - - donec := make(chan struct{}) - // wait for watcher shutdown - defer func() { - for i := 0; i < len(keys)+1; i++ { - <-donec - } - }() - readyc := make(chan struct{}) - for _, k := range keys { - // key watcher - go func(key string) { - ch := wctx.w.Watch(context.TODO(), key) - if ch == nil { - t.Errorf("expected watcher channel, got nil") - } - readyc <- struct{}{} - for i := 0; i < numKeyUpdates; i++ { - resp, ok := <-ch - if !ok { - t.Errorf("watcher unexpectedly closed") - } - v := fmt.Sprintf("%s-%d", key, i) - gotv := string(resp.Events[0].Kv.Value) - if gotv != v { - t.Errorf("#%d: got %s, wanted %s", i, gotv, v) - } - } - donec <- struct{}{} - }(k) - } - // prefix watcher on "b" (bar and baz) - go func() { - prefixc := wctx.w.Watch(context.TODO(), "b", clientv3.WithPrefix()) - if prefixc == nil { - t.Errorf("expected watcher channel, got nil") - } - readyc <- struct{}{} - var evs []*clientv3.Event - for i := 0; i < numKeyUpdates*2; i++ { - resp, ok := <-prefixc - if !ok { - t.Errorf("watcher unexpectedly closed") - } - evs = append(evs, resp.Events...) 
- } - - // check response - var expected []string - bkeys := []string{"bar", "baz"} - for _, k := range bkeys { - for i := 0; i < numKeyUpdates; i++ { - expected = append(expected, fmt.Sprintf("%s-%d", k, i)) - } - } - var got []string - for _, ev := range evs { - got = append(got, string(ev.Kv.Value)) - } - sort.Strings(got) - if !reflect.DeepEqual(expected, got) { - t.Errorf("got %v, expected %v", got, expected) - } - - // ensure no extra data - select { - case resp, ok := <-prefixc: - if !ok { - t.Errorf("watcher unexpectedly closed") - } - t.Errorf("unexpected event %+v", resp) - case <-time.After(time.Second): - } - donec <- struct{}{} - }() - - // wait for watcher bring up - for i := 0; i < len(keys)+1; i++ { - <-readyc - } - // generate events - ctx := context.TODO() - for i := 0; i < numKeyUpdates; i++ { - for _, k := range keys { - v := fmt.Sprintf("%s-%d", k, i) - if _, err := wctx.kv.Put(ctx, k, v); err != nil { - t.Fatal(err) - } - } - } -} - -// TestWatchRange tests watcher creates ranges -func TestWatchRange(t *testing.T) { - runWatchTest(t, testWatchRange) -} - -func testWatchRange(t *testing.T, wctx *watchctx) { - if wctx.ch = wctx.w.Watch(context.TODO(), "a", clientv3.WithRange("c")); wctx.ch == nil { - t.Fatalf("expected non-nil channel") - } - putAndWatch(t, wctx, "a", "a") - putAndWatch(t, wctx, "b", "b") - putAndWatch(t, wctx, "bar", "bar") -} - -// TestWatchReconnRequest tests the send failure path when requesting a watcher. 
-func TestWatchReconnRequest(t *testing.T) { - runWatchTest(t, testWatchReconnRequest) -} - -func testWatchReconnRequest(t *testing.T, wctx *watchctx) { - donec, stopc := make(chan struct{}), make(chan struct{}, 1) - go func() { - timer := time.After(2 * time.Second) - defer close(donec) - // take down watcher connection - for { - wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections() - select { - case <-timer: - // spinning on close may live lock reconnection - return - case <-stopc: - return - default: - } - } - }() - // should reconnect when requesting watch - if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil { - t.Fatalf("expected non-nil channel") - } - - // wait for disconnections to stop - stopc <- struct{}{} - <-donec - - // spinning on dropping connections may trigger a leader election - // due to resource starvation; l-read to ensure the cluster is stable - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - if _, err := wctx.kv.Get(ctx, "_"); err != nil { - t.Fatal(err) - } - cancel() - - // ensure watcher works - putAndWatch(t, wctx, "a", "a") -} - -// TestWatchReconnInit tests watcher resumes correctly if connection lost -// before any data was sent. -func TestWatchReconnInit(t *testing.T) { - runWatchTest(t, testWatchReconnInit) -} - -func testWatchReconnInit(t *testing.T, wctx *watchctx) { - if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil { - t.Fatalf("expected non-nil channel") - } - wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections() - // watcher should recover - putAndWatch(t, wctx, "a", "a") -} - -// TestWatchReconnRunning tests watcher resumes correctly if connection lost -// after data was sent. 
-func TestWatchReconnRunning(t *testing.T) { - runWatchTest(t, testWatchReconnRunning) -} - -func testWatchReconnRunning(t *testing.T, wctx *watchctx) { - if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil { - t.Fatalf("expected non-nil channel") - } - putAndWatch(t, wctx, "a", "a") - // take down watcher connection - wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections() - // watcher should recover - putAndWatch(t, wctx, "a", "b") -} - -// TestWatchCancelImmediate ensures a closed channel is returned -// if the context is cancelled. -func TestWatchCancelImmediate(t *testing.T) { - runWatchTest(t, testWatchCancelImmediate) -} - -func testWatchCancelImmediate(t *testing.T, wctx *watchctx) { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - wch := wctx.w.Watch(ctx, "a") - select { - case wresp, ok := <-wch: - if ok { - t.Fatalf("read wch got %v; expected closed channel", wresp) - } - default: - t.Fatalf("closed watcher channel should not block") - } -} - -// TestWatchCancelInit tests watcher closes correctly after no events. -func TestWatchCancelInit(t *testing.T) { - runWatchTest(t, testWatchCancelInit) -} - -func testWatchCancelInit(t *testing.T, wctx *watchctx) { - ctx, cancel := context.WithCancel(context.Background()) - if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil { - t.Fatalf("expected non-nil watcher channel") - } - cancel() - select { - case <-time.After(time.Second): - t.Fatalf("took too long to cancel") - case _, ok := <-wctx.ch: - if ok { - t.Fatalf("expected watcher channel to close") - } - } -} - -// TestWatchCancelRunning tests watcher closes correctly after events. 
-func TestWatchCancelRunning(t *testing.T) { - runWatchTest(t, testWatchCancelRunning) -} - -func testWatchCancelRunning(t *testing.T, wctx *watchctx) { - ctx, cancel := context.WithCancel(context.Background()) - if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil { - t.Fatalf("expected non-nil watcher channel") - } - if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil { - t.Fatal(err) - } - cancel() - select { - case <-time.After(time.Second): - t.Fatalf("took too long to cancel") - case _, ok := <-wctx.ch: - if !ok { - // closed before getting put; OK - break - } - // got the PUT; should close next - select { - case <-time.After(time.Second): - t.Fatalf("took too long to close") - case v, ok2 := <-wctx.ch: - if ok2 { - t.Fatalf("expected watcher channel to close, got %v", v) - } - } - } -} - -func putAndWatch(t *testing.T, wctx *watchctx, key, val string) { - if _, err := wctx.kv.Put(context.TODO(), key, val); err != nil { - t.Fatal(err) - } - select { - case <-time.After(5 * time.Second): - t.Fatalf("watch timed out") - case v, ok := <-wctx.ch: - if !ok { - t.Fatalf("unexpected watch close") - } - if err := v.Err(); err != nil { - t.Fatalf("unexpected watch response error: %v", err) - } - if string(v.Events[0].Kv.Value) != val { - t.Fatalf("bad value got %v, wanted %v", v.Events[0].Kv.Value, val) - } - } -} - -func TestWatchResumeInitRev(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - cli := clus.Client(0) - if _, err := cli.Put(context.TODO(), "b", "2"); err != nil { - t.Fatal(err) - } - if _, err := cli.Put(context.TODO(), "a", "3"); err != nil { - t.Fatal(err) - } - // if resume is broken, it'll pick up this key first instead of a=3 - if _, err := cli.Put(context.TODO(), "a", "4"); err != nil { - t.Fatal(err) - } - - wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify()) - if resp, ok := 
<-wch; !ok || resp.Header.Revision != 4 { - t.Fatalf("got (%v, %v), expected create notification rev=4", resp, ok) - } - // pause wch - clus.Members[0].Bridge().DropConnections() - clus.Members[0].Bridge().PauseConnections() - - select { - case resp, ok := <-wch: - t.Skipf("wch should block, got (%+v, %v); drop not fast enough", resp, ok) - case <-time.After(100 * time.Millisecond): - } - - // resume wch - clus.Members[0].Bridge().UnpauseConnections() - - select { - case resp, ok := <-wch: - if !ok { - t.Fatal("unexpected watch close") - } - if len(resp.Events) == 0 { - t.Fatal("expected event on watch") - } - if string(resp.Events[0].Kv.Value) != "3" { - t.Fatalf("expected value=3, got event %+v", resp.Events[0]) - } - case <-time.After(5 * time.Second): - t.Fatal("watch timed out") - } -} - -// TestWatchResumeCompacted checks that the watcher gracefully closes in case -// that it tries to resume to a revision that's been compacted out of the store. -// Since the watcher's server restarts with stale data, the watcher will receive -// either a compaction error or all keys by staying in sync before the compaction -// is finally applied. 
-func TestWatchResumeCompacted(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - // create a waiting watcher at rev 1 - w := clus.Client(0) - wch := w.Watch(context.Background(), "foo", clientv3.WithRev(1)) - select { - case w := <-wch: - t.Errorf("unexpected message from wch %v", w) - default: - } - clus.Members[0].Stop(t) - - clus.WaitLeader(t) - - // put some data and compact away - numPuts := 5 - kv := clus.Client(1) - for i := 0; i < numPuts; i++ { - if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } - } - if _, err := kv.Compact(context.TODO(), 3); err != nil { - t.Fatal(err) - } - - clus.Members[0].Restart(t) - - // since watch's server isn't guaranteed to be synced with the cluster when - // the watch resumes, there is a window where the watch can stay synced and - // read off all events; if the watcher misses the window, it will go out of - // sync and get a compaction error. 
- wRev := int64(2) - for int(wRev) <= numPuts+1 { - var wresp clientv3.WatchResponse - var ok bool - select { - case wresp, ok = <-wch: - if !ok { - t.Fatalf("expected wresp, but got closed channel") - } - case <-time.After(5 * time.Second): - t.Fatalf("compacted watch timed out") - } - for _, ev := range wresp.Events { - if ev.Kv.ModRevision != wRev { - t.Fatalf("expected modRev %v, got %+v", wRev, ev) - } - wRev++ - } - if wresp.Err() == nil { - continue - } - if wresp.Err() != rpctypes.ErrCompacted { - t.Fatalf("wresp.Err() expected %v, got %+v", rpctypes.ErrCompacted, wresp.Err()) - } - break - } - if int(wRev) > numPuts+1 { - // got data faster than the compaction - return - } - // received compaction error; ensure the channel closes - select { - case wresp, ok := <-wch: - if ok { - t.Fatalf("expected closed channel, but got %v", wresp) - } - case <-time.After(5 * time.Second): - t.Fatalf("timed out waiting for channel close") - } -} - -// TestWatchCompactRevision ensures the CompactRevision error is given on a -// compaction event ahead of a watcher. 
-func TestWatchCompactRevision(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - // set some keys - kv := clus.RandClient() - for i := 0; i < 5; i++ { - if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } - } - - w := clus.RandClient() - - if _, err := kv.Compact(context.TODO(), 4); err != nil { - t.Fatal(err) - } - wch := w.Watch(context.Background(), "foo", clientv3.WithRev(2)) - - // get compacted error message - wresp, ok := <-wch - if !ok { - t.Fatalf("expected wresp, but got closed channel") - } - if wresp.Err() != rpctypes.ErrCompacted { - t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err()) - } - if !wresp.Canceled { - t.Fatalf("wresp.Canceled expected true, got %+v", wresp) - } - - // ensure the channel is closed - if wresp, ok = <-wch; ok { - t.Fatalf("expected closed channel, but got %v", wresp) - } -} - -func TestWatchWithProgressNotify(t *testing.T) { testWatchWithProgressNotify(t, true) } -func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) } - -func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) { - integration2.BeforeTest(t) - - // accelerate report interval so test terminates quickly - oldpi := v3rpc.GetProgressReportInterval() - // using atomics to avoid race warnings - v3rpc.SetProgressReportInterval(3 * time.Second) - pi := 3 * time.Second - defer func() { v3rpc.SetProgressReportInterval(oldpi) }() - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - wc := clus.RandClient() - - opts := []clientv3.OpOption{clientv3.WithProgressNotify()} - if watchOnPut { - opts = append(opts, clientv3.WithPrefix()) - } - rch := wc.Watch(context.Background(), "foo", opts...) 
- - select { - case resp := <-rch: // wait for notification - if len(resp.Events) != 0 { - t.Fatalf("resp.Events expected none, got %+v", resp.Events) - } - case <-time.After(2 * pi): - t.Fatalf("watch response expected in %v, but timed out", pi) - } - - kvc := clus.RandClient() - if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil { - t.Fatal(err) - } - - select { - case resp := <-rch: - if resp.Header.Revision != 2 { - t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision) - } - if watchOnPut { // wait for put if watch on the put key - ev := []*clientv3.Event{{Type: clientv3.EventTypePut, - Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}} - if !reflect.DeepEqual(ev, resp.Events) { - t.Fatalf("expected %+v, got %+v", ev, resp.Events) - } - } else if len(resp.Events) != 0 { // wait for notification otherwise - t.Fatalf("expected no events, but got %+v", resp.Events) - } - case <-time.After(time.Duration(1.5 * float64(pi))): - t.Fatalf("watch response expected in %v, but timed out", pi) - } -} - -func TestConfigurableWatchProgressNotifyInterval(t *testing.T) { - integration2.BeforeTest(t) - - progressInterval := 200 * time.Millisecond - clus := integration2.NewCluster(t, - &integration2.ClusterConfig{ - Size: 3, - WatchProgressNotifyInterval: progressInterval, - }) - defer clus.Terminate(t) - - opts := []clientv3.OpOption{clientv3.WithProgressNotify()} - rch := clus.RandClient().Watch(context.Background(), "foo", opts...) - - timeout := 1 * time.Second // we expect to receive watch progress notify in 2 * progressInterval, - // but for CPU-starved situation it may take longer. So we use 1 second here for timeout. 
- select { - case resp := <-rch: // waiting for a watch progress notify response - if !resp.IsProgressNotify() { - t.Fatalf("expected resp.IsProgressNotify() == true") - } - case <-time.After(timeout): - t.Fatalf("timed out waiting for watch progress notify response in %v", timeout) - } -} - -func TestWatchRequestProgress(t *testing.T) { - if integration2.ThroughProxy { - t.Skipf("grpc-proxy does not support WatchProgress yet") - } - testCases := []struct { - name string - watchers []string - }{ - {"0-watcher", []string{}}, - {"1-watcher", []string{"/"}}, - {"2-watcher", []string{"/", "/"}}, - } - - for _, c := range testCases { - t.Run(c.name, func(t *testing.T) { - integration2.BeforeTest(t) - - watchTimeout := 3 * time.Second - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - wc := clus.RandClient() - - var watchChans []clientv3.WatchChan - - for _, prefix := range c.watchers { - watchChans = append(watchChans, wc.Watch(context.Background(), prefix, clientv3.WithPrefix())) - } - - _, err := wc.Put(context.Background(), "/a", "1") - if err != nil { - t.Fatal(err) - } - - for _, rch := range watchChans { - select { - case resp := <-rch: // wait for notification - if len(resp.Events) != 1 { - t.Fatalf("resp.Events expected 1, got %d", len(resp.Events)) - } - case <-time.After(watchTimeout): - t.Fatalf("watch response expected in %v, but timed out", watchTimeout) - } - } - - // put a value not being watched to increment revision - _, err = wc.Put(context.Background(), "x", "1") - if err != nil { - t.Fatal(err) - } - - err = wc.RequestProgress(context.Background()) - if err != nil { - t.Fatal(err) - } - - // verify all watch channels receive a progress notify - for _, rch := range watchChans { - select { - case resp := <-rch: - if !resp.IsProgressNotify() { - t.Fatalf("expected resp.IsProgressNotify() == true") - } - if resp.Header.Revision != 3 { - t.Fatalf("resp.Header.Revision expected 3, got %d", 
resp.Header.Revision) - } - case <-time.After(watchTimeout): - t.Fatalf("progress response expected in %v, but timed out", watchTimeout) - } - } - }) - } -} - -func TestWatchEventType(t *testing.T) { - integration2.BeforeTest(t) - - cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer cluster.Terminate(t) - - client := cluster.RandClient() - ctx := context.Background() - watchChan := client.Watch(ctx, "/", clientv3.WithPrefix()) - - if _, err := client.Put(ctx, "/toDelete", "foo"); err != nil { - t.Fatalf("Put failed: %v", err) - } - if _, err := client.Put(ctx, "/toDelete", "bar"); err != nil { - t.Fatalf("Put failed: %v", err) - } - if _, err := client.Delete(ctx, "/toDelete"); err != nil { - t.Fatalf("Delete failed: %v", err) - } - lcr, err := client.Lease.Grant(ctx, 1) - if err != nil { - t.Fatalf("lease create failed: %v", err) - } - if _, err := client.Put(ctx, "/toExpire", "foo", clientv3.WithLease(lcr.ID)); err != nil { - t.Fatalf("Put failed: %v", err) - } - - tests := []struct { - et mvccpb.Event_EventType - isCreate bool - isModify bool - }{{ - et: clientv3.EventTypePut, - isCreate: true, - }, { - et: clientv3.EventTypePut, - isModify: true, - }, { - et: clientv3.EventTypeDelete, - }, { - et: clientv3.EventTypePut, - isCreate: true, - }, { - et: clientv3.EventTypeDelete, - }} - - var res []*clientv3.Event - - for { - select { - case wres := <-watchChan: - res = append(res, wres.Events...) 
- case <-time.After(10 * time.Second): - t.Fatalf("Should receive %d events and then break out loop", len(tests)) - } - if len(res) == len(tests) { - break - } - } - - for i, tt := range tests { - ev := res[i] - if tt.et != ev.Type { - t.Errorf("#%d: event type want=%s, get=%s", i, tt.et, ev.Type) - } - if tt.isCreate && !ev.IsCreate() { - t.Errorf("#%d: event should be CreateEvent", i) - } - if tt.isModify && !ev.IsModify() { - t.Errorf("#%d: event should be ModifyEvent", i) - } - } -} - -func TestWatchErrConnClosed(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - - donec := make(chan struct{}) - go func() { - defer close(donec) - ch := cli.Watch(context.TODO(), "foo") - - if wr := <-ch; !IsCanceled(wr.Err()) { - t.Errorf("expected context canceled, got %v", wr.Err()) - } - }() - - if err := cli.ActiveConnection().Close(); err != nil { - t.Fatal(err) - } - clus.TakeClient(0) - - select { - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("wc.Watch took too long") - case <-donec: - } -} - -func TestWatchAfterClose(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - clus.TakeClient(0) - if err := cli.Close(); err != nil { - t.Fatal(err) - } - - donec := make(chan struct{}) - go func() { - cli.Watch(context.TODO(), "foo") - if err := cli.Close(); err != nil && err != context.Canceled { - t.Errorf("expected %v, got %v", context.Canceled, err) - } - close(donec) - }() - select { - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("wc.Watch took too long") - case <-donec: - } -} - -// TestWatchWithRequireLeader checks the watch channel closes when no leader. 
-func TestWatchWithRequireLeader(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // Put a key for the non-require leader watch to read as an event. - // The watchers will be on member[0]; put key through member[0] to - // ensure that it receives the update so watching after killing quorum - // is guaranteed to have the key. - liveClient := clus.Client(0) - if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } - - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - clus.Client(1).Close() - clus.Client(2).Close() - clus.TakeClient(1) - clus.TakeClient(2) - - // wait for election timeout, then member[0] will not have a leader. - tickDuration := 10 * time.Millisecond - // existing streams need three elections before they're torn down; wait until 5 elections cycle - // so proxy tests receive a leader loss event on its existing watch before creating a new watch. 
- time.Sleep(time.Duration(5*clus.Members[0].ElectionTicks) * tickDuration) - - chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1)) - chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1)) - - select { - case resp, ok := <-chLeader: - if !ok { - t.Fatalf("expected %v watch channel, got closed channel", rpctypes.ErrNoLeader) - } - if resp.Err() != rpctypes.ErrNoLeader { - t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp) - } - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("watch without leader took too long to close") - } - - select { - case resp, ok := <-chLeader: - if ok { - t.Fatalf("expected closed channel, got response %v", resp) - } - case <-time.After(integration2.RequestWaitTimeout): - t.Fatal("waited too long for channel to close") - } - - if _, ok := <-chNoLeader; !ok { - t.Fatalf("expected response, got closed channel") - } - - cnt, err := clus.Members[0].Metric( - "etcd_server_client_requests_total", - `type="stream"`, - fmt.Sprintf(`client_api_version="%v"`, version.APIVersion), - ) - if err != nil { - t.Fatal(err) - } - cv, err := strconv.ParseInt(cnt, 10, 32) - if err != nil { - t.Fatal(err) - } - if cv < 2 { // >2 when retried - t.Fatalf("expected at least 2, got %q", cnt) - } -} - -// TestWatchWithFilter checks that watch filtering works. 
-func TestWatchWithFilter(t *testing.T) { - integration2.BeforeTest(t) - - cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer cluster.Terminate(t) - - client := cluster.RandClient() - ctx := context.Background() - - wcNoPut := client.Watch(ctx, "a", clientv3.WithFilterPut()) - wcNoDel := client.Watch(ctx, "a", clientv3.WithFilterDelete()) - - if _, err := client.Put(ctx, "a", "abc"); err != nil { - t.Fatal(err) - } - if _, err := client.Delete(ctx, "a"); err != nil { - t.Fatal(err) - } - - npResp := <-wcNoPut - if len(npResp.Events) != 1 || npResp.Events[0].Type != clientv3.EventTypeDelete { - t.Fatalf("expected delete event, got %+v", npResp.Events) - } - ndResp := <-wcNoDel - if len(ndResp.Events) != 1 || ndResp.Events[0].Type != clientv3.EventTypePut { - t.Fatalf("expected put event, got %+v", ndResp.Events) - } - - select { - case resp := <-wcNoPut: - t.Fatalf("unexpected event on filtered put (%+v)", resp) - case resp := <-wcNoDel: - t.Fatalf("unexpected event on filtered delete (%+v)", resp) - case <-time.After(100 * time.Millisecond): - } -} - -// TestWatchWithCreatedNotification checks that WithCreatedNotify returns a -// Created watch response. -func TestWatchWithCreatedNotification(t *testing.T) { - integration2.BeforeTest(t) - - cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer cluster.Terminate(t) - - client := cluster.RandClient() - - ctx := context.Background() - - createC := client.Watch(ctx, "a", clientv3.WithCreatedNotify()) - - resp := <-createC - - if !resp.Created { - t.Fatalf("expected created event, got %v", resp) - } -} - -// TestWatchWithCreatedNotificationDropConn ensures that -// a watcher with created notify does not post duplicate -// created events from disconnect. 
-func TestWatchWithCreatedNotificationDropConn(t *testing.T) { - integration2.BeforeTest(t) - - cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer cluster.Terminate(t) - - client := cluster.RandClient() - - wch := client.Watch(context.Background(), "a", clientv3.WithCreatedNotify()) - - resp := <-wch - - if !resp.Created { - t.Fatalf("expected created event, got %v", resp) - } - - cluster.Members[0].Bridge().DropConnections() - - // check watch channel doesn't post another watch response. - select { - case wresp := <-wch: - t.Fatalf("got unexpected watch response: %+v\n", wresp) - case <-time.After(time.Second): - // watcher may not reconnect by the time it hits the select, - // so it wouldn't have a chance to filter out the second create event - } -} - -// TestWatchCancelOnServer ensures client watcher cancels propagate back to the server. -func TestWatchCancelOnServer(t *testing.T) { - integration2.BeforeTest(t) - - cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer cluster.Terminate(t) - - client := cluster.RandClient() - numWatches := 10 - - // The grpc proxy starts watches to detect leadership after the proxy server - // returns as started; to avoid racing on the proxy's internal watches, wait - // until require leader watches get create responses to ensure the leadership - // watches have started. 
- for { - ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.TODO())) - ww := client.Watch(ctx, "a", clientv3.WithCreatedNotify()) - wresp := <-ww - cancel() - if wresp.Err() == nil { - break - } - } - - cancels := make([]context.CancelFunc, numWatches) - for i := 0; i < numWatches; i++ { - // force separate streams in client - md := metadata.Pairs("some-key", fmt.Sprintf("%d", i)) - mctx := metadata.NewOutgoingContext(context.Background(), md) - ctx, cancel := context.WithCancel(mctx) - cancels[i] = cancel - w := client.Watch(ctx, fmt.Sprintf("%d", i), clientv3.WithCreatedNotify()) - <-w - } - - // get max watches; proxy tests have leadership watches, so total may be >numWatches - maxWatches, _ := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - - // cancel all and wait for cancels to propagate to etcd server - for i := 0; i < numWatches; i++ { - cancels[i]() - } - time.Sleep(time.Second) - - minWatches, err := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } - - maxWatchV, minWatchV := 0, 0 - n, serr := fmt.Sscanf(maxWatches+" "+minWatches, "%d %d", &maxWatchV, &minWatchV) - if n != 2 || serr != nil { - t.Fatalf("expected n=2 and err=nil, got n=%d and err=%v", n, serr) - } - - if maxWatchV-minWatchV < numWatches { - t.Fatalf("expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV) - } -} - -// TestWatchOverlapContextCancel stresses the watcher stream teardown path by -// creating/canceling watchers to ensure that new watchers are not taken down -// by a torn down watch stream. The sort of race that's being detected: -// 1. create w1 using a cancelable ctx with %v as "ctx" -// 2. cancel ctx -// 3. watcher client begins tearing down watcher grpc stream since no more watchers -// 3. start creating watcher w2 using a new "ctx" (not canceled), attaches to old grpc stream -// 4. watcher client finishes tearing down stream on "ctx" -// 5. 
w2 comes back canceled -func TestWatchOverlapContextCancel(t *testing.T) { - f := func(clus *integration2.Cluster) {} - testWatchOverlapContextCancel(t, f) -} - -func TestWatchOverlapDropConnContextCancel(t *testing.T) { - f := func(clus *integration2.Cluster) { - clus.Members[0].Bridge().DropConnections() - } - testWatchOverlapContextCancel(t, f) -} - -func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.Cluster)) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - n := 100 - ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5) - for i := range ctxs { - // make unique stream - md := metadata.Pairs("some-key", fmt.Sprintf("%d", i)) - ctxs[i] = metadata.NewOutgoingContext(context.Background(), md) - // limits the maximum number of outstanding watchers per stream - ctxc[i] = make(chan struct{}, 2) - } - - // issue concurrent watches on "abc" with cancel - cli := clus.RandClient() - if _, err := cli.Put(context.TODO(), "abc", "def"); err != nil { - t.Fatal(err) - } - ch := make(chan struct{}, n) - tCtx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - for i := 0; i < n; i++ { - go func() { - defer func() { ch <- struct{}{} }() - idx := rand.Intn(len(ctxs)) - ctx, cancel := context.WithCancel(ctxs[idx]) - ctxc[idx] <- struct{}{} - wch := cli.Watch(ctx, "abc", clientv3.WithRev(1)) - select { - case <-tCtx.Done(): - cancel() - return - default: - } - f(clus) - select { - case _, ok := <-wch: - if !ok { - t.Errorf("unexpected closed channel %p", wch) - } - // may take a second or two to reestablish a watcher because of - // grpc back off policies for disconnects - case <-time.After(5 * time.Second): - t.Errorf("timed out waiting for watch on %p", wch) - } - // randomize how cancel overlaps with watch creation - if rand.Intn(2) == 0 { - <-ctxc[idx] - cancel() - } else { - cancel() - <-ctxc[idx] - } - }() - } - // 
join on watches - for i := 0; i < n; i++ { - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Fatalf("timed out waiting for completed watch") - } - } -} - -// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately -// closing the client does not return a client closing error. -func TestWatchCancelAndCloseClient(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - cli := clus.Client(0) - ctx, cancel := context.WithCancel(context.Background()) - wch := cli.Watch(ctx, "abc") - donec := make(chan struct{}) - go func() { - defer close(donec) - select { - case wr, ok := <-wch: - if ok { - t.Errorf("expected closed watch after cancel(), got resp=%+v err=%v", wr, wr.Err()) - } - case <-time.After(5 * time.Second): - t.Error("timed out waiting for closed channel") - } - }() - cancel() - if err := cli.Close(); err != nil { - t.Fatal(err) - } - <-donec - clus.TakeClient(0) -} - -// TestWatchStressResumeClose establishes a bunch of watchers, disconnects -// to put them in resuming mode, cancels them so some resumes by cancel fail, -// then closes the watcher interface to ensure correct clean up. -func TestWatchStressResumeClose(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - cli := clus.Client(0) - - ctx, cancel := context.WithCancel(context.Background()) - // add more watches than can be resumed before the cancel - wchs := make([]clientv3.WatchChan, 2000) - for i := range wchs { - wchs[i] = cli.Watch(ctx, "abc") - } - clus.Members[0].Bridge().DropConnections() - cancel() - if err := cli.Close(); err != nil { - t.Fatal(err) - } - clus.TakeClient(0) -} - -// TestWatchCancelDisconnected ensures canceling a watcher works when -// its grpc stream is disconnected / reconnecting. 
-func TestWatchCancelDisconnected(t *testing.T) { - integration2.BeforeTest(t) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - cli := clus.Client(0) - ctx, cancel := context.WithCancel(context.Background()) - // add more watches than can be resumed before the cancel - wch := cli.Watch(ctx, "abc") - clus.Members[0].Stop(t) - cancel() - select { - case <-wch: - case <-time.After(time.Second): - t.Fatal("took too long to cancel disconnected watcher") - } -} - -// TestWatchClose ensures that close does not return error -func TestWatchClose(t *testing.T) { - runWatchTest(t, testWatchClose) -} - -func testWatchClose(t *testing.T, wctx *watchctx) { - ctx, cancel := context.WithCancel(context.Background()) - wch := wctx.w.Watch(ctx, "a") - cancel() - if wch == nil { - t.Fatalf("expected watcher channel, got nil") - } - if wctx.w.Close() != nil { - t.Fatalf("watch did not close successfully") - } - wresp, ok := <-wch - if ok { - t.Fatalf("read wch got %v; expected closed channel", wresp) - } -} diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go deleted file mode 100644 index a05b662b8a1..00000000000 --- a/tests/integration/cluster_test.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "fmt" - "log" - "math/rand" - "os" - "strconv" - "strings" - "testing" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func init() { - // open microsecond-level time log for integration test debugging - log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile) - if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" { - if i, err := strconv.ParseInt(t, 10, 64); err == nil { - integration.ElectionTicks = int(i) - } - } -} - -func TestClusterOf1(t *testing.T) { testCluster(t, 1) } -func TestClusterOf3(t *testing.T) { testCluster(t, 3) } - -func testCluster(t *testing.T, size int) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: size}) - defer c.Terminate(t) - clusterMustProgress(t, c.Members) -} - -func TestTLSClusterOf3(t *testing.T) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfo}) - defer c.Terminate(t) - clusterMustProgress(t, c.Members) -} - -// TestTLSClusterOf3WithSpecificUsage tests that a cluster can progress when -// using separate client and server certs when peering. This supports -// certificate authorities that don't issue dual-usage certificates. 
-func TestTLSClusterOf3WithSpecificUsage(t *testing.T) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfoWithSpecificUsage}) - defer c.Terminate(t) - clusterMustProgress(t, c.Members) -} - -func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) } -func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) } - -func testDoubleClusterSize(t *testing.T, size int) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: size, DisableStrictReconfigCheck: true}) - defer c.Terminate(t) - - for i := 0; i < size; i++ { - c.AddMember(t) - } - clusterMustProgress(t, c.Members) -} - -func TestDoubleTLSClusterSizeOf3(t *testing.T) { - integration.BeforeTest(t) - cfg := &integration.ClusterConfig{ - Size: 1, - PeerTLS: &integration.TestTLSInfo, - DisableStrictReconfigCheck: true, - } - c := integration.NewCluster(t, cfg) - defer c.Terminate(t) - - for i := 0; i < 3; i++ { - c.AddMember(t) - } - clusterMustProgress(t, c.Members) -} - -func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) } -func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) } - -func testDecreaseClusterSize(t *testing.T, size int) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: size, DisableStrictReconfigCheck: true}) - defer c.Terminate(t) - - // TODO: remove the last but one member - for i := 0; i < size-1; i++ { - id := c.Members[len(c.Members)-1].Server.MemberId() - // may hit second leader election on slow machines - if err := c.RemoveMember(t, c.Members[0].Client, uint64(id)); err != nil { - if strings.Contains(err.Error(), "no leader") { - t.Logf("got leader error (%v)", err) - i-- - continue - } - t.Fatal(err) - } - c.WaitMembersForLeader(t, c.Members) - } - clusterMustProgress(t, c.Members) -} - -func TestForceNewCluster(t *testing.T) { - 
integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer c.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - resp, err := c.Members[0].Client.Put(ctx, "/foo", "bar") - if err != nil { - t.Fatalf("unexpected create error: %v", err) - } - cancel() - // ensure create has been applied in this machine - ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout) - watch := c.Members[0].Client.Watcher.Watch(ctx, "/foo", clientv3.WithRev(resp.Header.Revision-1)) - for resp := range watch { - if len(resp.Events) != 0 { - break - } - if resp.Err() != nil { - t.Fatalf("unexpected watch error: %q", resp.Err()) - } - if resp.Canceled { - t.Fatalf("watch cancelled") - } - } - cancel() - - c.Members[0].Stop(t) - c.Members[1].Terminate(t) - c.Members[2].Terminate(t) - c.Members[0].ForceNewCluster = true - err = c.Members[0].Restart(t) - if err != nil { - t.Fatalf("unexpected ForceRestart error: %v", err) - } - c.WaitMembersForLeader(t, c.Members[:1]) - - // use new http client to init new connection - // ensure force restart keep the old data, and new Cluster can make progress - ctx, cancel = context.WithTimeout(context.Background(), integration.RequestTimeout) - watch = c.Members[0].Client.Watcher.Watch(ctx, "/foo", clientv3.WithRev(resp.Header.Revision-1)) - for resp := range watch { - if len(resp.Events) != 0 { - break - } - if resp.Err() != nil { - t.Fatalf("unexpected watch error: %q", resp.Err()) - } - if resp.Canceled { - t.Fatalf("watch cancelled") - } - } - cancel() - clusterMustProgress(t, c.Members[:1]) -} - -func TestAddMemberAfterClusterFullRotation(t *testing.T) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer c.Terminate(t) - - // remove all the previous three members and add in three new members. 
- for i := 0; i < 3; i++ { - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[1].Server.MemberId())); err != nil { - t.Fatal(err) - } - c.WaitMembersForLeader(t, c.Members) - - c.AddMember(t) - c.WaitMembersForLeader(t, c.Members) - } - - c.AddMember(t) - c.WaitMembersForLeader(t, c.Members) - - clusterMustProgress(t, c.Members) -} - -// TestIssue2681 ensures we can remove a member then add a new one back immediately. -func TestIssue2681(t *testing.T) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, DisableStrictReconfigCheck: true}) - defer c.Terminate(t) - - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberId())); err != nil { - t.Fatal(err) - } - c.WaitMembersForLeader(t, c.Members) - - c.AddMember(t) - c.WaitMembersForLeader(t, c.Members) - clusterMustProgress(t, c.Members) -} - -// TestIssue2746 ensures we can remove a member after a snapshot then add a new one back. -func TestIssue2746(t *testing.T) { testIssue2746(t, 5) } - -// TestIssue2746WithThree tests with 3 nodes TestIssue2476 sometimes had a shutdown with an inflight snapshot. -func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) } - -func testIssue2746(t *testing.T, members int) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: members, SnapshotCount: 10, DisableStrictReconfigCheck: true}) - defer c.Terminate(t) - - // force a snapshot - for i := 0; i < 20; i++ { - clusterMustProgress(t, c.Members) - } - - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberId())); err != nil { - t.Fatal(err) - } - c.WaitMembersForLeader(t, c.Members) - - c.AddMember(t) - c.WaitMembersForLeader(t, c.Members) - clusterMustProgress(t, c.Members) -} - -// TestIssue2904 ensures etcd will not panic when removing a just started member. 
-func TestIssue2904(t *testing.T) { - integration.BeforeTest(t) - // start 1-member Cluster to ensure member 0 is the leader of the Cluster. - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 2, UseBridge: true, DisableStrictReconfigCheck: true}) - defer c.Terminate(t) - c.WaitLeader(t) - - c.AddMember(t) - c.Members[2].Stop(t) - - // send remove member-1 request to the Cluster. - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - // the proposal is not committed because member 1 is stopped, but the - // proposal is appended to leader'Server raft log. - c.Members[0].Client.MemberRemove(ctx, uint64(c.Members[2].Server.MemberId())) - cancel() - - // restart member, and expect it to send UpdateAttributes request. - // the log in the leader is like this: - // [..., remove 1, ..., update attr 1, ...] - c.Members[2].Restart(t) - // when the member comes back, it ack the proposal to remove itself, - // and apply it. - <-c.Members[2].Server.StopNotify() - - // terminate removed member - c.Members[2].Client.Close() - c.Members[2].Terminate(t) - c.Members = c.Members[:2] - // wait member to be removed. - c.WaitMembersMatch(t, c.ProtoMembers()) -} - -// TestIssue3699 tests minority failure during cluster configuration; it was -// deadlocking. -func TestIssue3699(t *testing.T) { - // start a Cluster of 3 nodes a, b, c - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true, DisableStrictReconfigCheck: true}) - defer c.Terminate(t) - - // make node a unavailable - c.Members[0].Stop(t) - - // add node d - c.AddMember(t) - - t.Logf("Disturbing cluster till member:3 will become a leader") - - // electing node d as leader makes node a unable to participate - leaderID := c.WaitMembersForLeader(t, c.Members) - for leaderID != 3 { - c.Members[leaderID].Stop(t) - <-c.Members[leaderID].Server.StopNotify() - // do not restart the killed member immediately. 
- // the member will advance its election timeout after restart, - // so it will have a better chance to become the leader again. - time.Sleep(time.Duration(integration.ElectionTicks * int(config.TickDuration))) - c.Members[leaderID].Restart(t) - leaderID = c.WaitMembersForLeader(t, c.Members) - } - - t.Logf("Finally elected member 3 as the leader.") - - t.Logf("Restarting member '0'...") - // bring back node a - // node a will remain useless as long as d is the leader. - if err := c.Members[0].Restart(t); err != nil { - t.Fatal(err) - } - t.Logf("Restarted member '0'.") - - select { - // waiting for ReadyNotify can take several seconds - case <-time.After(10 * time.Second): - t.Fatalf("waited too long for ready notification") - case <-c.Members[0].Server.StopNotify(): - t.Fatalf("should not be stopped") - case <-c.Members[0].Server.ReadyNotify(): - } - // must WaitMembersForLeader so goroutines don't leak on terminate - c.WaitLeader(t) - - t.Logf("Expecting successful put...") - // try to participate in Cluster - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - if _, err := c.Members[0].Client.Put(ctx, "/foo", "bar"); err != nil { - t.Fatalf("unexpected error on Put (%v)", err) - } - cancel() -} - -// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members. 
-func TestRejectUnhealthyAdd(t *testing.T) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer c.Terminate(t) - - // make Cluster unhealthy and wait for downed peer - c.Members[0].Stop(t) - c.WaitLeader(t) - - // all attempts to add member should fail - for i := 1; i < len(c.Members); i++ { - err := c.AddMemberByURL(t, c.Members[i].Client, "unix://foo:12345") - if err == nil { - t.Fatalf("should have failed adding peer") - } - // TODO: client should return descriptive error codes for internal errors - if !strings.Contains(err.Error(), "unhealthy cluster") { - t.Errorf("unexpected error (%v)", err) - } - } - - // make cluster healthy - c.Members[0].Restart(t) - c.WaitLeader(t) - time.Sleep(2 * etcdserver.HealthInterval) - - // add member should succeed now that it'Server healthy - var err error - for i := 1; i < len(c.Members); i++ { - if err = c.AddMemberByURL(t, c.Members[i].Client, "unix://foo:12345"); err == nil { - break - } - } - if err != nil { - t.Fatalf("should have added peer to healthy Cluster (%v)", err) - } -} - -// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members -// if quorum will be lost. 
-func TestRejectUnhealthyRemove(t *testing.T) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, UseBridge: true}) - defer c.Terminate(t) - - // make cluster unhealthy and wait for downed peer; (3 up, 2 down) - c.Members[0].Stop(t) - c.Members[1].Stop(t) - leader := c.WaitLeader(t) - - // reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum - err := c.RemoveMember(t, c.Members[leader].Client, uint64(c.Members[2].Server.MemberId())) - if err == nil { - t.Fatalf("should reject quorum breaking remove: %s", err) - } - // TODO: client should return more descriptive error codes for internal errors - if !strings.Contains(err.Error(), "unhealthy cluster") { - t.Errorf("unexpected error (%v)", err) - } - - // member stopped after launch; wait for missing heartbeats - time.Sleep(time.Duration(integration.ElectionTicks * int(config.TickDuration))) - - // permit remove dead member since (3,2) - (0,1) => (3,1) has quorum - if err = c.RemoveMember(t, c.Members[2].Client, uint64(c.Members[0].Server.MemberId())); err != nil { - t.Fatalf("should accept removing down member: %s", err) - } - - // bring cluster to (4,1) - c.Members[0].Restart(t) - - // restarted member must be connected for a HealthInterval before remove is accepted - time.Sleep((3 * etcdserver.HealthInterval) / 2) - - // accept remove member since (4,1)-(1,0) => (3,1) has quorum - if err = c.RemoveMember(t, c.Members[1].Client, uint64(c.Members[0].Server.MemberId())); err != nil { - t.Fatalf("expected to remove member, got error %v", err) - } -} - -// TestRestartRemoved ensures that restarting removed member must exit -// if 'initial-cluster-state' is set 'new' and old data directory still exists -// (see https://github.com/etcd-io/etcd/issues/7512 for more). -func TestRestartRemoved(t *testing.T) { - integration.BeforeTest(t) - - // 1. 
start single-member Cluster - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer c.Terminate(t) - - // 2. add a new member - c.Cfg.DisableStrictReconfigCheck = true - c.AddMember(t) - c.WaitLeader(t) - - firstMember := c.Members[0] - firstMember.KeepDataDirTerminate = true - - // 3. remove first member, shut down without deleting data - if err := c.RemoveMember(t, c.Members[1].Client, uint64(firstMember.Server.MemberId())); err != nil { - t.Fatalf("expected to remove member, got error %v", err) - } - c.WaitLeader(t) - - // 4. restart first member with 'initial-cluster-state=new' - // wrong config, expects exit within ReqTimeout - firstMember.ServerConfig.NewCluster = false - if err := firstMember.Restart(t); err != nil { - t.Fatalf("unexpected ForceRestart error: %v", err) - } - defer func() { - firstMember.Close() - os.RemoveAll(firstMember.ServerConfig.DataDir) - }() - select { - case <-firstMember.Server.StopNotify(): - case <-time.After(time.Minute): - t.Fatalf("removed member didn't exit within %v", time.Minute) - } -} - -// clusterMustProgress ensures that cluster can make progress. It creates -// a random key first, and check the new key could be got from all client urls -// of the cluster. 
-func clusterMustProgress(t *testing.T, members []*integration.Member) { - key := fmt.Sprintf("foo%d", rand.Int()) - var ( - err error - resp *clientv3.PutResponse - ) - // retry in case of leader loss induced by slow CI - for i := 0; i < 3; i++ { - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - resp, err = members[0].Client.Put(ctx, key, "bar") - cancel() - if err == nil { - break - } - t.Logf("failed to create key on #0 (%v)", err) - } - if err != nil { - t.Fatalf("create on #0 error: %v", err) - } - - for i, m := range members { - mctx, mcancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - watch := m.Client.Watcher.Watch(mctx, key, clientv3.WithRev(resp.Header.Revision-1)) - for resp := range watch { - if len(resp.Events) != 0 { - break - } - if resp.Err() != nil { - t.Fatalf("#%d: watch error: %q", i, resp.Err()) - } - if resp.Canceled { - t.Fatalf("#%d: watch: cancelled", i) - } - } - mcancel() - } -} - -func TestSpeedyTerminate(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - // Stop/Restart so requests will time out on lost leaders - for i := 0; i < 3; i++ { - clus.Members[i].Stop(t) - clus.Members[i].Restart(t) - } - donec := make(chan struct{}) - go func() { - defer close(donec) - clus.Terminate(t) - }() - select { - case <-time.After(10 * time.Second): - t.Fatalf("Cluster took too long to terminate") - case <-donec: - } -} diff --git a/tests/integration/corrupt_test.go b/tests/integration/corrupt_test.go deleted file mode 100644 index c93cd0ff86f..00000000000 --- a/tests/integration/corrupt_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/storage/mvcc/testutil" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestPeriodicCheck(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - cc, err := clus.ClusterClient(t) - require.NoError(t, err) - - ctx := context.Background() - - var totalRevisions int64 = 1210 - var rev int64 - for ; rev < totalRevisions; rev += testutil.CompactionCycle { - testPeriodicCheck(ctx, t, cc, clus, rev, rev+testutil.CompactionCycle) - } - testPeriodicCheck(ctx, t, cc, clus, rev, rev+totalRevisions) - alarmResponse, err := cc.AlarmList(ctx) - assert.NoError(t, err, "error on alarm list") - assert.Equal(t, []*etcdserverpb.AlarmMember(nil), alarmResponse.Alarms) -} - -func testPeriodicCheck(ctx context.Context, t *testing.T, cc *clientv3.Client, clus *integration.Cluster, start, stop int64) { - for i := start; i <= stop; i++ { - if i%67 == 0 { - _, err := cc.Delete(ctx, testutil.PickKey(i+83)) - assert.NoError(t, err, "error on delete") - } else { - _, err := cc.Put(ctx, testutil.PickKey(i), fmt.Sprint(i)) - assert.NoError(t, err, "error on put") - } - } - err := clus.Members[0].Server.CorruptionChecker().PeriodicCheck() - assert.NoError(t, err, "error on periodic check (rev %v)", stop) -} - 
-func TestPeriodicCheckDetectsCorruption(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - cc, err := clus.ClusterClient(t) - require.NoError(t, err) - - ctx := context.Background() - - for i := 0; i < 10; i++ { - _, err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i)) - assert.NoError(t, err, "error on put") - } - - err = clus.Members[0].Server.CorruptionChecker().PeriodicCheck() - assert.NoError(t, err, "error on periodic check") - clus.Members[0].Stop(t) - clus.WaitLeader(t) - - err = testutil.CorruptBBolt(clus.Members[0].BackendPath()) - assert.NoError(t, err) - - err = clus.Members[0].Restart(t) - assert.NoError(t, err) - time.Sleep(50 * time.Millisecond) - leader := clus.WaitLeader(t) - - err = clus.Members[leader].Server.CorruptionChecker().PeriodicCheck() - assert.NoError(t, err, "error on periodic check") - time.Sleep(50 * time.Millisecond) - - alarmResponse, err := cc.AlarmList(ctx) - assert.NoError(t, err, "error on alarm list") - assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: uint64(clus.Members[0].ID())}}, alarmResponse.Alarms) -} - -func TestCompactHashCheck(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - cc, err := clus.ClusterClient(t) - require.NoError(t, err) - - ctx := context.Background() - - var totalRevisions int64 = 1210 - var rev int64 - for ; rev < totalRevisions; rev += testutil.CompactionCycle { - testCompactionHash(ctx, t, cc, clus, rev, rev+testutil.CompactionCycle) - } - testCompactionHash(ctx, t, cc, clus, rev, rev+totalRevisions) -} - -func testCompactionHash(ctx context.Context, t *testing.T, cc *clientv3.Client, clus *integration.Cluster, start, stop int64) { - for i := start; i <= stop; i++ { - if i%67 == 0 { - _, err := cc.Delete(ctx, testutil.PickKey(i+83)) - assert.NoError(t, 
err, "error on delete") - } else { - _, err := cc.Put(ctx, testutil.PickKey(i), fmt.Sprint(i)) - assert.NoError(t, err, "error on put") - } - } - _, err := cc.Compact(ctx, stop) - assert.NoError(t, err, "error on compact (rev %v)", stop) - // Wait for compaction to be compacted - time.Sleep(50 * time.Millisecond) - - clus.Members[0].Server.CorruptionChecker().CompactHashCheck() -} - -func TestCompactHashCheckDetectCorruption(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - cc, err := clus.ClusterClient(t) - require.NoError(t, err) - - ctx := context.Background() - - for i := 0; i < 10; i++ { - _, err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i)) - assert.NoError(t, err, "error on put") - } - - clus.Members[0].Server.CorruptionChecker().CompactHashCheck() - clus.Members[0].Stop(t) - clus.WaitLeader(t) - - err = testutil.CorruptBBolt(clus.Members[0].BackendPath()) - assert.NoError(t, err) - - err = clus.Members[0].Restart(t) - assert.NoError(t, err) - _, err = cc.Compact(ctx, 5) - assert.NoError(t, err) - time.Sleep(50 * time.Millisecond) - leader := clus.WaitLeader(t) - - clus.Members[leader].Server.CorruptionChecker().CompactHashCheck() - time.Sleep(50 * time.Millisecond) - alarmResponse, err := cc.AlarmList(ctx) - assert.NoError(t, err, "error on alarm list") - assert.Equal(t, []*etcdserverpb.AlarmMember{{Alarm: etcdserverpb.AlarmType_CORRUPT, MemberID: uint64(clus.Members[0].ID())}}, alarmResponse.Alarms) -} - -func TestCompactHashCheckDetectMultipleCorruption(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 5}) - defer clus.Terminate(t) - - cc, err := clus.ClusterClient(t) - require.NoError(t, err) - - ctx := context.Background() - - for i := 0; i < 10; i++ { - _, err := cc.Put(ctx, testutil.PickKey(int64(i)), fmt.Sprint(i)) - assert.NoError(t, err, "error on put") - } - - 
clus.Members[0].Server.CorruptionChecker().CompactHashCheck() - clus.Members[0].Stop(t) - clus.Members[1].Server.CorruptionChecker().CompactHashCheck() - clus.Members[1].Stop(t) - clus.WaitLeader(t) - - err = testutil.CorruptBBolt(clus.Members[0].BackendPath()) - require.NoError(t, err) - err = testutil.CorruptBBolt(clus.Members[1].BackendPath()) - require.NoError(t, err) - - err = clus.Members[0].Restart(t) - require.NoError(t, err) - err = clus.Members[1].Restart(t) - require.NoError(t, err) - - _, err = cc.Compact(ctx, 5) - require.NoError(t, err) - time.Sleep(50 * time.Millisecond) - leader := clus.WaitLeader(t) - - clus.Members[leader].Server.CorruptionChecker().CompactHashCheck() - time.Sleep(50 * time.Millisecond) - alarmResponse, err := cc.AlarmList(ctx) - assert.NoError(t, err, "error on alarm list") - - expectedAlarmMap := map[uint64]etcdserverpb.AlarmType{ - uint64(clus.Members[0].ID()): etcdserverpb.AlarmType_CORRUPT, - uint64(clus.Members[1].ID()): etcdserverpb.AlarmType_CORRUPT, - } - - actualAlarmMap := make(map[uint64]etcdserverpb.AlarmType) - for _, alarm := range alarmResponse.Alarms { - actualAlarmMap[alarm.MemberID] = alarm.Alarm - } - - require.Equal(t, expectedAlarmMap, actualAlarmMap) -} diff --git a/tests/integration/doc.go b/tests/integration/doc.go deleted file mode 100644 index fbf19d54368..00000000000 --- a/tests/integration/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package integration implements tests built upon embedded etcd, and focus on -etcd correctness. - -Features/goals of the integration tests: -1. test the whole code base except command-line parsing. -2. check internal data, including raft, store and etc. -3. based on goroutines, which is faster than process. -4. mainly tests user behavior and user-facing API. -*/ -package integration diff --git a/tests/integration/embed/embed_proxy_test.go b/tests/integration/embed/embed_proxy_test.go deleted file mode 100644 index 50f2a175fbd..00000000000 --- a/tests/integration/embed/embed_proxy_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build cluster_proxy - -// The purpose of this (empty) package is too keep following test working: -// # go test -tags=cluster_proxy ./integration/embed -package embed_test diff --git a/tests/integration/embed/embed_test.go b/tests/integration/embed/embed_test.go deleted file mode 100644 index f018e74ad0a..00000000000 --- a/tests/integration/embed/embed_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !cluster_proxy - -// Keep the test in a separate package from other tests such that -// .setupLogging method does not race with other (previously running) servers (grpclog is global). - -package embed_test - -import ( - "context" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/embed" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - "go.etcd.io/etcd/tests/v3/framework/testutils" -) - -var ( - testTLSInfo = transport.TLSInfo{ - KeyFile: testutils.MustAbsPath("../../fixtures/server.key.insecure"), - CertFile: testutils.MustAbsPath("../../fixtures/server.crt"), - TrustedCAFile: testutils.MustAbsPath("../../fixtures/ca.crt"), - ClientCertAuth: true, - } -) - -func TestEmbedEtcd(t *testing.T) { - testutil.SkipTestIfShortMode(t, "Cannot start embedded cluster in --short tests") - - tests := []struct { - cfg embed.Config - - werr string - wpeers int - wclients int - }{ - {werr: "multiple discovery"}, - {werr: "advertise-client-urls is required"}, - {werr: "should be at least"}, - {werr: "is too long"}, - {wpeers: 1, wclients: 1}, - {wpeers: 2, wclients: 1}, - {wpeers: 1, wclients: 2}, - {werr: "expected IP"}, - {werr: "expected IP"}, - } - - urls := newEmbedURLs(false, 10) - - // setup defaults - for i := range tests { - tests[i].cfg = *embed.NewConfig() - tests[i].cfg.Logger = "zap" - tests[i].cfg.LogOutputs = []string{"/dev/null"} - } - - 
tests[0].cfg.Durl = "abc" - setupEmbedCfg(&tests[1].cfg, []url.URL{urls[0]}, []url.URL{urls[1]}) - tests[1].cfg.ACUrls = nil - tests[2].cfg.TickMs = tests[2].cfg.ElectionMs - 1 - tests[3].cfg.ElectionMs = 999999 - setupEmbedCfg(&tests[4].cfg, []url.URL{urls[2]}, []url.URL{urls[3]}) - setupEmbedCfg(&tests[5].cfg, []url.URL{urls[4]}, []url.URL{urls[5], urls[6]}) - setupEmbedCfg(&tests[6].cfg, []url.URL{urls[7], urls[8]}, []url.URL{urls[9]}) - - dnsURL, _ := url.Parse("http://whatever.test:12345") - tests[7].cfg.LCUrls = []url.URL{*dnsURL} - tests[8].cfg.LPUrls = []url.URL{*dnsURL} - - dir := filepath.Join(t.TempDir(), "embed-etcd") - - for i, tt := range tests { - tests[i].cfg.Dir = dir - e, err := embed.StartEtcd(&tests[i].cfg) - if e != nil { - <-e.Server.ReadyNotify() // wait for e.Server to join the cluster - } - if tt.werr != "" { - if err == nil || !strings.Contains(err.Error(), tt.werr) { - t.Errorf("%d: expected error with %q, got %v", i, tt.werr, err) - } - if e != nil { - e.Close() - } - continue - } - if err != nil { - t.Errorf("%d: expected success, got error %v", i, err) - continue - } - if len(e.Peers) != tt.wpeers { - t.Errorf("%d: expected %d peers, got %d", i, tt.wpeers, len(e.Peers)) - } - if len(e.Clients) != tt.wclients { - t.Errorf("%d: expected %d clients, got %d", i, tt.wclients, len(e.Clients)) - } - e.Close() - select { - case err := <-e.Err(): - if err != nil { - t.Errorf("#%d: unexpected error on close (%v)", i, err) - } - } - } -} - -func TestEmbedEtcdGracefulStopSecure(t *testing.T) { testEmbedEtcdGracefulStop(t, true) } -func TestEmbedEtcdGracefulStopInsecure(t *testing.T) { testEmbedEtcdGracefulStop(t, false) } - -// testEmbedEtcdGracefulStop ensures embedded server stops -// cutting existing transports. 
-func testEmbedEtcdGracefulStop(t *testing.T, secure bool) { - testutil.SkipTestIfShortMode(t, "Cannot start embedded cluster in --short tests") - - cfg := embed.NewConfig() - if secure { - cfg.ClientTLSInfo = testTLSInfo - cfg.PeerTLSInfo = testTLSInfo - } - - urls := newEmbedURLs(secure, 2) - setupEmbedCfg(cfg, []url.URL{urls[0]}, []url.URL{urls[1]}) - - cfg.Dir = filepath.Join(t.TempDir(), "embed-etcd") - - e, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - <-e.Server.ReadyNotify() // wait for e.Server to join the cluster - - clientCfg := clientv3.Config{ - Endpoints: []string{urls[0].String()}, - } - if secure { - clientCfg.TLS, err = testTLSInfo.ClientConfig() - if err != nil { - t.Fatal(err) - } - } - cli, err := integration2.NewClient(t, clientCfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - // open watch connection - cli.Watch(context.Background(), "foo") - - donec := make(chan struct{}) - go func() { - e.Close() - close(donec) - }() - select { - case <-donec: - case <-time.After(2*time.Second + e.Server.Cfg.ReqTimeout()): - t.Fatalf("took too long to close server") - } - err = <-e.Err() - if err != nil { - t.Fatal(err) - } -} - -func newEmbedURLs(secure bool, n int) (urls []url.URL) { - scheme := "unix" - if secure { - scheme = "unixs" - } - for i := 0; i < n; i++ { - u, _ := url.Parse(fmt.Sprintf("%s://localhost:%d%06d", scheme, os.Getpid(), i)) - urls = append(urls, *u) - } - return urls -} - -func setupEmbedCfg(cfg *embed.Config, curls []url.URL, purls []url.URL) { - cfg.Logger = "zap" - cfg.LogOutputs = []string{"/dev/null"} - - cfg.ClusterState = "new" - cfg.LCUrls, cfg.ACUrls = curls, curls - cfg.LPUrls, cfg.APUrls = purls, purls - cfg.InitialCluster = "" - for i := range purls { - cfg.InitialCluster += ",default=" + purls[i].String() - } - cfg.InitialCluster = cfg.InitialCluster[1:] -} diff --git a/tests/integration/fixtures-expired/README b/tests/integration/fixtures-expired/README deleted file mode 100644 index 
3651eb557fa..00000000000 --- a/tests/integration/fixtures-expired/README +++ /dev/null @@ -1,5 +0,0 @@ -To generate bad certs - -1. Manually set system time back to past -2. Run ./gencerts.sh - diff --git a/tests/integration/fixtures-expired/ca-csr.json b/tests/integration/fixtures-expired/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/integration/fixtures-expired/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/integration/fixtures-expired/ca.crt b/tests/integration/fixtures-expired/ca.crt deleted file mode 100644 index b8b3bddb4a8..00000000000 --- a/tests/integration/fixtures-expired/ca.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID0jCCArqgAwIBAgIUEKEIOO1O97Bz4car+7SHDxT5tB4wDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0wMDA0MTMxODU1MDBaFw0xMDA0MTExODU1 -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDFeNJ9r2TFcJp9UHS42QN2NN1A96LQXxn/BirHzXdeTk6YEe0eloA91SJT -BAae7aGdPMkpMyAAXheGPGHAbSde5dONYx2QE4nqWRl79v6kDbX6EmqwpzTOGD/T -UfLXe65g6w6kaXcNZWiMdqfkUImke/WWM1qunsCKoGOXF8Jg1DLy7NSjqT2Kg1UP -evJ5GOrWmIj5rEnEvW0ohR7mKV23xl5okVjrlzCi+arWDdl5RzE0I9x7vKNE0TKX -NNHG9hMSJQ/ipXXXyMcahqGZXtkGvOpwpO3lpsGjo3WIUZMQW2FA3xR0nBC6Lt+0 -d+7IXOy/LbzXpkcL8Ws5BZuLDSKLAgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBT5Z7kwwTNntO1UsuMdUv/Yq3Ad -cDAfBgNVHSMEGDAWgBT5Z7kwwTNntO1UsuMdUv/Yq3AdcDANBgkqhkiG9w0BAQsF -AAOCAQEAkFF/fIVlcgj1uRL36wP4DgkOMCpU5+vwlDdHihDzRJHZqik+3+oNz7DD 
-pRIURHMeeF+Wk5/GRQ/oGzKYotNLLzqCOggnLCxET6Hkb07vfve91HmYVOYix5pU -GPW8+M3XyFTL3+2BnPpqPpJWpJ28g+N3eQjAG8rIbjXESdxrpJFKY22nMbtyS1rH -dyzf3OO4S7LZiRQx0nuD9SZtX2vj5DyN8Am/zieSYm+GCtJsvIiDoB+Uhndnxxt0 -FA0/89vGJ1gCo+Z6clzqBIbesRUBnLvPbUdpxhFAtjUKZhQv05IrE81/GP7F7kEr -oODS2+D5WC6mKDO4v2k736OTw6HwOQ== ------END CERTIFICATE----- diff --git a/tests/integration/fixtures-expired/gencert.json b/tests/integration/fixtures-expired/gencert.json deleted file mode 100644 index 3d7eceac0c7..00000000000 --- a/tests/integration/fixtures-expired/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "1h" - } - } -} diff --git a/tests/integration/fixtures-expired/gencerts.sh b/tests/integration/fixtures-expired/gencerts.sh deleted file mode 100755 index aecdd423bba..00000000000 --- a/tests/integration/fixtures-expired/gencerts.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -if which openssl >/dev/null; then - openssl x509 -in ca.crt -noout -text -fi - -# generate DNS: localhost, IP: 127.0.0.1, CN: example.com certificates -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -# generate IP: 127.0.0.1, CN: example.com certificates -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr-ip.json | cfssljson --bare ./server-ip -mv server-ip.pem server-ip.crt -mv server-ip-key.pem server-ip.key.insecure - -if which openssl >/dev/null; then - openssl x509 -in ./server.crt -text -noout - openssl x509 -in ./server-ip.crt -text -noout -fi - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/integration/fixtures-expired/server-ca-csr-ip.json b/tests/integration/fixtures-expired/server-ca-csr-ip.json deleted file mode 100644 index 2b2c4350ba1..00000000000 --- a/tests/integration/fixtures-expired/server-ca-csr-ip.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example.com", - "hosts": [ - "127.0.0.1" - ] -} diff --git a/tests/integration/fixtures-expired/server-ca-csr.json b/tests/integration/fixtures-expired/server-ca-csr.json deleted file mode 100644 index 272cf841d2d..00000000000 --- a/tests/integration/fixtures-expired/server-ca-csr.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "example.com", - "hosts": [ - "127.0.0.1", - "localhost" - ] -} diff 
--git a/tests/integration/fixtures-expired/server-ip.crt b/tests/integration/fixtures-expired/server-ip.crt deleted file mode 100644 index 35e78fabfd2..00000000000 --- a/tests/integration/fixtures-expired/server-ip.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEBzCCAu+gAwIBAgIUOc4vrxQ6OeHoGslhL7daP1Ye8ZYwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0wMDA0MTMxODU1MDBaFw0wMDA0MTMxOTU1 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDLUZuwXwiky2VvujM0EOP9LL85sx1dKLc/16hOl6qYRPOg -PH7zsmXMVndsD2Fi9NDbhV9rVHfVNkCyZO/D81u52UEyr2uSFHOfIqLkFGKvcxhO -FtOLA7wTjzHiYO1pFgqYBWzSfIyreYYo13tCYxHUlhn3ibqvCz9fimGsQmswhUiP -yaC4C8iBICWNd4vrXHhtKb5pHHzUDFHkOxKF6VS9f7InKBy2yTr8ekgoEYyE3gtp -ncoVbVlwxehChbZThFi0xsQc/kG/eoyGznKo9RUlUW+h3SEJR3bYizYP76ZwWXus -nP5vgLmZ0wIi/689uTQbEAK438rK3xTSziPv6B51AgMBAAGjgZEwgY4wDgYDVR0P -AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB -Af8EAjAAMB0GA1UdDgQWBBR8bgC5SVrIrOAkjED8FB6OThk0IjAfBgNVHSMEGDAW -gBT5Z7kwwTNntO1UsuMdUv/Yq3AdcDAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3 -DQEBCwUAA4IBAQDEV4Ec8TpDXvFTJYXrpME5KnKvtq1fEv100jc88cmlGb/rygge -MtisA1rYSaSEPMF0j7HoMtTwP90yrJCBTr7/vziAXCZU2H6bg24exRzqtMDpDhXg -mvqkqvMVFem8ANIF3a+qXPY/pzjh4xrPuOw10TfG0bE576lAY/KbnY3UvXo6QL54 -AMyimFhq8e9dJ7JnO3eaYmJv6oSjKjqNYSU+01UfxEJGNbx1IELMDlnVKX0Zmn9p -YbUS3nrowKoVXpuca9KzS1pINgqVsztF5XJxzqlcDwERR/QcTKwUgQ0y0BBRqiGg -WdtbyamFufvF8GPsNJ0KRHXSIRRXF7hbgiXd ------END CERTIFICATE----- diff --git a/tests/integration/fixtures-expired/server-ip.key.insecure b/tests/integration/fixtures-expired/server-ip.key.insecure deleted file mode 100644 index 357f52455ab..00000000000 --- a/tests/integration/fixtures-expired/server-ip.key.insecure +++ /dev/null @@ 
-1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAy1GbsF8IpMtlb7ozNBDj/Sy/ObMdXSi3P9eoTpeqmETzoDx+ -87JlzFZ3bA9hYvTQ24Vfa1R31TZAsmTvw/NbudlBMq9rkhRznyKi5BRir3MYThbT -iwO8E48x4mDtaRYKmAVs0nyMq3mGKNd7QmMR1JYZ94m6rws/X4phrEJrMIVIj8mg -uAvIgSAljXeL61x4bSm+aRx81AxR5DsShelUvX+yJygctsk6/HpIKBGMhN4LaZ3K -FW1ZcMXoQoW2U4RYtMbEHP5Bv3qMhs5yqPUVJVFvod0hCUd22Is2D++mcFl7rJz+ -b4C5mdMCIv+vPbk0GxACuN/Kyt8U0s4j7+gedQIDAQABAoIBAAjHl1+AWxEyr0ip -07g12oJ+QiutrmDtdyxMlbn/FqDIqXSL6DeBxp+SREnoSB5L0BEKq1opJZuRYi3R -6gCeK6HU3dngdVazh2KhzkLnFnPZFn2Ywr3IBYEat968rMPS7dYutcpJEpH9B2wQ -EgSF3qk9ahWkXulcJPptMVaM77ACnZk6yYsPDPqjX/zsCXVga59QL0x1n2ai50er -W7kthCj69zZP6crbnjyCUDjNdpDio7xurvvxs0k1KWmcN9QjdOyFXDFgTUnphIFX -pEyVhu+LmLzFKc1WVQ4sIAt6ot9kpWt+cdaBVIWl2yCmqF4nbJ38DDG6wLXaZQd1 -DgEL0YECgYEAzt7QukPfjgw5CKZguQVVn0LGYdHw47qHiusjABzYH4mokMHqR/r5 -LIIRQ4JjB/vpxavj6B0e73tcfwbSzLSwsRI9/6Z27UVpXnpU5LY7+46d+ZXsQorE -8jeUX6ZQi65ujpFFKkftKlmq67XJtmSh2T+3dMqRXmFWVZThllBJcGUCgYEA+5rd -gvZhaj9Rng1CwK3FoI/mp0BtSL+TE8/JbV0yA5X6NhXlts/ysafFZsj9RkR1NhXL -ql8Bl9RxrV6mTIz6/76NC39ZUQUe5FZGv64rqjoFwOnv6ap1/8ntDFy29DgZ5Dqn -flAtbbEyVG+VCwwhDgUT+FTNNS1eg18GStr6LNECgYASgo1anUgbhax0wa5V38xR -e8AUcJyFQ+Ns4q03DV2pNMAIc9Fqr2IsQVcaG0iRJlE8hqzV0AU8mGUmWI30Exbc -QS2a+mIZyOQst/VwoX2sfI5WDrwdGB2XLrHv/Qmn9euehhESP21RJMTOYm2yDD8P -GUxo/tcTAtKexbuJn5VyoQKBgB7OH0DhmZvAlOWdCgc9P20hMURZBwhZLFDIqAjT -2EPIIRJuK+nuG/DUcb7b7OalixRMJtt9Nly4jhKD/ChzOmgFlI9L0EuzLM0YIyFk -2cPFxt6Pxef+DuR6fKN+1oegNstSwx8cAfPkNh1QbBcmLQXiaUeGWnmgTGoZQFP5 -65eBAoGAfV98Mwka+VJ3hYNPL2ZHUXHnXw9Hnf5NnaGfgz7/Ucw3H88HsrIDIZgO -NKSM3NVRIrweAx8/gDIrGqjXkvrwuCqXXYeS23gRteigUpoQrtGjBxIwtalT8K2O -jI4vqz8SsNALtR8nehmBPzTj+t+rF5b1cMfyreHccoAa+0TbPac= ------END RSA PRIVATE KEY----- diff --git a/tests/integration/fixtures-expired/server.crt b/tests/integration/fixtures-expired/server.crt deleted file mode 100644 index 2939cad8856..00000000000 --- a/tests/integration/fixtures-expired/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIIEEjCCAvqgAwIBAgIULscJmimDEFNvw3oQ5paqHc+V/w4wDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0wMDA0MTMxODU1MDBaFw0wMDA0MTMxOTU1 -MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQC090lhTNQRzaNBROiSAt0Tv2KTEqGtus7bNJjAVEpRRz6X -htuXfFf7jJoDqhz60/Xtyky+EoP+EH1sh6TwafZwl3PrCMDNL+Bvmm0BUHU00Tef -afmFf1K7FpL/eYVQcavpNJ35sQRYjsw8JWQ0+Ge0qsyfEUSIqfzdNQgniBrITM7+ -t7NWmCjm3awd0PWMFk10WnEudoWV8fb2TBSTE9gdx0wsafg2Xu7z9WUFCZWPtFRV -2qB91G8fGtZx32pxmi6WgKHIYBWceZ1IGaVsH/c+UWsxJvxoKQnRSuo8vfxHybLM -wULDqFlNE7Z29KIDEmSwUXGeWwGUrud/VgQo0FBPAgMBAAGjgZwwgZkwDgYDVR0P -AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB -Af8EAjAAMB0GA1UdDgQWBBQqIZCfMH71R7RVCzGrap10FGf5ZjAfBgNVHSMEGDAW -gBT5Z7kwwTNntO1UsuMdUv/Yq3AdcDAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8A -AAEwDQYJKoZIhvcNAQELBQADggEBAKzFUSqROx2ERE06y6ZAjB7PeZuRFGIAZiSY -QGG+4vQLAxNMqQN4Td7SXrOsFq/uffrqxlOPCVS94Kd2XRmsT6gCsMWXSr8zBQyA -c//hWHU9jG9YSZ4s2IBqLTugsMUGxuq0ClEXzkrqzeJssHfJCEF+Peg39v/Bk/Hr -iA/YDoQp7hgSdvwO8XH21HBab9nsYHvOIFivWdS4/w+au6QplwDC9a0R67tkNDnQ -gxWvhA8SJ2HjumvZ0eOSZMYhOhXca52LwYBEM66600cKKvOcVAtIWAfx3MI7FY03 -sCUu4iGbo61ceomM22hmZtPUEBzpVuwnaujmD7MMvr322Wu4QJY= ------END CERTIFICATE----- diff --git a/tests/integration/fixtures-expired/server.key.insecure b/tests/integration/fixtures-expired/server.key.insecure deleted file mode 100644 index 3092f9dfab1..00000000000 --- a/tests/integration/fixtures-expired/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAtPdJYUzUEc2jQUTokgLdE79ikxKhrbrO2zSYwFRKUUc+l4bb -l3xX+4yaA6oc+tP17cpMvhKD/hB9bIek8Gn2cJdz6wjAzS/gb5ptAVB1NNE3n2n5 -hX9SuxaS/3mFUHGr6TSd+bEEWI7MPCVkNPhntKrMnxFEiKn83TUIJ4gayEzO/rez 
-Vpgo5t2sHdD1jBZNdFpxLnaFlfH29kwUkxPYHcdMLGn4Nl7u8/VlBQmVj7RUVdqg -fdRvHxrWcd9qcZouloChyGAVnHmdSBmlbB/3PlFrMSb8aCkJ0UrqPL38R8myzMFC -w6hZTRO2dvSiAxJksFFxnlsBlK7nf1YEKNBQTwIDAQABAoIBAFny3E93v6VFwFLN -7Ie+0qJhK58M0L4or273msFmZDY4Il1w069dR+Ipxdfyc0sdlgzm0/RaAa+EBMOw -PISfNrZKIXz+sc6LcJQofuv7UPa601nyc+suGTITC2fewCv3BEr7M1aL7SwTdmKi -90b4/ZsolmKuU5FWZPCSzoXPufg6lyyw/9AvD8gnjMPjyoPrG1AbLruogdD/gz62 -SZEHAaY0jHdQZbobo6iOuGoOKfYHr39B2xVoJAjR+1PVxzuAnZYbpUhemzHMTW03 -ikei3l6B/Qu/NdhW6CaR2dMetTVnbHwomm0cx2N4SNFFrKvnc1KQJR/b4RTxmVvd -HQlVHqECgYEAzb9vxslb6vpbpGUu787X/SFGpHi5tSAeqoL2/TphrKMBh0EJIUSK -tq2B021rzLjvVKdN2hSisa5EvBkNQnwkeF7Nvr0adPB13Xo26G45RmogUnH4UFrY -RNTosMcab6VopFmcdFLxfNHx+hVKf0+l5mNsSN0MtxyM/9q92JF+fFECgYEA4SpY -AcrldyZcmXF5ngFp24SHX0sTgjrbuq0VGk6HshVOYY/XFymlpoZLEpaRC5kKoZ7W -YLVSE1BlJ79kmqPQ6Y2oB+TN2PALVMl8K1fwJxW/OfHEbq2tDylF5/jUS3noxU2w -J2FjV4wyHoKCrVFGQjI+CWQBmvXaGsbEwQO6+p8CgYBfUz7afxiTOgOTmz2v5cm0 -geJU+YoxHPyYS61bjd0LO0rN+5fbTgJmuOTZrGyxoU1hj1JGpCDs6az26TR3hUTw -cBwrLzo+y9oQDzu5XLg0o57uE9fUgwKIgYx9uwHIkH53Bv2x92vjRPIzyAGIEsLu -h0n4SFJH1HaPZC1pVZ+gwQKBgCCWbUhNIiq9bZdzmeNpVvXDV4hOKFOnyxdYZ354 -MSFv/fkWxU1/5I6WTxUwn2trSeOcRnCWrXtIHmvDQn8zCFBVBSWnUrd7/lfWFVd8 -kbBGcHelawWNs0dHdOue0rLdwPeVR9JbQPJxwusxflIxOhboiJv5UlYoENnhPKam -sJAHAoGATyztNEOTtPk7Le4ZYzC2bR58vAPeiu6mzN37Vf4PGWJyLSzAqLnDyQ8c -REFVsgawue5hKzHz+JBsc91CURWHlMcMQ1sjmMx0MGpNyjVlZIoHMxnIo/CGwGvI -TSlyv2ErcTpiwC1gAw1G5dAp3fWASmiDxNX5UXnpVA2SMiCScY8= ------END RSA PRIVATE KEY----- diff --git a/tests/integration/grpc_test.go b/tests/integration/grpc_test.go deleted file mode 100644 index 618c63f7194..00000000000 --- a/tests/integration/grpc_test.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - tls "crypto/tls" - "fmt" - "strings" - "testing" - "time" - - "google.golang.org/grpc" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestAuthority(t *testing.T) { - tcs := []struct { - name string - useTCP bool - useTLS bool - clientURLPattern string - expectAuthorityPattern string - }{ - { - name: "unix:path", - clientURLPattern: "unix:localhost:${MEMBER_NAME}", - expectAuthorityPattern: "localhost:${MEMBER_NAME}", - }, - { - name: "unix://absolute_path", - clientURLPattern: "unix://localhost:${MEMBER_NAME}", - expectAuthorityPattern: "localhost:${MEMBER_NAME}", - }, - // "unixs" is not standard schema supported by etcd - { - name: "unixs:absolute_path", - useTLS: true, - clientURLPattern: "unixs:localhost:${MEMBER_NAME}", - expectAuthorityPattern: "localhost:${MEMBER_NAME}", - }, - { - name: "unixs://absolute_path", - useTLS: true, - clientURLPattern: "unixs://localhost:${MEMBER_NAME}", - expectAuthorityPattern: "localhost:${MEMBER_NAME}", - }, - { - name: "http://domain[:port]", - useTCP: true, - clientURLPattern: "http://localhost:${MEMBER_PORT}", - expectAuthorityPattern: "localhost:${MEMBER_PORT}", - }, - { - name: "https://domain[:port]", - useTLS: true, - useTCP: true, - clientURLPattern: "https://localhost:${MEMBER_PORT}", - expectAuthorityPattern: "localhost:${MEMBER_PORT}", - }, - { - name: "http://address[:port]", - useTCP: true, - clientURLPattern: "http://127.0.0.1:${MEMBER_PORT}", - expectAuthorityPattern: 
"127.0.0.1:${MEMBER_PORT}", - }, - { - name: "https://address[:port]", - useTCP: true, - useTLS: true, - clientURLPattern: "https://127.0.0.1:${MEMBER_PORT}", - expectAuthorityPattern: "127.0.0.1:${MEMBER_PORT}", - }, - } - for _, tc := range tcs { - for _, clusterSize := range []int{1, 3} { - t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) { - integration.BeforeTest(t) - cfg := integration.ClusterConfig{ - Size: clusterSize, - UseTCP: tc.useTCP, - UseIP: tc.useTCP, - } - cfg, tlsConfig := setupTLS(t, tc.useTLS, cfg) - clus := integration.NewCluster(t, &cfg) - defer clus.Terminate(t) - - kv := setupClient(t, tc.clientURLPattern, clus, tlsConfig) - defer kv.Close() - - putRequestMethod := "/etcdserverpb.KV/Put" - _, err := kv.Put(context.TODO(), "foo", "bar") - if err != nil { - t.Fatal(err) - } - - assertAuthority(t, templateAuthority(t, tc.expectAuthorityPattern, clus.Members[0]), clus, putRequestMethod) - }) - } - } -} - -func setupTLS(t *testing.T, useTLS bool, cfg integration.ClusterConfig) (integration.ClusterConfig, *tls.Config) { - t.Helper() - if useTLS { - cfg.ClientTLS = &integration.TestTLSInfo - tlsConfig, err := integration.TestTLSInfo.ClientConfig() - if err != nil { - t.Fatal(err) - } - return cfg, tlsConfig - } - return cfg, nil -} - -func setupClient(t *testing.T, endpointPattern string, clus *integration.Cluster, tlsConfig *tls.Config) *clientv3.Client { - t.Helper() - endpoints := templateEndpoints(t, endpointPattern, clus) - kv, err := clientv3.New(clientv3.Config{ - Endpoints: endpoints, - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - TLS: tlsConfig, - }) - if err != nil { - t.Fatal(err) - } - return kv -} - -func templateEndpoints(t *testing.T, pattern string, clus *integration.Cluster) []string { - t.Helper() - var endpoints []string - for _, m := range clus.Members { - ent := pattern - ent = strings.ReplaceAll(ent, "${MEMBER_PORT}", m.GrpcPortNumber()) - ent = 
strings.ReplaceAll(ent, "${MEMBER_NAME}", m.Name) - endpoints = append(endpoints, ent) - } - return endpoints -} - -func templateAuthority(t *testing.T, pattern string, m *integration.Member) string { - t.Helper() - authority := pattern - authority = strings.ReplaceAll(authority, "${MEMBER_PORT}", m.GrpcPortNumber()) - authority = strings.ReplaceAll(authority, "${MEMBER_NAME}", m.Name) - return authority -} - -func assertAuthority(t *testing.T, expectedAuthority string, clus *integration.Cluster, filterMethod string) { - t.Helper() - requestsFound := 0 - for _, m := range clus.Members { - for _, r := range m.RecordedRequests() { - if filterMethod != "" && r.FullMethod != filterMethod { - continue - } - requestsFound++ - if r.Authority != expectedAuthority { - t.Errorf("Got unexpected authority header, member: %q, request: %q, got authority: %q, expected %q", m.Name, r.FullMethod, r.Authority, expectedAuthority) - } - } - } - if requestsFound == 0 { - t.Errorf("Expected at least one request") - } -} diff --git a/tests/integration/hashkv_test.go b/tests/integration/hashkv_test.go deleted file mode 100644 index 3fc10a604d8..00000000000 --- a/tests/integration/hashkv_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "net" - "net/http" - "testing" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/storage/mvcc/testutil" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestCompactionHash tests the compaction hash -// TODO: Change this to fuzz test -func TestCompactionHash(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cc, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } - client := &http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", clus.Members[0].PeerURLs[0].Host) - }, - }, - } - - testutil.TestCompactionHash(context.Background(), t, hashTestCase{cc, clus.Members[0].GRPCURL(), client}, 1000) -} - -type hashTestCase struct { - *clientv3.Client - url string - http *http.Client -} - -func (tc hashTestCase) Put(ctx context.Context, key, value string) error { - _, err := tc.Client.Put(ctx, key, value) - return err -} - -func (tc hashTestCase) Delete(ctx context.Context, key string) error { - _, err := tc.Client.Delete(ctx, key) - return err -} - -func (tc hashTestCase) HashByRev(ctx context.Context, rev int64) (testutil.KeyValueHash, error) { - resp, err := etcdserver.HashByRev(ctx, tc.http, "http://unix", rev) - return testutil.KeyValueHash{Hash: resp.Hash, CompactRevision: resp.CompactRevision, Revision: resp.Header.Revision}, err -} - -func (tc hashTestCase) Defrag(ctx context.Context) error { - _, err := tc.Client.Defragment(ctx, tc.url) - return err -} - -func (tc hashTestCase) Compact(ctx context.Context, rev int64) error { - _, err := tc.Client.Compact(ctx, rev) - // Wait for compaction to be compacted - time.Sleep(50 * time.Millisecond) - return err -} diff --git a/tests/integration/lazy_cluster.go 
b/tests/integration/lazy_cluster.go deleted file mode 100644 index 1d16d2d3818..00000000000 --- a/tests/integration/lazy_cluster.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "log" - "net/http" - "sync" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// Infrastructure to provision a single shared cluster for tests - only -// when its needed. -// -// See ./tests/integration/clientv3/examples/main_test.go for canonical usage. -// Please notice that the shared (LazyCluster's) state is preserved between -// testcases, so left-over state might has cross-testcase effects. -// Prefer dedicated clusters for substancial test-cases. - -type LazyCluster interface { - // EndpointsV2 - exposes connection points for client v2. - // Calls to this method might initialize the cluster. - EndpointsV2() []string - - // EndpointsV3 - exposes connection points for client v3. - // Calls to this method might initialize the cluster. - EndpointsV3() []string - - // Cluster - calls to this method might initialize the cluster. - Cluster() *integration.Cluster - - // Transport - call to this method might initialize the cluster. 
- Transport() *http.Transport - - Terminate() - - TB() testutil.TB -} - -type lazyCluster struct { - cfg integration.ClusterConfig - cluster *integration.Cluster - transport *http.Transport - once sync.Once - tb testutil.TB - closer func() -} - -// NewLazyCluster returns a new test cluster handler that gets created on the -// first call to GetEndpoints() or GetTransport() -func NewLazyCluster() LazyCluster { - return NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1}) -} - -// NewLazyClusterWithConfig returns a new test cluster handler that gets created -// on the first call to GetEndpoints() or GetTransport() -func NewLazyClusterWithConfig(cfg integration.ClusterConfig) LazyCluster { - tb, closer := testutil.NewTestingTBProthesis("lazy_cluster") - return &lazyCluster{cfg: cfg, tb: tb, closer: closer} -} - -func (lc *lazyCluster) mustLazyInit() { - lc.once.Do(func() { - lc.tb.Logf("LazyIniting ...") - var err error - lc.transport, err = transport.NewTransport(transport.TLSInfo{}, time.Second) - if err != nil { - log.Fatal(err) - } - lc.cluster = integration.NewCluster(lc.tb, &lc.cfg) - lc.tb.Logf("LazyIniting [Done]") - }) -} - -func (lc *lazyCluster) Terminate() { - lc.tb.Logf("Terminating...") - if lc != nil && lc.cluster != nil { - lc.cluster.Terminate(nil) - lc.cluster = nil - } - if lc.closer != nil { - lc.tb.Logf("Closer...") - lc.closer() - } -} - -func (lc *lazyCluster) EndpointsV2() []string { - return []string{lc.Cluster().Members[0].URL()} -} - -func (lc *lazyCluster) EndpointsV3() []string { - return lc.Cluster().Client(0).Endpoints() -} - -func (lc *lazyCluster) Cluster() *integration.Cluster { - lc.mustLazyInit() - return lc.cluster -} - -func (lc *lazyCluster) Transport() *http.Transport { - lc.mustLazyInit() - return lc.transport -} - -func (lc *lazyCluster) TB() testutil.TB { - return lc.tb -} diff --git a/tests/integration/main_test.go b/tests/integration/main_test.go deleted file mode 100644 index e783205834f..00000000000 --- 
a/tests/integration/main_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package integration - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -func TestMain(m *testing.M) { - testutil.MustTestMainWithLeakDetection(m) -} diff --git a/tests/integration/member_test.go b/tests/integration/member_test.go deleted file mode 100644 index 6581b654b38..00000000000 --- a/tests/integration/member_test.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestPauseMember(t *testing.T) { - integration.BeforeTest(t) - - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5}) - defer c.Terminate(t) - - for i := 0; i < 5; i++ { - c.Members[i].Pause() - membs := append([]*integration.Member{}, c.Members[:i]...) - membs = append(membs, c.Members[i+1:]...) 
- c.WaitMembersForLeader(t, membs) - clusterMustProgress(t, membs) - c.Members[i].Resume() - } - c.WaitMembersForLeader(t, c.Members) - clusterMustProgress(t, c.Members) -} - -func TestRestartMember(t *testing.T) { - integration.BeforeTest(t) - c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer c.Terminate(t) - - for i := 0; i < 3; i++ { - c.Members[i].Stop(t) - membs := append([]*integration.Member{}, c.Members[:i]...) - membs = append(membs, c.Members[i+1:]...) - c.WaitMembersForLeader(t, membs) - clusterMustProgress(t, membs) - err := c.Members[i].Restart(t) - if err != nil { - t.Fatal(err) - } - } - c.WaitMembersForLeader(t, c.Members) - clusterMustProgress(t, c.Members) -} - -func TestLaunchDuplicateMemberShouldFail(t *testing.T) { - integration.BeforeTest(t) - size := 3 - c := integration.NewCluster(t, &integration.ClusterConfig{Size: size}) - m := c.Members[0].Clone(t) - m.DataDir = t.TempDir() - defer c.Terminate(t) - - if err := m.Launch(); err == nil { - t.Errorf("unexpect successful launch") - } else { - t.Logf("launch failed as expected: %v", err) - assert.Contains(t, err.Error(), "has already been bootstrapped") - } -} - -func TestSnapshotAndRestartMember(t *testing.T) { - integration.BeforeTest(t) - m := integration.MustNewMember(t, integration.MemberConfig{Name: "snapAndRestartTest", UseBridge: true}) - m.SnapshotCount = 100 - m.Launch() - defer m.Terminate(t) - defer m.Client.Close() - m.WaitOK(t) - - var err error - for i := 0; i < 120; i++ { - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - key := fmt.Sprintf("foo%d", i) - _, err = m.Client.Put(ctx, "/"+key, "bar") - if err != nil { - t.Fatalf("#%d: create on %s error: %v", i, m.URL(), err) - } - cancel() - } - m.Stop(t) - m.Restart(t) - - m.WaitOK(t) - for i := 0; i < 120; i++ { - ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) - key := fmt.Sprintf("foo%d", i) - resp, err := 
m.Client.Get(ctx, "/"+key) - if err != nil { - t.Fatalf("#%d: get on %s error: %v", i, m.URL(), err) - } - cancel() - - if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" { - t.Errorf("#%d: got = %v, want %v", i, resp.Kvs[0], "bar") - } - } -} diff --git a/tests/integration/metrics_test.go b/tests/integration/metrics_test.go deleted file mode 100644 index 59ce0d1d377..00000000000 --- a/tests/integration/metrics_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "fmt" - "net/http" - "strconv" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/transport" - "go.etcd.io/etcd/server/v3/storage" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestMetricDbSizeBoot checks that the db size metric is set on boot. -func TestMetricDbSizeBoot(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } - - if v == "0" { - t.Fatalf("expected non-zero, got %q", v) - } -} - -func TestMetricDbSizeDefrag(t *testing.T) { - testMetricDbSizeDefrag(t, "etcd") -} - -// testMetricDbSizeDefrag checks that the db size metric is set after defrag. 
-func testMetricDbSizeDefrag(t *testing.T, name string) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.Client(0)).KV - mc := integration.ToGRPC(clus.Client(0)).Maintenance - - // expand the db size - numPuts := 25 // large enough to write more than 1 page - putreq := &pb.PutRequest{Key: []byte("k"), Value: make([]byte, 4096)} - for i := 0; i < numPuts; i++ { - time.Sleep(10 * time.Millisecond) // to execute multiple backend txn - if _, err := kvc.Put(context.TODO(), putreq); err != nil { - t.Fatal(err) - } - } - - // wait for backend txn sync - time.Sleep(500 * time.Millisecond) - - expected := numPuts * len(putreq.Value) - beforeDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } - bv, err := strconv.Atoi(beforeDefrag) - if err != nil { - t.Fatal(err) - } - if bv < expected { - t.Fatalf("expected db size greater than %d, got %d", expected, bv) - } - beforeDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") - if err != nil { - t.Fatal(err) - } - biu, err := strconv.Atoi(beforeDefragInUse) - if err != nil { - t.Fatal(err) - } - if biu < expected { - t.Fatalf("expected db size in use is greater than %d, got %d", expected, biu) - } - - // clear out historical keys, in use bytes should free pages - creq := &pb.CompactionRequest{Revision: int64(numPuts), Physical: true} - if _, kerr := kvc.Compact(context.TODO(), creq); kerr != nil { - t.Fatal(kerr) - } - - validateAfterCompactionInUse := func() error { - // Put to move PendingPages to FreePages - if _, err = kvc.Put(context.TODO(), putreq); err != nil { - t.Fatal(err) - } - time.Sleep(500 * time.Millisecond) - - afterCompactionInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") - if err != nil { - t.Fatal(err) - } - aciu, err := strconv.Atoi(afterCompactionInUse) - if err != nil { - 
t.Fatal(err) - } - if biu <= aciu { - return fmt.Errorf("expected less than %d, got %d after compaction", biu, aciu) - } - return nil - } - - // backend rollbacks read transaction asynchronously (PR #10523), - // which causes the result to be flaky. Retry 3 times. - maxRetry, retry := 3, 0 - for { - err := validateAfterCompactionInUse() - if err == nil { - break - } - retry++ - if retry >= maxRetry { - t.Fatalf(err.Error()) - } - } - - // defrag should give freed space back to fs - mc.Defragment(context.TODO(), &pb.DefragmentRequest{}) - - afterDefrag, err := clus.Members[0].Metric(name + "_mvcc_db_total_size_in_bytes") - if err != nil { - t.Fatal(err) - } - av, err := strconv.Atoi(afterDefrag) - if err != nil { - t.Fatal(err) - } - if bv <= av { - t.Fatalf("expected less than %d, got %d after defrag", bv, av) - } - - afterDefragInUse, err := clus.Members[0].Metric("etcd_mvcc_db_total_size_in_use_in_bytes") - if err != nil { - t.Fatal(err) - } - adiu, err := strconv.Atoi(afterDefragInUse) - if err != nil { - t.Fatal(err) - } - if adiu > av { - t.Fatalf("db size in use (%d) is expected less than db size (%d) after defrag", adiu, av) - } -} - -func TestMetricQuotaBackendBytes(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - qs, err := clus.Members[0].Metric("etcd_server_quota_backend_bytes") - if err != nil { - t.Fatal(err) - } - qv, err := strconv.ParseFloat(qs, 64) - if err != nil { - t.Fatal(err) - } - if int64(qv) != storage.DefaultQuotaBytes { - t.Fatalf("expected %d, got %f", storage.DefaultQuotaBytes, qv) - } -} - -func TestMetricsHealth(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second) - if err != nil { - t.Fatal(err) - } - u := clus.Members[0].ClientURLs[0] - u.Path = "/health" - resp, err := 
tr.RoundTrip(&http.Request{ - Header: make(http.Header), - Method: http.MethodGet, - URL: &u, - }) - resp.Body.Close() - if err != nil { - t.Fatal(err) - } - - hv, err := clus.Members[0].Metric("etcd_server_health_failures") - if err != nil { - t.Fatal(err) - } - if hv != "0" { - t.Fatalf("expected '0' from etcd_server_health_failures, got %q", hv) - } -} diff --git a/tests/integration/network_partition_test.go b/tests/integration/network_partition_test.go deleted file mode 100644 index c3b08f23c5d..00000000000 --- a/tests/integration/network_partition_test.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "fmt" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 5}) - defer clus.Terminate(t) - - leadIndex := clus.WaitLeader(t) - - // minority: leader, follower / majority: follower, follower, follower - minority := []int{leadIndex, (leadIndex + 1) % 5} - majority := []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5} - - minorityMembers := getMembersByIndexSlice(clus, minority) - majorityMembers := getMembersByIndexSlice(clus, majority) - - // network partition (bi-directional) - injectPartition(t, minorityMembers, majorityMembers) - - // minority leader must be lost - clus.WaitMembersNoLeader(minorityMembers) - - // wait extra election timeout - time.Sleep(2 * majorityMembers[0].ElectionTimeout()) - - // new leader must be from majority - clus.WaitMembersForLeader(t, majorityMembers) - - // recover network partition (bi-directional) - recoverPartition(t, minorityMembers, majorityMembers) - - // write to majority first - clusterMustProgress(t, append(majorityMembers, minorityMembers...)) -} - -func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) { - // retry up to 3 times, in case of leader election on majority partition due to slow hardware - var err error - for i := 0; i < 3; i++ { - if err = testNetworkPartition5MembersLeaderInMajority(t); err == nil { - break - } - t.Logf("[%d] got %v", i, err) - } - if err != nil { - t.Fatalf("failed after 3 tries (%v)", err) - } -} - -func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 5}) - defer clus.Terminate(t) - - leadIndex := clus.WaitLeader(t) - - // majority: leader, follower, follower / minority: follower, follower - majority := []int{leadIndex, 
(leadIndex + 1) % 5, (leadIndex + 2) % 5} - minority := []int{(leadIndex + 3) % 5, (leadIndex + 4) % 5} - - majorityMembers := getMembersByIndexSlice(clus, majority) - minorityMembers := getMembersByIndexSlice(clus, minority) - - // network partition (bi-directional) - injectPartition(t, majorityMembers, minorityMembers) - - // minority leader must be lost - clus.WaitMembersNoLeader(minorityMembers) - - // wait extra election timeout - time.Sleep(2 * majorityMembers[0].ElectionTimeout()) - - // leader must be hold in majority - leadIndex2 := clus.WaitMembersForLeader(t, majorityMembers) - leadID, leadID2 := clus.Members[leadIndex].Server.MemberId(), majorityMembers[leadIndex2].Server.MemberId() - if leadID != leadID2 { - return fmt.Errorf("unexpected leader change from %s, got %s", leadID, leadID2) - } - - // recover network partition (bi-directional) - recoverPartition(t, majorityMembers, minorityMembers) - - // write to majority first - clusterMustProgress(t, append(majorityMembers, minorityMembers...)) - return nil -} - -func TestNetworkPartition4Members(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 4}) - defer clus.Terminate(t) - - leadIndex := clus.WaitLeader(t) - - // groupA: leader, follower / groupB: follower, follower - groupA := []int{leadIndex, (leadIndex + 1) % 4} - groupB := []int{(leadIndex + 2) % 4, (leadIndex + 3) % 4} - - leaderPartition := getMembersByIndexSlice(clus, groupA) - followerPartition := getMembersByIndexSlice(clus, groupB) - - // network partition (bi-directional) - injectPartition(t, leaderPartition, followerPartition) - - // no group has quorum, so leader must be lost in all members - clus.WaitNoLeader() - - // recover network partition (bi-directional) - recoverPartition(t, leaderPartition, followerPartition) - - // need to wait since it recovered with no leader - clus.WaitLeader(t) - - clusterMustProgress(t, clus.Members) -} - -func getMembersByIndexSlice(clus 
*integration.Cluster, idxs []int) []*integration.Member { - ms := make([]*integration.Member, len(idxs)) - for i, idx := range idxs { - ms[i] = clus.Members[idx] - } - return ms -} - -func injectPartition(t *testing.T, src, others []*integration.Member) { - for _, m := range src { - m.InjectPartition(t, others...) - } -} - -func recoverPartition(t *testing.T, src, others []*integration.Member) { - for _, m := range src { - m.RecoverPartition(t, others...) - } -} diff --git a/tests/integration/proxy/grpcproxy/cluster_test.go b/tests/integration/proxy/grpcproxy/cluster_test.go deleted file mode 100644 index 9177eb99d15..00000000000 --- a/tests/integration/proxy/grpcproxy/cluster_test.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "context" - "net" - "testing" - "time" - - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - "google.golang.org/grpc" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestClusterProxyMemberList(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCURL()}, t) - defer cts.close(t) - - cfg := clientv3.Config{ - Endpoints: []string{cts.caddr}, - DialTimeout: 5 * time.Second, - } - client, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatalf("err %v, want nil", err) - } - defer client.Close() - - // wait some time for register-loop to write keys - time.Sleep(time.Second) - - var mresp *clientv3.MemberListResponse - mresp, err = client.Cluster.MemberList(context.Background()) - if err != nil { - t.Fatalf("err %v, want nil", err) - } - - if len(mresp.Members) != 1 { - t.Fatalf("len(mresp.Members) expected 1, got %d (%+v)", len(mresp.Members), mresp.Members) - } - if len(mresp.Members[0].ClientURLs) != 1 { - t.Fatalf("len(mresp.Members[0].ClientURLs) expected 1, got %d (%+v)", len(mresp.Members[0].ClientURLs), mresp.Members[0].ClientURLs[0]) - } - if mresp.Members[0].ClientURLs[0] != cts.caddr { - t.Fatalf("mresp.Members[0].ClientURLs[0] expected %q, got %q", cts.caddr, mresp.Members[0].ClientURLs[0]) - } -} - -type clusterproxyTestServer struct { - cp pb.ClusterServer - c *clientv3.Client - server *grpc.Server - l net.Listener - donec <-chan struct{} - caddr string -} - -func (cts *clusterproxyTestServer) close(t *testing.T) { - cts.server.Stop() - cts.l.Close() - cts.c.Close() - select { - case <-cts.donec: - return - case <-time.After(5 * time.Second): - t.Fatalf("register-loop took too long to 
return") - } -} - -func newClusterProxyServer(lg *zap.Logger, endpoints []string, t *testing.T) *clusterproxyTestServer { - cfg := clientv3.Config{ - Endpoints: endpoints, - DialTimeout: 5 * time.Second, - } - client, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - - cts := &clusterproxyTestServer{ - c: client, - } - cts.l, err = net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - var opts []grpc.ServerOption - cts.server = grpc.NewServer(opts...) - servec := make(chan struct{}) - go func() { - <-servec - cts.server.Serve(cts.l) - }() - - grpcproxy.Register(lg, client, "test-prefix", cts.l.Addr().String(), 7) - cts.cp, cts.donec = grpcproxy.NewClusterProxy(lg, client, cts.l.Addr().String(), "test-prefix") - cts.caddr = cts.l.Addr().String() - pb.RegisterClusterServer(cts.server, cts.cp) - close(servec) - - // wait some time for free port 0 to be resolved - time.Sleep(500 * time.Millisecond) - - return cts -} diff --git a/tests/integration/proxy/grpcproxy/kv_test.go b/tests/integration/proxy/grpcproxy/kv_test.go deleted file mode 100644 index 524f1392d97..00000000000 --- a/tests/integration/proxy/grpcproxy/kv_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "context" - "net" - "testing" - "time" - - "google.golang.org/grpc" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestKVProxyRange(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL()}, t) - defer kvts.close() - - // create a client and try to get key from proxy. - cfg := clientv3.Config{ - Endpoints: []string{kvts.l.Addr().String()}, - DialTimeout: 5 * time.Second, - } - client, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - _, err = client.Get(context.Background(), "foo") - if err != nil { - t.Fatalf("err = %v, want nil", err) - } - client.Close() -} - -type kvproxyTestServer struct { - kp pb.KVServer - c *clientv3.Client - server *grpc.Server - l net.Listener -} - -func (kts *kvproxyTestServer) close() { - kts.server.Stop() - kts.l.Close() - kts.c.Close() -} - -func newKVProxyServer(endpoints []string, t *testing.T) *kvproxyTestServer { - cfg := clientv3.Config{ - Endpoints: endpoints, - DialTimeout: 5 * time.Second, - } - client, err := integration2.NewClient(t, cfg) - if err != nil { - t.Fatal(err) - } - - kvp, _ := grpcproxy.NewKvProxy(client) - - kvts := &kvproxyTestServer{ - kp: kvp, - c: client, - } - - var opts []grpc.ServerOption - kvts.server = grpc.NewServer(opts...) 
- pb.RegisterKVServer(kvts.server, kvts.kp) - - kvts.l, err = net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - go kvts.server.Serve(kvts.l) - - return kvts -} diff --git a/tests/integration/proxy/grpcproxy/register_test.go b/tests/integration/proxy/grpcproxy/register_test.go deleted file mode 100644 index da5eca0010b..00000000000 --- a/tests/integration/proxy/grpcproxy/register_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package grpcproxy - -import ( - "testing" - "time" - - "go.uber.org/zap/zaptest" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/naming/endpoints" - "go.etcd.io/etcd/server/v3/proxy/grpcproxy" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestRegister(t *testing.T) { - integration2.BeforeTest(t) - - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - cli := clus.Client(0) - paddr := clus.Members[0].GRPCURL() - - testPrefix := "test-name" - wa := mustCreateWatcher(t, cli, testPrefix) - - donec := grpcproxy.Register(zaptest.NewLogger(t), cli, testPrefix, paddr, 5) - - ups := <-wa - if len(ups) != 1 { - t.Fatalf("len(ups) expected 1, got %d (%v)", len(ups), ups) - } - if ups[0].Endpoint.Addr != paddr { - t.Fatalf("ups[0].Addr expected %q, got %q", paddr, ups[0].Endpoint.Addr) - } - - cli.Close() - clus.TakeClient(0) - select { - case <-donec: - case <-time.After(5 * time.Second): - t.Fatal("donec 'register' did not return in time") - } -} - -func mustCreateWatcher(t *testing.T, c *clientv3.Client, prefix string) endpoints.WatchChannel { - em, err := endpoints.NewManager(c, prefix) - if err != nil { - t.Fatalf("failed to create endpoints.Manager: %v", err) - } - wc, err := em.NewWatchChannel(c.Ctx()) - if err != nil { - t.Fatalf("failed to resolve %q (%v)", prefix, err) - } - return wc -} diff --git a/tests/integration/revision_test.go b/tests/integration/revision_test.go deleted file mode 100644 index 6fb81b9ca5f..00000000000 --- a/tests/integration/revision_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration_test - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "testing" - "time" - - "go.etcd.io/etcd/tests/v3/framework/integration" - "google.golang.org/grpc/status" -) - -func TestRevisionMonotonicWithLeaderPartitions(t *testing.T) { - testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) { - for i := 0; i < 5; i++ { - leader := clus.WaitLeader(t) - time.Sleep(time.Second) - clus.Members[leader].InjectPartition(t, clus.Members[(leader+1)%3], clus.Members[(leader+2)%3]) - time.Sleep(time.Second) - clus.Members[leader].RecoverPartition(t, clus.Members[(leader+1)%3], clus.Members[(leader+2)%3]) - } - }) -} - -func TestRevisionMonotonicWithPartitions(t *testing.T) { - testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) { - for i := 0; i < 5; i++ { - time.Sleep(time.Second) - clus.Members[i%3].InjectPartition(t, clus.Members[(i+1)%3], clus.Members[(i+2)%3]) - time.Sleep(time.Second) - clus.Members[i%3].RecoverPartition(t, clus.Members[(i+1)%3], clus.Members[(i+2)%3]) - } - }) -} - -func TestRevisionMonotonicWithLeaderRestarts(t *testing.T) { - testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) { - for i := 0; i < 5; i++ { - leader := clus.WaitLeader(t) - time.Sleep(time.Second) - clus.Members[leader].Stop(t) - time.Sleep(time.Second) - clus.Members[leader].Restart(t) - } - }) -} - -func TestRevisionMonotonicWithRestarts(t *testing.T) { - testRevisionMonotonicWithFailures(t, 11*time.Second, func(clus *integration.Cluster) { 
- for i := 0; i < 5; i++ { - time.Sleep(time.Second) - clus.Members[i%3].Stop(t) - time.Sleep(time.Second) - clus.Members[i%3].Restart(t) - } - }) -} - -func testRevisionMonotonicWithFailures(t *testing.T, testDuration time.Duration, injectFailures func(clus *integration.Cluster)) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), testDuration) - defer cancel() - - wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - putWorker(t, ctx, clus) - }() - } - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - getWorker(t, ctx, clus) - }() - } - - injectFailures(clus) - wg.Wait() - kv := clus.Client(0) - resp, err := kv.Get(context.Background(), "foo") - if err != nil { - t.Fatal(err) - } - t.Logf("Revision %d", resp.Header.Revision) -} - -func putWorker(t *testing.T, ctx context.Context, clus *integration.Cluster) { - for i := 0; ; i++ { - kv := clus.Client(i % 3) - _, err := kv.Put(ctx, "foo", fmt.Sprintf("%d", i)) - if errors.Is(err, context.DeadlineExceeded) { - return - } - if silenceConnectionErrors(err) != nil { - t.Fatal(err) - } - } -} - -func getWorker(t *testing.T, ctx context.Context, clus *integration.Cluster) { - var prevRev int64 - for i := 0; ; i++ { - kv := clus.Client(i % 3) - resp, err := kv.Get(ctx, "foo") - if errors.Is(err, context.DeadlineExceeded) { - return - } - if silenceConnectionErrors(err) != nil { - t.Fatal(err) - } - if resp == nil { - continue - } - if prevRev > resp.Header.Revision { - t.Fatalf("rev is less than previously observed revision, rev: %d, prevRev: %d", resp.Header.Revision, prevRev) - } - prevRev = resp.Header.Revision - } -} - -func silenceConnectionErrors(err error) error { - if err == nil { - return nil - } - s := status.Convert(err) - for _, msg := range connectionErrorMessages { - if 
strings.Index(s.Message(), msg) != -1 { - return nil - } - } - return err -} - -var connectionErrorMessages = []string{ - "context deadline exceeded", - "etcdserver: request timed out", - "error reading from server: EOF", - "read: connection reset by peer", - "use of closed network connection", -} diff --git a/tests/integration/snapshot/member_test.go b/tests/integration/snapshot/member_test.go deleted file mode 100644 index 6c66ff2df80..00000000000 --- a/tests/integration/snapshot/member_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snapshot_test - -import ( - "context" - "fmt" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/server/v3/etcdserver" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestSnapshotV3RestoreMultiMemberAdd ensures that multiple members -// can boot into the same cluster after being restored from a same -// snapshot file, and also be able to add another member to the cluster. 
-func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) { - integration2.BeforeTest(t) - - kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} - dbPath := createSnapshotFile(t, kvs) - - clusterN := 3 - cURLs, pURLs, srvs := restoreCluster(t, clusterN, dbPath) - - defer func() { - for i := 0; i < clusterN; i++ { - srvs[i].Close() - } - }() - - // wait for health interval + leader election - time.Sleep(etcdserver.HealthInterval + 2*time.Second) - - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[0].String()}}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - - urls := newEmbedURLs(t, 2) - newCURLs, newPURLs := urls[:1], urls[1:] - if _, err = cli.MemberAdd(context.Background(), []string{newPURLs[0].String()}); err != nil { - t.Fatal(err) - } - - // wait for membership reconfiguration apply - time.Sleep(testutil.ApplyTimeout) - - cfg := integration2.NewEmbedConfig(t, "3") - cfg.InitialClusterToken = testClusterTkn - cfg.ClusterState = "existing" - cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs - cfg.LPUrls, cfg.APUrls = newPURLs, newPURLs - cfg.InitialCluster = "" - for i := 0; i < clusterN; i++ { - cfg.InitialCluster += fmt.Sprintf(",%d=%s", i, pURLs[i].String()) - } - cfg.InitialCluster = cfg.InitialCluster[1:] - cfg.InitialCluster += fmt.Sprintf(",%s=%s", cfg.Name, newPURLs[0].String()) - - srv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - defer func() { - srv.Close() - }() - select { - case <-srv.Server.ReadyNotify(): - case <-time.After(10 * time.Second): - t.Fatalf("failed to start the newly added etcd member") - } - - cli2, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{newCURLs[0].String()}}) - if err != nil { - t.Fatal(err) - } - defer cli2.Close() - - ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout) - mresp, err := cli2.MemberList(ctx) - cancel() - if err != nil { - t.Fatal(err) - } - if len(mresp.Members) != 4 { - 
t.Fatalf("expected 4 members, got %+v", mresp) - } - - // make sure restored cluster has kept all data on recovery - var gresp *clientv3.GetResponse - ctx, cancel = context.WithTimeout(context.Background(), testutil.RequestTimeout) - gresp, err = cli2.Get(ctx, "foo", clientv3.WithPrefix()) - cancel() - if err != nil { - t.Fatal(err) - } - for i := range gresp.Kvs { - if string(gresp.Kvs[i].Key) != kvs[i].k { - t.Fatalf("#%d: key expected %s, got %s", i, kvs[i].k, string(gresp.Kvs[i].Key)) - } - if string(gresp.Kvs[i].Value) != kvs[i].v { - t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, string(gresp.Kvs[i].Value)) - } - } -} diff --git a/tests/integration/snapshot/testdata/corrupted_backup.db b/tests/integration/snapshot/testdata/corrupted_backup.db deleted file mode 100644 index d4ab10ef79e..00000000000 Binary files a/tests/integration/snapshot/testdata/corrupted_backup.db and /dev/null differ diff --git a/tests/integration/snapshot/v3_snapshot_test.go b/tests/integration/snapshot/v3_snapshot_test.go deleted file mode 100644 index 4c1d258c6eb..00000000000 --- a/tests/integration/snapshot/v3_snapshot_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package snapshot_test - -import ( - "context" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/etcdutl/v3/snapshot" - "go.etcd.io/etcd/server/v3/embed" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - "go.etcd.io/etcd/tests/v3/framework/testutils" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest" -) - -// TestSnapshotV3RestoreSingle tests single node cluster restoring -// from a snapshot file. -func TestSnapshotV3RestoreSingle(t *testing.T) { - integration2.BeforeTest(t) - kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} - dbPath := createSnapshotFile(t, kvs) - - clusterN := 1 - urls := newEmbedURLs(t, clusterN*2) - cURLs, pURLs := urls[:clusterN], urls[clusterN:] - - cfg := integration2.NewEmbedConfig(t, "s1") - cfg.InitialClusterToken = testClusterTkn - cfg.ClusterState = "existing" - cfg.LCUrls, cfg.ACUrls = cURLs, cURLs - cfg.LPUrls, cfg.APUrls = pURLs, pURLs - cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String()) - - sp := snapshot.NewV3(zaptest.NewLogger(t)) - pss := make([]string, 0, len(pURLs)) - for _, p := range pURLs { - pss = append(pss, p.String()) - } - if err := sp.Restore(snapshot.RestoreConfig{ - SnapshotPath: dbPath, - Name: cfg.Name, - OutputDataDir: cfg.Dir, - InitialCluster: cfg.InitialCluster, - InitialClusterToken: cfg.InitialClusterToken, - PeerURLs: pss, - }); err != nil { - t.Fatal(err) - } - - srv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - defer func() { - srv.Close() - }() - select { - case <-srv.Server.ReadyNotify(): - case <-time.After(3 * time.Second): - t.Fatalf("failed to start restored etcd member") - } - - var cli *clientv3.Client - cli, err = integration2.NewClient(t, clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - for i := range kvs { - var 
gresp *clientv3.GetResponse - gresp, err = cli.Get(context.Background(), kvs[i].k) - if err != nil { - t.Fatal(err) - } - if string(gresp.Kvs[0].Value) != kvs[i].v { - t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, string(gresp.Kvs[0].Value)) - } - } -} - -// TestSnapshotV3RestoreMulti ensures that multiple members -// can boot into the same cluster after being restored from a same -// snapshot file. -func TestSnapshotV3RestoreMulti(t *testing.T) { - integration2.BeforeTest(t) - kvs := []kv{{"foo1", "bar1"}, {"foo2", "bar2"}, {"foo3", "bar3"}} - dbPath := createSnapshotFile(t, kvs) - - clusterN := 3 - cURLs, _, srvs := restoreCluster(t, clusterN, dbPath) - defer func() { - for i := 0; i < clusterN; i++ { - srvs[i].Close() - } - }() - - // wait for leader election - time.Sleep(time.Second) - - for i := 0; i < clusterN; i++ { - cli, err := integration2.NewClient(t, clientv3.Config{Endpoints: []string{cURLs[i].String()}}) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - for i := range kvs { - var gresp *clientv3.GetResponse - gresp, err = cli.Get(context.Background(), kvs[i].k) - if err != nil { - t.Fatal(err) - } - if string(gresp.Kvs[0].Value) != kvs[i].v { - t.Fatalf("#%d: value expected %s, got %s", i, kvs[i].v, string(gresp.Kvs[0].Value)) - } - } - } -} - -// TestCorruptedBackupFileCheck tests if we can correctly identify a corrupted backup file. -func TestCorruptedBackupFileCheck(t *testing.T) { - dbPath := testutils.MustAbsPath("testdata/corrupted_backup.db") - integration2.BeforeTest(t) - if _, err := os.Stat(dbPath); err != nil { - t.Fatalf("test file [%s] does not exist: %v", dbPath, err) - } - - sp := snapshot.NewV3(zaptest.NewLogger(t)) - _, err := sp.Status(dbPath) - expectedErrKeywords := "snapshot file integrity check failed" - /* example error message: - snapshot file integrity check failed. 2 errors found. 
- page 3: already freed - page 4: unreachable unfreed - */ - if err == nil { - t.Error("expected error due to corrupted snapshot file, got no error") - } - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Errorf("expected error message to contain the following keywords:\n%s\n"+ - "actual error message:\n%s", - expectedErrKeywords, err.Error()) - } -} - -type kv struct { - k, v string -} - -// creates a snapshot file and returns the file path. -func createSnapshotFile(t *testing.T, kvs []kv) string { - testutil.SkipTestIfShortMode(t, - "Snapshot creation tests are depending on embedded etcd server so are integration-level tests.") - clusterN := 1 - urls := newEmbedURLs(t, clusterN*2) - cURLs, pURLs := urls[:clusterN], urls[clusterN:] - - cfg := integration2.NewEmbedConfig(t, "default") - cfg.ClusterState = "new" - cfg.LCUrls, cfg.ACUrls = cURLs, cURLs - cfg.LPUrls, cfg.APUrls = pURLs, pURLs - cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String()) - srv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - defer func() { - srv.Close() - }() - select { - case <-srv.Server.ReadyNotify(): - case <-time.After(3 * time.Second): - t.Fatalf("failed to start embed.Etcd for creating snapshots") - } - - ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration2.NewClient(t, ccfg) - if err != nil { - t.Fatal(err) - } - defer cli.Close() - for i := range kvs { - ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout) - _, err = cli.Put(ctx, kvs[i].k, kvs[i].v) - cancel() - if err != nil { - t.Fatal(err) - } - } - - sp := snapshot.NewV3(zaptest.NewLogger(t)) - dpPath := filepath.Join(t.TempDir(), fmt.Sprintf("snapshot%d.db", time.Now().Nanosecond())) - _, err = sp.Save(context.Background(), ccfg, dpPath) - if err != nil { - t.Fatal(err) - } - return dpPath -} - -const testClusterTkn = "tkn" - -func restoreCluster(t *testing.T, clusterN int, dbPath string) ( - cURLs []url.URL, 
- pURLs []url.URL, - srvs []*embed.Etcd) { - urls := newEmbedURLs(t, clusterN*2) - cURLs, pURLs = urls[:clusterN], urls[clusterN:] - - ics := "" - for i := 0; i < clusterN; i++ { - ics += fmt.Sprintf(",m%d=%s", i, pURLs[i].String()) - } - ics = ics[1:] - - cfgs := make([]*embed.Config, clusterN) - for i := 0; i < clusterN; i++ { - cfg := integration2.NewEmbedConfig(t, fmt.Sprintf("m%d", i)) - cfg.InitialClusterToken = testClusterTkn - cfg.ClusterState = "existing" - cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]} - cfg.LPUrls, cfg.APUrls = []url.URL{pURLs[i]}, []url.URL{pURLs[i]} - cfg.InitialCluster = ics - - sp := snapshot.NewV3( - zaptest.NewLogger(t, zaptest.Level(zapcore.InfoLevel)).Named(cfg.Name).Named("sm")) - - if err := sp.Restore(snapshot.RestoreConfig{ - SnapshotPath: dbPath, - Name: cfg.Name, - OutputDataDir: cfg.Dir, - PeerURLs: []string{pURLs[i].String()}, - InitialCluster: ics, - InitialClusterToken: cfg.InitialClusterToken, - }); err != nil { - t.Fatal(err) - } - - cfgs[i] = cfg - } - - sch := make(chan *embed.Etcd, len(cfgs)) - for i := range cfgs { - go func(idx int) { - srv, err := embed.StartEtcd(cfgs[idx]) - if err != nil { - t.Error(err) - } - - <-srv.Server.ReadyNotify() - sch <- srv - }(i) - } - - srvs = make([]*embed.Etcd, clusterN) - for i := 0; i < clusterN; i++ { - select { - case srv := <-sch: - srvs[i] = srv - case <-time.After(5 * time.Second): - t.Fatalf("#%d: failed to start embed.Etcd", i) - } - } - return cURLs, pURLs, srvs -} - -// TODO: TLS -func newEmbedURLs(t testutil.TB, n int) (urls []url.URL) { - urls = make([]url.URL, n) - for i := 0; i < n; i++ { - l := integration2.NewLocalListener(t) - defer l.Close() - - u, err := url.Parse(fmt.Sprintf("unix://%s", l.Addr())) - if err != nil { - t.Fatal(err) - } - urls[i] = *u - } - return urls -} diff --git a/tests/integration/testing_test.go b/tests/integration/testing_test.go deleted file mode 100644 index dfd75e89344..00000000000 --- 
a/tests/integration/testing_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration_test - -import ( - "testing" - - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestBeforeTestWithoutLeakDetection(t *testing.T) { - integration2.BeforeTest(t, integration2.WithoutGoLeakDetection(), integration2.WithoutSkipInShort()) - // Intentional leak that should get ignored - go func() { - - }() -} diff --git a/tests/integration/tracing_test.go b/tests/integration/tracing_test.go deleted file mode 100644 index a90586836d0..00000000000 --- a/tests/integration/tracing_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "net" - "testing" - "time" - - "github.com/stretchr/testify/require" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel/propagation" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" - traceservice "go.opentelemetry.io/proto/otlp/collector/trace/v1" - "google.golang.org/grpc" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestTracing ensures that distributed tracing is setup when the feature flag is enabled. -func TestTracing(t *testing.T) { - testutil.SkipTestIfShortMode(t, - "Wal creation tests are depending on embedded etcd server so are integration-level tests.") - // set up trace collector - listener, err := net.Listen("tcp", "localhost:") - if err != nil { - t.Fatal(err) - } - - traceFound := make(chan struct{}) - defer close(traceFound) - - srv := grpc.NewServer() - traceservice.RegisterTraceServiceServer(srv, &traceServer{ - traceFound: traceFound, - filterFunc: containsNodeListSpan}) - - go srv.Serve(listener) - defer srv.Stop() - - cfg := integration.NewEmbedConfig(t, "default") - cfg.ExperimentalEnableDistributedTracing = true - cfg.ExperimentalDistributedTracingAddress = listener.Addr().String() - cfg.ExperimentalDistributedTracingServiceName = "integration-test-tracing" - cfg.ExperimentalDistributedTracingSamplingRatePerMillion = 100 - - // start an etcd instance with tracing enabled - etcdSrv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - defer etcdSrv.Close() - - select { - case <-etcdSrv.Server.ReadyNotify(): - case <-time.After(1 * time.Second): - t.Fatalf("failed to start embed.Etcd for test") - } - - // create a client that has tracing enabled - tracer := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample())) - defer 
tracer.Shutdown(context.TODO()) - tp := trace.TracerProvider(tracer) - - tracingOpts := []otelgrpc.Option{ - otelgrpc.WithTracerProvider(tp), - otelgrpc.WithPropagators( - propagation.NewCompositeTextMapPropagator( - propagation.TraceContext{}, - propagation.Baggage{}, - )), - } - - dialOptions := []grpc.DialOption{ - grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor(tracingOpts...)), - grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(tracingOpts...))} - ccfg := clientv3.Config{DialOptions: dialOptions, Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration.NewClient(t, ccfg) - if err != nil { - etcdSrv.Close() - t.Fatal(err) - } - defer cli.Close() - - // make a request with the instrumented client - resp, err := cli.Get(context.TODO(), "key") - require.NoError(t, err) - require.Empty(t, resp.Kvs) - - // Wait for a span to be recorded from our request - select { - case <-traceFound: - return - case <-time.After(30 * time.Second): - t.Fatal("Timed out waiting for trace") - } -} - -func containsNodeListSpan(req *traceservice.ExportTraceServiceRequest) bool { - for _, resourceSpans := range req.GetResourceSpans() { - for _, attr := range resourceSpans.GetResource().GetAttributes() { - if attr.GetKey() != "service.name" && attr.GetValue().GetStringValue() != "integration-test-tracing" { - continue - } - for _, scoped := range resourceSpans.GetScopeSpans() { - for _, span := range scoped.GetSpans() { - if span.GetName() == "etcdserverpb.KV/Range" { - return true - } - } - } - } - } - return false -} - -// traceServer implements TracesServiceServer -type traceServer struct { - traceFound chan struct{} - filterFunc func(req *traceservice.ExportTraceServiceRequest) bool - traceservice.UnimplementedTraceServiceServer -} - -func (t *traceServer) Export(ctx context.Context, req *traceservice.ExportTraceServiceRequest) (*traceservice.ExportTraceServiceResponse, error) { - var emptyValue = traceservice.ExportTraceServiceResponse{} - if 
t.filterFunc(req) { - t.traceFound <- struct{}{} - } - return &emptyValue, nil -} diff --git a/tests/integration/util_test.go b/tests/integration/util_test.go deleted file mode 100644 index 35b0d711ff4..00000000000 --- a/tests/integration/util_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "io" - "os" - "path/filepath" - - "go.etcd.io/etcd/client/pkg/v3/transport" -) - -// copyTLSFiles clones certs files to dst directory. 
-func copyTLSFiles(ti transport.TLSInfo, dst string) (transport.TLSInfo, error) { - ci := transport.TLSInfo{ - KeyFile: filepath.Join(dst, "server-key.pem"), - CertFile: filepath.Join(dst, "server.pem"), - TrustedCAFile: filepath.Join(dst, "etcd-root-ca.pem"), - ClientCertAuth: ti.ClientCertAuth, - } - if err := copyFile(ti.KeyFile, ci.KeyFile); err != nil { - return transport.TLSInfo{}, err - } - if err := copyFile(ti.CertFile, ci.CertFile); err != nil { - return transport.TLSInfo{}, err - } - if err := copyFile(ti.TrustedCAFile, ci.TrustedCAFile); err != nil { - return transport.TLSInfo{}, err - } - return ci, nil -} - -func copyFile(src, dst string) error { - f, err := os.Open(src) - if err != nil { - return err - } - defer f.Close() - - w, err := os.Create(dst) - if err != nil { - return err - } - defer w.Close() - - if _, err = io.Copy(w, f); err != nil { - return err - } - return w.Sync() -} diff --git a/tests/integration/utl_wal_version_test.go b/tests/integration/utl_wal_version_test.go deleted file mode 100644 index c0b75f9a55d..00000000000 --- a/tests/integration/utl_wal_version_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "testing" - "time" - - "github.com/coreos/go-semver/semver" - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestEtcdVersionFromWAL(t *testing.T) { - testutil.SkipTestIfShortMode(t, - "Wal creation tests are depending on embedded etcd server so are integration-level tests.") - cfg := integration.NewEmbedConfig(t, "default") - srv, err := embed.StartEtcd(cfg) - if err != nil { - t.Fatal(err) - } - select { - case <-srv.Server.ReadyNotify(): - case <-time.After(3 * time.Second): - t.Fatalf("failed to start embed.Etcd for test") - } - - ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} - cli, err := integration.NewClient(t, ccfg) - if err != nil { - srv.Close() - t.Fatal(err) - } - // Get auth status to increase etcd version of proto stored in wal - ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout) - cli.AuthStatus(ctx) - cancel() - - cli.Close() - srv.Close() - - w, err := wal.Open(zap.NewNop(), cfg.Dir+"/member/wal", walpb.Snapshot{}) - if err != nil { - panic(err) - } - defer w.Close() - walVersion, err := wal.ReadWALVersion(w) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, &semver.Version{Major: 3, Minor: 6}, walVersion.MinimalEtcdVersion()) -} diff --git a/tests/integration/v2store/main_test.go b/tests/integration/v2store/main_test.go deleted file mode 100644 index 89026c5a773..00000000000 --- a/tests/integration/v2store/main_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store_test - -import ( - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" -) - -//var endpoints []string - -func TestMain(m *testing.M) { - //cfg := integration.ClusterConfig{Size: 1} - //clus := integration.NewClusterV3(nil, &cfg) - //endpoints = []string{clus.Client(0).Endpoints()[0]} - // v := m.Run() - //clus.Terminate(nil) - //if err := testutil.CheckAfterTest(time.Second); err != nil { - // fmt.Fprintf(os.Stderr, "%v", err) - // os.Exit(1) - //} - testutil.MustTestMainWithLeakDetection(m) - //if v == 0 && testutil.CheckLeakedGoroutine() { - // os.Exit(1) - //} - //os.Exit(v) -} diff --git a/tests/integration/v2store/store_tag_test.go b/tests/integration/v2store/store_tag_test.go deleted file mode 100644 index ec6b02a05f5..00000000000 --- a/tests/integration/v2store/store_tag_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2store_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestStoreRecover ensures that the store can recover from a previously saved state. -func TestStoreRecover(t *testing.T) { - integration2.BeforeTest(t) - s := v2store.New() - var eidx uint64 = 4 - s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Create("/foo/x", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Update("/foo/x", "barbar", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Create("/foo/y", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - b, err := s.Save() - testutil.AssertNil(t, err) - - s2 := v2store.New() - s2.Recovery(b) - - e, err := s.Get("/foo/x", false, false) - assert.Equal(t, e.Node.CreatedIndex, uint64(2)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(3)) - assert.Equal(t, e.EtcdIndex, eidx) - testutil.AssertNil(t, err) - assert.Equal(t, *e.Node.Value, "barbar") - - e, err = s.Get("/foo/y", false, false) - assert.Equal(t, e.EtcdIndex, eidx) - testutil.AssertNil(t, err) - assert.Equal(t, *e.Node.Value, "baz") -} diff --git a/tests/integration/v2store/store_test.go b/tests/integration/v2store/store_test.go deleted file mode 100644 index 8ae8e7f7ce2..00000000000 --- a/tests/integration/v2store/store_test.go +++ /dev/null @@ -1,850 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2store_test - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "go.etcd.io/etcd/client/pkg/v3/testutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" -) - -type StoreCloser interface { - v2store.Store - Close() -} - -func TestNewStoreWithNamespaces(t *testing.T) { - s := v2store.New("/0", "/1") - - _, err := s.Get("/0", false, false) - testutil.AssertNil(t, err) - _, err = s.Get("/1", false, false) - testutil.AssertNil(t, err) -} - -// TestStoreGetValue ensures that the store can retrieve an existing value. -func TestStoreGetValue(t *testing.T) { - s := v2store.New() - - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - var eidx uint64 = 1 - e, err := s.Get("/foo", false, false) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "get") - assert.Equal(t, e.Node.Key, "/foo") - assert.Equal(t, *e.Node.Value, "bar") -} - -// TestStoreGetSorted ensures that the store can retrieve a directory in sorted order. 
-func TestStoreGetSorted(t *testing.T) { - s := v2store.New() - - s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Create("/foo/x", false, "0", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Create("/foo/z", false, "0", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Create("/foo/y", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Create("/foo/y/a", false, "0", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - s.Create("/foo/y/b", false, "0", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - var eidx uint64 = 6 - e, err := s.Get("/foo", true, true) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - - var yNodes v2store.NodeExterns - sortedStrings := []string{"/foo/x", "/foo/y", "/foo/z"} - for i := range e.Node.Nodes { - node := e.Node.Nodes[i] - if node.Key != sortedStrings[i] { - t.Errorf("expect key = %s, got key = %s", sortedStrings[i], node.Key) - } - if node.Key == "/foo/y" { - yNodes = node.Nodes - } - } - - sortedStrings = []string{"/foo/y/a", "/foo/y/b"} - for i := range yNodes { - node := yNodes[i] - if node.Key != sortedStrings[i] { - t.Errorf("expect key = %s, got key = %s", sortedStrings[i], node.Key) - } - } -} - -func TestSet(t *testing.T) { - s := v2store.New() - - // Set /foo="" - var eidx uint64 = 1 - e, err := s.Set("/foo", false, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "set") - assert.Equal(t, e.Node.Key, "/foo") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "") - testutil.AssertNil(t, e.Node.Nodes) - testutil.AssertNil(t, e.Node.Expiration) - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(1)) - - // Set /foo="bar" - eidx = 2 - e, err = s.Set("/foo", false, "bar", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - 
testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "set") - assert.Equal(t, e.Node.Key, "/foo") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "bar") - testutil.AssertNil(t, e.Node.Nodes) - testutil.AssertNil(t, e.Node.Expiration) - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(2)) - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1)) - // Set /foo="baz" (for testing prevNode) - eidx = 3 - e, err = s.Set("/foo", false, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "set") - assert.Equal(t, e.Node.Key, "/foo") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "baz") - testutil.AssertNil(t, e.Node.Nodes) - testutil.AssertNil(t, e.Node.Expiration) - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(3)) - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(2)) - - // Set /a/b/c/d="efg" - eidx = 4 - e, err = s.Set("/a/b/c/d", false, "efg", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Node.Key, "/a/b/c/d") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "efg") - testutil.AssertNil(t, e.Node.Nodes) - testutil.AssertNil(t, e.Node.Expiration) - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(4)) - - // Set /dir as a directory - eidx = 5 - e, err = s.Set("/dir", true, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, 
eidx) - assert.Equal(t, e.Action, "set") - assert.Equal(t, e.Node.Key, "/dir") - testutil.AssertTrue(t, e.Node.Dir) - testutil.AssertNil(t, e.Node.Value) - testutil.AssertNil(t, e.Node.Nodes) - testutil.AssertNil(t, e.Node.Expiration) - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(5)) -} - -// TestStoreCreateValue ensures that the store can create a new key if it doesn't already exist. -func TestStoreCreateValue(t *testing.T) { - s := v2store.New() - - // Create /foo=bar - var eidx uint64 = 1 - e, err := s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/foo") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "bar") - testutil.AssertNil(t, e.Node.Nodes) - testutil.AssertNil(t, e.Node.Expiration) - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(1)) - - // Create /empty="" - eidx = 2 - e, err = s.Create("/empty", false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/empty") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "") - testutil.AssertNil(t, e.Node.Nodes) - testutil.AssertNil(t, e.Node.Expiration) - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(2)) - -} - -// TestStoreCreateDirectory ensures that the store can create a new directory if it doesn't already exist. 
-func TestStoreCreateDirectory(t *testing.T) { - s := v2store.New() - - var eidx uint64 = 1 - e, err := s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/foo") - testutil.AssertTrue(t, e.Node.Dir) -} - -// TestStoreCreateFailsIfExists ensure that the store fails to create a key if it already exists. -func TestStoreCreateFailsIfExists(t *testing.T) { - s := v2store.New() - - // create /foo as dir - s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - - // create /foo as dir again - e, _err := s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - err := _err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeNodeExist) - assert.Equal(t, err.Message, "Key already exists") - assert.Equal(t, err.Cause, "/foo") - assert.Equal(t, err.Index, uint64(1)) - testutil.AssertNil(t, e) -} - -// TestStoreUpdateValue ensures that the store can update a key if it already exists. 
-func TestStoreUpdateValue(t *testing.T) { - s := v2store.New() - - // create /foo=bar - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - // update /foo="bzr" - var eidx uint64 = 2 - e, err := s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "update") - assert.Equal(t, e.Node.Key, "/foo") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "baz") - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(2)) - // check prevNode - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") - assert.Equal(t, e.PrevNode.TTL, int64(0)) - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1)) - - e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz") - assert.Equal(t, e.EtcdIndex, eidx) - - // update /foo="" - eidx = 3 - e, err = s.Update("/foo", "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "update") - assert.Equal(t, e.Node.Key, "/foo") - testutil.AssertFalse(t, e.Node.Dir) - assert.Equal(t, *e.Node.Value, "") - assert.Equal(t, e.Node.TTL, int64(0)) - assert.Equal(t, e.Node.ModifiedIndex, uint64(3)) - // check prevNode - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "baz") - assert.Equal(t, e.PrevNode.TTL, int64(0)) - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(2)) - - e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, *e.Node.Value, "") -} - -// TestStoreUpdateFailsIfDirectory ensures that the store cannot update a directory. 
-func TestStoreUpdateFailsIfDirectory(t *testing.T) { - s := v2store.New() - - s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, _err := s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - err := _err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeNotFile) - assert.Equal(t, err.Message, "Not a file") - assert.Equal(t, err.Cause, "/foo") - testutil.AssertNil(t, e) -} - -// TestStoreDeleteValue ensures that the store can delete a value. -func TestStoreDeleteValue(t *testing.T) { - s := v2store.New() - - var eidx uint64 = 2 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, err := s.Delete("/foo", false, false) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "delete") - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") -} - -// TestStoreDeleteDirectory ensures that the store can delete a directory if recursive is specified. 
-func TestStoreDeleteDirectory(t *testing.T) { - s := v2store.New() - - // create directory /foo - var eidx uint64 = 2 - s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - // delete /foo with dir = true and recursive = false - // this should succeed, since the directory is empty - e, err := s.Delete("/foo", true, false) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "delete") - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, e.PrevNode.Dir, true) - - // create directory /foo and directory /foo/bar - _, err = s.Create("/foo/bar", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - // delete /foo with dir = true and recursive = false - // this should fail, since the directory is not empty - _, err = s.Delete("/foo", true, false) - testutil.AssertNotNil(t, err) - - // delete /foo with dir=false and recursive = true - // this should succeed, since recursive implies dir=true - // and recursively delete should be able to delete all - // items under the given directory - e, err = s.Delete("/foo", false, true) - testutil.AssertNil(t, err) - assert.Equal(t, e.Action, "delete") - -} - -// TestStoreDeleteDirectoryFailsIfNonRecursiveAndDir ensures that the -// store cannot delete a directory if both of recursive and dir are not specified. 
-func TestStoreDeleteDirectoryFailsIfNonRecursiveAndDir(t *testing.T) { - s := v2store.New() - - s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, _err := s.Delete("/foo", false, false) - err := _err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeNotFile) - assert.Equal(t, err.Message, "Not a file") - testutil.AssertNil(t, e) -} - -func TestRootRdOnly(t *testing.T) { - s := v2store.New("/0") - - for _, tt := range []string{"/", "/0"} { - _, err := s.Set(tt, true, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNotNil(t, err) - - _, err = s.Delete(tt, true, true) - testutil.AssertNotNil(t, err) - - _, err = s.Create(tt, true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNotNil(t, err) - - _, err = s.Update(tt, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNotNil(t, err) - - _, err = s.CompareAndSwap(tt, "", 0, "", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNotNil(t, err) - } -} - -func TestStoreCompareAndDeletePrevValue(t *testing.T) { - s := v2store.New() - - var eidx uint64 = 2 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, err := s.CompareAndDelete("/foo", "bar", 0) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "compareAndDelete") - assert.Equal(t, e.Node.Key, "/foo") - - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1)) - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1)) -} - -func TestStoreCompareAndDeletePrevValueFailsIfNotMatch(t *testing.T) { - s := v2store.New() - - var eidx uint64 = 1 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, _err := s.CompareAndDelete("/foo", "baz", 0) - err := 
_err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeTestFailed) - assert.Equal(t, err.Message, "Compare failed") - testutil.AssertNil(t, e) - e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, *e.Node.Value, "bar") -} - -func TestStoreCompareAndDeletePrevIndex(t *testing.T) { - s := v2store.New() - - var eidx uint64 = 2 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, err := s.CompareAndDelete("/foo", "", 1) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "compareAndDelete") - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1)) - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1)) -} - -func TestStoreCompareAndDeletePrevIndexFailsIfNotMatch(t *testing.T) { - s := v2store.New() - - var eidx uint64 = 1 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, _err := s.CompareAndDelete("/foo", "", 100) - testutil.AssertNotNil(t, _err) - err := _err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeTestFailed) - assert.Equal(t, err.Message, "Compare failed") - testutil.AssertNil(t, e) - e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, *e.Node.Value, "bar") -} - -// TestStoreCompareAndDeleteDirectoryFail ensures that the store cannot delete a directory. 
-func TestStoreCompareAndDeleteDirectoryFail(t *testing.T) { - s := v2store.New() - - s.Create("/foo", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - _, _err := s.CompareAndDelete("/foo", "", 0) - testutil.AssertNotNil(t, _err) - err := _err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeNotFile) -} - -// TestStoreCompareAndSwapPrevValue ensures that the store can conditionally -// update a key if it has a previous value. -func TestStoreCompareAndSwapPrevValue(t *testing.T) { - s := v2store.New() - - var eidx uint64 = 2 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, err := s.CompareAndSwap("/foo", "bar", 0, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "compareAndSwap") - assert.Equal(t, *e.Node.Value, "baz") - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1)) - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1)) - - e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz") -} - -// TestStoreCompareAndSwapPrevValueFailsIfNotMatch ensure that the store cannot -// conditionally update a key if it has the wrong previous value. 
-func TestStoreCompareAndSwapPrevValueFailsIfNotMatch(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, _err := s.CompareAndSwap("/foo", "wrong_value", 0, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - err := _err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeTestFailed) - assert.Equal(t, err.Message, "Compare failed") - testutil.AssertNil(t, e) - e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "bar") - assert.Equal(t, e.EtcdIndex, eidx) -} - -// TestStoreCompareAndSwapPrevIndex ensures that the store can conditionally -// update a key if it has a previous index. -func TestStoreCompareAndSwapPrevIndex(t *testing.T) { - s := v2store.New() - var eidx uint64 = 2 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, err := s.CompareAndSwap("/foo", "", 1, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - testutil.AssertNil(t, err) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "compareAndSwap") - assert.Equal(t, *e.Node.Value, "baz") - // check prevNode - testutil.AssertNotNil(t, e.PrevNode) - assert.Equal(t, e.PrevNode.Key, "/foo") - assert.Equal(t, *e.PrevNode.Value, "bar") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1)) - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1)) - - e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz") - assert.Equal(t, e.EtcdIndex, eidx) -} - -// TestStoreCompareAndSwapPrevIndexFailsIfNotMatch ensures that the store cannot -// conditionally update a key if it has the wrong previous index. 
-func TestStoreCompareAndSwapPrevIndexFailsIfNotMatch(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e, _err := s.CompareAndSwap("/foo", "", 100, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - err := _err.(*v2error.Error) - assert.Equal(t, err.ErrorCode, v2error.EcodeTestFailed) - assert.Equal(t, err.Message, "Compare failed") - testutil.AssertNil(t, e) - e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, *e.Node.Value, "bar") -} - -// TestStoreWatchCreate ensures that the store can watch for key creation. -func TestStoreWatchCreate(t *testing.T) { - s := v2store.New() - var eidx uint64 = 0 - w, _ := s.Watch("/foo", false, false, 0) - c := w.EventChan() - assert.Equal(t, w.StartIndex(), eidx) - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - eidx = 1 - e := timeoutSelect(t, c) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/foo") - select { - case e = <-w.EventChan(): - testutil.AssertNil(t, e) - case <-time.After(100 * time.Millisecond): - } -} - -// TestStoreWatchRecursiveCreate ensures that the store -// can watch for recursive key creation. -func TestStoreWatchRecursiveCreate(t *testing.T) { - s := v2store.New() - var eidx uint64 = 0 - w, err := s.Watch("/foo", true, false, 0) - testutil.AssertNil(t, err) - assert.Equal(t, w.StartIndex(), eidx) - eidx = 1 - s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/foo/bar") -} - -// TestStoreWatchUpdate ensures that the store can watch for key updates. 
-func TestStoreWatchUpdate(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/foo", false, false, 0) - assert.Equal(t, w.StartIndex(), eidx) - eidx = 2 - s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "update") - assert.Equal(t, e.Node.Key, "/foo") -} - -// TestStoreWatchRecursiveUpdate ensures that the store can watch for recursive key updates. -func TestStoreWatchRecursiveUpdate(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, err := s.Watch("/foo", true, false, 0) - testutil.AssertNil(t, err) - assert.Equal(t, w.StartIndex(), eidx) - eidx = 2 - s.Update("/foo/bar", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "update") - assert.Equal(t, e.Node.Key, "/foo/bar") -} - -// TestStoreWatchDelete ensures that the store can watch for key deletions. -func TestStoreWatchDelete(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/foo", false, false, 0) - assert.Equal(t, w.StartIndex(), eidx) - eidx = 2 - s.Delete("/foo", false, false) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "delete") - assert.Equal(t, e.Node.Key, "/foo") -} - -// TestStoreWatchRecursiveDelete ensures that the store can watch for recursive key deletions. 
-func TestStoreWatchRecursiveDelete(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, err := s.Watch("/foo", true, false, 0) - testutil.AssertNil(t, err) - assert.Equal(t, w.StartIndex(), eidx) - eidx = 2 - s.Delete("/foo/bar", false, false) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "delete") - assert.Equal(t, e.Node.Key, "/foo/bar") -} - -// TestStoreWatchCompareAndSwap ensures that the store can watch for CAS updates. -func TestStoreWatchCompareAndSwap(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/foo", false, false, 0) - assert.Equal(t, w.StartIndex(), eidx) - eidx = 2 - s.CompareAndSwap("/foo", "bar", 0, "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "compareAndSwap") - assert.Equal(t, e.Node.Key, "/foo") -} - -// TestStoreWatchRecursiveCompareAndSwap ensures that the -// store can watch for recursive CAS updates. -func TestStoreWatchRecursiveCompareAndSwap(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - s.Create("/foo/bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/foo", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx) - eidx = 2 - s.CompareAndSwap("/foo/bar", "baz", 0, "bat", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "compareAndSwap") - assert.Equal(t, e.Node.Key, "/foo/bar") -} - -// TestStoreWatchStream ensures that the store can watch in streaming mode. 
-func TestStoreWatchStream(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - w, _ := s.Watch("/foo", false, true, 0) - // first modification - s.Create("/foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/foo") - assert.Equal(t, *e.Node.Value, "bar") - select { - case e = <-w.EventChan(): - testutil.AssertNil(t, e) - case <-time.After(100 * time.Millisecond): - } - // second modification - eidx = 2 - s.Update("/foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e = timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "update") - assert.Equal(t, e.Node.Key, "/foo") - assert.Equal(t, *e.Node.Value, "baz") - select { - case e = <-w.EventChan(): - testutil.AssertNil(t, e) - case <-time.After(100 * time.Millisecond): - } -} - -// TestStoreWatchCreateWithHiddenKey ensure that the store can -// watch for hidden keys as long as it's an exact path match. -func TestStoreWatchCreateWithHiddenKey(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - w, _ := s.Watch("/_foo", false, false, 0) - s.Create("/_foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/_foo") - select { - case e = <-w.EventChan(): - testutil.AssertNil(t, e) - case <-time.After(100 * time.Millisecond): - } -} - -// TestStoreWatchRecursiveCreateWithHiddenKey ensures that the store doesn't -// see hidden key creates without an exact path match in recursive mode. 
-func TestStoreWatchRecursiveCreateWithHiddenKey(t *testing.T) { - s := v2store.New() - w, _ := s.Watch("/foo", true, false, 0) - s.Create("/foo/_bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := nbselect(w.EventChan()) - testutil.AssertNil(t, e) - w, _ = s.Watch("/foo", true, false, 0) - s.Create("/foo/_baz", true, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - select { - case e = <-w.EventChan(): - testutil.AssertNil(t, e) - case <-time.After(100 * time.Millisecond): - } - s.Create("/foo/_baz/quux", false, "quux", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - select { - case e = <-w.EventChan(): - testutil.AssertNil(t, e) - case <-time.After(100 * time.Millisecond): - } -} - -// TestStoreWatchUpdateWithHiddenKey ensures that the store -// doesn't see hidden key updates. -func TestStoreWatchUpdateWithHiddenKey(t *testing.T) { - s := v2store.New() - s.Create("/_foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/_foo", false, false, 0) - s.Update("/_foo", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.Action, "update") - assert.Equal(t, e.Node.Key, "/_foo") - e = nbselect(w.EventChan()) - testutil.AssertNil(t, e) -} - -// TestStoreWatchRecursiveUpdateWithHiddenKey ensures that the store doesn't -// see hidden key updates without an exact path match in recursive mode. -func TestStoreWatchRecursiveUpdateWithHiddenKey(t *testing.T) { - s := v2store.New() - s.Create("/foo/_bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/foo", true, false, 0) - s.Update("/foo/_bar", "baz", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - e := nbselect(w.EventChan()) - testutil.AssertNil(t, e) -} - -// TestStoreWatchDeleteWithHiddenKey ensures that the store can watch for key deletions. 
-func TestStoreWatchDeleteWithHiddenKey(t *testing.T) { - s := v2store.New() - var eidx uint64 = 2 - s.Create("/_foo", false, "bar", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/_foo", false, false, 0) - s.Delete("/_foo", false, false) - e := timeoutSelect(t, w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "delete") - assert.Equal(t, e.Node.Key, "/_foo") - e = nbselect(w.EventChan()) - testutil.AssertNil(t, e) -} - -// TestStoreWatchRecursiveDeleteWithHiddenKey ensures that the store doesn't see -// hidden key deletes without an exact path match in recursive mode. -func TestStoreWatchRecursiveDeleteWithHiddenKey(t *testing.T) { - s := v2store.New() - s.Create("/foo/_bar", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - w, _ := s.Watch("/foo", true, false, 0) - s.Delete("/foo/_bar", false, false) - e := nbselect(w.EventChan()) - testutil.AssertNil(t, e) -} - -// TestStoreWatchRecursiveCreateDeeperThanHiddenKey ensures that the store does see -// hidden key creates if watching deeper than a hidden key in recursive mode. -func TestStoreWatchRecursiveCreateDeeperThanHiddenKey(t *testing.T) { - s := v2store.New() - var eidx uint64 = 1 - w, _ := s.Watch("/_foo/bar", true, false, 0) - s.Create("/_foo/bar/baz", false, "baz", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) - - e := timeoutSelect(t, w.EventChan()) - testutil.AssertNotNil(t, e) - assert.Equal(t, e.EtcdIndex, eidx) - assert.Equal(t, e.Action, "create") - assert.Equal(t, e.Node.Key, "/_foo/bar/baz") -} - -// TestStoreWatchSlowConsumer ensures that slow consumers are handled properly. -// -// Since Watcher.EventChan() has a buffer of size 100 we can only queue 100 -// event per watcher. If the consumer cannot consume the event on time and -// another event arrives, the channel is closed and event is discarded. 
-// This test ensures that after closing the channel, the store can continue -// to operate correctly. -func TestStoreWatchSlowConsumer(t *testing.T) { - s := v2store.New() - s.Watch("/foo", true, true, 0) // stream must be true - // Fill watch channel with 100 events - for i := 1; i <= 100; i++ { - s.Set("/foo", false, fmt.Sprint(i), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok - } - // assert.Equal(t, s.WatcherHub.count, int64(1)) - s.Set("/foo", false, "101", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok - // remove watcher - // assert.Equal(t, s.WatcherHub.count, int64(0)) - s.Set("/foo", false, "102", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // must not panic -} - -// Performs a non-blocking select on an event channel. -func nbselect(c <-chan *v2store.Event) *v2store.Event { - select { - case e := <-c: - return e - default: - return nil - } -} - -// Performs a non-blocking select on an event channel. -func timeoutSelect(t *testing.T, c <-chan *v2store.Event) *v2store.Event { - select { - case e := <-c: - return e - case <-time.After(time.Second): - t.Errorf("timed out waiting on event") - return nil - } -} diff --git a/tests/integration/v3_alarm_test.go b/tests/integration/v3_alarm_test.go deleted file mode 100644 index 4bd722eaed5..00000000000 --- a/tests/integration/v3_alarm_test.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "go.uber.org/zap/zaptest" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/lease/leasepb" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/mvcc" - "go.etcd.io/etcd/server/v3/storage/schema" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestV3StorageQuotaApply tests the V3 server respects quotas during apply -func TestV3StorageQuotaApply(t *testing.T) { - integration.BeforeTest(t) - quotasize := int64(16 * os.Getpagesize()) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 2}) - defer clus.Terminate(t) - kvc1 := integration.ToGRPC(clus.Client(1)).KV - - // Set a quota on one node - clus.Members[0].QuotaBackendBytes = quotasize - clus.Members[0].Stop(t) - clus.Members[0].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - kvc0 := integration.ToGRPC(clus.Client(0)).KV - waitForRestart(t, kvc0) - - key := []byte("abc") - - // test small put still works - smallbuf := make([]byte, 1024) - _, serr := kvc0.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) - if serr != nil { - t.Fatal(serr) - } - - // test big put - bigbuf := make([]byte, quotasize) - _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) - if err != nil { - t.Fatal(err) - } - - // quorum get should work regardless of whether alarm is raised - _, err = kvc0.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } - - // wait until alarm is raised for sure-- poll the alarms - stopc := time.After(5 * time.Second) - for { - req := &pb.AlarmRequest{Action: pb.AlarmRequest_GET} - resp, aerr := clus.Members[0].Server.Alarm(context.TODO(), req) - if aerr != nil { - t.Fatal(aerr) - } - if len(resp.Alarms) != 0 { - break - } - select { - case <-stopc: - 
t.Fatalf("timed out waiting for alarm") - case <-time.After(10 * time.Millisecond): - } - } - - // txn with non-mutating Ops should go through when NOSPACE alarm is raised - _, err = kvc0.Txn(context.TODO(), &pb.TxnRequest{ - Compare: []*pb.Compare{ - { - Key: key, - Result: pb.Compare_EQUAL, - Target: pb.Compare_CREATE, - TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 0}, - }, - }, - Success: []*pb.RequestOp{ - { - Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &pb.DeleteRangeRequest{ - Key: key, - }, - }, - }, - }, - }) - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.TODO(), integration.RequestWaitTimeout) - defer cancel() - - // small quota machine should reject put - if _, err := kvc0.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf}); err == nil { - t.Fatalf("past-quota instance should reject put") - } - - // large quota machine should reject put - if _, err := kvc1.Put(ctx, &pb.PutRequest{Key: key, Value: smallbuf}); err == nil { - t.Fatalf("past-quota instance should reject put") - } - - // reset large quota node to ensure alarm persisted - clus.Members[1].Stop(t) - clus.Members[1].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - - if _, err := kvc1.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err == nil { - t.Fatalf("alarmed instance should reject put after reset") - } -} - -// TestV3AlarmDeactivate ensures that space alarms can be deactivated so puts go through. 
-func TestV3AlarmDeactivate(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - kvc := integration.ToGRPC(clus.RandClient()).KV - mt := integration.ToGRPC(clus.RandClient()).Maintenance - - alarmReq := &pb.AlarmRequest{ - MemberID: 123, - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - if _, err := mt.Alarm(context.TODO(), alarmReq); err != nil { - t.Fatal(err) - } - - key := []byte("abc") - smallbuf := make([]byte, 512) - _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}) - if err == nil && !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) { - t.Fatalf("put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace) - } - - alarmReq.Action = pb.AlarmRequest_DEACTIVATE - if _, err = mt.Alarm(context.TODO(), alarmReq); err != nil { - t.Fatal(err) - } - - if _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil { - t.Fatal(err) - } -} - -func TestV3CorruptAlarm(t *testing.T) { - integration.BeforeTest(t) - lg := zaptest.NewLogger(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - var wg sync.WaitGroup - wg.Add(10) - for i := 0; i < 10; i++ { - go func() { - defer wg.Done() - if _, err := clus.Client(0).Put(context.TODO(), "k", "v"); err != nil { - t.Error(err) - } - }() - } - wg.Wait() - - // Corrupt member 0 by modifying backend offline. - clus.Members[0].Stop(t) - fp := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db") - be := backend.NewDefaultBackend(lg, fp) - s := mvcc.NewStore(lg, be, nil, mvcc.StoreConfig{}) - // NOTE: cluster_proxy mode with namespacing won't set 'k', but namespace/'k'. 
- s.Put([]byte("abc"), []byte("def"), 0) - s.Put([]byte("xyz"), []byte("123"), 0) - s.Compact(traceutil.TODO(), 5) - s.Commit() - s.Close() - be.Close() - - clus.Members[1].WaitOK(t) - clus.Members[2].WaitOK(t) - time.Sleep(time.Second * 2) - - // Wait for cluster so Puts succeed in case member 0 was the leader. - if _, err := clus.Client(1).Get(context.TODO(), "k"); err != nil { - t.Fatal(err) - } - if _, err := clus.Client(1).Put(context.TODO(), "xyz", "321"); err != nil { - t.Fatal(err) - } - if _, err := clus.Client(1).Put(context.TODO(), "abc", "fed"); err != nil { - t.Fatal(err) - } - - // Restart with corruption checking enabled. - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - for _, m := range clus.Members { - m.CorruptCheckTime = time.Second - m.Restart(t) - } - clus.WaitLeader(t) - time.Sleep(time.Second * 2) - - clus.Members[0].WaitStarted(t) - resp0, err0 := clus.Client(0).Get(context.TODO(), "abc") - if err0 != nil { - t.Fatal(err0) - } - clus.Members[1].WaitStarted(t) - resp1, err1 := clus.Client(1).Get(context.TODO(), "abc") - if err1 != nil { - t.Fatal(err1) - } - - if resp0.Kvs[0].ModRevision == resp1.Kvs[0].ModRevision { - t.Fatalf("matching ModRevision values") - } - - for i := 0; i < 5; i++ { - presp, perr := clus.Client(0).Put(context.TODO(), "abc", "aaa") - if perr != nil { - if !eqErrGRPC(perr, rpctypes.ErrCorrupt) { - t.Fatalf("expected %v, got %+v (%v)", rpctypes.ErrCorrupt, presp, perr) - } else { - return - } - } - time.Sleep(time.Second) - } - t.Fatalf("expected error %v after %s", rpctypes.ErrCorrupt, 5*time.Second) -} - -func TestV3CorruptAlarmWithLeaseCorrupted(t *testing.T) { - integration.BeforeTest(t) - lg := zaptest.NewLogger(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{ - CorruptCheckTime: time.Second, - Size: 3, - SnapshotCount: 10, - SnapshotCatchUpEntries: 5, - DisableStrictReconfigCheck: true, - }) - defer clus.Terminate(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() 
- - lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{ID: 1, TTL: 60}) - if err != nil { - t.Errorf("could not create lease 1 (%v)", err) - } - if lresp.ID != 1 { - t.Errorf("got id %v, wanted id %v", lresp.ID, 1) - } - - putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID} - // Trigger snapshot from the leader to new member - for i := 0; i < 15; i++ { - _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr) - if err != nil { - t.Errorf("#%d: couldn't put key (%v)", i, err) - } - } - - if err := clus.RemoveMember(t, clus.Client(1), uint64(clus.Members[2].ID())); err != nil { - t.Fatal(err) - } - clus.WaitMembersForLeader(t, clus.Members) - - clus.AddMember(t) - clus.WaitMembersForLeader(t, clus.Members) - // Wait for new member to catch up - integration.WaitClientV3(t, clus.Members[2].Client) - - // Corrupt member 2 by modifying backend lease bucket offline. - clus.Members[2].Stop(t) - fp := filepath.Join(clus.Members[2].DataDir, "member", "snap", "db") - bcfg := backend.DefaultBackendConfig(lg) - bcfg.Path = fp - be := backend.New(bcfg) - - olpb := leasepb.Lease{ID: int64(1), TTL: 60} - tx := be.BatchTx() - schema.UnsafeDeleteLease(tx, &olpb) - lpb := leasepb.Lease{ID: int64(2), TTL: 60} - schema.MustUnsafePutLease(tx, &lpb) - tx.Commit() - - if err := be.Close(); err != nil { - t.Fatal(err) - } - - if err := clus.Members[2].Restart(t); err != nil { - t.Fatal(err) - } - - clus.Members[1].WaitOK(t) - clus.Members[2].WaitOK(t) - - // Revoke lease should remove key except the member with corruption - _, err = integration.ToGRPC(clus.Members[0].Client).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } - resp0, err0 := clus.Members[1].Client.KV.Get(context.TODO(), "foo") - if err0 != nil { - t.Fatal(err0) - } - resp1, err1 := clus.Members[2].Client.KV.Get(context.TODO(), "foo") - if err1 != nil { - t.Fatal(err1) - } - - if 
resp0.Header.Revision == resp1.Header.Revision { - t.Fatalf("matching Revision values") - } - - // Wait for CorruptCheckTime - time.Sleep(time.Second) - presp, perr := clus.Client(0).Put(context.TODO(), "abc", "aaa") - if perr != nil { - if !eqErrGRPC(perr, rpctypes.ErrCorrupt) { - t.Fatalf("expected %v, got %+v (%v)", rpctypes.ErrCorrupt, presp, perr) - } else { - return - } - } -} diff --git a/tests/integration/v3_auth_test.go b/tests/integration/v3_auth_test.go deleted file mode 100644 index b0d3fb09d4e..00000000000 --- a/tests/integration/v3_auth_test.go +++ /dev/null @@ -1,587 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "go.etcd.io/etcd/api/v3/authpb" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/testutil" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestV3AuthEmptyUserGet ensures that a get with an empty user will return an empty user error. 
-func TestV3AuthEmptyUserGet(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - - api := integration.ToGRPC(clus.Client(0)) - authSetupRoot(t, api.Auth) - - _, err := api.KV.Range(ctx, &pb.RangeRequest{Key: []byte("abc")}) - if !eqErrGRPC(err, rpctypes.ErrUserEmpty) { - t.Fatalf("got %v, expected %v", err, rpctypes.ErrUserEmpty) - } -} - -// TestV3AuthEmptyUserPut ensures that a put with an empty user will return an empty user error, -// and the consistent_index should be moved forward even the apply-->Put fails. -func TestV3AuthEmptyUserPut(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{ - Size: 1, - SnapshotCount: 3, - }) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - - api := integration.ToGRPC(clus.Client(0)) - authSetupRoot(t, api.Auth) - - // The SnapshotCount is 3, so there must be at least 3 new snapshot files being created. - // The VERIFY logic will check whether the consistent_index >= last snapshot index on - // cluster terminating. 
- for i := 0; i < 10; i++ { - _, err := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) - if !eqErrGRPC(err, rpctypes.ErrUserEmpty) { - t.Fatalf("got %v, expected %v", err, rpctypes.ErrUserEmpty) - } - } -} - -// TestV3AuthTokenWithDisable tests that auth won't crash if -// given a valid token when authentication is disabled -func TestV3AuthTokenWithDisable(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - - c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) - if cerr != nil { - t.Fatal(cerr) - } - defer c.Close() - - rctx, cancel := context.WithCancel(context.TODO()) - donec := make(chan struct{}) - go func() { - defer close(donec) - for rctx.Err() == nil { - c.Put(rctx, "abc", "def") - } - }() - - time.Sleep(10 * time.Millisecond) - if _, err := c.AuthDisable(context.TODO()); err != nil { - t.Fatal(err) - } - time.Sleep(10 * time.Millisecond) - - cancel() - <-donec -} - -func TestV3AuthRevision(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - api := integration.ToGRPC(clus.Client(0)) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - presp, perr := api.KV.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) - cancel() - if perr != nil { - t.Fatal(perr) - } - rev := presp.Header.Revision - - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) - aresp, aerr := api.Auth.UserAdd(ctx, &pb.AuthUserAddRequest{Name: "root", Password: "123", Options: &authpb.UserAddOptions{NoPassword: false}}) - cancel() - if aerr != nil { - t.Fatal(aerr) - } - if aresp.Header.Revision != rev { - t.Fatalf("revision expected %d, got %d", rev, aresp.Header.Revision) - } -} - -// 
TestV3AuthWithLeaseRevokeWithRoot ensures that granted leases -// with root user be revoked after TTL. -func TestV3AuthWithLeaseRevokeWithRoot(t *testing.T) { - testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1}) -} - -// TestV3AuthWithLeaseRevokeWithRootJWT creates a lease with a JWT-token enabled cluster. -// And tests if server is able to revoke expiry lease item. -func TestV3AuthWithLeaseRevokeWithRootJWT(t *testing.T) { - testV3AuthWithLeaseRevokeWithRoot(t, integration.ClusterConfig{Size: 1, AuthToken: integration.DefaultTokenJWT}) -} - -func testV3AuthWithLeaseRevokeWithRoot(t *testing.T, ccfg integration.ClusterConfig) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &ccfg) - defer clus.Terminate(t) - - api := integration.ToGRPC(clus.Client(0)) - authSetupRoot(t, api.Auth) - - rootc, cerr := integration.NewClient(t, clientv3.Config{ - Endpoints: clus.Client(0).Endpoints(), - Username: "root", - Password: "123", - }) - if cerr != nil { - t.Fatal(cerr) - } - defer rootc.Close() - - leaseResp, err := rootc.Grant(context.TODO(), 2) - if err != nil { - t.Fatal(err) - } - leaseID := leaseResp.ID - - if _, err = rootc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(leaseID)); err != nil { - t.Fatal(err) - } - - // wait for lease expire - time.Sleep(3 * time.Second) - - tresp, terr := api.Lease.LeaseTimeToLive( - context.TODO(), - &pb.LeaseTimeToLiveRequest{ - ID: int64(leaseID), - Keys: true, - }, - ) - if terr != nil { - t.Error(terr) - } - if len(tresp.Keys) > 0 || tresp.GrantedTTL != 0 { - t.Errorf("lease %016x should have been revoked, got %+v", leaseID, tresp) - } - if tresp.TTL != -1 { - t.Errorf("lease %016x should have been expired, got %+v", leaseID, tresp) - } -} - -type user struct { - name string - password string - role string - key string - end string -} - -func TestV3AuthWithLeaseRevoke(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - 
defer clus.Terminate(t) - - users := []user{ - { - name: "user1", - password: "user1-123", - role: "role1", - key: "k1", - end: "k2", - }, - } - authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users) - - authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - - rootc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) - if cerr != nil { - t.Fatal(cerr) - } - defer rootc.Close() - - leaseResp, err := rootc.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } - leaseID := leaseResp.ID - // permission of k3 isn't granted to user1 - _, err = rootc.Put(context.TODO(), "k3", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } - - userc, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) - if cerr != nil { - t.Fatal(cerr) - } - defer userc.Close() - _, err = userc.Revoke(context.TODO(), leaseID) - if err == nil { - t.Fatal("revoking from user1 should be failed with permission denied") - } -} - -func TestV3AuthWithLeaseAttach(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - users := []user{ - { - name: "user1", - password: "user1-123", - role: "role1", - key: "k1", - end: "k3", - }, - { - name: "user2", - password: "user2-123", - role: "role2", - key: "k2", - end: "k4", - }, - } - authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users) - - authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - - user1c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) - if cerr != nil { - t.Fatal(cerr) - } - defer user1c.Close() - - user2c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"}) - if cerr != nil { - t.Fatal(cerr) 
- } - defer user2c.Close() - - leaseResp, err := user1c.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } - leaseID := leaseResp.ID - // permission of k2 is also granted to user2 - _, err = user1c.Put(context.TODO(), "k2", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } - - _, err = user2c.Revoke(context.TODO(), leaseID) - if err != nil { - t.Fatal(err) - } - - leaseResp, err = user1c.Grant(context.TODO(), 90) - if err != nil { - t.Fatal(err) - } - leaseID = leaseResp.ID - // permission of k1 isn't granted to user2 - _, err = user1c.Put(context.TODO(), "k1", "val", clientv3.WithLease(leaseID)) - if err != nil { - t.Fatal(err) - } - - _, err = user2c.Revoke(context.TODO(), leaseID) - if err == nil { - t.Fatal("revoking from user2 should be failed with permission denied") - } -} - -func authSetupUsers(t *testing.T, auth pb.AuthClient, users []user) { - for _, user := range users { - if _, err := auth.UserAdd(context.TODO(), &pb.AuthUserAddRequest{Name: user.name, Password: user.password, Options: &authpb.UserAddOptions{NoPassword: false}}); err != nil { - t.Fatal(err) - } - if _, err := auth.RoleAdd(context.TODO(), &pb.AuthRoleAddRequest{Name: user.role}); err != nil { - t.Fatal(err) - } - if _, err := auth.UserGrantRole(context.TODO(), &pb.AuthUserGrantRoleRequest{User: user.name, Role: user.role}); err != nil { - t.Fatal(err) - } - - if len(user.key) == 0 { - continue - } - - perm := &authpb.Permission{ - PermType: authpb.READWRITE, - Key: []byte(user.key), - RangeEnd: []byte(user.end), - } - if _, err := auth.RoleGrantPermission(context.TODO(), &pb.AuthRoleGrantPermissionRequest{Name: user.role, Perm: perm}); err != nil { - t.Fatal(err) - } - } -} - -func authSetupRoot(t *testing.T, auth pb.AuthClient) { - root := []user{ - { - name: "root", - password: "123", - role: "root", - key: "", - }, - } - authSetupUsers(t, auth, root) - if _, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{}); err != nil { - t.Fatal(err) 
- } -} - -func TestV3AuthNonAuthorizedRPCs(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - nonAuthedKV := clus.Client(0).KV - - key := "foo" - val := "bar" - _, err := nonAuthedKV.Put(context.TODO(), key, val) - if err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - - authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - - respput, err := nonAuthedKV.Put(context.TODO(), key, val) - if !eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty) { - t.Fatalf("could put key (%v), it should cause an error of permission denied", respput) - } -} - -func TestV3AuthOldRevConcurrent(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - - c, cerr := integration.NewClient(t, clientv3.Config{ - Endpoints: clus.Client(0).Endpoints(), - DialTimeout: 5 * time.Second, - Username: "root", - Password: "123", - }) - testutil.AssertNil(t, cerr) - defer c.Close() - - var wg sync.WaitGroup - f := func(i int) { - defer wg.Done() - role, user := fmt.Sprintf("test-role-%d", i), fmt.Sprintf("test-user-%d", i) - _, err := c.RoleAdd(context.TODO(), role) - testutil.AssertNil(t, err) - _, err = c.RoleGrantPermission(context.TODO(), role, "", clientv3.GetPrefixRangeEnd(""), clientv3.PermissionType(clientv3.PermReadWrite)) - testutil.AssertNil(t, err) - _, err = c.UserAdd(context.TODO(), user, "123") - testutil.AssertNil(t, err) - _, err = c.Put(context.TODO(), "a", "b") - testutil.AssertNil(t, err) - } - // needs concurrency to trigger - numRoles := 2 - wg.Add(numRoles) - for i := 0; i < numRoles; i++ { - go f(i) - } - wg.Wait() -} - -func TestV3AuthRestartMember(t *testing.T) { - integration.BeforeTest(t) - - // create a cluster with 1 member - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - // 
create a client - c, cerr := integration.NewClient(t, clientv3.Config{ - Endpoints: clus.Client(0).Endpoints(), - DialTimeout: 5 * time.Second, - }) - testutil.AssertNil(t, cerr) - defer c.Close() - - authData := []struct { - user string - role string - pass string - }{ - { - user: "root", - role: "root", - pass: "123", - }, - { - user: "user0", - role: "role0", - pass: "123", - }, - } - - for _, authObj := range authData { - // add a role - _, err := c.RoleAdd(context.TODO(), authObj.role) - testutil.AssertNil(t, err) - // add a user - _, err = c.UserAdd(context.TODO(), authObj.user, authObj.pass) - testutil.AssertNil(t, err) - // grant role to user - _, err = c.UserGrantRole(context.TODO(), authObj.user, authObj.role) - testutil.AssertNil(t, err) - } - - // role grant permission to role0 - _, err := c.RoleGrantPermission(context.TODO(), authData[1].role, "foo", "", clientv3.PermissionType(clientv3.PermReadWrite)) - testutil.AssertNil(t, err) - - // enable auth - _, err = c.AuthEnable(context.TODO()) - testutil.AssertNil(t, err) - - // create another client with ID:Password - c2, cerr := integration.NewClient(t, clientv3.Config{ - Endpoints: clus.Client(0).Endpoints(), - DialTimeout: 5 * time.Second, - Username: authData[1].user, - Password: authData[1].pass, - }) - testutil.AssertNil(t, cerr) - defer c2.Close() - - // create foo since that is within the permission set - // expectation is to succeed - _, err = c2.Put(context.TODO(), "foo", "bar") - testutil.AssertNil(t, err) - - clus.Members[0].Stop(t) - err = clus.Members[0].Restart(t) - testutil.AssertNil(t, err) - integration.WaitClientV3WithKey(t, c2.KV, "foo") - - // nothing has changed, but it fails without refreshing cache after restart - _, err = c2.Put(context.TODO(), "foo", "bar2") - testutil.AssertNil(t, err) -} - -func TestV3AuthWatchAndTokenExpire(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, AuthTokenTTL: 3}) - defer 
clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) - defer cancel() - - authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - - c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) - if cerr != nil { - t.Fatal(cerr) - } - defer c.Close() - - _, err := c.Put(ctx, "key", "val") - if err != nil { - t.Fatalf("Unexpected error from Put: %v", err) - } - - // The first watch gets a valid auth token through watcher.newWatcherGrpcStream() - // We should discard the first one by waiting TTL after the first watch. - wChan := c.Watch(ctx, "key", clientv3.WithRev(1)) - watchResponse := <-wChan - - time.Sleep(5 * time.Second) - - wChan = c.Watch(ctx, "key", clientv3.WithRev(1)) - watchResponse = <-wChan - testutil.AssertNil(t, watchResponse.Err()) -} - -func TestV3AuthWatchErrorAndWatchId0(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) - defer cancel() - - users := []user{ - { - name: "user1", - password: "user1-123", - role: "role1", - key: "k1", - end: "k2", - }, - } - authSetupUsers(t, integration.ToGRPC(clus.Client(0)).Auth, users) - - authSetupRoot(t, integration.ToGRPC(clus.Client(0)).Auth) - - c, cerr := integration.NewClient(t, clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) - if cerr != nil { - t.Fatal(cerr) - } - defer c.Close() - - watchStartCh, watchEndCh := make(chan interface{}), make(chan interface{}) - - go func() { - wChan := c.Watch(ctx, "k1", clientv3.WithRev(1)) - watchStartCh <- struct{}{} - watchResponse := <-wChan - t.Logf("watch response from k1: %v", watchResponse) - testutil.AssertTrue(t, len(watchResponse.Events) != 0) - watchEndCh <- struct{}{} - }() - - // Chan for making sure that the above goroutine invokes Watch() - // 
So the above Watch() can get watch ID = 0 - <-watchStartCh - - wChan := c.Watch(ctx, "non-allowed-key", clientv3.WithRev(1)) - watchResponse := <-wChan - testutil.AssertNotNil(t, watchResponse.Err()) // permission denied - - _, err := c.Put(ctx, "k1", "val") - if err != nil { - t.Fatalf("Unexpected error from Put: %v", err) - } - - <-watchEndCh -} diff --git a/tests/integration/v3_election_test.go b/tests/integration/v3_election_test.go deleted file mode 100644 index b1479b260b7..00000000000 --- a/tests/integration/v3_election_test.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "fmt" - "testing" - "time" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestElectionWait tests if followers can correctly wait for elections. 
-func TestElectionWait(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - leaders := 3 - followers := 3 - var clients []*clientv3.Client - newClient := integration.MakeMultiNodeClients(t, clus, &clients) - defer func() { - integration.CloseClients(t, clients) - }() - - electedc := make(chan string) - var nextc []chan struct{} - - // wait for all elections - donec := make(chan struct{}) - for i := 0; i < followers; i++ { - nextc = append(nextc, make(chan struct{})) - go func(ch chan struct{}) { - for j := 0; j < leaders; j++ { - session, err := concurrency.NewSession(newClient()) - if err != nil { - t.Error(err) - } - b := concurrency.NewElection(session, "test-election") - - cctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - s, ok := <-b.Observe(cctx) - if !ok { - t.Errorf("could not observe election; channel closed") - } - electedc <- string(s.Kvs[0].Value) - // wait for next election round - <-ch - session.Orphan() - } - donec <- struct{}{} - }(nextc[i]) - } - - // elect some leaders - for i := 0; i < leaders; i++ { - go func() { - session, err := concurrency.NewSession(newClient()) - if err != nil { - t.Error(err) - } - defer session.Orphan() - - e := concurrency.NewElection(session, "test-election") - ev := fmt.Sprintf("electval-%v", time.Now().UnixNano()) - if err := e.Campaign(context.TODO(), ev); err != nil { - t.Errorf("failed volunteer (%v)", err) - } - // wait for followers to accept leadership - for j := 0; j < followers; j++ { - s := <-electedc - if s != ev { - t.Errorf("wrong election value got %s, wanted %s", s, ev) - } - } - // let next leader take over - if err := e.Resign(context.TODO()); err != nil { - t.Errorf("failed resign (%v)", err) - } - // tell followers to start listening for next leader - for j := 0; j < followers; j++ { - nextc[j] <- struct{}{} - } - }() - } - - // wait on followers - for i := 0; i < followers; i++ { - 
<-donec - } -} - -// TestElectionFailover tests that an election will -func TestElectionFailover(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - cctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - ss := make([]*concurrency.Session, 3) - - for i := 0; i < 3; i++ { - var err error - ss[i], err = concurrency.NewSession(clus.Client(i)) - if err != nil { - t.Error(err) - } - defer ss[i].Orphan() - } - - // first leader (elected) - e := concurrency.NewElection(ss[0], "test-election") - if err := e.Campaign(context.TODO(), "foo"); err != nil { - t.Fatalf("failed volunteer (%v)", err) - } - - // check first leader - resp, ok := <-e.Observe(cctx) - if !ok { - t.Fatalf("could not wait for first election; channel closed") - } - s := string(resp.Kvs[0].Value) - if s != "foo" { - t.Fatalf("wrong election result. got %s, wanted foo", s) - } - - // next leader - electedErrC := make(chan error, 1) - go func() { - ee := concurrency.NewElection(ss[1], "test-election") - eer := ee.Campaign(context.TODO(), "bar") - electedErrC <- eer // If eer != nil, the test will fail by calling t.Fatal(eer) - }() - - // invoke leader failover - if err := ss[0].Close(); err != nil { - t.Fatal(err) - } - - // check new leader - e = concurrency.NewElection(ss[2], "test-election") - resp, ok = <-e.Observe(cctx) - if !ok { - t.Fatalf("could not wait for second election; channel closed") - } - s = string(resp.Kvs[0].Value) - if s != "bar" { - t.Fatalf("wrong election result. got %s, wanted bar", s) - } - - // leader must ack election (otherwise, Campaign may see closed conn) - eer := <-electedErrC - if eer != nil { - t.Fatal(eer) - } -} - -// TestElectionSessionRecampaign ensures that campaigning twice on the same election -// with the same lock will Proclaim instead of deadlocking. 
-func TestElectionSessionRecampaign(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - cli := clus.RandClient() - - session, err := concurrency.NewSession(cli) - if err != nil { - t.Error(err) - } - defer session.Orphan() - - e := concurrency.NewElection(session, "test-elect") - if err := e.Campaign(context.TODO(), "abc"); err != nil { - t.Fatal(err) - } - e2 := concurrency.NewElection(session, "test-elect") - if err := e2.Campaign(context.TODO(), "def"); err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - if resp := <-e.Observe(ctx); len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) != "def" { - t.Fatalf("expected value=%q, got response %v", "def", resp) - } -} - -// TestElectionOnPrefixOfExistingKey checks that a single -// candidate can be elected on a new key that is a prefix -// of an existing key. To wit, check for regression -// of bug #6278. https://github.com/etcd-io/etcd/issues/6278 -func TestElectionOnPrefixOfExistingKey(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.RandClient() - if _, err := cli.Put(context.TODO(), "testa", "value"); err != nil { - t.Fatal(err) - } - s, serr := concurrency.NewSession(cli) - if serr != nil { - t.Fatal(serr) - } - e := concurrency.NewElection(s, "test") - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - err := e.Campaign(ctx, "abc") - cancel() - if err != nil { - // after 5 seconds, deadlock results in - // 'context deadline exceeded' here. - t.Fatal(err) - } -} - -// TestElectionOnSessionRestart tests that a quick restart of leader (resulting -// in a new session with the same lease id) does not result in loss of -// leadership. 
-func TestElectionOnSessionRestart(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - cli := clus.RandClient() - - session, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } - - e := concurrency.NewElection(session, "test-elect") - if cerr := e.Campaign(context.TODO(), "abc"); cerr != nil { - t.Fatal(cerr) - } - - // ensure leader is not lost to waiter on fail-over - waitSession, werr := concurrency.NewSession(cli) - if werr != nil { - t.Fatal(werr) - } - defer waitSession.Orphan() - waitCtx, waitCancel := context.WithTimeout(context.TODO(), 5*time.Second) - defer waitCancel() - go concurrency.NewElection(waitSession, "test-elect").Campaign(waitCtx, "123") - - // simulate restart by reusing the lease from the old session - newSession, nerr := concurrency.NewSession(cli, concurrency.WithLease(session.Lease())) - if nerr != nil { - t.Fatal(nerr) - } - defer newSession.Orphan() - - newElection := concurrency.NewElection(newSession, "test-elect") - if ncerr := newElection.Campaign(context.TODO(), "def"); ncerr != nil { - t.Fatal(ncerr) - } - - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - defer cancel() - if resp := <-newElection.Observe(ctx); len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) != "def" { - t.Errorf("expected value=%q, got response %v", "def", resp) - } -} - -// TestElectionObserveCompacted checks that observe can tolerate -// a leader key with a modrev less than the compaction revision. 
-func TestElectionObserveCompacted(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - - session, err := concurrency.NewSession(cli) - if err != nil { - t.Fatal(err) - } - defer session.Orphan() - - e := concurrency.NewElection(session, "test-elect") - if cerr := e.Campaign(context.TODO(), "abc"); cerr != nil { - t.Fatal(cerr) - } - - presp, perr := cli.Put(context.TODO(), "foo", "bar") - if perr != nil { - t.Fatal(perr) - } - if _, cerr := cli.Compact(context.TODO(), presp.Header.Revision); cerr != nil { - t.Fatal(cerr) - } - - v, ok := <-e.Observe(context.TODO()) - if !ok { - t.Fatal("failed to observe on compacted revision") - } - if string(v.Kvs[0].Value) != "abc" { - t.Fatalf(`expected leader value "abc", got %q`, string(v.Kvs[0].Value)) - } -} diff --git a/tests/integration/v3_failover_test.go b/tests/integration/v3_failover_test.go deleted file mode 100644 index 9d271bd9fa9..00000000000 --- a/tests/integration/v3_failover_test.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "bytes" - "context" - "crypto/tls" - "testing" - "time" - - "google.golang.org/grpc" - - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - clientv3 "go.etcd.io/etcd/client/v3" - integration2 "go.etcd.io/etcd/tests/v3/framework/integration" - clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3" -) - -func TestFailover(t *testing.T) { - cases := []struct { - name string - testFunc func(*testing.T, *tls.Config, *integration2.Cluster) (*clientv3.Client, error) - }{ - { - name: "create client before the first server down", - testFunc: createClientBeforeServerDown, - }, - { - name: "create client after the first server down", - testFunc: createClientAfterServerDown, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - t.Logf("Starting test [%s]", tc.name) - integration2.BeforeTest(t) - - // Launch an etcd cluster with 3 members - t.Logf("Launching an etcd cluster with 3 members [%s]", tc.name) - clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, ClientTLS: &integration2.TestTLSInfo}) - defer clus.Terminate(t) - - cc, err := integration2.TestTLSInfo.ClientConfig() - if err != nil { - t.Fatal(err) - } - // Create an etcd client before or after first server down - t.Logf("Creating an etcd client [%s]", tc.name) - cli, err := tc.testFunc(t, cc, clus) - if err != nil { - t.Fatalf("Failed to create client: %v", err) - } - defer cli.Close() - - // Sanity test - t.Logf("Running sanity test [%s]", tc.name) - key, val := "key1", "val1" - putWithRetries(t, cli, key, val, 10) - getWithRetries(t, cli, key, val, 10) - - t.Logf("Test done [%s]", tc.name) - }) - } -} - -func createClientBeforeServerDown(t *testing.T, cc *tls.Config, clus *integration2.Cluster) (*clientv3.Client, error) { - cli, err := createClient(t, cc, clus) - if err != nil { - return nil, err - } - clus.Members[0].Close() - return cli, nil -} - -func createClientAfterServerDown(t *testing.T, cc *tls.Config, clus 
*integration2.Cluster) (*clientv3.Client, error) { - clus.Members[0].Close() - return createClient(t, cc, clus) -} - -func createClient(t *testing.T, cc *tls.Config, clus *integration2.Cluster) (*clientv3.Client, error) { - cli, err := integration2.NewClient(t, clientv3.Config{ - Endpoints: clus.Endpoints(), - DialTimeout: 5 * time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - TLS: cc, - }) - if err != nil { - return nil, err - } - - return cli, nil -} - -func putWithRetries(t *testing.T, cli *clientv3.Client, key, val string, retryCount int) { - for retryCount > 0 { - // put data test - err := func() error { - t.Log("Sanity test, putting data") - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - if _, putErr := cli.Put(ctx, key, val); putErr != nil { - t.Logf("Failed to put data (%v)", putErr) - return putErr - } - return nil - }() - - if err != nil { - retryCount-- - if shouldRetry(err) { - continue - } else { - t.Fatal(err) - } - } - break - } -} - -func getWithRetries(t *testing.T, cli *clientv3.Client, key, val string, retryCount int) { - for retryCount > 0 { - // get data test - err := func() error { - t.Log("Sanity test, getting data") - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - resp, getErr := cli.Get(ctx, key) - if getErr != nil { - t.Logf("Failed to get key (%v)", getErr) - return getErr - } - if len(resp.Kvs) != 1 { - t.Fatalf("Expected 1 key, got %d", len(resp.Kvs)) - } - if !bytes.Equal([]byte(val), resp.Kvs[0].Value) { - t.Fatalf("Unexpected value, expected: %s, got: %s", val, string(resp.Kvs[0].Value)) - } - return nil - }() - - if err != nil { - retryCount-- - if shouldRetry(err) { - continue - } else { - t.Fatal(err) - } - } - break - } -} - -func shouldRetry(err error) bool { - if clientv3test.IsClientTimeout(err) || clientv3test.IsServerCtxTimeout(err) || - err == rpctypes.ErrTimeout || err == rpctypes.ErrTimeoutDueToLeaderFail { - return 
true - } - return false -} diff --git a/tests/integration/v3_grpc_inflight_test.go b/tests/integration/v3_grpc_inflight_test.go deleted file mode 100644 index 7968e614edc..00000000000 --- a/tests/integration/v3_grpc_inflight_test.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "sync" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestV3MaintenanceDefragmentInflightRange ensures inflight range requests -// does not panic the mvcc backend while defragment is running. 
-func TestV3MaintenanceDefragmentInflightRange(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.RandClient() - kvc := integration.ToGRPC(cli).KV - if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - - donec := make(chan struct{}) - go func() { - defer close(donec) - kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo")}) - }() - - mvc := integration.ToGRPC(cli).Maintenance - mvc.Defragment(context.Background(), &pb.DefragmentRequest{}) - cancel() - - <-donec -} - -// TestV3KVInflightRangeRequests ensures that inflight requests -// (sent before server shutdown) are gracefully handled by server-side. -// They are either finished or canceled, but never crash the backend. -// See https://github.com/etcd-io/etcd/issues/7322 for more detail. 
-func TestV3KVInflightRangeRequests(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - cli := clus.RandClient() - kvc := integration.ToGRPC(cli).KV - - if _, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - - reqN := 10 // use 500+ for fast machine - var wg sync.WaitGroup - wg.Add(reqN) - for i := 0; i < reqN; i++ { - go func() { - defer wg.Done() - _, err := kvc.Range(ctx, &pb.RangeRequest{Key: []byte("foo"), Serializable: true}, grpc.WaitForReady(true)) - if err != nil { - errCode := status.Convert(err).Code() - errDesc := rpctypes.ErrorDesc(err) - if err != nil && !(errDesc == context.Canceled.Error() || errCode == codes.Canceled || errCode == codes.Unavailable) { - t.Errorf("inflight request should be canceled with '%v' or code Canceled or Unavailable, got '%v' with code '%s'", context.Canceled.Error(), errDesc, errCode) - } - } - }() - } - - clus.Members[0].Stop(t) - cancel() - - wg.Wait() -} diff --git a/tests/integration/v3_grpc_test.go b/tests/integration/v3_grpc_test.go deleted file mode 100644 index a6dfb940421..00000000000 --- a/tests/integration/v3_grpc_test.go +++ /dev/null @@ -1,1955 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "bytes" - "context" - "fmt" - "math/rand" - "os" - "reflect" - "strings" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/transport" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/integration" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// TestV3PutOverwrite puts a key with the v3 api to a random Cluster member, -// overwrites it, then checks that the change was applied. -func TestV3PutOverwrite(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - key := []byte("foo") - reqput := &pb.PutRequest{Key: key, Value: []byte("bar"), PrevKv: true} - - respput, err := kvc.Put(context.TODO(), reqput) - if err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - - // overwrite - reqput.Value = []byte("baz") - respput2, err := kvc.Put(context.TODO(), reqput) - if err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - if respput2.Header.Revision <= respput.Header.Revision { - t.Fatalf("expected newer revision on overwrite, got %v <= %v", - respput2.Header.Revision, respput.Header.Revision) - } - if pkv := respput2.PrevKv; pkv == nil || string(pkv.Value) != "bar" { - t.Fatalf("expected PrevKv=bar, got response %+v", respput2) - } - - reqrange := &pb.RangeRequest{Key: key} - resprange, err := kvc.Range(context.TODO(), reqrange) - if err != nil { - t.Fatalf("couldn't get key (%v)", err) - } - if len(resprange.Kvs) != 1 { - t.Fatalf("expected 1 key, got %v", len(resprange.Kvs)) - } - - kv := resprange.Kvs[0] - if kv.ModRevision <= kv.CreateRevision { - t.Errorf("expected modRev > createRev, got %d <= %d", - kv.ModRevision, 
kv.CreateRevision) - } - if !reflect.DeepEqual(reqput.Value, kv.Value) { - t.Errorf("expected value %v, got %v", reqput.Value, kv.Value) - } -} - -// TestV3PutRestart checks if a put after an unrelated member restart succeeds -func TestV3PutRestart(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - kvIdx := rand.Intn(3) - kvc := integration.ToGRPC(clus.Client(kvIdx)).KV - - stopIdx := kvIdx - for stopIdx == kvIdx { - stopIdx = rand.Intn(3) - } - - clus.Client(stopIdx).Close() - clus.Members[stopIdx].Stop(t) - clus.Members[stopIdx].Restart(t) - c, cerr := integration.NewClientV3(clus.Members[stopIdx]) - if cerr != nil { - t.Fatalf("cannot create client: %v", cerr) - } - clus.Members[stopIdx].ServerClient = c - - ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) - defer cancel() - reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - _, err := kvc.Put(ctx, reqput) - if err != nil && err == ctx.Err() { - t.Fatalf("expected grpc error, got local ctx error (%v)", err) - } -} - -// TestV3CompactCurrentRev ensures keys are present when compacting on current revision. 
-func TestV3CompactCurrentRev(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - for i := 0; i < 3; i++ { - if _, err := kvc.Put(context.Background(), preq); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - } - // get key to add to proxy cache, if any - if _, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil { - t.Fatal(err) - } - // compact on current revision - _, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 4}) - if err != nil { - t.Fatalf("couldn't compact kv space (%v)", err) - } - // key still exists when linearized? - _, err = kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatalf("couldn't get key after compaction (%v)", err) - } - // key still exists when serialized? - _, err = kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo"), Serializable: true}) - if err != nil { - t.Fatalf("couldn't get serialized key after compaction (%v)", err) - } -} - -// TestV3HashKV ensures that multiple calls of HashKV on same node return same hash and compact rev. 
-func TestV3HashKV(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - mvc := integration.ToGRPC(clus.RandClient()).Maintenance - - for i := 0; i < 10; i++ { - resp, err := kvc.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte(fmt.Sprintf("bar%d", i))}) - if err != nil { - t.Fatal(err) - } - - rev := resp.Header.Revision - hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0}) - if err != nil { - t.Fatal(err) - } - if rev != hresp.Header.Revision { - t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision) - } - - prevHash := hresp.Hash - prevCompactRev := hresp.CompactRevision - for i := 0; i < 10; i++ { - hresp, err := mvc.HashKV(context.Background(), &pb.HashKVRequest{Revision: 0}) - if err != nil { - t.Fatal(err) - } - if rev != hresp.Header.Revision { - t.Fatalf("Put rev %v != HashKV rev %v", rev, hresp.Header.Revision) - } - - if prevHash != hresp.Hash { - t.Fatalf("prevHash %v != Hash %v", prevHash, hresp.Hash) - } - - if prevCompactRev != hresp.CompactRevision { - t.Fatalf("prevCompactRev %v != CompactRevision %v", prevHash, hresp.Hash) - } - - prevHash = hresp.Hash - prevCompactRev = hresp.CompactRevision - } - } -} - -func TestV3TxnTooManyOps(t *testing.T) { - integration.BeforeTest(t) - maxTxnOps := uint(128) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - - // unique keys - i := new(int) - keyf := func() []byte { - *i++ - return []byte(fmt.Sprintf("key-%d", i)) - } - - addCompareOps := func(txn *pb.TxnRequest) { - txn.Compare = append(txn.Compare, - &pb.Compare{ - Result: pb.Compare_GREATER, - Target: pb.Compare_CREATE, - Key: keyf(), - }) - } - addSuccessOps := func(txn *pb.TxnRequest) { - txn.Success = append(txn.Success, - 
&pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{ - RequestPut: &pb.PutRequest{ - Key: keyf(), - Value: []byte("bar"), - }, - }, - }) - } - addFailureOps := func(txn *pb.TxnRequest) { - txn.Failure = append(txn.Failure, - &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{ - RequestPut: &pb.PutRequest{ - Key: keyf(), - Value: []byte("bar"), - }, - }, - }) - } - addTxnOps := func(txn *pb.TxnRequest) { - newTxn := &pb.TxnRequest{} - addSuccessOps(newTxn) - txn.Success = append(txn.Success, - &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: newTxn, - }, - }, - ) - } - - tests := []func(txn *pb.TxnRequest){ - addCompareOps, - addSuccessOps, - addFailureOps, - addTxnOps, - } - - for i, tt := range tests { - txn := &pb.TxnRequest{} - for j := 0; j < int(maxTxnOps+1); j++ { - tt(txn) - } - - _, err := kvc.Txn(context.Background(), txn) - if !eqErrGRPC(err, rpctypes.ErrGRPCTooManyOps) { - t.Errorf("#%d: err = %v, want %v", i, err, rpctypes.ErrGRPCTooManyOps) - } - } -} - -func TestV3TxnDuplicateKeys(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - putreq := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")}}} - delKeyReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &pb.DeleteRangeRequest{ - Key: []byte("abc"), - }, - }, - } - delInRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &pb.DeleteRangeRequest{ - Key: []byte("a"), RangeEnd: []byte("b"), - }, - }, - } - delOutOfRangeReq := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &pb.DeleteRangeRequest{ - Key: []byte("abb"), RangeEnd: []byte("abc"), - }, - }, - } - txnDelReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}}, - }, - } - txnDelReqTwoSide := 
&pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{ - Success: []*pb.RequestOp{delInRangeReq}, - Failure: []*pb.RequestOp{delInRangeReq}}, - }, - } - - txnPutReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}}, - }, - } - txnPutReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ - RequestTxn: &pb.TxnRequest{ - Success: []*pb.RequestOp{putreq}, - Failure: []*pb.RequestOp{putreq}}, - }, - } - - kvc := integration.ToGRPC(clus.RandClient()).KV - tests := []struct { - txnSuccess []*pb.RequestOp - - werr error - }{ - { - txnSuccess: []*pb.RequestOp{putreq, putreq}, - - werr: rpctypes.ErrGRPCDuplicateKey, - }, - { - txnSuccess: []*pb.RequestOp{putreq, delKeyReq}, - - werr: rpctypes.ErrGRPCDuplicateKey, - }, - { - txnSuccess: []*pb.RequestOp{putreq, delInRangeReq}, - - werr: rpctypes.ErrGRPCDuplicateKey, - }, - // Then(Put(a), Then(Del(a))) - { - txnSuccess: []*pb.RequestOp{putreq, txnDelReq}, - - werr: rpctypes.ErrGRPCDuplicateKey, - }, - // Then(Del(a), Then(Put(a))) - { - txnSuccess: []*pb.RequestOp{delInRangeReq, txnPutReq}, - - werr: rpctypes.ErrGRPCDuplicateKey, - }, - // Then((Then(Put(a)), Else(Put(a))), (Then(Put(a)), Else(Put(a))) - { - txnSuccess: []*pb.RequestOp{txnPutReqTwoSide, txnPutReqTwoSide}, - - werr: rpctypes.ErrGRPCDuplicateKey, - }, - // Then(Del(x), (Then(Put(a)), Else(Put(a)))) - { - txnSuccess: []*pb.RequestOp{delOutOfRangeReq, txnPutReqTwoSide}, - - werr: nil, - }, - // Then(Then(Del(a)), (Then(Del(a)), Else(Del(a)))) - { - txnSuccess: []*pb.RequestOp{txnDelReq, txnDelReqTwoSide}, - - werr: nil, - }, - { - txnSuccess: []*pb.RequestOp{delKeyReq, delInRangeReq, delKeyReq, delInRangeReq}, - - werr: nil, - }, - { - txnSuccess: []*pb.RequestOp{putreq, delOutOfRangeReq}, - - werr: nil, - }, - } - for i, tt := range tests { - txn := &pb.TxnRequest{Success: tt.txnSuccess} - _, err := kvc.Txn(context.Background(), txn) - if !eqErrGRPC(err, tt.werr) { - 
t.Errorf("#%d: err = %v, want %v", i, err, tt.werr) - } - } -} - -// TestV3TxnRevision tests that the transaction header revision is set as expected. -func TestV3TxnRevision(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - pr := &pb.PutRequest{Key: []byte("abc"), Value: []byte("def")} - presp, err := kvc.Put(context.TODO(), pr) - if err != nil { - t.Fatal(err) - } - - txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: &pb.RangeRequest{Key: []byte("abc")}}} - txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}} - tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } - - // did not update revision - if presp.Header.Revision != tresp.Header.Revision { - t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision) - } - - txndr := &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: &pb.DeleteRangeRequest{Key: []byte("def")}}} - txn = &pb.TxnRequest{Success: []*pb.RequestOp{txndr}} - tresp, err = kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } - - // did not update revision - if presp.Header.Revision != tresp.Header.Revision { - t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision) - } - - txnput := &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: &pb.PutRequest{Key: []byte("abc"), Value: []byte("123")}}} - txn = &pb.TxnRequest{Success: []*pb.RequestOp{txnput}} - tresp, err = kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } - - // updated revision - if tresp.Header.Revision != presp.Header.Revision+1 { - t.Fatalf("got rev %d, wanted rev %d", tresp.Header.Revision, presp.Header.Revision+1) - } -} - -// TestV3TxnCmpHeaderRev tests that the txn header revision is set as expected -// when compared to the Succeeded field in the txn response. 
-func TestV3TxnCmpHeaderRev(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - - for i := 0; i < 10; i++ { - // Concurrently put a key with a txn comparing on it. - revc := make(chan int64, 1) - errCh := make(chan error, 1) - go func() { - defer close(revc) - pr := &pb.PutRequest{Key: []byte("k"), Value: []byte("v")} - presp, err := kvc.Put(context.TODO(), pr) - errCh <- err - if err != nil { - return - } - revc <- presp.Header.Revision - }() - - // The read-only txn uses the optimized readindex server path. - txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{ - RequestRange: &pb.RangeRequest{Key: []byte("k")}}} - txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}} - // i = 0 /\ Succeeded => put followed txn - cmp := &pb.Compare{ - Result: pb.Compare_EQUAL, - Target: pb.Compare_VERSION, - Key: []byte("k"), - TargetUnion: &pb.Compare_Version{Version: int64(i)}, - } - txn.Compare = append(txn.Compare, cmp) - - tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } - - prev := <-revc - if err := <-errCh; err != nil { - t.Fatal(err) - } - // put followed txn; should eval to false - if prev > tresp.Header.Revision && !tresp.Succeeded { - t.Errorf("#%d: got else but put rev %d followed txn rev (%+v)", i, prev, tresp) - } - // txn follows put; should eval to true - if tresp.Header.Revision >= prev && tresp.Succeeded { - t.Errorf("#%d: got then but put rev %d preceded txn (%+v)", i, prev, tresp) - } - } -} - -// TestV3TxnRangeCompare tests range comparisons in txns -func TestV3TxnRangeCompare(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - // put keys, named by expected revision - for _, k := range []string{"/a/2", "/a/3", "/a/4", "/f/5"} { - if _, err := clus.Client(0).Put(context.TODO(), k, 
"x"); err != nil { - t.Fatal(err) - } - } - - tests := []struct { - cmp pb.Compare - - wSuccess bool - }{ - { - // >= /a/; all create revs fit - pb.Compare{ - Key: []byte("/a/"), - RangeEnd: []byte{0}, - Target: pb.Compare_CREATE, - Result: pb.Compare_LESS, - TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 6}, - }, - true, - }, - { - // >= /a/; one create rev doesn't fit - pb.Compare{ - Key: []byte("/a/"), - RangeEnd: []byte{0}, - Target: pb.Compare_CREATE, - Result: pb.Compare_LESS, - TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 5}, - }, - false, - }, - { - // prefix /a/*; all create revs fit - pb.Compare{ - Key: []byte("/a/"), - RangeEnd: []byte("/a0"), - Target: pb.Compare_CREATE, - Result: pb.Compare_LESS, - TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 5}, - }, - true, - }, - { - // prefix /a/*; one create rev doesn't fit - pb.Compare{ - Key: []byte("/a/"), - RangeEnd: []byte("/a0"), - Target: pb.Compare_CREATE, - Result: pb.Compare_LESS, - TargetUnion: &pb.Compare_CreateRevision{CreateRevision: 4}, - }, - false, - }, - { - // does not exist, does not succeed - pb.Compare{ - Key: []byte("/b/"), - RangeEnd: []byte("/b0"), - Target: pb.Compare_VALUE, - Result: pb.Compare_EQUAL, - TargetUnion: &pb.Compare_Value{Value: []byte("x")}, - }, - false, - }, - { - // all keys are leased - pb.Compare{ - Key: []byte("/a/"), - RangeEnd: []byte("/a0"), - Target: pb.Compare_LEASE, - Result: pb.Compare_GREATER, - TargetUnion: &pb.Compare_Lease{Lease: 0}, - }, - false, - }, - { - // no keys are leased - pb.Compare{ - Key: []byte("/a/"), - RangeEnd: []byte("/a0"), - Target: pb.Compare_LEASE, - Result: pb.Compare_EQUAL, - TargetUnion: &pb.Compare_Lease{Lease: 0}, - }, - true, - }, - } - - kvc := integration.ToGRPC(clus.Client(0)).KV - for i, tt := range tests { - txn := &pb.TxnRequest{} - txn.Compare = append(txn.Compare, &tt.cmp) - tresp, err := kvc.Txn(context.TODO(), txn) - if err != nil { - t.Fatal(err) - } - if tt.wSuccess != tresp.Succeeded { - 
t.Errorf("#%d: expected %v, got %v", i, tt.wSuccess, tresp.Succeeded) - } - } -} - -// TestV3TxnNestedPath tests nested txns follow paths as expected. -func TestV3TxnNestedPath(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - - cmpTrue := &pb.Compare{ - Result: pb.Compare_EQUAL, - Target: pb.Compare_VERSION, - Key: []byte("k"), - TargetUnion: &pb.Compare_Version{Version: int64(0)}, - } - cmpFalse := &pb.Compare{ - Result: pb.Compare_EQUAL, - Target: pb.Compare_VERSION, - Key: []byte("k"), - TargetUnion: &pb.Compare_Version{Version: int64(1)}, - } - - // generate random path to eval txns - topTxn := &pb.TxnRequest{} - txn := topTxn - txnPath := make([]bool, 10) - for i := range txnPath { - nextTxn := &pb.TxnRequest{} - op := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: nextTxn}} - txnPath[i] = rand.Intn(2) == 0 - if txnPath[i] { - txn.Compare = append(txn.Compare, cmpTrue) - txn.Success = append(txn.Success, op) - } else { - txn.Compare = append(txn.Compare, cmpFalse) - txn.Failure = append(txn.Failure, op) - } - txn = nextTxn - } - - tresp, err := kvc.Txn(context.TODO(), topTxn) - if err != nil { - t.Fatal(err) - } - - curTxnResp := tresp - for i := range txnPath { - if curTxnResp.Succeeded != txnPath[i] { - t.Fatalf("expected path %+v, got response %+v", txnPath, *tresp) - } - curTxnResp = curTxnResp.Responses[0].Response.(*pb.ResponseOp_ResponseTxn).ResponseTxn - } -} - -// TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair. 
-func TestV3PutIgnoreValue(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - key, val := []byte("foo"), []byte("bar") - putReq := pb.PutRequest{Key: key, Value: val} - - // create lease - lc := integration.ToGRPC(clus.RandClient()).Lease - lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - - tests := []struct { - putFunc func() error - putErr error - wleaseID int64 - }{ - { // put failure for non-existent key - func() error { - preq := putReq - preq.IgnoreValue = true - _, err := kvc.Put(context.TODO(), &preq) - return err - }, - rpctypes.ErrGRPCKeyNotFound, - 0, - }, - { // txn failure for non-existent key - func() error { - preq := putReq - preq.Value = nil - preq.IgnoreValue = true - txn := &pb.TxnRequest{} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) - _, err := kvc.Txn(context.TODO(), txn) - return err - }, - rpctypes.ErrGRPCKeyNotFound, - 0, - }, - { // put success - func() error { - _, err := kvc.Put(context.TODO(), &putReq) - return err - }, - nil, - 0, - }, - { // txn success, attach lease - func() error { - preq := putReq - preq.Value = nil - preq.Lease = lresp.ID - preq.IgnoreValue = true - txn := &pb.TxnRequest{} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) - _, err := kvc.Txn(context.TODO(), txn) - return err - }, - nil, - lresp.ID, - }, - { // non-empty value with ignore_value should error - func() error { - preq := putReq - preq.IgnoreValue = true - _, err := kvc.Put(context.TODO(), &preq) - return err - }, - rpctypes.ErrGRPCValueProvided, - 0, - }, - { // overwrite with previous value, ensure no prev-kv is returned and lease is detached - func() error { - preq := 
putReq - preq.Value = nil - preq.IgnoreValue = true - presp, err := kvc.Put(context.TODO(), &preq) - if err != nil { - return err - } - if presp.PrevKv != nil && len(presp.PrevKv.Key) != 0 { - return fmt.Errorf("unexexpected previous key-value %v", presp.PrevKv) - } - return nil - }, - nil, - 0, - }, - { // revoke lease, ensure detached key doesn't get deleted - func() error { - _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - return err - }, - nil, - 0, - }, - } - - for i, tt := range tests { - if err := tt.putFunc(); !eqErrGRPC(err, tt.putErr) { - t.Fatalf("#%d: err expected %v, got %v", i, tt.putErr, err) - } - if tt.putErr != nil { - continue - } - rr, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key}) - if err != nil { - t.Fatalf("#%d: %v", i, err) - } - if len(rr.Kvs) != 1 { - t.Fatalf("#%d: len(rr.KVs) expected 1, got %d", i, len(rr.Kvs)) - } - if !bytes.Equal(rr.Kvs[0].Value, val) { - t.Fatalf("#%d: value expected %q, got %q", i, val, rr.Kvs[0].Value) - } - if rr.Kvs[0].Lease != tt.wleaseID { - t.Fatalf("#%d: lease ID expected %d, got %d", i, tt.wleaseID, rr.Kvs[0].Lease) - } - } -} - -// TestV3PutIgnoreLease ensures that writes with ignore_lease uses previous lease for the key overwrites. 
-func TestV3PutIgnoreLease(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - - // create lease - lc := integration.ToGRPC(clus.RandClient()).Lease - lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - - key, val, val1 := []byte("zoo"), []byte("bar"), []byte("bar1") - putReq := pb.PutRequest{Key: key, Value: val} - - tests := []struct { - putFunc func() error - putErr error - wleaseID int64 - wvalue []byte - }{ - { // put failure for non-existent key - func() error { - preq := putReq - preq.IgnoreLease = true - _, err := kvc.Put(context.TODO(), &preq) - return err - }, - rpctypes.ErrGRPCKeyNotFound, - 0, - nil, - }, - { // txn failure for non-existent key - func() error { - preq := putReq - preq.IgnoreLease = true - txn := &pb.TxnRequest{} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) - _, err := kvc.Txn(context.TODO(), txn) - return err - }, - rpctypes.ErrGRPCKeyNotFound, - 0, - nil, - }, - { // put success - func() error { - preq := putReq - preq.Lease = lresp.ID - _, err := kvc.Put(context.TODO(), &preq) - return err - }, - nil, - lresp.ID, - val, - }, - { // txn success, modify value using 'ignore_lease' and ensure lease is not detached - func() error { - preq := putReq - preq.Value = val1 - preq.IgnoreLease = true - txn := &pb.TxnRequest{} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{RequestPut: &preq}}) - _, err := kvc.Txn(context.TODO(), txn) - return err - }, - nil, - lresp.ID, - val1, - }, - { // non-empty lease with ignore_lease should error - func() error { - preq := putReq - preq.Lease = lresp.ID - preq.IgnoreLease = true - _, err := kvc.Put(context.TODO(), &preq) - return err - }, - 
rpctypes.ErrGRPCLeaseProvided, - 0, - nil, - }, - { // overwrite with previous value, ensure no prev-kv is returned and lease is detached - func() error { - presp, err := kvc.Put(context.TODO(), &putReq) - if err != nil { - return err - } - if presp.PrevKv != nil && len(presp.PrevKv.Key) != 0 { - return fmt.Errorf("unexexpected previous key-value %v", presp.PrevKv) - } - return nil - }, - nil, - 0, - val, - }, - { // revoke lease, ensure detached key doesn't get deleted - func() error { - _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - return err - }, - nil, - 0, - val, - }, - } - - for i, tt := range tests { - if err := tt.putFunc(); !eqErrGRPC(err, tt.putErr) { - t.Fatalf("#%d: err expected %v, got %v", i, tt.putErr, err) - } - if tt.putErr != nil { - continue - } - rr, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: key}) - if err != nil { - t.Fatalf("#%d: %v", i, err) - } - if len(rr.Kvs) != 1 { - t.Fatalf("#%d: len(rr.KVs) expected 1, got %d", i, len(rr.Kvs)) - } - if !bytes.Equal(rr.Kvs[0].Value, tt.wvalue) { - t.Fatalf("#%d: value expected %q, got %q", i, val, rr.Kvs[0].Value) - } - if rr.Kvs[0].Lease != tt.wleaseID { - t.Fatalf("#%d: lease ID expected %d, got %d", i, tt.wleaseID, rr.Kvs[0].Lease) - } - } -} - -// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails. -func TestV3PutMissingLease(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - key := []byte("foo") - preq := &pb.PutRequest{Key: key, Lease: 123456} - tests := []func(){ - // put case - func() { - if presp, err := kvc.Put(context.TODO(), preq); err == nil { - t.Errorf("succeeded put key. req: %v. 
resp: %v", preq, presp) - } - }, - // txn success case - func() { - txn := &pb.TxnRequest{} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) - if tresp, err := kvc.Txn(context.TODO(), txn); err == nil { - t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp) - } - }, - // txn failure case - func() { - txn := &pb.TxnRequest{} - txn.Failure = append(txn.Failure, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) - cmp := &pb.Compare{ - Result: pb.Compare_GREATER, - Target: pb.Compare_CREATE, - Key: []byte("bar"), - } - txn.Compare = append(txn.Compare, cmp) - if tresp, err := kvc.Txn(context.TODO(), txn); err == nil { - t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp) - } - }, - // ignore bad lease in failure on success txn - func() { - txn := &pb.TxnRequest{} - rreq := &pb.RangeRequest{Key: []byte("bar")} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestRange{ - RequestRange: rreq}}) - txn.Failure = append(txn.Failure, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) - if tresp, err := kvc.Txn(context.TODO(), txn); err != nil { - t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp) - } - }, - } - - for i, f := range tests { - f() - // key shouldn't have been stored - rreq := &pb.RangeRequest{Key: key} - rresp, err := kvc.Range(context.TODO(), rreq) - if err != nil { - t.Errorf("#%d. could not rangereq (%v)", i, err) - } else if len(rresp.Kvs) != 0 { - t.Errorf("#%d. expected no keys, got %v", i, rresp) - } - } -} - -// TestV3DeleteRange tests various edge cases in the DeleteRange API. 
-func TestV3DeleteRange(t *testing.T) { - integration.BeforeTest(t) - tests := []struct { - name string - - keySet []string - begin string - end string - prevKV bool - - wantSet [][]byte - deleted int64 - }{ - { - "delete middle", - []string{"foo", "foo/abc", "fop"}, - "foo/", "fop", false, - [][]byte{[]byte("foo"), []byte("fop")}, 1, - }, - { - "no delete", - []string{"foo", "foo/abc", "fop"}, - "foo/", "foo/", false, - [][]byte{[]byte("foo"), []byte("foo/abc"), []byte("fop")}, 0, - }, - { - "delete first", - []string{"foo", "foo/abc", "fop"}, - "fo", "fop", false, - [][]byte{[]byte("fop")}, 2, - }, - { - "delete tail", - []string{"foo", "foo/abc", "fop"}, - "foo/", "fos", false, - [][]byte{[]byte("foo")}, 2, - }, - { - "delete exact", - []string{"foo", "foo/abc", "fop"}, - "foo/abc", "", false, - [][]byte{[]byte("foo"), []byte("fop")}, 1, - }, - { - "delete none [x,x)", - []string{"foo"}, - "foo", "foo", false, - [][]byte{[]byte("foo")}, 0, - }, - { - "delete middle with preserveKVs set", - []string{"foo", "foo/abc", "fop"}, - "foo/", "fop", true, - [][]byte{[]byte("foo"), []byte("fop")}, 1, - }, - } - - for i, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - kvc := integration.ToGRPC(clus.RandClient()).KV - defer clus.Terminate(t) - - ks := tt.keySet - for j := range ks { - reqput := &pb.PutRequest{Key: []byte(ks[j]), Value: []byte{}} - _, err := kvc.Put(context.TODO(), reqput) - if err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - } - - dreq := &pb.DeleteRangeRequest{ - Key: []byte(tt.begin), - RangeEnd: []byte(tt.end), - PrevKv: tt.prevKV, - } - dresp, err := kvc.DeleteRange(context.TODO(), dreq) - if err != nil { - t.Fatalf("couldn't delete range on test %d (%v)", i, err) - } - if tt.deleted != dresp.Deleted { - t.Errorf("expected %d on test %v, got %d", tt.deleted, i, dresp.Deleted) - } - if tt.prevKV { - if len(dresp.PrevKvs) != int(dresp.Deleted) { - 
t.Errorf("preserve %d keys, want %d", len(dresp.PrevKvs), dresp.Deleted) - } - } - - rreq := &pb.RangeRequest{Key: []byte{0x0}, RangeEnd: []byte{0xff}} - rresp, err := kvc.Range(context.TODO(), rreq) - if err != nil { - t.Errorf("couldn't get range on test %v (%v)", i, err) - } - if dresp.Header.Revision != rresp.Header.Revision { - t.Errorf("expected revision %v, got %v", - dresp.Header.Revision, rresp.Header.Revision) - } - - var keys [][]byte - for j := range rresp.Kvs { - keys = append(keys, rresp.Kvs[j].Key) - } - if !reflect.DeepEqual(tt.wantSet, keys) { - t.Errorf("expected %v on test %v, got %v", tt.wantSet, i, keys) - } - }) - } -} - -// TestV3TxnInvalidRange tests that invalid ranges are rejected in txns. -func TestV3TxnInvalidRange(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - - for i := 0; i < 3; i++ { - _, err := kvc.Put(context.Background(), preq) - if err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - } - - _, err := kvc.Compact(context.Background(), &pb.CompactionRequest{Revision: 2}) - if err != nil { - t.Fatalf("couldn't compact kv space (%v)", err) - } - - // future rev - txn := &pb.TxnRequest{} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestPut{ - RequestPut: preq}}) - - rreq := &pb.RangeRequest{Key: []byte("foo"), Revision: 100} - txn.Success = append(txn.Success, &pb.RequestOp{ - Request: &pb.RequestOp_RequestRange{ - RequestRange: rreq}}) - - if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, rpctypes.ErrGRPCFutureRev) { - t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCFutureRev) - } - - // compacted rev - tv, _ := txn.Success[1].Request.(*pb.RequestOp_RequestRange) - tv.RequestRange.Revision = 1 - if _, err := kvc.Txn(context.TODO(), txn); !eqErrGRPC(err, 
rpctypes.ErrGRPCCompacted) { - t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCCompacted) - } -} - -func TestV3TooLargeRequest(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - - // 2MB request value - largeV := make([]byte, 2*1024*1024) - preq := &pb.PutRequest{Key: []byte("foo"), Value: largeV} - - _, err := kvc.Put(context.Background(), preq) - if !eqErrGRPC(err, rpctypes.ErrGRPCRequestTooLarge) { - t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCRequestTooLarge) - } -} - -// TestV3Hash tests hash. -func TestV3Hash(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - cli := clus.RandClient() - kvc := integration.ToGRPC(cli).KV - m := integration.ToGRPC(cli).Maintenance - - preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - - for i := 0; i < 3; i++ { - _, err := kvc.Put(context.Background(), preq) - if err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - } - - resp, err := m.Hash(context.Background(), &pb.HashRequest{}) - if err != nil || resp.Hash == 0 { - t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash) - } -} - -// TestV3HashRestart ensures that hash stays the same after restart. 
-func TestV3HashRestart(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - cli := clus.RandClient() - resp, err := integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) - if err != nil || resp.Hash == 0 { - t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash) - } - hash1 := resp.Hash - - clus.Members[0].Stop(t) - clus.Members[0].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - kvc := integration.ToGRPC(clus.Client(0)).KV - waitForRestart(t, kvc) - - cli = clus.RandClient() - resp, err = integration.ToGRPC(cli).Maintenance.Hash(context.Background(), &pb.HashRequest{}) - if err != nil || resp.Hash == 0 { - t.Fatalf("couldn't hash (%v, hash %d)", err, resp.Hash) - } - hash2 := resp.Hash - - if hash1 != hash2 { - t.Fatalf("hash expected %d, got %d", hash1, hash2) - } -} - -// TestV3StorageQuotaAPI tests the V3 server respects quotas at the API layer -func TestV3StorageQuotaAPI(t *testing.T) { - integration.BeforeTest(t) - quotasize := int64(16 * os.Getpagesize()) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - - // Set a quota on one node - clus.Members[0].QuotaBackendBytes = quotasize - clus.Members[0].Stop(t) - clus.Members[0].Restart(t) - - defer clus.Terminate(t) - kvc := integration.ToGRPC(clus.Client(0)).KV - waitForRestart(t, kvc) - - key := []byte("abc") - - // test small put that fits in quota - smallbuf := make([]byte, 512) - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: smallbuf}); err != nil { - t.Fatal(err) - } - - // test big put - bigbuf := make([]byte, quotasize) - _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: key, Value: bigbuf}) - if !eqErrGRPC(err, rpctypes.ErrGRPCNoSpace) { - t.Fatalf("big put got %v, expected %v", err, rpctypes.ErrGRPCNoSpace) - } - - // test big txn - puttxn := &pb.RequestOp{ - Request: 
&pb.RequestOp_RequestPut{ - RequestPut: &pb.PutRequest{ - Key: key, - Value: bigbuf, - }, - }, - } - txnreq := &pb.TxnRequest{} - txnreq.Success = append(txnreq.Success, puttxn) - _, txnerr := kvc.Txn(context.TODO(), txnreq) - if !eqErrGRPC(txnerr, rpctypes.ErrGRPCNoSpace) { - t.Fatalf("big txn got %v, expected %v", err, rpctypes.ErrGRPCNoSpace) - } -} - -func TestV3RangeRequest(t *testing.T) { - integration.BeforeTest(t) - tests := []struct { - name string - - putKeys []string - reqs []pb.RangeRequest - - wresps [][]string - wmores []bool - wcounts []int64 - }{ - { - "single key", - []string{"foo", "bar"}, - []pb.RangeRequest{ - // exists - {Key: []byte("foo")}, - // doesn't exist - {Key: []byte("baz")}, - }, - - [][]string{ - {"foo"}, - {}, - }, - []bool{false, false}, - []int64{1, 0}, - }, - { - "multi-key", - []string{"a", "b", "c", "d", "e"}, - []pb.RangeRequest{ - // all in range - {Key: []byte("a"), RangeEnd: []byte("z")}, - // [b, d) - {Key: []byte("b"), RangeEnd: []byte("d")}, - // out of range - {Key: []byte("f"), RangeEnd: []byte("z")}, - // [c,c) = empty - {Key: []byte("c"), RangeEnd: []byte("c")}, - // [d, b) = empty - {Key: []byte("d"), RangeEnd: []byte("b")}, - // ["\0", "\0") => all in range - {Key: []byte{0}, RangeEnd: []byte{0}}, - }, - - [][]string{ - {"a", "b", "c", "d", "e"}, - {"b", "c"}, - {}, - {}, - {}, - {"a", "b", "c", "d", "e"}, - }, - []bool{false, false, false, false, false, false}, - []int64{5, 2, 0, 0, 0, 5}, - }, - { - "revision", - []string{"a", "b", "c", "d", "e"}, - []pb.RangeRequest{ - {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 0}, - {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 1}, - {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 2}, - {Key: []byte("a"), RangeEnd: []byte("z"), Revision: 3}, - }, - - [][]string{ - {"a", "b", "c", "d", "e"}, - {}, - {"a"}, - {"a", "b"}, - }, - []bool{false, false, false, false}, - []int64{5, 0, 1, 2}, - }, - { - "limit", - []string{"a", "b", "c"}, - []pb.RangeRequest{ - // 
more - {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 1}, - // half - {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 2}, - // no more - {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 3}, - // limit over - {Key: []byte("a"), RangeEnd: []byte("z"), Limit: 4}, - }, - - [][]string{ - {"a"}, - {"a", "b"}, - {"a", "b", "c"}, - {"a", "b", "c"}, - }, - []bool{true, true, false, false}, - []int64{3, 3, 3, 3}, - }, - { - "sort", - []string{"b", "a", "c", "d", "c"}, - []pb.RangeRequest{ - { - Key: []byte("a"), RangeEnd: []byte("z"), - Limit: 1, - SortOrder: pb.RangeRequest_ASCEND, - SortTarget: pb.RangeRequest_KEY, - }, - { - Key: []byte("a"), RangeEnd: []byte("z"), - Limit: 1, - SortOrder: pb.RangeRequest_DESCEND, - SortTarget: pb.RangeRequest_KEY, - }, - { - Key: []byte("a"), RangeEnd: []byte("z"), - Limit: 1, - SortOrder: pb.RangeRequest_ASCEND, - SortTarget: pb.RangeRequest_CREATE, - }, - { - Key: []byte("a"), RangeEnd: []byte("z"), - Limit: 1, - SortOrder: pb.RangeRequest_DESCEND, - SortTarget: pb.RangeRequest_MOD, - }, - { - Key: []byte("z"), RangeEnd: []byte("z"), - Limit: 1, - SortOrder: pb.RangeRequest_DESCEND, - SortTarget: pb.RangeRequest_CREATE, - }, - { // sort ASCEND by default - Key: []byte("a"), RangeEnd: []byte("z"), - Limit: 10, - SortOrder: pb.RangeRequest_NONE, - SortTarget: pb.RangeRequest_CREATE, - }, - }, - - [][]string{ - {"a"}, - {"d"}, - {"b"}, - {"c"}, - {}, - {"b", "a", "c", "d"}, - }, - []bool{true, true, true, true, false, false}, - []int64{4, 4, 4, 4, 0, 4}, - }, - { - "min/max mod rev", - []string{"rev2", "rev3", "rev4", "rev5", "rev6"}, - []pb.RangeRequest{ - { - Key: []byte{0}, RangeEnd: []byte{0}, - MinModRevision: 3, - }, - { - Key: []byte{0}, RangeEnd: []byte{0}, - MaxModRevision: 3, - }, - { - Key: []byte{0}, RangeEnd: []byte{0}, - MinModRevision: 3, - MaxModRevision: 5, - }, - { - Key: []byte{0}, RangeEnd: []byte{0}, - MaxModRevision: 10, - }, - }, - - [][]string{ - {"rev3", "rev4", "rev5", "rev6"}, - {"rev2", "rev3"}, - {"rev3", 
"rev4", "rev5"}, - {"rev2", "rev3", "rev4", "rev5", "rev6"}, - }, - []bool{false, false, false, false}, - []int64{5, 5, 5, 5}, - }, - { - "min/max create rev", - []string{"rev2", "rev3", "rev2", "rev2", "rev6", "rev3"}, - []pb.RangeRequest{ - { - Key: []byte{0}, RangeEnd: []byte{0}, - MinCreateRevision: 3, - }, - { - Key: []byte{0}, RangeEnd: []byte{0}, - MaxCreateRevision: 3, - }, - { - Key: []byte{0}, RangeEnd: []byte{0}, - MinCreateRevision: 3, - MaxCreateRevision: 5, - }, - { - Key: []byte{0}, RangeEnd: []byte{0}, - MaxCreateRevision: 10, - }, - }, - - [][]string{ - {"rev3", "rev6"}, - {"rev2", "rev3"}, - {"rev3"}, - {"rev2", "rev3", "rev6"}, - }, - []bool{false, false, false, false}, - []int64{3, 3, 3, 3}, - }, - } - - for i, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - for _, k := range tt.putKeys { - kvc := integration.ToGRPC(clus.RandClient()).KV - req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")} - if _, err := kvc.Put(context.TODO(), req); err != nil { - t.Fatalf("#%d: couldn't put key (%v)", i, err) - } - } - - for j, req := range tt.reqs { - kvc := integration.ToGRPC(clus.RandClient()).KV - resp, err := kvc.Range(context.TODO(), &req) - if err != nil { - t.Errorf("#%d.%d: Range error: %v", i, j, err) - continue - } - if len(resp.Kvs) != len(tt.wresps[j]) { - t.Errorf("#%d.%d: bad len(resp.Kvs). got = %d, want = %d, ", i, j, len(resp.Kvs), len(tt.wresps[j])) - continue - } - for k, wKey := range tt.wresps[j] { - respKey := string(resp.Kvs[k].Key) - if respKey != wKey { - t.Errorf("#%d.%d: key[%d]. got = %v, want = %v, ", i, j, k, respKey, wKey) - } - } - if resp.More != tt.wmores[j] { - t.Errorf("#%d.%d: bad more. got = %v, want = %v, ", i, j, resp.More, tt.wmores[j]) - } - if resp.GetCount() != tt.wcounts[j] { - t.Errorf("#%d.%d: bad count. 
got = %v, want = %v, ", i, j, resp.GetCount(), tt.wcounts[j]) - } - wrev := int64(len(tt.putKeys) + 1) - if resp.Header.Revision != wrev { - t.Errorf("#%d.%d: bad header revision. got = %d. want = %d", i, j, resp.Header.Revision, wrev) - } - } - }) - } -} - -// TestTLSGRPCRejectInsecureClient checks that connection is rejected if server is TLS but not client. -func TestTLSGRPCRejectInsecureClient(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo}) - defer clus.Terminate(t) - - // nil out TLS field so client will use an insecure connection - clus.Members[0].ClientTLSInfo = nil - client, err := integration.NewClientV3(clus.Members[0]) - if err != nil && err != context.DeadlineExceeded { - t.Fatalf("unexpected error (%v)", err) - } else if client == nil { - // Ideally, no client would be returned. However, grpc will - // return a connection without trying to handshake first so - // the connection appears OK. - return - } - defer client.Close() - - donec := make(chan error, 1) - go func() { - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - _, perr := integration.ToGRPC(client).KV.Put(ctx, reqput) - cancel() - donec <- perr - }() - - if perr := <-donec; perr == nil { - t.Fatalf("expected client error on put") - } -} - -// TestTLSGRPCRejectSecureClient checks that connection is rejected if client is TLS but not server. 
-func TestTLSGRPCRejectSecureClient(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - clus.Members[0].ClientTLSInfo = &integration.TestTLSInfo - clus.Members[0].DialOptions = []grpc.DialOption{grpc.WithBlock()} - clus.Members[0].GrpcURL = strings.Replace(clus.Members[0].GrpcURL, "http://", "https://", 1) - client, err := integration.NewClientV3(clus.Members[0]) - if client != nil || err == nil { - client.Close() - t.Fatalf("expected no client") - } else if err != context.DeadlineExceeded { - t.Fatalf("unexpected error (%v)", err) - } -} - -// TestTLSGRPCAcceptSecureAll checks that connection is accepted if both client and server are TLS -func TestTLSGRPCAcceptSecureAll(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, ClientTLS: &integration.TestTLSInfo}) - defer clus.Terminate(t) - - client, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatalf("expected tls client (%v)", err) - } - defer client.Close() - - reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - if _, err := integration.ToGRPC(client).KV.Put(context.TODO(), reqput); err != nil { - t.Fatalf("unexpected error on put over tls (%v)", err) - } -} - -// TestTLSReloadAtomicReplace ensures server reloads expired/valid certs -// when all certs are atomically replaced by directory renaming. -// And expects server to reject client requests, and vice versa. 
-func TestTLSReloadAtomicReplace(t *testing.T) { - tmpDir := t.TempDir() - os.RemoveAll(tmpDir) - - certsDir := t.TempDir() - - certsDirExp := t.TempDir() - - cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir) - if terr != nil { - t.Fatal(terr) - } - if _, err := copyTLSFiles(integration.TestTLSInfoExpired, certsDirExp); err != nil { - t.Fatal(err) - } - return tlsInfo - } - replaceFunc := func() { - if err := os.Rename(certsDir, tmpDir); err != nil { - t.Fatal(err) - } - if err := os.Rename(certsDirExp, certsDir); err != nil { - t.Fatal(err) - } - // after rename, - // 'certsDir' contains expired certs - // 'tmpDir' contains valid certs - // 'certsDirExp' does not exist - } - revertFunc := func() { - if err := os.Rename(tmpDir, certsDirExp); err != nil { - t.Fatal(err) - } - if err := os.Rename(certsDir, tmpDir); err != nil { - t.Fatal(err) - } - if err := os.Rename(certsDirExp, certsDir); err != nil { - t.Fatal(err) - } - } - testTLSReload(t, cloneFunc, replaceFunc, revertFunc, false) -} - -// TestTLSReloadCopy ensures server reloads expired/valid certs -// when new certs are copied over, one by one. And expects server -// to reject client requests, and vice versa. -func TestTLSReloadCopy(t *testing.T) { - certsDir := t.TempDir() - - cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(integration.TestTLSInfo, certsDir) - if terr != nil { - t.Fatal(terr) - } - return tlsInfo - } - replaceFunc := func() { - if _, err := copyTLSFiles(integration.TestTLSInfoExpired, certsDir); err != nil { - t.Fatal(err) - } - } - revertFunc := func() { - if _, err := copyTLSFiles(integration.TestTLSInfo, certsDir); err != nil { - t.Fatal(err) - } - } - testTLSReload(t, cloneFunc, replaceFunc, revertFunc, false) -} - -// TestTLSReloadCopyIPOnly ensures server reloads expired/valid certs -// when new certs are copied over, one by one. And expects server -// to reject client requests, and vice versa. 
-func TestTLSReloadCopyIPOnly(t *testing.T) { - certsDir := t.TempDir() - - cloneFunc := func() transport.TLSInfo { - tlsInfo, terr := copyTLSFiles(integration.TestTLSInfoIP, certsDir) - if terr != nil { - t.Fatal(terr) - } - return tlsInfo - } - replaceFunc := func() { - if _, err := copyTLSFiles(integration.TestTLSInfoExpiredIP, certsDir); err != nil { - t.Fatal(err) - } - } - revertFunc := func() { - if _, err := copyTLSFiles(integration.TestTLSInfoIP, certsDir); err != nil { - t.Fatal(err) - } - } - testTLSReload(t, cloneFunc, replaceFunc, revertFunc, true) -} - -func testTLSReload( - t *testing.T, - cloneFunc func() transport.TLSInfo, - replaceFunc func(), - revertFunc func(), - useIP bool) { - integration.BeforeTest(t) - - // 1. separate copies for TLS assets modification - tlsInfo := cloneFunc() - - // 2. start cluster with valid certs - clus := integration.NewCluster(t, &integration.ClusterConfig{ - Size: 1, - PeerTLS: &tlsInfo, - ClientTLS: &tlsInfo, - UseIP: useIP, - }) - defer clus.Terminate(t) - - // 3. concurrent client dialing while certs become expired - errc := make(chan error, 1) - go func() { - for { - cc, err := tlsInfo.ClientConfig() - if err != nil { - // errors in 'go/src/crypto/tls/tls.go' - // tls: private key does not match public key - // tls: failed to find any PEM data in key input - // tls: failed to find any PEM data in certificate input - // Or 'does not exist', 'not found', etc - t.Log(err) - continue - } - cli, cerr := integration.NewClient(t, clientv3.Config{ - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - Endpoints: []string{clus.Members[0].GRPCURL()}, - DialTimeout: time.Second, - TLS: cc, - }) - if cerr != nil { - errc <- cerr - return - } - cli.Close() - } - }() - - // 4. replace certs with expired ones - replaceFunc() - - // 5. 
expect dial time-out when loading expired certs - select { - case gerr := <-errc: - if gerr != context.DeadlineExceeded { - t.Fatalf("expected %v, got %v", context.DeadlineExceeded, gerr) - } - case <-time.After(5 * time.Second): - t.Fatal("failed to receive dial timeout error") - } - - // 6. replace expired certs back with valid ones - revertFunc() - - // 7. new requests should trigger listener to reload valid certs - tls, terr := tlsInfo.ClientConfig() - if terr != nil { - t.Fatal(terr) - } - cl, cerr := integration.NewClient(t, clientv3.Config{ - Endpoints: []string{clus.Members[0].GRPCURL()}, - DialTimeout: 5 * time.Second, - TLS: tls, - }) - if cerr != nil { - t.Fatalf("expected no error, got %v", cerr) - } - cl.Close() -} - -func TestGRPCRequireLeader(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - - client, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatalf("cannot create client: %v", err) - } - defer client.Close() - - // wait for election timeout, then member[0] will not have a leader. 
- time.Sleep(time.Duration(3*integration.ElectionTicks) * config.TickDuration) - - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - ctx := metadata.NewOutgoingContext(context.Background(), md) - reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - if _, err := integration.ToGRPC(client).KV.Put(ctx, reqput); rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { - t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) - } -} - -func TestGRPCStreamRequireLeader(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - client, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatalf("failed to create client (%v)", err) - } - defer client.Close() - - wAPI := integration.ToGRPC(client).Watch - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - ctx := metadata.NewOutgoingContext(context.Background(), md) - wStream, err := wAPI.Watch(ctx) - if err != nil { - t.Fatalf("wAPI.Watch error: %v", err) - } - - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - - // existing stream should be rejected - _, err = wStream.Recv() - if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { - t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) - } - - // new stream should also be rejected - wStream, err = wAPI.Watch(ctx) - if err != nil { - t.Fatalf("wAPI.Watch error: %v", err) - } - _, err = wStream.Recv() - if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { - t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) - } - - clus.Members[1].Restart(t) - clus.Members[2].Restart(t) - - clus.WaitMembersForLeader(t, clus.Members) - time.Sleep(time.Duration(2*integration.ElectionTicks) * config.TickDuration) - - // new stream should also be OK now after we restarted the other members - wStream, err = wAPI.Watch(ctx) - if err != nil { - 
t.Fatalf("wAPI.Watch error: %v", err) - } - wreq := &pb.WatchRequest{ - RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}, - }, - } - err = wStream.Send(wreq) - if err != nil { - t.Errorf("err = %v, want nil", err) - } -} - -// TestV3LargeRequests ensures that configurable MaxRequestBytes works as intended. -func TestV3LargeRequests(t *testing.T) { - integration.BeforeTest(t) - tests := []struct { - maxRequestBytes uint - valueSize int - expectError error - }{ - // don't set to 0. use 0 as the default. - {256, 1024, rpctypes.ErrGRPCRequestTooLarge}, - {10 * 1024 * 1024, 9 * 1024 * 1024, nil}, - {10 * 1024 * 1024, 10 * 1024 * 1024, rpctypes.ErrGRPCRequestTooLarge}, - {10 * 1024 * 1024, 10*1024*1024 + 5, rpctypes.ErrGRPCRequestTooLarge}, - } - for i, test := range tests { - t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes}) - defer clus.Terminate(t) - kvcli := integration.ToGRPC(clus.Client(0)).KV - reqput := &pb.PutRequest{Key: []byte("foo"), Value: make([]byte, test.valueSize)} - _, err := kvcli.Put(context.TODO(), reqput) - if !eqErrGRPC(err, test.expectError) { - t.Errorf("#%d: expected error %v, got %v", i, test.expectError, err) - } - - // request went through, expect large response back from server - if test.expectError == nil { - reqget := &pb.RangeRequest{Key: []byte("foo")} - // limit receive call size with original value + gRPC overhead bytes - _, err = kvcli.Range(context.TODO(), reqget, grpc.MaxCallRecvMsgSize(test.valueSize+512*1024)) - if err != nil { - t.Errorf("#%d: range expected no error, got %v", i, err) - } - } - - }) - } -} - -func eqErrGRPC(err1 error, err2 error) bool { - return !(err1 == nil && err2 != nil) || err1.Error() == err2.Error() -} - -// waitForRestart tries a range request until the client's server responds. 
-// This is mainly a stop-gap function until grpcproxy's KVClient adapter -// (and by extension, clientv3) supports grpc.CallOption pass-through so -// FailFast=false works with Put. -func waitForRestart(t *testing.T, kvc pb.KVClient) { - req := &pb.RangeRequest{Key: []byte("_"), Serializable: true} - // TODO: Remove retry loop once the new grpc load balancer provides retry. - var err error - for i := 0; i < 10; i++ { - if _, err = kvc.Range(context.TODO(), req, grpc.WaitForReady(true)); err != nil { - if status, ok := status.FromError(err); ok && status.Code() == codes.Unavailable { - time.Sleep(time.Millisecond * 250) - } else { - t.Fatal(err) - } - } - } - if err != nil { - t.Fatalf("timed out waiting for restart: %v", err) - } -} diff --git a/tests/integration/v3_kv_test.go b/tests/integration/v3_kv_test.go deleted file mode 100644 index 2cf8acf7ab9..00000000000 --- a/tests/integration/v3_kv_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "testing" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/namespace" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestKVWithEmptyValue ensures that a get/delete with an empty value, and with WithFromKey/WithPrefix function will return an empty error. 
-func TestKVWithEmptyValue(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - client := clus.RandClient() - - _, err := client.Put(context.Background(), "my-namespace/foobar", "data") - if err != nil { - t.Fatal(err) - } - _, err = client.Put(context.Background(), "my-namespace/foobar1", "data") - if err != nil { - t.Fatal(err) - } - _, err = client.Put(context.Background(), "namespace/foobar1", "data") - if err != nil { - t.Fatal(err) - } - - // Range over all keys. - resp, err := client.Get(context.Background(), "", clientv3.WithFromKey()) - if err != nil { - t.Fatal(err) - } - for _, kv := range resp.Kvs { - t.Log(string(kv.Key), "=", string(kv.Value)) - } - - // Range over all keys in a namespace. - client.KV = namespace.NewKV(client.KV, "my-namespace/") - resp, err = client.Get(context.Background(), "", clientv3.WithFromKey()) - if err != nil { - t.Fatal(err) - } - for _, kv := range resp.Kvs { - t.Log(string(kv.Key), "=", string(kv.Value)) - } - - //Remove all keys without WithFromKey/WithPrefix func - _, err = client.Delete(context.Background(), "") - if err == nil { - // fatal error duo to without WithFromKey/WithPrefix func called. - t.Fatal(err) - } - - respDel, err := client.Delete(context.Background(), "", clientv3.WithFromKey()) - if err != nil { - // fatal error duo to with WithFromKey/WithPrefix func called. - t.Fatal(err) - } - t.Logf("delete keys:%d", respDel.Deleted) -} diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go deleted file mode 100644 index 7956205c5ea..00000000000 --- a/tests/integration/v3_leadership_test.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "golang.org/x/sync/errgroup" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestMoveLeader(t *testing.T) { testMoveLeader(t, true) } -func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) } - -func testMoveLeader(t *testing.T, auto bool) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - oldLeadIdx := clus.WaitLeader(t) - oldLeadID := uint64(clus.Members[oldLeadIdx].Server.MemberId()) - - // ensure followers go through leader transition while leadership transfer - idc := make(chan uint64) - stopc := make(chan struct{}) - defer close(stopc) - - for i := range clus.Members { - if oldLeadIdx != i { - go func(m *integration.Member) { - select { - case idc <- integration.CheckLeaderTransition(m, oldLeadID): - case <-stopc: - } - }(clus.Members[i]) - } - } - - target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.MemberId()) - if auto { - err := clus.Members[oldLeadIdx].Server.TransferLeadership() - if err != nil { - t.Fatal(err) - } - } else { - mvc := integration.ToGRPC(clus.Client(oldLeadIdx)).Maintenance - _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) - if err != nil { - t.Fatal(err) - } - } - - // wait until leader transitions have happened - var newLeadIDs [2]uint64 - for i := range newLeadIDs { - select { - case 
newLeadIDs[i] = <-idc: - case <-time.After(time.Second): - t.Fatal("timed out waiting for leader transition") - } - } - - // remaining members must agree on the same leader - if newLeadIDs[0] != newLeadIDs[1] { - t.Fatalf("expected same new leader %d == %d", newLeadIDs[0], newLeadIDs[1]) - } - - // new leader must be different than the old leader - if oldLeadID == newLeadIDs[0] { - t.Fatalf("expected old leader %d != new leader %d", oldLeadID, newLeadIDs[0]) - } - - // if move-leader were used, new leader must match transferee - if !auto { - if newLeadIDs[0] != target { - t.Fatalf("expected new leader %d != target %d", newLeadIDs[0], target) - } - } -} - -// TestMoveLeaderError ensures that request to non-leader fail. -func TestMoveLeaderError(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - oldLeadIdx := clus.WaitLeader(t) - followerIdx := (oldLeadIdx + 1) % 3 - - target := uint64(clus.Members[(oldLeadIdx+2)%3].Server.MemberId()) - - mvc := integration.ToGRPC(clus.Client(followerIdx)).Maintenance - _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) - if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) { - t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader) - } -} - -// TestMoveLeaderToLearnerError ensures that leader transfer to learner member will fail. -func TestMoveLeaderToLearnerError(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true}) - defer clus.Terminate(t) - - // we have to add and launch learner member after initial cluster was created, because - // bootstrapping a cluster with learner member is not supported. 
- clus.AddAndLaunchLearnerMember(t) - - learners, err := clus.GetLearnerMembers() - if err != nil { - t.Fatalf("failed to get the learner members in Cluster: %v", err) - } - if len(learners) != 1 { - t.Fatalf("added 1 learner to Cluster, got %d", len(learners)) - } - - learnerID := learners[0].ID - leaderIdx := clus.WaitLeader(t) - cli := clus.Client(leaderIdx) - _, err = cli.MoveLeader(context.Background(), learnerID) - if err == nil { - t.Fatalf("expecting leader transfer to learner to fail, got no error") - } - expectedErrKeywords := "bad leader transferee" - if !strings.Contains(err.Error(), expectedErrKeywords) { - t.Errorf("expecting error to contain %s, got %s", expectedErrKeywords, err.Error()) - } -} - -// TestTransferLeadershipWithLearner ensures TransferLeadership does not timeout due to learner is -// automatically picked by leader as transferee. -func TestTransferLeadershipWithLearner(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - clus.AddAndLaunchLearnerMember(t) - - learners, err := clus.GetLearnerMembers() - if err != nil { - t.Fatalf("failed to get the learner members in Cluster: %v", err) - } - if len(learners) != 1 { - t.Fatalf("added 1 learner to Cluster, got %d", len(learners)) - } - - leaderIdx := clus.WaitLeader(t) - errCh := make(chan error, 1) - go func() { - // note that this cluster has 1 leader and 1 learner. TransferLeadership should return nil. - // Leadership transfer is skipped in cluster with 1 voting member. 
- errCh <- clus.Members[leaderIdx].Server.TransferLeadership() - }() - select { - case err := <-errCh: - if err != nil { - t.Errorf("got error during leadership transfer: %v", err) - } - case <-time.After(5 * time.Second): - t.Error("timed out waiting for leader transition") - } -} - -func TestFirstCommitNotification(t *testing.T) { - integration.BeforeTest(t) - ctx := context.Background() - clusterSize := 3 - cluster := integration.NewCluster(t, &integration.ClusterConfig{Size: clusterSize}) - defer cluster.Terminate(t) - - oldLeaderIdx := cluster.WaitLeader(t) - oldLeaderClient := cluster.Client(oldLeaderIdx) - - newLeaderIdx := (oldLeaderIdx + 1) % clusterSize - newLeaderId := uint64(cluster.Members[newLeaderIdx].ID()) - - notifiers := make(map[int]<-chan struct{}, clusterSize) - for i, clusterMember := range cluster.Members { - notifiers[i] = clusterMember.Server.FirstCommitInTermNotify() - } - - _, err := oldLeaderClient.MoveLeader(context.Background(), newLeaderId) - - if err != nil { - t.Errorf("got error during leadership transfer: %v", err) - } - - t.Logf("Leadership transferred.") - t.Logf("Submitting write to make sure empty and 'foo' index entry was already flushed") - cli := cluster.RandClient() - - if _, err := cli.Put(ctx, "foo", "bar"); err != nil { - t.Fatalf("Failed to put kv pair.") - } - - // It's guaranteed now that leader contains the 'foo'->'bar' index entry. 
- leaderAppliedIndex := cluster.Members[newLeaderIdx].Server.AppliedIndex() - - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - group, groupContext := errgroup.WithContext(ctx) - - for i, notifier := range notifiers { - member, notifier := cluster.Members[i], notifier - group.Go(func() error { - return checkFirstCommitNotification(groupContext, t, member, leaderAppliedIndex, notifier) - }) - } - - err = group.Wait() - if err != nil { - t.Error(err) - } -} - -func checkFirstCommitNotification( - ctx context.Context, - t testing.TB, - member *integration.Member, - leaderAppliedIndex uint64, - notifier <-chan struct{}, -) error { - // wait until server applies all the changes of leader - for member.Server.AppliedIndex() < leaderAppliedIndex { - t.Logf("member.Server.AppliedIndex():%v <= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex) - select { - case <-ctx.Done(): - return ctx.Err() - default: - time.Sleep(100 * time.Millisecond) - } - } - select { - case msg, ok := <-notifier: - if ok { - return fmt.Errorf( - "member with ID %d got message via notifier, msg: %v", - member.ID(), - msg, - ) - } - default: - t.Logf("member.Server.AppliedIndex():%v >= leaderAppliedIndex:%v", member.Server.AppliedIndex(), leaderAppliedIndex) - return fmt.Errorf( - "notification was not triggered, member ID: %d", - member.ID(), - ) - } - - return nil -} diff --git a/tests/integration/v3_lease_test.go b/tests/integration/v3_lease_test.go deleted file mode 100644 index 8518b17879e..00000000000 --- a/tests/integration/v3_lease_test.go +++ /dev/null @@ -1,1113 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "errors" - "fmt" - "math" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/testutil" - framecfg "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/integration" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// TestV3LeasePromote ensures the newly elected leader can promote itself -// to the primary lessor, refresh the leases and start to manage leases. -// TODO: use customized clock to make this test go faster? -func TestV3LeasePromote(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true}) - defer clus.Terminate(t) - - // create lease - lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 3}) - ttl := time.Duration(lresp.TTL) * time.Second - afterGrant := time.Now() - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - - // wait until the lease is going to expire. - time.Sleep(time.Until(afterGrant.Add(ttl - time.Second))) - - // kill the current leader, all leases should be refreshed. 
- toStop := clus.WaitMembersForLeader(t, clus.Members) - beforeStop := time.Now() - clus.Members[toStop].Stop(t) - - var toWait []*integration.Member - for i, m := range clus.Members { - if i != toStop { - toWait = append(toWait, m) - } - } - clus.WaitMembersForLeader(t, toWait) - clus.Members[toStop].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - afterReelect := time.Now() - - // ensure lease is refreshed by waiting for a "long" time. - // it was going to expire anyway. - time.Sleep(time.Until(beforeStop.Add(ttl - time.Second))) - - if !leaseExist(t, clus, lresp.ID) { - t.Error("unexpected lease not exists") - } - - // wait until the renewed lease is expected to expire. - time.Sleep(time.Until(afterReelect.Add(ttl))) - - // wait for up to 10 seconds for lease to expire. - expiredCondition := func() (bool, error) { - return !leaseExist(t, clus, lresp.ID), nil - } - expired, err := testutil.Poll(100*time.Millisecond, 10*time.Second, expiredCondition) - if err != nil { - t.Error(err) - } - - if !expired { - t.Error("unexpected lease exists") - } -} - -// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked. -func TestV3LeaseRevoke(t *testing.T) { - integration.BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *integration.Cluster, leaseID int64) error { - lc := integration.ToGRPC(clus.RandClient()).Lease - _, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID}) - return err - }) -} - -// TestV3LeaseGrantByID ensures leases may be created by a given id. 
-func TestV3LeaseGrantByID(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // create fixed lease - lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( - context.TODO(), - &pb.LeaseGrantRequest{ID: 1, TTL: 1}) - if err != nil { - t.Errorf("could not create lease 1 (%v)", err) - } - if lresp.ID != 1 { - t.Errorf("got id %v, wanted id %v", lresp.ID, 1) - } - - // create duplicate fixed lease - _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( - context.TODO(), - &pb.LeaseGrantRequest{ID: 1, TTL: 1}) - if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) { - t.Error(err) - } - - // create fresh fixed lease - lresp, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( - context.TODO(), - &pb.LeaseGrantRequest{ID: 2, TTL: 1}) - if err != nil { - t.Errorf("could not create lease 2 (%v)", err) - } - if lresp.ID != 2 { - t.Errorf("got id %v, wanted id %v", lresp.ID, 2) - } -} - -// TestV3LeaseNegativeID ensures restarted member lessor can recover negative leaseID from backend. 
-// -// When the negative leaseID is used for lease revoke, all etcd nodes will remove the lease -// and delete associated keys to ensure kv store data consistency -// -// It ensures issue 12535 is fixed by PR 13676 -func TestV3LeaseNegativeID(t *testing.T) { - tcs := []struct { - leaseID int64 - k []byte - v []byte - }{ - { - leaseID: -1, // int64 -1 is 2^64 -1 in uint64 - k: []byte("foo"), - v: []byte("bar"), - }, - { - leaseID: math.MaxInt64, - k: []byte("bar"), - v: []byte("foo"), - }, - { - leaseID: math.MinInt64, - k: []byte("hello"), - v: []byte("world"), - }, - } - for _, tc := range tcs { - t.Run(fmt.Sprintf("test with lease ID %16x", tc.leaseID), func(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - cc := clus.RandClient() - lresp, err := integration.ToGRPC(cc).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{ID: tc.leaseID, TTL: 300}) - if err != nil { - t.Errorf("could not create lease %d (%v)", tc.leaseID, err) - } - if lresp.ID != tc.leaseID { - t.Errorf("got id %v, wanted id %v", lresp.ID, tc.leaseID) - } - putr := &pb.PutRequest{Key: tc.k, Value: tc.v, Lease: tc.leaseID} - _, err = integration.ToGRPC(cc).KV.Put(ctx, putr) - if err != nil { - t.Errorf("couldn't put key (%v)", err) - } - - // wait for backend Commit - time.Sleep(100 * time.Millisecond) - // restore lessor from db file - clus.Members[2].Stop(t) - if err := clus.Members[2].Restart(t); err != nil { - t.Fatal(err) - } - - // revoke lease should remove key - integration.WaitClientV3(t, clus.Members[2].Client) - _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: tc.leaseID}) - if err != nil { - t.Errorf("could not revoke lease %d (%v)", tc.leaseID, err) - } - var revision int64 - for _, m := range clus.Members { - getr := &pb.RangeRequest{Key: tc.k} - getresp, err := 
integration.ToGRPC(m.Client).KV.Range(ctx, getr) - if err != nil { - t.Fatal(err) - } - if revision == 0 { - revision = getresp.Header.Revision - } - if revision != getresp.Header.Revision { - t.Errorf("expect revision %d, but got %d", revision, getresp.Header.Revision) - } - if len(getresp.Kvs) != 0 { - t.Errorf("lease removed but key remains") - } - } - }) - } -} - -// TestV3LeaseExpire ensures a key is deleted once a key expires. -func TestV3LeaseExpire(t *testing.T) { - integration.BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *integration.Cluster, leaseID int64) error { - // let lease lapse; wait for deleted key - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - wStream, err := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if err != nil { - return err - } - - wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: 1}}} - if err := wStream.Send(wreq); err != nil { - return err - } - if _, err := wStream.Recv(); err != nil { - // the 'created' message - return err - } - if _, err := wStream.Recv(); err != nil { - // the 'put' message - return err - } - - errc := make(chan error, 1) - go func() { - resp, err := wStream.Recv() - switch { - case err != nil: - errc <- err - case len(resp.Events) != 1: - fallthrough - case resp.Events[0].Type != mvccpb.DELETE: - errc <- fmt.Errorf("expected key delete, got %v", resp) - default: - errc <- nil - } - }() - - select { - case <-time.After(15 * time.Second): - return fmt.Errorf("lease expiration too slow") - case err := <-errc: - return err - } - }) -} - -// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive. 
-func TestV3LeaseKeepAlive(t *testing.T) { - integration.BeforeTest(t) - testLeaseRemoveLeasedKey(t, func(clus *integration.Cluster, leaseID int64) error { - lc := integration.ToGRPC(clus.RandClient()).Lease - lreq := &pb.LeaseKeepAliveRequest{ID: leaseID} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - lac, err := lc.LeaseKeepAlive(ctx) - if err != nil { - return err - } - defer lac.CloseSend() - - // renew long enough so lease would've expired otherwise - for i := 0; i < 3; i++ { - if err = lac.Send(lreq); err != nil { - return err - } - lresp, rxerr := lac.Recv() - if rxerr != nil { - return rxerr - } - if lresp.ID != leaseID { - return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID) - } - time.Sleep(time.Duration(lresp.TTL/2) * time.Second) - } - _, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID}) - return err - }) -} - -// TestV3LeaseCheckpoint ensures a lease checkpoint results in a remaining TTL being persisted -// across leader elections. 
-func TestV3LeaseCheckpoint(t *testing.T) { - tcs := []struct { - name string - checkpointingEnabled bool - ttl time.Duration - checkpointingInterval time.Duration - leaderChanges int - clusterSize int - expectTTLIsGT time.Duration - expectTTLIsLT time.Duration - }{ - { - name: "Checkpointing disabled, lease TTL is reset", - ttl: 300 * time.Second, - leaderChanges: 1, - clusterSize: 3, - expectTTLIsGT: 298 * time.Second, - }, - { - name: "Checkpointing enabled 10s, lease TTL is preserved after leader change", - ttl: 300 * time.Second, - checkpointingEnabled: true, - checkpointingInterval: 10 * time.Second, - leaderChanges: 1, - clusterSize: 3, - expectTTLIsLT: 290 * time.Second, - }, - { - name: "Checkpointing enabled 10s, lease TTL is preserved after cluster restart", - ttl: 300 * time.Second, - checkpointingEnabled: true, - checkpointingInterval: 10 * time.Second, - leaderChanges: 1, - clusterSize: 1, - expectTTLIsLT: 290 * time.Second, - }, - { - // Checking if checkpointing continues after the first leader change. 
- name: "Checkpointing enabled 10s, lease TTL is preserved after 2 leader changes", - ttl: 300 * time.Second, - checkpointingEnabled: true, - checkpointingInterval: 10 * time.Second, - leaderChanges: 2, - clusterSize: 3, - expectTTLIsLT: 280 * time.Second, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - integration.BeforeTest(t) - config := &integration.ClusterConfig{ - Size: tc.clusterSize, - EnableLeaseCheckpoint: tc.checkpointingEnabled, - LeaseCheckpointInterval: tc.checkpointingInterval, - } - clus := integration.NewCluster(t, config) - defer clus.Terminate(t) - - // create lease - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - c := integration.ToGRPC(clus.RandClient()) - lresp, err := c.Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: int64(tc.ttl.Seconds())}) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < tc.leaderChanges; i++ { - // wait for a checkpoint to occur - time.Sleep(tc.checkpointingInterval + 1*time.Second) - - // Force a leader election - leaderId := clus.WaitLeader(t) - leader := clus.Members[leaderId] - leader.Stop(t) - time.Sleep(time.Duration(3*integration.ElectionTicks) * framecfg.TickDuration) - leader.Restart(t) - } - - newLeaderId := clus.WaitLeader(t) - c2 := integration.ToGRPC(clus.Client(newLeaderId)) - - time.Sleep(250 * time.Millisecond) - - // Check the TTL of the new leader - var ttlresp *pb.LeaseTimeToLiveResponse - for i := 0; i < 10; i++ { - if ttlresp, err = c2.Lease.LeaseTimeToLive(ctx, &pb.LeaseTimeToLiveRequest{ID: lresp.ID}); err != nil { - if status, ok := status.FromError(err); ok && status.Code() == codes.Unavailable { - time.Sleep(time.Millisecond * 250) - } else { - t.Fatal(err) - } - } - } - - if tc.expectTTLIsGT != 0 && time.Duration(ttlresp.TTL)*time.Second < tc.expectTTLIsGT { - t.Errorf("Expected lease ttl (%v) to be >= than (%v)", time.Duration(ttlresp.TTL)*time.Second, tc.expectTTLIsGT) - } - - if tc.expectTTLIsLT != 0 && 
time.Duration(ttlresp.TTL)*time.Second > tc.expectTTLIsLT { - t.Errorf("Expected lease ttl (%v) to be lower than (%v)", time.Duration(ttlresp.TTL)*time.Second, tc.expectTTLIsLT) - } - }) - } -} - -// TestV3LeaseExists creates a lease on a random client and confirms it exists in the cluster. -func TestV3LeaseExists(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - // create lease - ctx0, cancel0 := context.WithCancel(context.Background()) - defer cancel0() - lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( - ctx0, - &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - - if !leaseExist(t, clus, lresp.ID) { - t.Error("unexpected lease not exists") - } -} - -// TestV3LeaseLeases creates leases and confirms list RPC fetches created ones. -func TestV3LeaseLeases(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx0, cancel0 := context.WithCancel(context.Background()) - defer cancel0() - - // create leases - var ids []int64 - for i := 0; i < 5; i++ { - lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( - ctx0, - &pb.LeaseGrantRequest{TTL: 30}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - ids = append(ids, lresp.ID) - } - - lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseLeases( - context.Background(), - &pb.LeaseLeasesRequest{}) - if err != nil { - t.Fatal(err) - } - for i := range lresp.Leases { - if lresp.Leases[i].ID != ids[i] { - t.Fatalf("#%d: lease ID expected %d, got %d", i, ids[i], lresp.Leases[i].ID) - } - } -} - -// TestV3LeaseRenewStress keeps creating lease and renewing it immediately to ensure the renewal goes through. 
-// it was oberserved that the immediate lease renewal after granting a lease from follower resulted lease not found. -// related issue https://github.com/etcd-io/etcd/issues/6978 -func TestV3LeaseRenewStress(t *testing.T) { - testLeaseStress(t, stressLeaseRenew, false) -} - -// TestV3LeaseRenewStressWithClusterClient is similar to TestV3LeaseRenewStress, -// but it uses a cluster client instead of a specific member's client. -// The related issue is https://github.com/etcd-io/etcd/issues/13675. -func TestV3LeaseRenewStressWithClusterClient(t *testing.T) { - testLeaseStress(t, stressLeaseRenew, true) -} - -// TestV3LeaseTimeToLiveStress keeps creating lease and retrieving it immediately to ensure the lease can be retrieved. -// it was oberserved that the immediate lease retrieval after granting a lease from follower resulted lease not found. -// related issue https://github.com/etcd-io/etcd/issues/6978 -func TestV3LeaseTimeToLiveStress(t *testing.T) { - testLeaseStress(t, stressLeaseTimeToLive, false) -} - -// TestV3LeaseTimeToLiveStressWithClusterClient is similar to TestV3LeaseTimeToLiveStress, -// but it uses a cluster client instead of a specific member's client. -// The related issue is https://github.com/etcd-io/etcd/issues/13675. 
-func TestV3LeaseTimeToLiveStressWithClusterClient(t *testing.T) { - testLeaseStress(t, stressLeaseTimeToLive, true) -} - -func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error, useClusterClient bool) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - errc := make(chan error) - - if useClusterClient { - clusterClient, err := clus.ClusterClient(t) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 300; i++ { - go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clusterClient).Lease) }(i) - } - } else { - for i := 0; i < 100; i++ { - for j := 0; j < 3; j++ { - go func(i int) { errc <- stresser(ctx, integration.ToGRPC(clus.Client(i)).Lease) }(j) - } - } - } - - for i := 0; i < 300; i++ { - if err := <-errc; err != nil { - t.Fatal(err) - } - } -} - -func stressLeaseRenew(tctx context.Context, lc pb.LeaseClient) (reterr error) { - defer func() { - if tctx.Err() != nil { - reterr = nil - } - }() - lac, err := lc.LeaseKeepAlive(tctx) - if err != nil { - return err - } - for tctx.Err() == nil { - resp, gerr := lc.LeaseGrant(tctx, &pb.LeaseGrantRequest{TTL: 60}) - if gerr != nil { - continue - } - err = lac.Send(&pb.LeaseKeepAliveRequest{ID: resp.ID}) - if err != nil { - continue - } - rresp, rxerr := lac.Recv() - if rxerr != nil { - continue - } - if rresp.TTL == 0 { - return errors.New("TTL shouldn't be 0 so soon") - } - } - return nil -} - -func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr error) { - defer func() { - if tctx.Err() != nil { - reterr = nil - } - }() - for tctx.Err() == nil { - resp, gerr := lc.LeaseGrant(tctx, &pb.LeaseGrantRequest{TTL: 60}) - if gerr != nil { - continue - } - _, kerr := lc.LeaseTimeToLive(tctx, &pb.LeaseTimeToLiveRequest{ID: resp.ID}) - if rpctypes.Error(kerr) == rpctypes.ErrLeaseNotFound { - return 
kerr - } - } - return nil -} - -func TestV3PutOnNonExistLease(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - badLeaseID := int64(0x12345678) - putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: badLeaseID} - _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, putr) - if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseNotFound) { - t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCLeaseNotFound) - } -} - -// TestV3GetNonExistLease ensures client retrieving nonexistent lease on a follower doesn't result node panic -// related issue https://github.com/etcd-io/etcd/issues/6537 -func TestV3GetNonExistLease(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - lc := integration.ToGRPC(clus.RandClient()).Lease - lresp, err := lc.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 10}) - if err != nil { - t.Errorf("failed to create lease %v", err) - } - _, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } - - leaseTTLr := &pb.LeaseTimeToLiveRequest{ - ID: lresp.ID, - Keys: true, - } - - for _, m := range clus.Members { - // quorum-read to ensure revoke completes before TimeToLive - if _, err := integration.ToGRPC(m.Client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil { - t.Fatal(err) - } - resp, err := integration.ToGRPC(m.Client).Lease.LeaseTimeToLive(ctx, leaseTTLr) - if err != nil { - t.Fatalf("expected non nil error, but go %v", err) - } - if resp.TTL != -1 { - t.Fatalf("expected TTL to be -1, but got %v", resp.TTL) - } - } -} - -// TestV3LeaseSwitch tests a key can be switched from one lease to another. 
-func TestV3LeaseSwitch(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - key := "foo" - - // create lease - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - lresp1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } - lresp2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } - - // attach key on lease1 then switch it to lease2 - put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID} - _, err := integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put1) - if err != nil { - t.Fatal(err) - } - put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID} - _, err = integration.ToGRPC(clus.RandClient()).KV.Put(ctx, put2) - if err != nil { - t.Fatal(err) - } - - // revoke lease1 should not remove key - _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID}) - if err != nil { - t.Fatal(err) - } - rreq := &pb.RangeRequest{Key: []byte("foo")} - rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 1 { - t.Fatalf("unexpect removal of key") - } - - // revoke lease2 should remove key - _, err = integration.ToGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID}) - if err != nil { - t.Fatal(err) - } - rresp, err = integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 0 { - t.Fatalf("lease removed but key remains") - } -} - -// TestV3LeaseFailover ensures the old leader drops lease keepalive requests within -// election timeout after it loses its quorum. And the new leader extends the TTL of -// the lease to at least TTL + election timeout. 
-func TestV3LeaseFailover(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - toIsolate := clus.WaitMembersForLeader(t, clus.Members) - - lc := integration.ToGRPC(clus.Client(toIsolate)).Lease - - // create lease - lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - - // isolate the current leader with its followers. - clus.Members[toIsolate].Pause() - - lreq := &pb.LeaseKeepAliveRequest{ID: lresp.ID} - - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - mctx := metadata.NewOutgoingContext(context.Background(), md) - ctx, cancel := context.WithCancel(mctx) - defer cancel() - lac, err := lc.LeaseKeepAlive(ctx) - if err != nil { - t.Fatal(err) - } - - // send keep alive to old leader until the old leader starts - // to drop lease request. - var expectedExp time.Time - for { - if err = lac.Send(lreq); err != nil { - break - } - lkresp, rxerr := lac.Recv() - if rxerr != nil { - break - } - expectedExp = time.Now().Add(time.Duration(lkresp.TTL) * time.Second) - time.Sleep(time.Duration(lkresp.TTL/2) * time.Second) - } - - clus.Members[toIsolate].Resume() - clus.WaitMembersForLeader(t, clus.Members) - - // lease should not expire at the last received expire deadline. - time.Sleep(time.Until(expectedExp) - 500*time.Millisecond) - - if !leaseExist(t, clus, lresp.ID) { - t.Error("unexpected lease not exists") - } -} - -// TestV3LeaseRequireLeader ensures that a Recv will get a leader -// loss error if there is no leader. 
-func TestV3LeaseRequireLeader(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - lc := integration.ToGRPC(clus.Client(0)).Lease - clus.Members[1].Stop(t) - clus.Members[2].Stop(t) - - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - mctx := metadata.NewOutgoingContext(context.Background(), md) - ctx, cancel := context.WithCancel(mctx) - defer cancel() - lac, err := lc.LeaseKeepAlive(ctx) - if err != nil { - t.Fatal(err) - } - - donec := make(chan struct{}) - go func() { - defer close(donec) - resp, err := lac.Recv() - if err == nil { - t.Errorf("got response %+v, expected error", resp) - } - if rpctypes.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { - t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) - } - }() - select { - case <-time.After(5 * time.Second): - t.Fatal("did not receive leader loss error (in 5-sec)") - case <-donec: - } -} - -const fiveMinTTL int64 = 300 - -// TestV3LeaseRecoverAndRevoke ensures that revoking a lease after restart deletes the attached key. 
-func TestV3LeaseRecoverAndRevoke(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.Client(0)).KV - lsc := integration.ToGRPC(clus.Client(0)).Lease - - lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } - - // restart server and ensure lease still exists - clus.Members[0].Stop(t) - clus.Members[0].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - - // overwrite old client with newly dialed connection - // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } - kvc = integration.ToGRPC(nc).KV - lsc = integration.ToGRPC(nc).Lease - defer nc.Close() - - // revoke should delete the key - _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } - rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 0 { - t.Fatalf("lease removed but key remains") - } -} - -// TestV3LeaseRevokeAndRecover ensures that revoked key stays deleted after restart. 
-func TestV3LeaseRevokeAndRecover(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.Client(0)).KV - lsc := integration.ToGRPC(clus.Client(0)).Lease - - lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } - - // revoke should delete the key - _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } - - // restart server and ensure revoked key doesn't exist - clus.Members[0].Stop(t) - clus.Members[0].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - - // overwrite old client with newly dialed connection - // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } - kvc = integration.ToGRPC(nc).KV - defer nc.Close() - - rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 0 { - t.Fatalf("lease removed but key remains") - } -} - -// TestV3LeaseRecoverKeyWithDetachedLease ensures that revoking a detached lease after restart -// does not delete the key. 
-func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.Client(0)).KV - lsc := integration.ToGRPC(clus.Client(0)).Lease - - lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } - - // overwrite lease with none - _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) - if err != nil { - t.Fatal(err) - } - - // restart server and ensure lease still exists - clus.Members[0].Stop(t) - clus.Members[0].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - - // overwrite old client with newly dialed connection - // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } - kvc = integration.ToGRPC(nc).KV - lsc = integration.ToGRPC(nc).Lease - defer nc.Close() - - // revoke the detached lease - _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID}) - if err != nil { - t.Fatal(err) - } - rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 1 { - t.Fatalf("only detached lease removed, key should remain") - } -} - -func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.Client(0)).KV - lsc := integration.ToGRPC(clus.Client(0)).Lease - - var leaseIDs []int64 - for i := 0; i < 2; i++ { - lresp, err := 
lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL}) - if err != nil { - t.Fatal(err) - } - if lresp.Error != "" { - t.Fatal(lresp.Error) - } - leaseIDs = append(leaseIDs, lresp.ID) - - _, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}) - if err != nil { - t.Fatal(err) - } - } - - // restart server and ensure lease still exists - clus.Members[0].Stop(t) - clus.Members[0].Restart(t) - clus.WaitMembersForLeader(t, clus.Members) - for i, leaseID := range leaseIDs { - if !leaseExist(t, clus, leaseID) { - t.Errorf("#%d: unexpected lease not exists", i) - } - } - - // overwrite old client with newly dialed connection - // otherwise, error with "grpc: RPC failed fast due to transport failure" - nc, err := integration.NewClientV3(clus.Members[0]) - if err != nil { - t.Fatal(err) - } - kvc = integration.ToGRPC(nc).KV - lsc = integration.ToGRPC(nc).Lease - defer nc.Close() - - // revoke the old lease - _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[0]}) - if err != nil { - t.Fatal(err) - } - // key should still exist - rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 1 { - t.Fatalf("only detached lease removed, key should remain") - } - - // revoke the latest lease - _, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[1]}) - if err != nil { - t.Fatal(err) - } - rresp, err = kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 0 { - t.Fatalf("lease removed but key remains") - } -} - -// acquireLeaseAndKey creates a new lease and creates an attached key. 
-func acquireLeaseAndKey(clus *integration.Cluster, key string) (int64, error) { - // create lease - lresp, err := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant( - context.TODO(), - &pb.LeaseGrantRequest{TTL: 1}) - if err != nil { - return 0, err - } - if lresp.Error != "" { - return 0, fmt.Errorf(lresp.Error) - } - // attach to key - put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID} - if _, err := integration.ToGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil { - return 0, err - } - return lresp.ID, nil -} - -// testLeaseRemoveLeasedKey performs some action while holding a lease with an -// attached key "foo", then confirms the key is gone. -func testLeaseRemoveLeasedKey(t *testing.T, act func(*integration.Cluster, int64) error) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - leaseID, err := acquireLeaseAndKey(clus, "foo") - if err != nil { - t.Fatal(err) - } - - if err = act(clus, leaseID); err != nil { - t.Fatal(err) - } - - // confirm no key - rreq := &pb.RangeRequest{Key: []byte("foo")} - rresp, err := integration.ToGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq) - if err != nil { - t.Fatal(err) - } - if len(rresp.Kvs) != 0 { - t.Fatalf("lease removed but key remains") - } -} - -func leaseExist(t *testing.T, clus *integration.Cluster, leaseID int64) bool { - l := integration.ToGRPC(clus.RandClient()).Lease - - _, err := l.LeaseGrant(context.Background(), &pb.LeaseGrantRequest{ID: leaseID, TTL: 5}) - if err == nil { - _, err = l.LeaseRevoke(context.Background(), &pb.LeaseRevokeRequest{ID: leaseID}) - if err != nil { - t.Fatalf("failed to check lease %v", err) - } - return false - } - - if eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) { - return true - } - t.Fatalf("unexpecter error %v", err) - - return true -} diff --git a/tests/integration/v3_stm_test.go b/tests/integration/v3_stm_test.go deleted file mode 100644 index 94ec37da6a9..00000000000 --- 
a/tests/integration/v3_stm_test.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "fmt" - "math/rand" - "strconv" - "testing" - - "go.etcd.io/etcd/client/pkg/v3/testutil" - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestSTMConflict tests that conflicts are retried. -func TestSTMConflict(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - etcdc := clus.RandClient() - keys := make([]string, 5) - for i := 0; i < len(keys); i++ { - keys[i] = fmt.Sprintf("foo-%d", i) - if _, err := etcdc.Put(context.TODO(), keys[i], "100"); err != nil { - t.Fatalf("could not make key (%v)", err) - } - } - - errc := make(chan error) - for i := range keys { - curEtcdc := clus.RandClient() - srcKey := keys[i] - applyf := func(stm concurrency.STM) error { - src := stm.Get(srcKey) - // must be different key to avoid double-adding - dstKey := srcKey - for dstKey == srcKey { - dstKey = keys[rand.Intn(len(keys))] - } - dst := stm.Get(dstKey) - srcV, _ := strconv.ParseInt(src, 10, 64) - dstV, _ := strconv.ParseInt(dst, 10, 64) - if srcV == 0 { - // can't rand.Intn on 0, so skip this transaction - return nil - } - xfer := int64(rand.Intn(int(srcV)) / 2) - stm.Put(srcKey, fmt.Sprintf("%d", 
srcV-xfer)) - stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer)) - return nil - } - go func() { - iso := concurrency.WithIsolation(concurrency.RepeatableReads) - _, err := concurrency.NewSTM(curEtcdc, applyf, iso) - errc <- err - }() - } - - // wait for txns - for range keys { - if err := <-errc; err != nil { - t.Fatalf("apply failed (%v)", err) - } - } - - // ensure sum matches initial sum - sum := 0 - for _, oldkey := range keys { - rk, err := etcdc.Get(context.TODO(), oldkey) - if err != nil { - t.Fatalf("couldn't fetch key %s (%v)", oldkey, err) - } - v, _ := strconv.ParseInt(string(rk.Kvs[0].Value), 10, 64) - sum += int(v) - } - if sum != len(keys)*100 { - t.Fatalf("bad sum. got %d, expected %d", sum, len(keys)*100) - } -} - -// TestSTMPutNewKey confirms a STM put on a new key is visible after commit. -func TestSTMPutNewKey(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - etcdc := clus.RandClient() - applyf := func(stm concurrency.STM) error { - stm.Put("foo", "bar") - return nil - } - - iso := concurrency.WithIsolation(concurrency.RepeatableReads) - if _, err := concurrency.NewSTM(etcdc, applyf, iso); err != nil { - t.Fatalf("error on stm txn (%v)", err) - } - - resp, err := etcdc.Get(context.TODO(), "foo") - if err != nil { - t.Fatalf("error fetching key (%v)", err) - } - if string(resp.Kvs[0].Value) != "bar" { - t.Fatalf("bad value. got %+v, expected 'bar' value", resp) - } -} - -// TestSTMAbort tests that an aborted txn does not modify any keys. 
-func TestSTMAbort(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - etcdc := clus.RandClient() - ctx, cancel := context.WithCancel(context.TODO()) - applyf := func(stm concurrency.STM) error { - stm.Put("foo", "baz") - cancel() - stm.Put("foo", "bap") - return nil - } - - iso := concurrency.WithIsolation(concurrency.RepeatableReads) - sctx := concurrency.WithAbortContext(ctx) - if _, err := concurrency.NewSTM(etcdc, applyf, iso, sctx); err == nil { - t.Fatalf("no error on stm txn") - } - - resp, err := etcdc.Get(context.TODO(), "foo") - if err != nil { - t.Fatalf("error fetching key (%v)", err) - } - if len(resp.Kvs) != 0 { - t.Fatalf("bad value. got %+v, expected nothing", resp) - } -} - -// TestSTMSerialize tests that serialization is honored when serializable. -func TestSTMSerialize(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - etcdc := clus.RandClient() - - // set up initial keys - keys := make([]string, 5) - for i := 0; i < len(keys); i++ { - keys[i] = fmt.Sprintf("foo-%d", i) - } - - // update keys in full batches - updatec := make(chan struct{}) - go func() { - defer close(updatec) - for i := 0; i < 5; i++ { - s := fmt.Sprintf("%d", i) - var ops []v3.Op - for _, k := range keys { - ops = append(ops, v3.OpPut(k, s)) - } - if _, err := etcdc.Txn(context.TODO()).Then(ops...).Commit(); err != nil { - t.Errorf("couldn't put keys (%v)", err) - } - updatec <- struct{}{} - } - }() - - // read all keys in txn, make sure all values match - errc := make(chan error) - for range updatec { - curEtcdc := clus.RandClient() - applyf := func(stm concurrency.STM) error { - var vs []string - for i := range keys { - vs = append(vs, stm.Get(keys[i])) - } - for i := range vs { - if vs[0] != vs[i] { - return fmt.Errorf("got vs[%d] = %v, want %v", i, vs[i], vs[0]) - } - } - return 
nil - } - go func() { - iso := concurrency.WithIsolation(concurrency.Serializable) - _, err := concurrency.NewSTM(curEtcdc, applyf, iso) - errc <- err - }() - } - - for i := 0; i < 5; i++ { - if err := <-errc; err != nil { - t.Error(err) - } - } -} - -// TestSTMApplyOnConcurrentDeletion ensures that concurrent key deletion -// fails the first GET revision comparison within STM; trigger retry. -func TestSTMApplyOnConcurrentDeletion(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - etcdc := clus.RandClient() - if _, err := etcdc.Put(context.TODO(), "foo", "bar"); err != nil { - t.Fatal(err) - } - donec, readyc := make(chan struct{}), make(chan struct{}) - go func() { - <-readyc - if _, err := etcdc.Delete(context.TODO(), "foo"); err != nil { - t.Error(err) - } - close(donec) - }() - - try := 0 - applyf := func(stm concurrency.STM) error { - try++ - stm.Get("foo") - if try == 1 { - // trigger delete to make GET rev comparison outdated - close(readyc) - <-donec - } - stm.Put("foo2", "bar2") - return nil - } - - iso := concurrency.WithIsolation(concurrency.RepeatableReads) - if _, err := concurrency.NewSTM(etcdc, applyf, iso); err != nil { - t.Fatalf("error on stm txn (%v)", err) - } - if try != 2 { - t.Fatalf("STM apply expected to run twice, got %d", try) - } - - resp, err := etcdc.Get(context.TODO(), "foo2") - if err != nil { - t.Fatalf("error fetching key (%v)", err) - } - if string(resp.Kvs[0].Value) != "bar2" { - t.Fatalf("bad value. 
got %+v, expected 'bar2' value", resp) - } -} - -func TestSTMSerializableSnapshotPut(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - cli := clus.Client(0) - // key with lower create/mod revision than keys being updated - _, err := cli.Put(context.TODO(), "a", "0") - testutil.AssertNil(t, err) - - tries := 0 - applyf := func(stm concurrency.STM) error { - if tries > 2 { - return fmt.Errorf("too many retries") - } - tries++ - stm.Get("a") - stm.Put("b", "1") - return nil - } - - iso := concurrency.WithIsolation(concurrency.SerializableSnapshot) - _, err = concurrency.NewSTM(cli, applyf, iso) - testutil.AssertNil(t, err) - _, err = concurrency.NewSTM(cli, applyf, iso) - testutil.AssertNil(t, err) - - resp, err := cli.Get(context.TODO(), "b") - testutil.AssertNil(t, err) - if resp.Kvs[0].Version != 2 { - t.Fatalf("bad version. got %+v, expected version 2", resp) - } -} diff --git a/tests/integration/v3_tls_test.go b/tests/integration/v3_tls_test.go deleted file mode 100644 index ec7bcbf3fd1..00000000000 --- a/tests/integration/v3_tls_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "crypto/tls" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -func TestTLSClientCipherSuitesValid(t *testing.T) { testTLSCipherSuites(t, true) } -func TestTLSClientCipherSuitesMismatch(t *testing.T) { testTLSCipherSuites(t, false) } - -// testTLSCipherSuites ensures mismatching client-side cipher suite -// fail TLS handshake with the server. -func testTLSCipherSuites(t *testing.T, valid bool) { - integration.BeforeTest(t) - - cipherSuites := []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - } - srvTLS, cliTLS := integration.TestTLSInfo, integration.TestTLSInfo - if valid { - srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites, cipherSuites - } else { - srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:] - } - - // go1.13 enables TLS 1.3 by default - // and in TLS 1.3, cipher suites are not configurable, - // so setting Max TLS version to TLS 1.2 to test cipher config. 
- srvTLS.MaxVersion = tls.VersionTLS12 - cliTLS.MaxVersion = tls.VersionTLS12 - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, ClientTLS: &srvTLS}) - defer clus.Terminate(t) - - cc, err := cliTLS.ClientConfig() - if err != nil { - t.Fatal(err) - } - cli, cerr := integration.NewClient(t, clientv3.Config{ - Endpoints: []string{clus.Members[0].GRPCURL()}, - DialTimeout: time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - TLS: cc, - }) - if cli != nil { - cli.Close() - } - if !valid && cerr != context.DeadlineExceeded { - t.Fatalf("expected %v with TLS handshake failure, got %v", context.DeadlineExceeded, cerr) - } - if valid && cerr != nil { - t.Fatalf("expected TLS handshake success, got %v", cerr) - } -} - -func TestTLSMinMaxVersion(t *testing.T) { - integration.BeforeTest(t) - - tests := []struct { - name string - minVersion uint16 - maxVersion uint16 - expectError bool - }{ - { - name: "Connect with default TLS version should succeed", - minVersion: 0, - maxVersion: 0, - }, - { - name: "Connect with TLS 1.2 only should fail", - minVersion: tls.VersionTLS12, - maxVersion: tls.VersionTLS12, - expectError: true, - }, - { - name: "Connect with TLS 1.2 and 1.3 should succeed", - minVersion: tls.VersionTLS12, - maxVersion: tls.VersionTLS13, - }, - { - name: "Connect with TLS 1.3 only should succeed", - minVersion: tls.VersionTLS13, - maxVersion: tls.VersionTLS13, - }, - } - - // Configure server to support TLS 1.3 only. 
- srvTLS := integration.TestTLSInfo - srvTLS.MinVersion = tls.VersionTLS13 - srvTLS.MaxVersion = tls.VersionTLS13 - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, ClientTLS: &srvTLS}) - defer clus.Terminate(t) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cc, err := integration.TestTLSInfo.ClientConfig() - assert.NoError(t, err) - - cc.MinVersion = tt.minVersion - cc.MaxVersion = tt.maxVersion - cli, cerr := integration.NewClient(t, clientv3.Config{ - Endpoints: []string{clus.Members[0].GRPCURL()}, - DialTimeout: time.Second, - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - TLS: cc, - }) - if cerr != nil { - assert.True(t, tt.expectError, "got TLS handshake error while expecting success: %v", cerr) - assert.Equal(t, context.DeadlineExceeded, cerr, "expected %v with TLS handshake failure, got %v", context.DeadlineExceeded, cerr) - return - } - - cli.Close() - }) - } -} diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go deleted file mode 100644 index bdebeacfc5c..00000000000 --- a/tests/integration/v3_watch_restore_test.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "fmt" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/tests/v3/framework/config" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// MustFetchNotEmptyMetric attempts to fetch given 'metric' from 'member', -// waiting for not-empty value or 'timeout'. -func MustFetchNotEmptyMetric(tb testing.TB, member *integration.Member, metric string, timeout <-chan time.Time) string { - metricValue := "" - tick := time.Tick(config.TickDuration) - for metricValue == "" { - tb.Logf("Waiting for metric: %v", metric) - select { - case <-timeout: - tb.Fatalf("Failed to fetch metric %v", metric) - return "" - case <-tick: - var err error - metricValue, err = member.Metric(metric) - if err != nil { - tb.Fatal(err) - } - } - } - return metricValue -} - -// TestV3WatchRestoreSnapshotUnsync tests whether slow follower can restore -// from leader snapshot, and still notify on watchers from an old revision -// that were created in synced watcher group in the first place. 
-// TODO: fix panic with gRPC proxy "panic: watcher current revision should not exceed current revision" -func TestV3WatchRestoreSnapshotUnsync(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{ - Size: 3, - SnapshotCount: 10, - SnapshotCatchUpEntries: 5, - }) - defer clus.Terminate(t) - - // spawn a watcher before shutdown, and put it in synced watcher - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, errW := integration.ToGRPC(clus.Client(0)).Watch.Watch(ctx) - if errW != nil { - t.Fatal(errW) - } - if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 5}}}); err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - wresp, errR := wStream.Recv() - if errR != nil { - t.Errorf("wStream.Recv error: %v", errR) - } - if !wresp.Created { - t.Errorf("wresp.Created got = %v, want = true", wresp.Created) - } - - clus.Members[0].InjectPartition(t, clus.Members[1:]...) - initialLead := clus.WaitMembersForLeader(t, clus.Members[1:]) - t.Logf("elected lead: %v", clus.Members[initialLead].Server.MemberId()) - t.Logf("sleeping for 2 seconds") - time.Sleep(2 * time.Second) - t.Logf("sleeping for 2 seconds DONE") - - kvc := integration.ToGRPC(clus.Client(1)).KV - - // to trigger snapshot from the leader to the stopped follower - for i := 0; i < 15; i++ { - _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) - if err != nil { - t.Errorf("#%d: couldn't put key (%v)", i, err) - } - } - - // trigger snapshot send from leader to this slow follower - // which then calls watchable store Restore - clus.Members[0].RecoverPartition(t, clus.Members[1:]...) - // We don't expect leadership change here, just recompute the leader'Server index - // within clus.Members list. 
- lead := clus.WaitLeader(t) - - // Sending is scheduled on fifo 'sched' within EtcdServer::run, - // so it can start delayed after recovery. - send := MustFetchNotEmptyMetric(t, clus.Members[lead], - "etcd_network_snapshot_send_inflights_total", - time.After(5*time.Second)) - - if send != "0" && send != "1" { - // 0 if already sent, 1 if sending - t.Fatalf("inflight snapshot snapshot_send_inflights_total expected 0 or 1, got %q", send) - } - - receives := MustFetchNotEmptyMetric(t, clus.Members[(lead+1)%3], - "etcd_network_snapshot_receive_inflights_total", - time.After(5*time.Second)) - if receives != "0" && receives != "1" { - // 0 if already received, 1 if receiving - t.Fatalf("inflight snapshot receives expected 0 or 1, got %q", receives) - } - - t.Logf("sleeping for 2 seconds") - time.Sleep(2 * time.Second) - t.Logf("sleeping for 2 seconds DONE") - - // slow follower now applies leader snapshot - // should be able to notify on old-revision watchers in unsynced - // make sure restore watch operation correctly moves watchers - // between synced and unsynced watchers - errc := make(chan error, 1) - go func() { - cresp, cerr := wStream.Recv() - if cerr != nil { - errc <- cerr - return - } - // from start revision 5 to latest revision 16 - if len(cresp.Events) != 12 { - errc <- fmt.Errorf("expected 12 events, got %+v", cresp.Events) - return - } - errc <- nil - }() - select { - case <-time.After(10 * time.Second): - t.Fatal("took too long to receive events from restored watcher") - case err := <-errc: - if err != nil { - t.Fatalf("wStream.Recv error: %v", err) - } - } -} diff --git a/tests/integration/v3_watch_test.go b/tests/integration/v3_watch_test.go deleted file mode 100644 index 8e0a29ae799..00000000000 --- a/tests/integration/v3_watch_test.go +++ /dev/null @@ -1,1293 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "bytes" - "context" - "fmt" - "reflect" - "sort" - "sync" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestV3WatchFromCurrentRevision tests Watch APIs from current revision. -func TestV3WatchFromCurrentRevision(t *testing.T) { - integration.BeforeTest(t) - tests := []struct { - name string - - putKeys []string - watchRequest *pb.WatchRequest - - wresps []*pb.WatchResponse - }{ - { - "watch the key, matching", - []string{"foo"}, - &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo")}}}, - - []*pb.WatchResponse{ - { - Header: &pb.ResponseHeader{Revision: 2}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - }, - }, - }, - }, - { - "watch the key, non-matching", - []string{"foo"}, - &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("helloworld")}}}, - - []*pb.WatchResponse{}, - }, - { - "watch the prefix, matching", - []string{"fooLong"}, - &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), - RangeEnd: []byte("fop")}}}, - - []*pb.WatchResponse{ - { - Header: 
&pb.ResponseHeader{Revision: 2}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - }, - }, - }, - }, - { - "watch the prefix, non-matching", - []string{"foo"}, - &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("helloworld"), - RangeEnd: []byte("helloworle")}}}, - - []*pb.WatchResponse{}, - }, - { - "watch full range, matching", - []string{"fooLong"}, - &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte(""), - RangeEnd: []byte("\x00")}}}, - - []*pb.WatchResponse{ - { - Header: &pb.ResponseHeader{Revision: 2}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("fooLong"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - }, - }, - }, - }, - { - "multiple puts, one watcher with matching key", - []string{"foo", "foo", "foo"}, - &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo")}}}, - - []*pb.WatchResponse{ - { - Header: &pb.ResponseHeader{Revision: 2}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - }, - }, - { - Header: &pb.ResponseHeader{Revision: 3}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2}, - }, - }, - }, - { - Header: &pb.ResponseHeader{Revision: 4}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3}, - }, - }, - }, - }, - }, - { - 
"multiple puts, one watcher with matching perfix", - []string{"foo", "foo", "foo"}, - &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), - RangeEnd: []byte("fop")}}}, - - []*pb.WatchResponse{ - { - Header: &pb.ResponseHeader{Revision: 2}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - }, - }, - { - Header: &pb.ResponseHeader{Revision: 3}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2}, - }, - }, - }, - { - Header: &pb.ResponseHeader{Revision: 4}, - Created: false, - Events: []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 3}, - }, - }, - }, - }, - }, - } - - for i, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - wAPI := integration.ToGRPC(clus.RandClient()).Watch - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, err := wAPI.Watch(ctx) - if err != nil { - t.Fatalf("#%d: wAPI.Watch error: %v", i, err) - } - - err = wStream.Send(tt.watchRequest) - if err != nil { - t.Fatalf("#%d: wStream.Send error: %v", i, err) - } - - // ensure watcher request created a new watcher - cresp, err := wStream.Recv() - if err != nil { - t.Fatalf("#%d: wStream.Recv error: %v", i, err) - } - if !cresp.Created { - t.Fatalf("#%d: did not create watchid, got %+v", i, cresp) - } - if cresp.Canceled { - t.Fatalf("#%d: canceled watcher on create %+v", i, cresp) - } - - createdWatchId := cresp.WatchId - if cresp.Header == nil || cresp.Header.Revision != 1 { - t.Fatalf("#%d: header revision 
got +%v, wanted revison 1", i, cresp) - } - - // asynchronously create keys - ch := make(chan struct{}, 1) - go func() { - for _, k := range tt.putKeys { - kvc := integration.ToGRPC(clus.RandClient()).KV - req := &pb.PutRequest{Key: []byte(k), Value: []byte("bar")} - if _, err := kvc.Put(context.TODO(), req); err != nil { - t.Errorf("#%d: couldn't put key (%v)", i, err) - } - } - ch <- struct{}{} - }() - - // check stream results - for j, wresp := range tt.wresps { - resp, err := wStream.Recv() - if err != nil { - t.Errorf("#%d.%d: wStream.Recv error: %v", i, j, err) - } - - if resp.Header == nil { - t.Fatalf("#%d.%d: unexpected nil resp.Header", i, j) - } - if resp.Header.Revision != wresp.Header.Revision { - t.Errorf("#%d.%d: resp.Header.Revision got = %d, want = %d", i, j, resp.Header.Revision, wresp.Header.Revision) - } - - if wresp.Created != resp.Created { - t.Errorf("#%d.%d: resp.Created got = %v, want = %v", i, j, resp.Created, wresp.Created) - } - if resp.WatchId != createdWatchId { - t.Errorf("#%d.%d: resp.WatchId got = %d, want = %d", i, j, resp.WatchId, createdWatchId) - } - - if !reflect.DeepEqual(resp.Events, wresp.Events) { - t.Errorf("#%d.%d: resp.Events got = %+v, want = %+v", i, j, resp.Events, wresp.Events) - } - } - - rok, nr := waitResponse(wStream, 1*time.Second) - if !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", nr) - } - - // wait for the client to finish sending the keys before terminating the cluster - <-ch - }) - } -} - -// TestV3WatchFutureRevision tests Watch APIs from a future revision. 
-func TestV3WatchFutureRevision(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - wAPI := integration.ToGRPC(clus.RandClient()).Watch - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, err := wAPI.Watch(ctx) - if err != nil { - t.Fatalf("wAPI.Watch error: %v", err) - } - - wkey := []byte("foo") - wrev := int64(10) - req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: wkey, StartRevision: wrev}}} - err = wStream.Send(req) - if err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - - // ensure watcher request created a new watcher - cresp, err := wStream.Recv() - if err != nil { - t.Fatalf("wStream.Recv error: %v", err) - } - if !cresp.Created { - t.Fatalf("create %v, want %v", cresp.Created, true) - } - - kvc := integration.ToGRPC(clus.RandClient()).KV - for { - req := &pb.PutRequest{Key: wkey, Value: []byte("bar")} - resp, rerr := kvc.Put(context.TODO(), req) - if rerr != nil { - t.Fatalf("couldn't put key (%v)", rerr) - } - if resp.Header.Revision == wrev { - break - } - } - - // ensure watcher request created a new watcher - cresp, err = wStream.Recv() - if err != nil { - t.Fatalf("wStream.Recv error: %v", err) - } - if cresp.Header.Revision != wrev { - t.Fatalf("revision = %d, want %d", cresp.Header.Revision, wrev) - } - if len(cresp.Events) != 1 { - t.Fatalf("failed to receive events") - } - if cresp.Events[0].Kv.ModRevision != wrev { - t.Errorf("mod revision = %d, want %d", cresp.Events[0].Kv.ModRevision, wrev) - } -} - -// TestV3WatchWrongRange tests wrong range does not create watchers. 
-func TestV3WatchWrongRange(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - wAPI := integration.ToGRPC(clus.RandClient()).Watch - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, err := wAPI.Watch(ctx) - if err != nil { - t.Fatalf("wAPI.Watch error: %v", err) - } - - tests := []struct { - key []byte - end []byte - canceled bool - }{ - {[]byte("a"), []byte("a"), true}, // wrong range end - {[]byte("b"), []byte("a"), true}, // wrong range end - {[]byte("foo"), []byte{0}, false}, // watch request with 'WithFromKey' - } - for i, tt := range tests { - if err := wStream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: tt.key, RangeEnd: tt.end, StartRevision: 1}}}); err != nil { - t.Fatalf("#%d: wStream.Send error: %v", i, err) - } - cresp, err := wStream.Recv() - if err != nil { - t.Fatalf("#%d: wStream.Recv error: %v", i, err) - } - if !cresp.Created { - t.Fatalf("#%d: create %v, want %v", i, cresp.Created, true) - } - if cresp.Canceled != tt.canceled { - t.Fatalf("#%d: canceled %v, want %v", i, tt.canceled, cresp.Canceled) - } - if tt.canceled && cresp.WatchId != clientv3.InvalidWatchID { - t.Fatalf("#%d: canceled watch ID %d, want %d", i, cresp.WatchId, clientv3.InvalidWatchID) - } - } -} - -// TestV3WatchCancelSynced tests Watch APIs cancellation from synced map. -func TestV3WatchCancelSynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchCancel(t, 0) -} - -// TestV3WatchCancelUnsynced tests Watch APIs cancellation from unsynced map. 
-func TestV3WatchCancelUnsynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchCancel(t, 1) -} - -func testV3WatchCancel(t *testing.T, startRev int64) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if errW != nil { - t.Fatalf("wAPI.Watch error: %v", errW) - } - - wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: startRev}}} - if err := wStream.Send(wreq); err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - - wresp, errR := wStream.Recv() - if errR != nil { - t.Errorf("wStream.Recv error: %v", errR) - } - if !wresp.Created { - t.Errorf("wresp.Created got = %v, want = true", wresp.Created) - } - - creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{ - CancelRequest: &pb.WatchCancelRequest{ - WatchId: wresp.WatchId}}} - if err := wStream.Send(creq); err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - - cresp, err := wStream.Recv() - if err != nil { - t.Errorf("wStream.Recv error: %v", err) - } - if !cresp.Canceled { - t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled) - } - - kvc := integration.ToGRPC(clus.RandClient()).KV - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Errorf("couldn't put key (%v)", err) - } - - // watch got canceled, so this should block - rok, nr := waitResponse(wStream, 1*time.Second) - if !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", nr) - } -} - -// TestV3WatchCurrentPutOverlap ensures current watchers receive all events with -// overlapping puts. 
-func TestV3WatchCurrentPutOverlap(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if wErr != nil { - t.Fatalf("wAPI.Watch error: %v", wErr) - } - - // last mod_revision that will be observed - nrRevisions := 32 - // first revision already allocated as empty revision - var wg sync.WaitGroup - for i := 1; i < nrRevisions; i++ { - wg.Add(1) - go func() { - defer wg.Done() - kvc := integration.ToGRPC(clus.RandClient()).KV - req := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} - if _, err := kvc.Put(context.TODO(), req); err != nil { - t.Errorf("couldn't put key (%v)", err) - } - }() - } - - // maps watcher to current expected revision - progress := make(map[int64]int64) - - wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")}}} - if err := wStream.Send(wreq); err != nil { - t.Fatalf("first watch request failed (%v)", err) - } - - more := true - progress[-1] = 0 // watcher creation pending - for more { - resp, err := wStream.Recv() - if err != nil { - t.Fatalf("wStream.Recv error: %v", err) - } - - if resp.Created { - // accept events > header revision - progress[resp.WatchId] = resp.Header.Revision + 1 - if resp.Header.Revision == int64(nrRevisions) { - // covered all revisions; create no more watchers - progress[-1] = int64(nrRevisions) + 1 - } else if err := wStream.Send(wreq); err != nil { - t.Fatalf("watch request failed (%v)", err) - } - } else if len(resp.Events) == 0 { - t.Fatalf("got events %v, want non-empty", resp.Events) - } else { - wRev, ok := progress[resp.WatchId] - if !ok { - t.Fatalf("got %+v, but watch id shouldn't exist ", resp) - } - if resp.Events[0].Kv.ModRevision != wRev { 
- t.Fatalf("got %+v, wanted first revision %d", resp, wRev) - } - lastRev := resp.Events[len(resp.Events)-1].Kv.ModRevision - progress[resp.WatchId] = lastRev + 1 - } - more = false - for _, v := range progress { - if v <= int64(nrRevisions) { - more = true - break - } - } - } - - if rok, nr := waitResponse(wStream, time.Second); !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", nr) - } - - wg.Wait() -} - -// TestV3WatchEmptyKey ensures synced watchers see empty key PUTs as PUT events -func TestV3WatchEmptyKey(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if werr != nil { - t.Fatal(werr) - } - req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo")}}} - if err := ws.Send(req); err != nil { - t.Fatal(err) - } - if _, err := ws.Recv(); err != nil { - t.Fatal(err) - } - - // put a key with empty value - kvc := integration.ToGRPC(clus.RandClient()).KV - preq := &pb.PutRequest{Key: []byte("foo")} - if _, err := kvc.Put(context.TODO(), preq); err != nil { - t.Fatal(err) - } - - // check received PUT - resp, rerr := ws.Recv() - if rerr != nil { - t.Fatal(rerr) - } - wevs := []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - } - if !reflect.DeepEqual(resp.Events, wevs) { - t.Fatalf("got %v, expected %v", resp.Events, wevs) - } -} - -func TestV3WatchMultipleWatchersSynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchMultipleWatchers(t, 0) -} - -func TestV3WatchMultipleWatchersUnsynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchMultipleWatchers(t, 1) -} - -// testV3WatchMultipleWatchers tests multiple watchers on 
the same key -// and one watcher with matching prefix. It first puts the key -// that matches all watchers, and another key that matches only -// one watcher to test if it receives expected events. -func testV3WatchMultipleWatchers(t *testing.T, startRev int64) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, errW := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if errW != nil { - t.Fatalf("wAPI.Watch error: %v", errW) - } - - watchKeyN := 4 - for i := 0; i < watchKeyN+1; i++ { - var wreq *pb.WatchRequest - if i < watchKeyN { - wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: startRev}}} - } else { - wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("fo"), RangeEnd: []byte("fp"), StartRevision: startRev}}} - } - if err := wStream.Send(wreq); err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - } - - ids := make(map[int64]struct{}) - for i := 0; i < watchKeyN+1; i++ { - wresp, err := wStream.Recv() - if err != nil { - t.Fatalf("wStream.Recv error: %v", err) - } - if !wresp.Created { - t.Fatalf("wresp.Created got = %v, want = true", wresp.Created) - } - ids[wresp.WatchId] = struct{}{} - } - - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - - for i := 0; i < watchKeyN+1; i++ { - wresp, err := wStream.Recv() - if err != nil { - t.Fatalf("wStream.Recv error: %v", err) - } - if _, ok := ids[wresp.WatchId]; !ok { - t.Errorf("watchId %d is not created!", wresp.WatchId) - } else { - delete(ids, wresp.WatchId) - } - if len(wresp.Events) == 0 { - t.Errorf("#%d: no events 
received", i) - } - for _, ev := range wresp.Events { - if string(ev.Kv.Key) != "foo" { - t.Errorf("ev.Kv.Key got = %s, want = foo", ev.Kv.Key) - } - if string(ev.Kv.Value) != "bar" { - t.Errorf("ev.Kv.Value got = %s, want = bar", ev.Kv.Value) - } - } - } - - // now put one key that has only one matching watcher - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("fo"), Value: []byte("bar")}); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - wresp, err := wStream.Recv() - if err != nil { - t.Errorf("wStream.Recv error: %v", err) - } - if len(wresp.Events) != 1 { - t.Fatalf("len(wresp.Events) got = %d, want = 1", len(wresp.Events)) - } - if string(wresp.Events[0].Kv.Key) != "fo" { - t.Errorf("wresp.Events[0].Kv.Key got = %s, want = fo", wresp.Events[0].Kv.Key) - } - - // now Recv should block because there is no more events coming - rok, nr := waitResponse(wStream, 1*time.Second) - if !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", nr) - } -} - -func TestV3WatchMultipleEventsTxnSynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchMultipleEventsTxn(t, 0) -} - -func TestV3WatchMultipleEventsTxnUnsynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchMultipleEventsTxn(t, 1) -} - -// testV3WatchMultipleEventsTxn tests Watch APIs when it receives multiple events. 
-func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if wErr != nil { - t.Fatalf("wAPI.Watch error: %v", wErr) - } - - wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), RangeEnd: []byte("fop"), StartRevision: startRev}}} - if err := wStream.Send(wreq); err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - if resp, err := wStream.Recv(); err != nil || !resp.Created { - t.Fatalf("create response failed: resp=%v, err=%v", resp, err) - } - - kvc := integration.ToGRPC(clus.RandClient()).KV - txn := pb.TxnRequest{} - for i := 0; i < 3; i++ { - ru := &pb.RequestOp{} - ru.Request = &pb.RequestOp_RequestPut{ - RequestPut: &pb.PutRequest{ - Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte("bar")}} - txn.Success = append(txn.Success, ru) - } - - tresp, err := kvc.Txn(context.Background(), &txn) - if err != nil { - t.Fatalf("kvc.Txn error: %v", err) - } - if !tresp.Succeeded { - t.Fatalf("kvc.Txn failed: %+v", tresp) - } - - var events []*mvccpb.Event - for len(events) < 3 { - resp, err := wStream.Recv() - if err != nil { - t.Errorf("wStream.Recv error: %v", err) - } - events = append(events, resp.Events...) 
- } - sort.Sort(eventsSortByKey(events)) - - wevents := []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo2"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - } - - if !reflect.DeepEqual(events, wevents) { - t.Errorf("events got = %+v, want = %+v", events, wevents) - } - - rok, nr := waitResponse(wStream, 1*time.Second) - if !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", nr) - } -} - -type eventsSortByKey []*mvccpb.Event - -func (evs eventsSortByKey) Len() int { return len(evs) } -func (evs eventsSortByKey) Swap(i, j int) { evs[i], evs[j] = evs[j], evs[i] } -func (evs eventsSortByKey) Less(i, j int) bool { - return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 -} - -func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - kvc := integration.ToGRPC(clus.RandClient()).KV - - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if wErr != nil { - t.Fatalf("wAPI.Watch error: %v", wErr) - } - - wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), RangeEnd: []byte("fop"), 
StartRevision: 1}}} - if err := wStream.Send(wreq); err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo0"), Value: []byte("bar")}); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo1"), Value: []byte("bar")}); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - - allWevents := []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 3, Version: 1}, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo0"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 4, Version: 2}, - }, - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo1"), Value: []byte("bar"), CreateRevision: 3, ModRevision: 5, Version: 2}, - }, - } - - var events []*mvccpb.Event - for len(events) < 4 { - resp, err := wStream.Recv() - if err != nil { - t.Errorf("wStream.Recv error: %v", err) - } - if resp.Created { - continue - } - events = append(events, resp.Events...) - // if PUT requests are committed by now, first receive would return - // multiple events, but if not, it returns a single event. In SSD, - // it should return 4 events at once. 
- } - - if !reflect.DeepEqual(events, allWevents) { - t.Errorf("events got = %+v, want = %+v", events, allWevents) - } - - rok, nr := waitResponse(wStream, 1*time.Second) - if !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", nr) - } -} - -func TestV3WatchMultipleStreamsSynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchMultipleStreams(t, 0) -} - -func TestV3WatchMultipleStreamsUnsynced(t *testing.T) { - integration.BeforeTest(t) - testV3WatchMultipleStreams(t, 1) -} - -// testV3WatchMultipleStreams tests multiple watchers on the same key on multiple streams. -func testV3WatchMultipleStreams(t *testing.T, startRev int64) { - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - wAPI := integration.ToGRPC(clus.RandClient()).Watch - kvc := integration.ToGRPC(clus.RandClient()).KV - - streams := make([]pb.Watch_WatchClient, 5) - for i := range streams { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, errW := wAPI.Watch(ctx) - if errW != nil { - t.Fatalf("wAPI.Watch error: %v", errW) - } - wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), StartRevision: startRev}}} - if err := wStream.Send(wreq); err != nil { - t.Fatalf("wStream.Send error: %v", err) - } - streams[i] = wStream - } - - for _, wStream := range streams { - wresp, err := wStream.Recv() - if err != nil { - t.Fatalf("wStream.Recv error: %v", err) - } - if !wresp.Created { - t.Fatalf("wresp.Created got = %v, want = true", wresp.Created) - } - } - - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil { - t.Fatalf("couldn't put key (%v)", err) - } - - var wg sync.WaitGroup - wg.Add(len(streams)) - wevents := []*mvccpb.Event{ - { - Type: mvccpb.PUT, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, 
Version: 1}, - }, - } - for i := range streams { - go func(i int) { - defer wg.Done() - wStream := streams[i] - wresp, err := wStream.Recv() - if err != nil { - t.Errorf("wStream.Recv error: %v", err) - } - if wresp.WatchId != 0 { - t.Errorf("watchId got = %d, want = 0", wresp.WatchId) - } - if !reflect.DeepEqual(wresp.Events, wevents) { - t.Errorf("wresp.Events got = %+v, want = %+v", wresp.Events, wevents) - } - // now Recv should block because there is no more events coming - rok, nr := waitResponse(wStream, 1*time.Second) - if !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", nr) - } - }(i) - } - wg.Wait() -} - -// waitResponse waits on the given stream for given duration. -// If there is no more events, true and a nil response will be -// returned closing the WatchClient stream. Or the response will -// be returned. -func waitResponse(wc pb.Watch_WatchClient, timeout time.Duration) (bool, *pb.WatchResponse) { - rCh := make(chan *pb.WatchResponse, 1) - donec := make(chan struct{}) - defer close(donec) - go func() { - resp, _ := wc.Recv() - select { - case rCh <- resp: - case <-donec: - } - }() - select { - case nr := <-rCh: - return false, nr - case <-time.After(timeout): - } - // didn't get response - wc.CloseSend() - return true, nil -} - -func TestWatchWithProgressNotify(t *testing.T) { - // accelerate report interval so test terminates quickly - oldpi := v3rpc.GetProgressReportInterval() - // using atomics to avoid race warnings - v3rpc.SetProgressReportInterval(3 * time.Second) - testInterval := 3 * time.Second - defer func() { v3rpc.SetProgressReportInterval(oldpi) }() - - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - wStream, wErr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if wErr != nil { - t.Fatalf("wAPI.Watch error: %v", wErr) - } - - // create two 
watchers, one with progressNotify set. - wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1, ProgressNotify: true}}} - if err := wStream.Send(wreq); err != nil { - t.Fatalf("watch request failed (%v)", err) - } - wreq = &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo"), StartRevision: 1}}} - if err := wStream.Send(wreq); err != nil { - t.Fatalf("watch request failed (%v)", err) - } - - // two creation + one notification - for i := 0; i < 3; i++ { - rok, resp := waitResponse(wStream, testInterval+time.Second) - if resp.Created { - continue - } - - if rok { - t.Errorf("failed to receive response from watch stream") - } - if resp.Header.Revision != 1 { - t.Errorf("revision = %d, want 1", resp.Header.Revision) - } - if len(resp.Events) != 0 { - t.Errorf("len(resp.Events) = %d, want 0", len(resp.Events)) - } - } - - // no more notification - rok, resp := waitResponse(wStream, time.Second) - if !rok { - t.Errorf("unexpected pb.WatchResponse is received %+v", resp) - } -} - -// TestV3WatchClose opens many watchers concurrently on multiple streams. 
-func TestV3WatchClose(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true}) - defer clus.Terminate(t) - - c := clus.Client(0) - wapi := integration.ToGRPC(c).Watch - - var wg sync.WaitGroup - wg.Add(100) - for i := 0; i < 100; i++ { - go func() { - ctx, cancel := context.WithCancel(context.TODO()) - defer func() { - wg.Done() - cancel() - }() - ws, err := wapi.Watch(ctx) - if err != nil { - return - } - cr := &pb.WatchCreateRequest{Key: []byte("a")} - req := &pb.WatchRequest{ - RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: cr}} - ws.Send(req) - ws.Recv() - }() - } - - clus.Members[0].Bridge().DropConnections() - wg.Wait() -} - -// TestV3WatchWithFilter ensures watcher filters out the events correctly. -func TestV3WatchWithFilter(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(ctx) - if werr != nil { - t.Fatal(werr) - } - req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte("foo"), - Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT}, - }}} - if err := ws.Send(req); err != nil { - t.Fatal(err) - } - if _, err := ws.Recv(); err != nil { - t.Fatal(err) - } - - recv := make(chan *pb.WatchResponse, 1) - go func() { - // check received PUT - resp, rerr := ws.Recv() - if rerr != nil { - t.Error(rerr) - } - recv <- resp - }() - - // put a key with empty value - kvc := integration.ToGRPC(clus.RandClient()).KV - preq := &pb.PutRequest{Key: []byte("foo")} - if _, err := kvc.Put(context.TODO(), preq); err != nil { - t.Fatal(err) - } - - select { - case <-recv: - t.Fatal("failed to filter out put event") - case <-time.After(100 * 
time.Millisecond): - } - - dreq := &pb.DeleteRangeRequest{Key: []byte("foo")} - if _, err := kvc.DeleteRange(context.TODO(), dreq); err != nil { - t.Fatal(err) - } - - select { - case resp := <-recv: - wevs := []*mvccpb.Event{ - { - Type: mvccpb.DELETE, - Kv: &mvccpb.KeyValue{Key: []byte("foo"), ModRevision: 3}, - }, - } - if !reflect.DeepEqual(resp.Events, wevs) { - t.Fatalf("got %v, expected %v", resp.Events, wevs) - } - case <-time.After(100 * time.Millisecond): - t.Fatal("failed to receive delete event") - } -} - -func TestV3WatchWithPrevKV(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - wctx, wcancel := context.WithCancel(context.Background()) - defer wcancel() - - tests := []struct { - key string - end string - vals []string - }{{ - key: "foo", - end: "fop", - vals: []string{"bar1", "bar2"}, - }, { - key: "/abc", - end: "/abd", - vals: []string{"first", "second"}, - }} - for i, tt := range tests { - kvc := integration.ToGRPC(clus.RandClient()).KV - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[0])}); err != nil { - t.Fatal(err) - } - - ws, werr := integration.ToGRPC(clus.RandClient()).Watch.Watch(wctx) - if werr != nil { - t.Fatal(werr) - } - - req := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{ - CreateRequest: &pb.WatchCreateRequest{ - Key: []byte(tt.key), - RangeEnd: []byte(tt.end), - PrevKv: true, - }}} - if err := ws.Send(req); err != nil { - t.Fatal(err) - } - if _, err := ws.Recv(); err != nil { - t.Fatal(err) - } - - if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte(tt.key), Value: []byte(tt.vals[1])}); err != nil { - t.Fatal(err) - } - - recv := make(chan *pb.WatchResponse, 1) - go func() { - // check received PUT - resp, rerr := ws.Recv() - if rerr != nil { - t.Error(rerr) - } - recv <- resp - }() - - select { - case resp := <-recv: - if tt.vals[1] != 
string(resp.Events[0].Kv.Value) { - t.Errorf("#%d: unequal value: want=%s, get=%s", i, tt.vals[1], resp.Events[0].Kv.Value) - } - if tt.vals[0] != string(resp.Events[0].PrevKv.Value) { - t.Errorf("#%d: unequal value: want=%s, get=%s", i, tt.vals[0], resp.Events[0].PrevKv.Value) - } - case <-time.After(30 * time.Second): - t.Error("timeout waiting for watch response") - } - } -} - -// TestV3WatchCancellation ensures that watch cancellation frees up server resources. -func TestV3WatchCancellation(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - cli := clus.RandClient() - - // increment watcher total count and keep a stream open - cli.Watch(ctx, "/foo") - - for i := 0; i < 1000; i++ { - ctx, cancel := context.WithCancel(ctx) - cli.Watch(ctx, "/foo") - cancel() - } - - // Wait a little for cancellations to take hold - time.Sleep(3 * time.Second) - - minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } - - var expected string - if integration.ThroughProxy { - // grpc proxy has additional 2 watches open - expected = "3" - } else { - expected = "1" - } - - if minWatches != expected { - t.Fatalf("expected %s watch, got %s", expected, minWatches) - } -} - -// TestV3WatchCloseCancelRace ensures that watch close doesn't decrement the watcher total too far. 
-func TestV3WatchCloseCancelRace(t *testing.T) { - integration.BeforeTest(t) - - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - cli := clus.RandClient() - - for i := 0; i < 1000; i++ { - ctx, cancel := context.WithCancel(ctx) - cli.Watch(ctx, "/foo") - cancel() - } - - // Wait a little for cancellations to take hold - time.Sleep(3 * time.Second) - - minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total") - if err != nil { - t.Fatal(err) - } - - var expected string - if integration.ThroughProxy { - // grpc proxy has additional 2 watches open - expected = "2" - } else { - expected = "0" - } - - if minWatches != expected { - t.Fatalf("expected %s watch, got %s", expected, minWatches) - } -} diff --git a/tests/integration/v3election_grpc_test.go b/tests/integration/v3election_grpc_test.go deleted file mode 100644 index c7bf7990528..00000000000 --- a/tests/integration/v3election_grpc_test.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "context" - "fmt" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestV3ElectionCampaign checks that Campaign will not give -// simultaneous leadership to multiple campaigners. -func TestV3ElectionCampaign(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } - lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } - - lc := integration.ToGRPC(clus.Client(0)).Election - req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")} - l1, lerr1 := lc.Campaign(context.TODO(), req1) - if lerr1 != nil { - t.Fatal(lerr1) - } - - campaignc := make(chan struct{}) - go func() { - defer close(campaignc) - req2 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease2.ID, Value: []byte("def")} - l2, lerr2 := lc.Campaign(context.TODO(), req2) - if lerr2 != nil { - t.Error(lerr2) - } - if l1.Header.Revision >= l2.Header.Revision { - t.Errorf("expected l1 revision < l2 revision, got %d >= %d", l1.Header.Revision, l2.Header.Revision) - } - }() - - select { - case <-time.After(200 * time.Millisecond): - case <-campaignc: - t.Fatalf("got leadership before resign") - } - - if _, uerr := lc.Resign(context.TODO(), &epb.ResignRequest{Leader: l1.Leader}); uerr != nil { - t.Fatal(uerr) - } - - select { - case <-time.After(200 * time.Millisecond): - t.Fatalf("campaigner unelected after resign") - case <-campaignc: - } - - lval, lverr := lc.Leader(context.TODO(), &epb.LeaderRequest{Name: []byte("foo")}) - if lverr != nil { - 
t.Fatal(lverr) - } - - if string(lval.Kv.Value) != "def" { - t.Fatalf("got election value %q, expected %q", string(lval.Kv.Value), "def") - } -} - -// TestV3ElectionObserve checks that an Observe stream receives -// proclamations from different leaders uninterrupted. -func TestV3ElectionObserve(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lc := integration.ToGRPC(clus.Client(0)).Election - - // observe leadership events - observec := make(chan struct{}, 1) - go func() { - defer close(observec) - s, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte("foo")}) - observec <- struct{}{} - if err != nil { - t.Error(err) - } - for i := 0; i < 10; i++ { - resp, rerr := s.Recv() - if rerr != nil { - t.Error(rerr) - } - respV := 0 - fmt.Sscanf(string(resp.Kv.Value), "%d", &respV) - // leader transitions should not go backwards - if respV < i { - t.Errorf(`got observe value %q, expected >= "%d"`, string(resp.Kv.Value), i) - } - i = respV - } - }() - - select { - case <-observec: - case <-time.After(time.Second): - t.Fatalf("observe stream took too long to start") - } - - lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } - c1, cerr1 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("0")}) - if cerr1 != nil { - t.Fatal(cerr1) - } - - // overlap other leader so it waits on resign - leader2c := make(chan struct{}) - go func() { - defer close(leader2c) - - lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Error(err2) - } - c2, cerr2 := lc.Campaign(context.TODO(), &epb.CampaignRequest{Name: []byte("foo"), Lease: lease2.ID, Value: []byte("5")}) - if cerr2 != nil { - t.Error(cerr2) - } - for i := 6; i < 10; i++ { 
- v := []byte(fmt.Sprintf("%d", i)) - req := &epb.ProclaimRequest{Leader: c2.Leader, Value: v} - if _, err := lc.Proclaim(context.TODO(), req); err != nil { - t.Error(err) - } - } - }() - - for i := 1; i < 5; i++ { - v := []byte(fmt.Sprintf("%d", i)) - req := &epb.ProclaimRequest{Leader: c1.Leader, Value: v} - if _, err := lc.Proclaim(context.TODO(), req); err != nil { - t.Fatal(err) - } - } - // start second leader - lc.Resign(context.TODO(), &epb.ResignRequest{Leader: c1.Leader}) - - select { - case <-observec: - case <-time.After(time.Second): - t.Fatalf("observe did not observe all events in time") - } - - <-leader2c -} diff --git a/tests/integration/v3lock_grpc_test.go b/tests/integration/v3lock_grpc_test.go deleted file mode 100644 index f293bc1a556..00000000000 --- a/tests/integration/v3lock_grpc_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "context" - "testing" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - lockpb "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" - "go.etcd.io/etcd/tests/v3/framework/integration" -) - -// TestV3LockLockWaiter tests that a client will wait for a lock, then acquire it -// once it is unlocked. 
-func TestV3LockLockWaiter(t *testing.T) { - integration.BeforeTest(t) - clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1}) - defer clus.Terminate(t) - - lease1, err1 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err1 != nil { - t.Fatal(err1) - } - lease2, err2 := integration.ToGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 30}) - if err2 != nil { - t.Fatal(err2) - } - - lc := integration.ToGRPC(clus.Client(0)).Lock - l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID}) - if lerr1 != nil { - t.Fatal(lerr1) - } - - lockc := make(chan struct{}) - go func() { - l2, lerr2 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease2.ID}) - if lerr2 != nil { - t.Error(lerr2) - } - if l1.Header.Revision >= l2.Header.Revision { - t.Errorf("expected l1 revision < l2 revision, got %d >= %d", l1.Header.Revision, l2.Header.Revision) - } - close(lockc) - }() - - select { - case <-time.After(200 * time.Millisecond): - case <-lockc: - t.Fatalf("locked before unlock") - } - - if _, uerr := lc.Unlock(context.TODO(), &lockpb.UnlockRequest{Key: l1.Key}); uerr != nil { - t.Fatal(uerr) - } - - select { - case <-time.After(200 * time.Millisecond): - t.Fatalf("waiter did not lock after unlock") - case <-lockc: - } -} diff --git a/tests/linearizability/client.go b/tests/linearizability/client.go deleted file mode 100644 index 5526eee08ed..00000000000 --- a/tests/linearizability/client.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package linearizability - -import ( - "context" - "time" - - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/linearizability/identity" - "go.etcd.io/etcd/tests/v3/linearizability/model" -) - -type recordingClient struct { - client clientv3.Client - history *model.AppendableHistory -} - -func NewClient(endpoints []string, ids identity.Provider) (*recordingClient, error) { - cc, err := clientv3.New(clientv3.Config{ - Endpoints: endpoints, - Logger: zap.NewNop(), - DialKeepAliveTime: 1 * time.Millisecond, - DialKeepAliveTimeout: 5 * time.Millisecond, - }) - if err != nil { - return nil, err - } - return &recordingClient{ - client: *cc, - history: model.NewAppendableHistory(ids), - }, nil -} - -func (c *recordingClient) Close() error { - return c.client.Close() -} - -func (c *recordingClient) Get(ctx context.Context, key string) ([]*mvccpb.KeyValue, error) { - callTime := time.Now() - resp, err := c.client.Get(ctx, key) - returnTime := time.Now() - if err != nil { - return nil, err - } - c.history.AppendGet(key, callTime, returnTime, resp) - return resp.Kvs, nil -} - -func (c *recordingClient) Put(ctx context.Context, key, value string) error { - callTime := time.Now() - resp, err := c.client.Put(ctx, key, value) - returnTime := time.Now() - c.history.AppendPut(key, value, callTime, returnTime, resp, err) - return err -} - -func (c *recordingClient) Delete(ctx context.Context, key string) error { - callTime := time.Now() - resp, err := c.client.Delete(ctx, key) - returnTime := 
time.Now() - c.history.AppendDelete(key, callTime, returnTime, resp, err) - return nil -} - -func (c *recordingClient) CompareAndSet(ctx context.Context, key, expectedValue, newValue string) error { - callTime := time.Now() - txn := c.client.Txn(ctx) - var cmp clientv3.Cmp - if expectedValue == "" { - cmp = clientv3.Compare(clientv3.CreateRevision(key), "=", 0) - } else { - cmp = clientv3.Compare(clientv3.Value(key), "=", expectedValue) - } - resp, err := txn.If( - cmp, - ).Then( - clientv3.OpPut(key, newValue), - ).Commit() - returnTime := time.Now() - c.history.AppendTxn(key, expectedValue, newValue, callTime, returnTime, resp, err) - return err -} - -func (c *recordingClient) LeaseGrant(ctx context.Context, ttl int64) (int64, error) { - callTime := time.Now() - resp, err := c.client.Lease.Grant(ctx, ttl) - returnTime := time.Now() - c.history.AppendLeaseGrant(callTime, returnTime, resp, err) - var leaseId int64 - if resp != nil { - leaseId = int64(resp.ID) - } - return leaseId, err -} - -func (c *recordingClient) LeaseRevoke(ctx context.Context, leaseId int64) error { - callTime := time.Now() - resp, err := c.client.Lease.Revoke(ctx, clientv3.LeaseID(leaseId)) - returnTime := time.Now() - c.history.AppendLeaseRevoke(leaseId, callTime, returnTime, resp, err) - return err -} - -func (c *recordingClient) PutWithLease(ctx context.Context, key string, value string, leaseId int64) error { - callTime := time.Now() - opts := clientv3.WithLease(clientv3.LeaseID(leaseId)) - resp, err := c.client.Put(ctx, key, value, opts) - returnTime := time.Now() - c.history.AppendPutWithLease(key, value, int64(leaseId), callTime, returnTime, resp, err) - return err -} - -func (c *recordingClient) Defragment(ctx context.Context) error { - callTime := time.Now() - resp, err := c.client.Defragment(ctx, c.client.Endpoints()[0]) - returnTime := time.Now() - c.history.AppendDefragment(callTime, returnTime, resp, err) - return err -} diff --git a/tests/linearizability/failpoints.go 
b/tests/linearizability/failpoints.go deleted file mode 100644 index 675e18b164b..00000000000 --- a/tests/linearizability/failpoints.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package linearizability - -import ( - "context" - "fmt" - "math/rand" - "strings" - "testing" - "time" - - "go.uber.org/zap" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/e2e" -) - -const ( - triggerTimeout = 5 * time.Second -) - -var ( - KillFailpoint Failpoint = killFailpoint{} - DefragBeforeCopyPanic Failpoint = goPanicFailpoint{"defragBeforeCopy", triggerDefrag, AnyMember} - DefragBeforeRenamePanic Failpoint = goPanicFailpoint{"defragBeforeRename", triggerDefrag, AnyMember} - BeforeCommitPanic Failpoint = goPanicFailpoint{"beforeCommit", nil, AnyMember} - AfterCommitPanic Failpoint = goPanicFailpoint{"afterCommit", nil, AnyMember} - RaftBeforeSavePanic Failpoint = goPanicFailpoint{"raftBeforeSave", nil, AnyMember} - RaftAfterSavePanic Failpoint = goPanicFailpoint{"raftAfterSave", nil, AnyMember} - BackendBeforePreCommitHookPanic Failpoint = goPanicFailpoint{"commitBeforePreCommitHook", nil, AnyMember} - BackendAfterPreCommitHookPanic Failpoint = goPanicFailpoint{"commitAfterPreCommitHook", nil, AnyMember} - BackendBeforeStartDBTxnPanic Failpoint = goPanicFailpoint{"beforeStartDBTxn", nil, AnyMember} - BackendAfterStartDBTxnPanic Failpoint = 
goPanicFailpoint{"afterStartDBTxn", nil, AnyMember} - BackendBeforeWritebackBufPanic Failpoint = goPanicFailpoint{"beforeWritebackBuf", nil, AnyMember} - BackendAfterWritebackBufPanic Failpoint = goPanicFailpoint{"afterWritebackBuf", nil, AnyMember} - CompactBeforeCommitScheduledCompactPanic Failpoint = goPanicFailpoint{"compactBeforeCommitScheduledCompact", triggerCompact, AnyMember} - CompactAfterCommitScheduledCompactPanic Failpoint = goPanicFailpoint{"compactAfterCommitScheduledCompact", triggerCompact, AnyMember} - CompactBeforeSetFinishedCompactPanic Failpoint = goPanicFailpoint{"compactBeforeSetFinishedCompact", triggerCompact, AnyMember} - CompactAfterSetFinishedCompactPanic Failpoint = goPanicFailpoint{"compactAfterSetFinishedCompact", triggerCompact, AnyMember} - CompactBeforeCommitBatchPanic Failpoint = goPanicFailpoint{"compactBeforeCommitBatch", triggerCompact, AnyMember} - CompactAfterCommitBatchPanic Failpoint = goPanicFailpoint{"compactAfterCommitBatch", triggerCompact, AnyMember} - RaftBeforeLeaderSendPanic Failpoint = goPanicFailpoint{"raftBeforeLeaderSend", nil, Leader} - BlackholePeerNetwork Failpoint = blackholePeerNetworkFailpoint{duration: time.Second} - DelayPeerNetwork Failpoint = delayPeerNetworkFailpoint{duration: time.Second, baseLatency: 75 * time.Millisecond, randomizedLatency: 50 * time.Millisecond} - RandomFailpoint Failpoint = randomFailpoint{[]Failpoint{ - KillFailpoint, BeforeCommitPanic, AfterCommitPanic, RaftBeforeSavePanic, - RaftAfterSavePanic, DefragBeforeCopyPanic, DefragBeforeRenamePanic, - BackendBeforePreCommitHookPanic, BackendAfterPreCommitHookPanic, - BackendBeforeStartDBTxnPanic, BackendAfterStartDBTxnPanic, - BackendBeforeWritebackBufPanic, BackendAfterWritebackBufPanic, - CompactBeforeCommitScheduledCompactPanic, CompactAfterCommitScheduledCompactPanic, - CompactBeforeSetFinishedCompactPanic, CompactAfterSetFinishedCompactPanic, - CompactBeforeCommitBatchPanic, CompactAfterCommitBatchPanic, - 
RaftBeforeLeaderSendPanic, - BlackholePeerNetwork, - DelayPeerNetwork, - }} - RaftBeforeApplySnapPanic Failpoint = goPanicFailpoint{"raftBeforeApplySnap", triggerBlackholeUntilSnapshot, Follower} - RaftAfterApplySnapPanic Failpoint = goPanicFailpoint{"raftAfterApplySnap", triggerBlackholeUntilSnapshot, Follower} - RaftAfterWALReleasePanic Failpoint = goPanicFailpoint{"raftAfterWALRelease", triggerBlackholeUntilSnapshot, Follower} - RaftBeforeSaveSnapPanic Failpoint = goPanicFailpoint{"raftBeforeSaveSnap", triggerBlackholeUntilSnapshot, Follower} - RaftAfterSaveSnapPanic Failpoint = goPanicFailpoint{"raftAfterSaveSnap", triggerBlackholeUntilSnapshot, Follower} - RandomSnapshotFailpoint Failpoint = randomFailpoint{[]Failpoint{ - RaftBeforeApplySnapPanic, RaftAfterApplySnapPanic, RaftAfterWALReleasePanic, RaftBeforeSaveSnapPanic, RaftAfterSaveSnapPanic, - }} - // TODO: Figure out how to reliably trigger below failpoints and add them to RandomFailpoint - raftBeforeFollowerSendPanic Failpoint = goPanicFailpoint{"raftBeforeFollowerSend", nil, AnyMember} -) - -type Failpoint interface { - Trigger(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error - Name() string - Available(e2e.EtcdProcess) bool -} - -type killFailpoint struct{} - -func (f killFailpoint) Trigger(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error { - member := clus.Procs[rand.Int()%len(clus.Procs)] - - killCtx, cancel := context.WithTimeout(ctx, triggerTimeout) - defer cancel() - for member.IsRunning() { - err := member.Kill() - if err != nil { - lg.Info("Sending kill signal failed", zap.Error(err)) - } - err = member.Wait(killCtx) - if err != nil && !strings.Contains(err.Error(), "unexpected exit code") { - lg.Info("Failed to kill the process", zap.Error(err)) - return fmt.Errorf("failed to kill the process within %s, err: %w", triggerTimeout, err) - } - } - - err := member.Start(ctx) - if err != nil { - return err - } - return nil -} 
- -func (f killFailpoint) Name() string { - return "Kill" -} - -func (f killFailpoint) Available(e2e.EtcdProcess) bool { - return true -} - -type goPanicFailpoint struct { - failpoint string - trigger func(t *testing.T, ctx context.Context, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error - target failpointTarget -} - -type failpointTarget string - -const ( - AnyMember failpointTarget = "AnyMember" - Leader failpointTarget = "Leader" - Follower failpointTarget = "Follower" -) - -func (f goPanicFailpoint) Trigger(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error { - member := f.pickMember(t, clus) - - triggerCtx, cancel := context.WithTimeout(ctx, triggerTimeout) - defer cancel() - - for member.IsRunning() { - lg.Info("Setting up gofailpoint", zap.String("failpoint", f.Name())) - err := member.Failpoints().Setup(triggerCtx, f.failpoint, "panic") - if err != nil { - lg.Info("goFailpoint setup failed", zap.String("failpoint", f.Name()), zap.Error(err)) - } - if !member.IsRunning() { - // TODO: Check member logs that etcd not running is caused panic caused by proper gofailpoint. 
- break - } - if f.trigger != nil { - lg.Info("Triggering gofailpoint", zap.String("failpoint", f.Name())) - err = f.trigger(t, triggerCtx, member, clus) - if err != nil { - lg.Info("gofailpoint trigger failed", zap.String("failpoint", f.Name()), zap.Error(err)) - } - } - lg.Info("Waiting for member to exist", zap.String("member", member.Config().Name)) - err = member.Wait(triggerCtx) - if err != nil && !strings.Contains(err.Error(), "unexpected exit code") { - lg.Info("Member didn't exit as expected", zap.String("member", member.Config().Name), zap.Error(err)) - return fmt.Errorf("member didn't exit as expected: %v", err) - } - lg.Info("Member existed as expected", zap.String("member", member.Config().Name)) - } - - err := member.Start(ctx) - if err != nil { - return err - } - return nil -} - -func (f goPanicFailpoint) pickMember(t *testing.T, clus *e2e.EtcdProcessCluster) e2e.EtcdProcess { - switch f.target { - case AnyMember: - return clus.Procs[rand.Int()%len(clus.Procs)] - case Leader: - return clus.Procs[clus.WaitLeader(t)] - case Follower: - return clus.Procs[(clus.WaitLeader(t)+1)%len(clus.Procs)] - default: - panic("unknown target") - } -} - -func (f goPanicFailpoint) Available(member e2e.EtcdProcess) bool { - memberFailpoints := member.Failpoints() - if memberFailpoints == nil { - return false - } - available := memberFailpoints.Available() - _, found := available[f.failpoint] - return found -} - -func (f goPanicFailpoint) Name() string { - return f.failpoint -} - -func triggerDefrag(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, _ *e2e.EtcdProcessCluster) error { - cc, err := clientv3.New(clientv3.Config{ - Endpoints: member.EndpointsV3(), - Logger: zap.NewNop(), - DialKeepAliveTime: 1 * time.Millisecond, - DialKeepAliveTimeout: 5 * time.Millisecond, - }) - if err != nil { - return fmt.Errorf("failed creating client: %w", err) - } - defer cc.Close() - _, err = cc.Defragment(ctx, member.EndpointsV3()[0]) - if err != nil && 
!strings.Contains(err.Error(), "error reading from server: EOF") { - return err - } - return nil -} - -func triggerCompact(_ *testing.T, ctx context.Context, member e2e.EtcdProcess, _ *e2e.EtcdProcessCluster) error { - cc, err := clientv3.New(clientv3.Config{ - Endpoints: member.EndpointsV3(), - Logger: zap.NewNop(), - DialKeepAliveTime: 1 * time.Millisecond, - DialKeepAliveTimeout: 5 * time.Millisecond, - }) - if err != nil { - return fmt.Errorf("failed creating client: %w", err) - } - defer cc.Close() - resp, err := cc.Get(ctx, "/") - if err != nil { - return err - } - _, err = cc.Compact(ctx, resp.Header.Revision) - if err != nil && !strings.Contains(err.Error(), "error reading from server: EOF") { - return err - } - return nil -} - -// latestRevisionForEndpoint gets latest revision of the first endpoint in Client.Endpoints list -func latestRevisionForEndpoint(ctx context.Context, c *clientv3.Client) (int64, error) { - cntx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) - defer cancel() - resp, err := c.Status(cntx, c.Endpoints()[0]) - if err != nil { - return 0, err - } - return resp.Header.Revision, err -} - -func triggerBlackholeUntilSnapshot(t *testing.T, ctx context.Context, member e2e.EtcdProcess, clus *e2e.EtcdProcessCluster) error { - leader := clus.Procs[clus.WaitLeader(t)] - lc, err := clientv3.New(clientv3.Config{ - Endpoints: []string{leader.Config().ClientURL}, - Logger: zap.NewNop(), - DialKeepAliveTime: 1 * time.Millisecond, - DialKeepAliveTimeout: 5 * time.Millisecond, - }) - if err != nil { - return err - } - defer lc.Close() - - mc, err := clientv3.New(clientv3.Config{ - Endpoints: []string{member.Config().ClientURL}, - Logger: zap.NewNop(), - DialKeepAliveTime: 1 * time.Millisecond, - DialKeepAliveTimeout: 5 * time.Millisecond, - }) - if err != nil { - return err - } - defer mc.Close() - - proxy := member.PeerProxy() - // Blackholing will cause peers to not be able to use streamWriters registered with member - // but peer traffic is 
still possible because member has 'pipeline' with peers - // TODO: find a way to stop all traffic - proxy.BlackholeTx() - proxy.BlackholeRx() - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - // Have to refresh revBlackholedMem. It can still increase as member processes changes that are received but not yet applied. - revBlackholedMem, err := latestRevisionForEndpoint(ctx, mc) - if err != nil { - return err - } - revLeader, err := latestRevisionForEndpoint(ctx, lc) - if err != nil { - return err - } - t.Logf("Leader: [%s], Member: [%s], revLeader: %d, revBlackholedMem: %d", leader.Config().Name, member.Config().Name, revLeader, revBlackholedMem) - // Blackholed member has to be sufficiently behind to trigger snapshot transfer. - // Need to make sure leader compacted latest revBlackholedMem inside EtcdServer.snapshot. - // That's why we wait for clus.Cfg.SnapshotCount (to trigger snapshot) + clus.Cfg.SnapshotCatchUpEntries (EtcdServer.snapshot compaction offset) - if revLeader-revBlackholedMem > int64(clus.Cfg.SnapshotCount+clus.Cfg.SnapshotCatchUpEntries) { - break - } - time.Sleep(100 * time.Millisecond) - } - - proxy.UnblackholeTx() - proxy.UnblackholeRx() - return nil -} - -type randomFailpoint struct { - failpoints []Failpoint -} - -func (f randomFailpoint) Trigger(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error { - availableFailpoints := make([]Failpoint, 0, len(f.failpoints)) - for _, failpoint := range f.failpoints { - count := 0 - for _, proc := range clus.Procs { - if failpoint.Available(proc) { - count++ - } - } - if count == len(clus.Procs) { - availableFailpoints = append(availableFailpoints, failpoint) - } - } - failpoint := availableFailpoints[rand.Int()%len(availableFailpoints)] - lg.Info("Triggering failpoint\n", zap.String("failpoint", failpoint.Name())) - return failpoint.Trigger(ctx, t, lg, clus) -} - -func (f randomFailpoint) Name() string { - return "Random" -} - -func (f 
randomFailpoint) Available(e2e.EtcdProcess) bool { - return true -} - -type blackholePeerNetworkFailpoint struct { - duration time.Duration -} - -func (f blackholePeerNetworkFailpoint) Trigger(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error { - member := clus.Procs[rand.Int()%len(clus.Procs)] - proxy := member.PeerProxy() - - proxy.BlackholeTx() - proxy.BlackholeRx() - lg.Info("Blackholing traffic from and to member", zap.String("member", member.Config().Name)) - time.Sleep(f.duration) - lg.Info("Traffic restored from and to member", zap.String("member", member.Config().Name)) - proxy.UnblackholeTx() - proxy.UnblackholeRx() - return nil -} - -func (f blackholePeerNetworkFailpoint) Name() string { - return "blackhole" -} - -func (f blackholePeerNetworkFailpoint) Available(clus e2e.EtcdProcess) bool { - return clus.PeerProxy() != nil -} - -type delayPeerNetworkFailpoint struct { - duration time.Duration - baseLatency time.Duration - randomizedLatency time.Duration -} - -func (f delayPeerNetworkFailpoint) Trigger(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) error { - member := clus.Procs[rand.Int()%len(clus.Procs)] - proxy := member.PeerProxy() - - proxy.DelayRx(f.baseLatency, f.randomizedLatency) - proxy.DelayTx(f.baseLatency, f.randomizedLatency) - lg.Info("Delaying traffic from and to member", zap.String("member", member.Config().Name), zap.Duration("baseLatency", f.baseLatency), zap.Duration("randomizedLatency", f.randomizedLatency)) - time.Sleep(f.duration) - lg.Info("Traffic delay removed", zap.String("member", member.Config().Name)) - proxy.UndelayRx() - proxy.UndelayTx() - return nil -} - -func (f delayPeerNetworkFailpoint) Name() string { - return "delay" -} - -func (f delayPeerNetworkFailpoint) Available(clus e2e.EtcdProcess) bool { - return clus.PeerProxy() != nil -} diff --git a/tests/linearizability/identity/id.go b/tests/linearizability/identity/id.go deleted file mode 100644 
index 31f57ccc199..00000000000 --- a/tests/linearizability/identity/id.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package identity - -import "sync/atomic" - -type Provider interface { - ClientId() int - RequestId() int -} - -func NewIdProvider() Provider { - return &atomicProvider{} -} - -type atomicProvider struct { - clientId atomic.Int64 - requestId atomic.Int64 -} - -func (id *atomicProvider) ClientId() int { - // Substract one as ClientId should start from zero. - return int(id.clientId.Add(1) - 1) -} - -func (id *atomicProvider) RequestId() int { - return int(id.requestId.Add(1)) -} diff --git a/tests/linearizability/identity/lease_ids.go b/tests/linearizability/identity/lease_ids.go deleted file mode 100644 index 23eeb5d904c..00000000000 --- a/tests/linearizability/identity/lease_ids.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package identity - -import ( - "sync" -) - -type LeaseIdStorage interface { - LeaseId(int) int64 - AddLeaseId(int, int64) - RemoveLeaseId(int) -} - -func NewLeaseIdStorage() LeaseIdStorage { - return &atomicClientId2LeaseIdMapper{m: map[int]int64{}} -} - -type atomicClientId2LeaseIdMapper struct { - sync.RWMutex - // m is used to store clientId to leaseId mapping. - m map[int]int64 -} - -func (lm *atomicClientId2LeaseIdMapper) LeaseId(clientId int) int64 { - lm.RLock() - defer lm.RUnlock() - return lm.m[clientId] -} - -func (lm *atomicClientId2LeaseIdMapper) AddLeaseId(clientId int, leaseId int64) { - lm.Lock() - defer lm.Unlock() - lm.m[clientId] = leaseId -} - -func (lm *atomicClientId2LeaseIdMapper) RemoveLeaseId(clientId int) { - lm.Lock() - defer lm.Unlock() - delete(lm.m, clientId) -} diff --git a/tests/linearizability/linearizability_test.go b/tests/linearizability/linearizability_test.go deleted file mode 100644 index e5e7b3786b6..00000000000 --- a/tests/linearizability/linearizability_test.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package linearizability - -import ( - "context" - "encoding/json" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/anishathalye/porcupine" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - "golang.org/x/sync/errgroup" - "golang.org/x/time/rate" - - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/linearizability/identity" - "go.etcd.io/etcd/tests/v3/linearizability/model" -) - -const ( - // waitBetweenFailpointTriggers - waitBetweenFailpointTriggers = time.Second -) - -var ( - LowTraffic = trafficConfig{ - name: "LowTraffic", - minimalQPS: 100, - maximalQPS: 200, - clientCount: 8, - traffic: traffic{ - keyCount: 4, - leaseTTL: DefaultLeaseTTL, - largePutSize: 32769, - writes: []requestChance{ - {operation: Put, chance: 50}, - {operation: LargePut, chance: 5}, - {operation: Delete, chance: 10}, - {operation: PutWithLease, chance: 10}, - {operation: LeaseRevoke, chance: 10}, - {operation: CompareAndSet, chance: 10}, - }, - }, - } - HighTraffic = trafficConfig{ - name: "HighTraffic", - minimalQPS: 200, - maximalQPS: 1000, - clientCount: 12, - traffic: traffic{ - keyCount: 4, - largePutSize: 32769, - leaseTTL: DefaultLeaseTTL, - writes: []requestChance{ - {operation: Put, chance: 90}, - {operation: LargePut, chance: 5}, - }, - }, - } - defaultTraffic = LowTraffic - trafficList = []trafficConfig{ - LowTraffic, HighTraffic, - } -) - -func TestLinearizability(t *testing.T) { - testRunner.BeforeTest(t) - type scenario struct { - name string - failpoint Failpoint - config e2e.EtcdProcessClusterConfig - traffic *trafficConfig - } - scenarios := []scenario{} - for _, traffic := range trafficList { - scenarios = append(scenarios, scenario{ - name: "ClusterOfSize1/" + traffic.name, - failpoint: RandomFailpoint, - traffic: &traffic, - config: *e2e.NewConfig( - e2e.WithClusterSize(1), - e2e.WithSnapshotCount(100), - 
e2e.WithGoFailEnabled(true), - e2e.WithCompactionBatchLimit(100), // required for compactBeforeCommitBatch and compactAfterCommitBatch failpoints - ), - }) - scenarios = append(scenarios, scenario{ - name: "ClusterOfSize3/" + traffic.name, - failpoint: RandomFailpoint, - traffic: &traffic, - config: *e2e.NewConfig( - e2e.WithSnapshotCount(100), - e2e.WithPeerProxy(true), - e2e.WithGoFailEnabled(true), - e2e.WithCompactionBatchLimit(100), // required for compactBeforeCommitBatch and compactAfterCommitBatch failpoints - ), - }) - } - scenarios = append(scenarios, []scenario{ - { - name: "Issue14370", - failpoint: RaftBeforeSavePanic, - config: *e2e.NewConfig( - e2e.WithClusterSize(1), - e2e.WithGoFailEnabled(true), - ), - }, - { - name: "Issue14685", - failpoint: DefragBeforeCopyPanic, - config: *e2e.NewConfig( - e2e.WithClusterSize(1), - e2e.WithGoFailEnabled(true), - ), - }, - { - name: "Issue13766", - failpoint: KillFailpoint, - traffic: &HighTraffic, - config: *e2e.NewConfig( - e2e.WithSnapshotCount(100), - ), - }, - // TODO: investigate periodic `Model is not linearizable` failures - // see https://github.com/etcd-io/etcd/pull/15104#issuecomment-1416371288 - /*{ - name: "Snapshot", - failpoint: RandomSnapshotFailpoint, - traffic: &HighTraffic, - config: *e2e.NewConfig( - e2e.WithGoFailEnabled(true), - e2e.WithSnapshotCount(100), - e2e.WithSnapshotCatchUpEntries(100), - e2e.WithPeerProxy(true), - ), - },*/ - }...) 
- for _, scenario := range scenarios { - if scenario.traffic == nil { - scenario.traffic = &defaultTraffic - } - - t.Run(scenario.name, func(t *testing.T) { - lg := zaptest.NewLogger(t) - scenario.config.Logger = lg - ctx := context.Background() - clus, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&scenario.config)) - if err != nil { - t.Fatal(err) - } - defer clus.Close() - operations, watchResponses := testLinearizability(ctx, t, lg, clus, FailpointConfig{ - failpoint: scenario.failpoint, - count: 1, - retries: 3, - waitBetweenTriggers: waitBetweenFailpointTriggers, - }, *scenario.traffic) - forcestopCluster(clus) - validateWatchResponses(t, watchResponses) - longestHistory, remainingEvents := watchEventHistory(watchResponses) - validateEventsMatch(t, longestHistory, remainingEvents) - operations = patchOperationBasedOnWatchEvents(operations, longestHistory) - checkOperationsAndPersistResults(t, lg, operations, clus) - }) - } -} - -func testLinearizability(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, failpoint FailpointConfig, traffic trafficConfig) (operations []porcupine.Operation, responses [][]watchResponse) { - // Run multiple test components (traffic, failpoints, etc) in parallel and use canceling context to propagate stop signal. 
- g := errgroup.Group{} - trafficCtx, trafficCancel := context.WithCancel(ctx) - g.Go(func() error { - triggerFailpoints(ctx, t, lg, clus, failpoint) - time.Sleep(time.Second) - trafficCancel() - return nil - }) - watchCtx, watchCancel := context.WithCancel(ctx) - g.Go(func() error { - operations = simulateTraffic(trafficCtx, t, lg, clus, traffic) - time.Sleep(time.Second) - watchCancel() - return nil - }) - g.Go(func() error { - responses = collectClusterWatchEvents(watchCtx, t, lg, clus) - return nil - }) - g.Wait() - return operations, responses -} - -func patchOperationBasedOnWatchEvents(operations []porcupine.Operation, watchEvents []watchEvent) []porcupine.Operation { - newOperations := make([]porcupine.Operation, 0, len(operations)) - persisted := map[model.EtcdOperation]watchEvent{} - for _, op := range watchEvents { - persisted[op.Op] = op - } - lastObservedOperation := lastOperationObservedInWatch(operations, persisted) - - for _, op := range operations { - request := op.Input.(model.EtcdRequest) - resp := op.Output.(model.EtcdResponse) - if resp.Err == nil || op.Call > lastObservedOperation.Call || request.Type != model.Txn { - // Cannot patch those requests. - newOperations = append(newOperations, op) - continue - } - event := matchWatchEvent(request.Txn, persisted) - if event != nil { - // Set revision and time based on watchEvent. - op.Return = event.Time.UnixNano() - op.Output = model.EtcdResponse{ - Revision: event.Revision, - ResultUnknown: true, - } - newOperations = append(newOperations, op) - continue - } - if hasNonUniqueWriteOperation(request.Txn) && !hasUniqueWriteOperation(request.Txn) { - // Leave operation as it is as we cannot match non-unique operations to watch events. 
- newOperations = append(newOperations, op) - continue - } - // Remove non persisted operations - } - return newOperations -} - -func lastOperationObservedInWatch(operations []porcupine.Operation, watchEvents map[model.EtcdOperation]watchEvent) porcupine.Operation { - var maxCallTime int64 - var lastOperation porcupine.Operation - for _, op := range operations { - request := op.Input.(model.EtcdRequest) - if request.Type != model.Txn { - continue - } - event := matchWatchEvent(request.Txn, watchEvents) - if event != nil && op.Call > maxCallTime { - maxCallTime = op.Call - lastOperation = op - } - } - return lastOperation -} - -func matchWatchEvent(request *model.TxnRequest, watchEvents map[model.EtcdOperation]watchEvent) *watchEvent { - for _, etcdOp := range request.Ops { - if etcdOp.Type == model.Put { - // Remove LeaseID which is not exposed in watch. - event, ok := watchEvents[model.EtcdOperation{ - Type: etcdOp.Type, - Key: etcdOp.Key, - Value: etcdOp.Value, - }] - if ok { - return &event - } - } - } - return nil -} - -func hasNonUniqueWriteOperation(request *model.TxnRequest) bool { - for _, etcdOp := range request.Ops { - if etcdOp.Type == model.Put || etcdOp.Type == model.Delete { - return true - } - } - return false -} - -func hasUniqueWriteOperation(request *model.TxnRequest) bool { - for _, etcdOp := range request.Ops { - if etcdOp.Type == model.Put { - return true - } - } - return false -} - -func triggerFailpoints(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, config FailpointConfig) { - var err error - successes := 0 - failures := 0 - for _, proc := range clus.Procs { - if !config.failpoint.Available(proc) { - t.Errorf("Failpoint %q not available on %s", config.failpoint.Name(), proc.Config().Name) - return - } - } - for successes < config.count && failures < config.retries { - time.Sleep(config.waitBetweenTriggers) - lg.Info("Triggering failpoint\n", zap.String("failpoint", config.failpoint.Name())) - err = 
config.failpoint.Trigger(ctx, t, lg, clus) - if err != nil { - lg.Info("Failed to trigger failpoint", zap.String("failpoint", config.failpoint.Name()), zap.Error(err)) - failures++ - continue - } - successes++ - } - if successes < config.count || failures >= config.retries { - t.Errorf("failed to trigger failpoints enough times, err: %v", err) - } -} - -type FailpointConfig struct { - failpoint Failpoint - count int - retries int - waitBetweenTriggers time.Duration -} - -func simulateTraffic(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, config trafficConfig) []porcupine.Operation { - mux := sync.Mutex{} - endpoints := clus.EndpointsV3() - - ids := identity.NewIdProvider() - lm := identity.NewLeaseIdStorage() - h := model.History{} - limiter := rate.NewLimiter(rate.Limit(config.maximalQPS), 200) - - startTime := time.Now() - wg := sync.WaitGroup{} - for i := 0; i < config.clientCount; i++ { - wg.Add(1) - endpoints := []string{endpoints[i%len(endpoints)]} - c, err := NewClient(endpoints, ids) - if err != nil { - t.Fatal(err) - } - go func(c *recordingClient, clientId int) { - defer wg.Done() - defer c.Close() - - config.traffic.Run(ctx, clientId, c, limiter, ids, lm) - mux.Lock() - h = h.Merge(c.history.History) - mux.Unlock() - }(c, i) - } - wg.Wait() - endTime := time.Now() - operations := h.Operations() - lg.Info("Recorded operations", zap.Int("count", len(operations))) - - qps := float64(len(operations)) / float64(endTime.Sub(startTime)) * float64(time.Second) - lg.Info("Average traffic", zap.Float64("qps", qps)) - if qps < config.minimalQPS { - t.Errorf("Requiring minimal %f qps for test results to be reliable, got %f qps", config.minimalQPS, qps) - } - return operations -} - -type trafficConfig struct { - name string - minimalQPS float64 - maximalQPS float64 - clientCount int - traffic Traffic -} - -func watchEventHistory(responses [][]watchResponse) (longest []watchEvent, rest [][]watchEvent) { - ops := make([][]watchEvent, 
len(responses)) - for i, resps := range responses { - ops[i] = toWatchEvents(resps) - } - - sort.Slice(ops, func(i, j int) bool { - return len(ops[i]) > len(ops[j]) - }) - return ops[0], ops[1:] -} - -func validateEventsMatch(t *testing.T, longestHistory []watchEvent, other [][]watchEvent) { - for i := 0; i < len(other); i++ { - length := len(other[i]) - // We compare prefix of watch events, as we are not guaranteed to collect all events from each node. - if diff := cmp.Diff(longestHistory[:length], other[i][:length], cmpopts.IgnoreFields(watchEvent{}, "Time")); diff != "" { - t.Errorf("Events in watches do not match, %s", diff) - } - } -} - -func checkOperationsAndPersistResults(t *testing.T, lg *zap.Logger, operations []porcupine.Operation, clus *e2e.EtcdProcessCluster) { - path, err := testResultsDirectory(t) - if err != nil { - t.Error(err) - } - - linearizable, info := porcupine.CheckOperationsVerbose(model.Etcd, operations, 5*time.Minute) - if linearizable == porcupine.Illegal { - t.Error("Model is not linearizable") - } - if linearizable == porcupine.Unknown { - t.Error("Linearization timed out") - } - if linearizable != porcupine.Ok { - persistOperationHistory(t, lg, path, operations) - persistMemberDataDir(t, lg, clus, path) - } - - visualizationPath := filepath.Join(path, "history.html") - lg.Info("Saving visualization", zap.String("path", visualizationPath)) - err = porcupine.VisualizePath(model.Etcd, info, visualizationPath) - if err != nil { - t.Errorf("Failed to visualize, err: %v", err) - } -} - -func persistOperationHistory(t *testing.T, lg *zap.Logger, path string, operations []porcupine.Operation) { - historyFilePath := filepath.Join(path, "history.json") - lg.Info("Saving operation history", zap.String("path", historyFilePath)) - file, err := os.OpenFile(historyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) - if err != nil { - t.Errorf("Failed to save operation history: %v", err) - return - } - defer file.Close() - encoder := 
json.NewEncoder(file) - for _, op := range operations { - err := encoder.Encode(op) - if err != nil { - t.Errorf("Failed to encode operation: %v", err) - } - } -} - -func persistMemberDataDir(t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, path string) { - for _, member := range clus.Procs { - memberDataDir := filepath.Join(path, member.Config().Name) - err := os.RemoveAll(memberDataDir) - if err != nil { - t.Error(err) - } - lg.Info("Saving member data dir", zap.String("member", member.Config().Name), zap.String("path", memberDataDir)) - err = os.Rename(member.Config().DataDirPath, memberDataDir) - if err != nil { - t.Error(err) - } - } -} - -func testResultsDirectory(t *testing.T) (string, error) { - path, err := filepath.Abs(filepath.Join(resultsDirectory, strings.ReplaceAll(t.Name(), "/", "_"))) - if err != nil { - return path, err - } - err = os.MkdirAll(path, 0700) - if err != nil { - return path, err - } - return path, nil -} - -// forcestopCluster stops the etcd member with signal kill. -func forcestopCluster(clus *e2e.EtcdProcessCluster) error { - for _, member := range clus.Procs { - member.Kill() - } - return clus.Stop() -} diff --git a/tests/linearizability/main_test.go b/tests/linearizability/main_test.go deleted file mode 100644 index 63ee784eca6..00000000000 --- a/tests/linearizability/main_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package linearizability - -import ( - "os" - "path/filepath" - "testing" - - "go.etcd.io/etcd/tests/v3/framework" -) - -var testRunner = framework.E2eTestRunner -var resultsDirectory string - -func TestMain(m *testing.M) { - var ok bool - var err error - resultsDirectory, ok = os.LookupEnv("RESULTS_DIR") - if !ok { - resultsDirectory = "/tmp/" - } - resultsDirectory, err = filepath.Abs(resultsDirectory) - if err != nil { - panic(err) - } - - testRunner.TestMain(m) -} diff --git a/tests/linearizability/model/history.go b/tests/linearizability/model/history.go deleted file mode 100644 index 2a7fe985e67..00000000000 --- a/tests/linearizability/model/history.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "time" - - "github.com/anishathalye/porcupine" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/linearizability/identity" -) - -type AppendableHistory struct { - // id of the next write operation. If needed a new id might be requested from idProvider. 
- id int - idProvider identity.Provider - - History -} - -func NewAppendableHistory(ids identity.Provider) *AppendableHistory { - return &AppendableHistory{ - id: ids.ClientId(), - idProvider: ids, - History: History{ - successful: []porcupine.Operation{}, - failed: []porcupine.Operation{}, - }, - } -} - -func (h *AppendableHistory) AppendGet(key string, start, end time.Time, resp *clientv3.GetResponse) { - var readData string - if len(resp.Kvs) == 1 { - readData = string(resp.Kvs[0].Value) - } - var revision int64 - if resp != nil && resp.Header != nil { - revision = resp.Header.Revision - } - h.successful = append(h.successful, porcupine.Operation{ - ClientId: h.id, - Input: getRequest(key), - Call: start.UnixNano(), - Output: getResponse(readData, revision), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) AppendPut(key, value string, start, end time.Time, resp *clientv3.PutResponse, err error) { - request := putRequest(key, value) - if err != nil { - h.appendFailed(request, start, err) - return - } - var revision int64 - if resp != nil && resp.Header != nil { - revision = resp.Header.Revision - } - h.successful = append(h.successful, porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: putResponse(revision), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) AppendPutWithLease(key, value string, leaseID int64, start, end time.Time, resp *clientv3.PutResponse, err error) { - request := putWithLeaseRequest(key, value, leaseID) - if err != nil { - h.appendFailed(request, start, err) - return - } - var revision int64 - if resp != nil && resp.Header != nil { - revision = resp.Header.Revision - } - h.successful = append(h.successful, porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: putResponse(revision), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) AppendLeaseGrant(start, end time.Time, resp *clientv3.LeaseGrantResponse, err error) { - 
var leaseID int64 - if resp != nil { - leaseID = int64(resp.ID) - } - request := leaseGrantRequest(leaseID) - if err != nil { - h.appendFailed(request, start, err) - return - } - var revision int64 - if resp != nil && resp.ResponseHeader != nil { - revision = resp.ResponseHeader.Revision - } - h.successful = append(h.successful, porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: leaseGrantResponse(revision), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) AppendLeaseRevoke(id int64, start time.Time, end time.Time, resp *clientv3.LeaseRevokeResponse, err error) { - request := leaseRevokeRequest(id) - if err != nil { - h.appendFailed(request, start, err) - return - } - var revision int64 - if resp != nil && resp.Header != nil { - revision = resp.Header.Revision - } - h.successful = append(h.successful, porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: leaseRevokeResponse(revision), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) AppendDelete(key string, start, end time.Time, resp *clientv3.DeleteResponse, err error) { - request := deleteRequest(key) - if err != nil { - h.appendFailed(request, start, err) - return - } - var revision int64 - var deleted int64 - if resp != nil && resp.Header != nil { - revision = resp.Header.Revision - deleted = resp.Deleted - } - h.successful = append(h.successful, porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: deleteResponse(deleted, revision), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) AppendTxn(key, expectValue, newValue string, start, end time.Time, resp *clientv3.TxnResponse, err error) { - request := txnRequest(key, expectValue, newValue) - if err != nil { - h.appendFailed(request, start, err) - return - } - var revision int64 - if resp != nil && resp.Header != nil { - revision = resp.Header.Revision - } - h.successful = append(h.successful, 
porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: txnResponse(resp.Succeeded, revision), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) AppendDefragment(start, end time.Time, resp *clientv3.DefragmentResponse, err error) { - request := defragmentRequest() - if err != nil { - h.appendFailed(request, start, err) - return - } - h.successful = append(h.successful, porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: defragmentResponse(), - Return: end.UnixNano(), - }) -} - -func (h *AppendableHistory) appendFailed(request EtcdRequest, start time.Time, err error) { - h.failed = append(h.failed, porcupine.Operation{ - ClientId: h.id, - Input: request, - Call: start.UnixNano(), - Output: failedResponse(err), - Return: 0, // For failed writes we don't know when request has really finished. - }) - // Operations of single client needs to be sequential. - // As we don't know return time of failed operations, all new writes need to be done with new client id. 
- h.id = h.idProvider.ClientId() -} - -func getRequest(key string) EtcdRequest { - return EtcdRequest{Type: Txn, Txn: &TxnRequest{Ops: []EtcdOperation{{Type: Get, Key: key}}}} -} - -func getResponse(value string, revision int64) EtcdResponse { - return EtcdResponse{Txn: &TxnResponse{OpsResult: []EtcdOperationResult{{Value: ToValueOrHash(value)}}}, Revision: revision} -} - -func failedResponse(err error) EtcdResponse { - return EtcdResponse{Err: err} -} - -func unknownResponse(revision int64) EtcdResponse { - return EtcdResponse{ResultUnknown: true, Revision: revision} -} - -func putRequest(key, value string) EtcdRequest { - return EtcdRequest{Type: Txn, Txn: &TxnRequest{Ops: []EtcdOperation{{Type: Put, Key: key, Value: ToValueOrHash(value)}}}} -} - -func putResponse(revision int64) EtcdResponse { - return EtcdResponse{Txn: &TxnResponse{OpsResult: []EtcdOperationResult{{}}}, Revision: revision} -} - -func deleteRequest(key string) EtcdRequest { - return EtcdRequest{Type: Txn, Txn: &TxnRequest{Ops: []EtcdOperation{{Type: Delete, Key: key}}}} -} - -func deleteResponse(deleted int64, revision int64) EtcdResponse { - return EtcdResponse{Txn: &TxnResponse{OpsResult: []EtcdOperationResult{{Deleted: deleted}}}, Revision: revision} -} - -func txnRequest(key, expectValue, newValue string) EtcdRequest { - return EtcdRequest{Type: Txn, Txn: &TxnRequest{Conds: []EtcdCondition{{Key: key, ExpectedValue: ToValueOrHash(expectValue)}}, Ops: []EtcdOperation{{Type: Put, Key: key, Value: ToValueOrHash(newValue)}}}} -} - -func txnResponse(succeeded bool, revision int64) EtcdResponse { - var result []EtcdOperationResult - if succeeded { - result = []EtcdOperationResult{{}} - } - return EtcdResponse{Txn: &TxnResponse{OpsResult: result, TxnResult: !succeeded}, Revision: revision} -} - -func putWithLeaseRequest(key, value string, leaseID int64) EtcdRequest { - return EtcdRequest{Type: Txn, Txn: &TxnRequest{Ops: []EtcdOperation{{Type: Put, Key: key, Value: ToValueOrHash(value), LeaseID: 
leaseID}}}} -} - -func leaseGrantRequest(leaseID int64) EtcdRequest { - return EtcdRequest{Type: LeaseGrant, LeaseGrant: &LeaseGrantRequest{LeaseID: leaseID}} -} - -func leaseGrantResponse(revision int64) EtcdResponse { - return EtcdResponse{LeaseGrant: &LeaseGrantReponse{}, Revision: revision} -} - -func leaseRevokeRequest(leaseID int64) EtcdRequest { - return EtcdRequest{Type: LeaseRevoke, LeaseRevoke: &LeaseRevokeRequest{LeaseID: leaseID}} -} - -func leaseRevokeResponse(revision int64) EtcdResponse { - return EtcdResponse{LeaseRevoke: &LeaseRevokeResponse{}, Revision: revision} -} - -func defragmentRequest() EtcdRequest { - return EtcdRequest{Type: Defragment, Defragment: &DefragmentRequest{}} -} - -func defragmentResponse() EtcdResponse { - return EtcdResponse{Defragment: &DefragmentResponse{}} -} - -type History struct { - successful []porcupine.Operation - // failed requests are kept separate as we don't know return time of failed operations. - // Based on https://github.com/anishathalye/porcupine/issues/10 - failed []porcupine.Operation -} - -func (h History) Merge(h2 History) History { - result := History{ - successful: make([]porcupine.Operation, 0, len(h.successful)+len(h2.successful)), - failed: make([]porcupine.Operation, 0, len(h.failed)+len(h2.failed)), - } - result.successful = append(result.successful, h.successful...) - result.successful = append(result.successful, h2.successful...) - result.failed = append(result.failed, h.failed...) - result.failed = append(result.failed, h2.failed...) - return result -} - -func (h History) Operations() []porcupine.Operation { - operations := make([]porcupine.Operation, 0, len(h.successful)+len(h.failed)) - var maxTime int64 - for _, op := range h.successful { - operations = append(operations, op) - if op.Return > maxTime { - maxTime = op.Return - } - } - for _, op := range h.failed { - if op.Call > maxTime { - maxTime = op.Call - } - } - // Failed requests don't have a known return time. 
- // Simulate Infinity by using last observed time. - for _, op := range h.failed { - op.Return = maxTime + time.Second.Nanoseconds() - operations = append(operations, op) - } - return operations -} diff --git a/tests/linearizability/model/model.go b/tests/linearizability/model/model.go deleted file mode 100644 index 43a8efe1d58..00000000000 --- a/tests/linearizability/model/model.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "encoding/json" - "fmt" - "hash/fnv" - "reflect" - "strings" - - "github.com/anishathalye/porcupine" -) - -type OperationType string - -const ( - Get OperationType = "get" - Put OperationType = "put" - Delete OperationType = "delete" -) - -var Etcd = porcupine.Model{ - Init: func() interface{} { - return "[]" // empty PossibleStates - }, - Step: func(st interface{}, in interface{}, out interface{}) (bool, interface{}) { - var states PossibleStates - err := json.Unmarshal([]byte(st.(string)), &states) - if err != nil { - panic(err) - } - ok, states := step(states, in.(EtcdRequest), out.(EtcdResponse)) - data, err := json.Marshal(states) - if err != nil { - panic(err) - } - return ok, string(data) - }, - DescribeOperation: func(in, out interface{}) string { - return describeEtcdRequestResponse(in.(EtcdRequest), out.(EtcdResponse)) - }, -} - -type RequestType string - -const ( - Txn RequestType = "txn" - LeaseGrant RequestType = "leaseGrant" - LeaseRevoke RequestType = "leaseRevoke" - Defragment RequestType = "defragment" -) - -type EtcdRequest struct { - Type RequestType - LeaseGrant *LeaseGrantRequest - LeaseRevoke *LeaseRevokeRequest - Txn *TxnRequest - Defragment *DefragmentRequest -} - -type TxnRequest struct { - Conds []EtcdCondition - Ops []EtcdOperation -} - -type EtcdCondition struct { - Key string - ExpectedValue ValueOrHash -} - -type EtcdOperation struct { - Type OperationType - Key string - Value ValueOrHash - LeaseID int64 -} - -type LeaseGrantRequest struct { - LeaseID int64 -} -type LeaseRevokeRequest struct { - LeaseID int64 -} -type DefragmentRequest struct{} - -type EtcdResponse struct { - Err error - Revision int64 - ResultUnknown bool - Txn *TxnResponse - LeaseGrant *LeaseGrantReponse - LeaseRevoke *LeaseRevokeResponse - Defragment *DefragmentResponse -} - -type TxnResponse struct { - TxnResult bool - OpsResult []EtcdOperationResult -} - -type LeaseGrantReponse struct { - LeaseID int64 -} -type LeaseRevokeResponse 
struct{} -type DefragmentResponse struct{} - -func Match(r1, r2 EtcdResponse) bool { - return ((r1.ResultUnknown || r2.ResultUnknown) && (r1.Revision == r2.Revision)) || reflect.DeepEqual(r1, r2) -} - -type EtcdOperationResult struct { - Value ValueOrHash - Deleted int64 -} - -var leased = struct{}{} - -type EtcdLease struct { - LeaseID int64 - Keys map[string]struct{} -} -type PossibleStates []EtcdState - -type EtcdState struct { - Revision int64 - KeyValues map[string]ValueOrHash - KeyLeases map[string]int64 - Leases map[int64]EtcdLease -} - -type ValueOrHash struct { - Value string - Hash uint32 -} - -func ToValueOrHash(value string) ValueOrHash { - v := ValueOrHash{} - if len(value) < 20 { - v.Value = value - } else { - h := fnv.New32a() - h.Write([]byte(value)) - v.Hash = h.Sum32() - } - return v -} - -func describeEtcdRequestResponse(request EtcdRequest, response EtcdResponse) string { - return fmt.Sprintf("%s -> %s", describeEtcdRequest(request), describeEtcdResponse(request, response)) -} - -func describeEtcdResponse(request EtcdRequest, response EtcdResponse) string { - if response.Err != nil { - return fmt.Sprintf("err: %q", response.Err) - } - if response.ResultUnknown { - return fmt.Sprintf("unknown, rev: %d", response.Revision) - } - if request.Type == Txn { - return fmt.Sprintf("%s, rev: %d", describeTxnResponse(request.Txn, response.Txn), response.Revision) - } - if response.Revision == 0 { - return "ok" - } - return fmt.Sprintf("ok, rev: %d", response.Revision) -} - -func describeEtcdRequest(request EtcdRequest) string { - switch request.Type { - case Txn: - describeOperations := describeEtcdOperations(request.Txn.Ops) - if len(request.Txn.Conds) != 0 { - return fmt.Sprintf("if(%s).then(%s)", describeEtcdConditions(request.Txn.Conds), describeOperations) - } - return describeOperations - case LeaseGrant: - return fmt.Sprintf("leaseGrant(%d)", request.LeaseGrant.LeaseID) - case LeaseRevoke: - return fmt.Sprintf("leaseRevoke(%d)", 
request.LeaseRevoke.LeaseID) - case Defragment: - return fmt.Sprintf("defragment()") - default: - return fmt.Sprintf("<! unknown request type: %q>", request.Type) - } -} - -func describeEtcdConditions(conds []EtcdCondition) string { - opsDescription := make([]string, len(conds)) - for i := range conds { - opsDescription[i] = fmt.Sprintf("%s==%s", conds[i].Key, describeValueOrHash(conds[i].ExpectedValue)) - } - return strings.Join(opsDescription, " && ") -} - -func describeEtcdOperations(ops []EtcdOperation) string { - opsDescription := make([]string, len(ops)) - for i := range ops { - opsDescription[i] = describeEtcdOperation(ops[i]) - } - return strings.Join(opsDescription, ", ") -} - -func describeTxnResponse(request *TxnRequest, response *TxnResponse) string { - if response.TxnResult { - return fmt.Sprintf("txn failed") - } - respDescription := make([]string, len(response.OpsResult)) - for i := range response.OpsResult { - respDescription[i] = describeEtcdOperationResponse(request.Ops[i].Type, response.OpsResult[i]) - } - return strings.Join(respDescription, ", ") -} - -func describeEtcdOperation(op EtcdOperation) string { - switch op.Type { - case Get: - return fmt.Sprintf("get(%q)", op.Key) - case Put: - if op.LeaseID != 0 { - return fmt.Sprintf("put(%q, %s, %d)", op.Key, describeValueOrHash(op.Value), op.LeaseID) - } - return fmt.Sprintf("put(%q, %s, nil)", op.Key, describeValueOrHash(op.Value)) - case Delete: - return fmt.Sprintf("delete(%q)", op.Key) - default: - return fmt.Sprintf("<! unknown op: %q>", op.Type) - } -} - -func describeEtcdOperationResponse(op OperationType, resp EtcdOperationResult) string { - switch op { - case Get: - return describeValueOrHash(resp.Value) - case Put: - return fmt.Sprintf("ok") - case Delete: - return fmt.Sprintf("deleted: %d", resp.Deleted) - default: - return fmt.Sprintf("<! unknown op: %q>", op) - } -} - -func describeValueOrHash(value ValueOrHash) string { - if value.Hash != 0 { - return fmt.Sprintf("hash: %d", value.Hash) - } - if value.Value == "" { - return "nil" - } - return
fmt.Sprintf("%q", value.Value) -} - -func step(states PossibleStates, request EtcdRequest, response EtcdResponse) (bool, PossibleStates) { - if len(states) == 0 { - // states were not initialized - if response.Err != nil || response.ResultUnknown || response.Revision == 0 { - return true, nil - } - return true, PossibleStates{initState(request, response)} - } - if response.Err != nil { - states = applyFailedRequest(states, request) - } else { - states = applyRequest(states, request, response) - } - return len(states) > 0, states -} - -// initState tries to create etcd state based on the first request. -func initState(request EtcdRequest, response EtcdResponse) EtcdState { - state := EtcdState{ - Revision: response.Revision, - KeyValues: map[string]ValueOrHash{}, - KeyLeases: map[string]int64{}, - Leases: map[int64]EtcdLease{}, - } - switch request.Type { - case Txn: - if response.Txn.TxnResult { - return state - } - for i, op := range request.Txn.Ops { - opResp := response.Txn.OpsResult[i] - switch op.Type { - case Get: - if opResp.Value.Value != "" && opResp.Value.Hash == 0 { - state.KeyValues[op.Key] = opResp.Value - } - case Put: - state.KeyValues[op.Key] = op.Value - case Delete: - default: - panic("Unknown operation") - } - } - case LeaseGrant: - lease := EtcdLease{ - LeaseID: request.LeaseGrant.LeaseID, - Keys: map[string]struct{}{}, - } - state.Leases[request.LeaseGrant.LeaseID] = lease - case LeaseRevoke: - case Defragment: - default: - panic(fmt.Sprintf("Unknown request type: %v", request.Type)) - } - return state -} - -// applyFailedRequest handles a failed requests, one that it's not known if it was persisted or not. 
-func applyFailedRequest(states PossibleStates, request EtcdRequest) PossibleStates { - for _, s := range states { - newState, _ := applyRequestToSingleState(s, request) - if !reflect.DeepEqual(newState, s) { - states = append(states, newState) - } - } - return states -} - -// applyRequest handles a successful request by applying it to possible states and checking if they match the response. -func applyRequest(states PossibleStates, request EtcdRequest, response EtcdResponse) PossibleStates { - newStates := make(PossibleStates, 0, len(states)) - for _, s := range states { - newState, expectResponse := applyRequestToSingleState(s, request) - if Match(expectResponse, response) { - newStates = append(newStates, newState) - } - } - return newStates -} - -// applyRequestToSingleState handles a successful request, returning updated state and response it would generate. -func applyRequestToSingleState(s EtcdState, request EtcdRequest) (EtcdState, EtcdResponse) { - newKVs := map[string]ValueOrHash{} - for k, v := range s.KeyValues { - newKVs[k] = v - } - s.KeyValues = newKVs - switch request.Type { - case Txn: - success := true - for _, cond := range request.Txn.Conds { - if val := s.KeyValues[cond.Key]; val != cond.ExpectedValue { - success = false - break - } - } - if !success { - return s, EtcdResponse{Revision: s.Revision, Txn: &TxnResponse{TxnResult: true}} - } - opResp := make([]EtcdOperationResult, len(request.Txn.Ops)) - increaseRevision := false - for i, op := range request.Txn.Ops { - switch op.Type { - case Get: - opResp[i].Value = s.KeyValues[op.Key] - case Put: - _, leaseExists := s.Leases[op.LeaseID] - if op.LeaseID != 0 && !leaseExists { - break - } - s.KeyValues[op.Key] = op.Value - increaseRevision = true - s = detachFromOldLease(s, op.Key) - if leaseExists { - s = attachToNewLease(s, op.LeaseID, op.Key) - } - case Delete: - if _, ok := s.KeyValues[op.Key]; ok { - delete(s.KeyValues, op.Key) - increaseRevision = true - s = detachFromOldLease(s, op.Key) - 
opResp[i].Deleted = 1 - } - default: - panic("unsupported operation") - } - } - if increaseRevision { - s.Revision += 1 - } - return s, EtcdResponse{Txn: &TxnResponse{OpsResult: opResp}, Revision: s.Revision} - case LeaseGrant: - lease := EtcdLease{ - LeaseID: request.LeaseGrant.LeaseID, - Keys: map[string]struct{}{}, - } - s.Leases[request.LeaseGrant.LeaseID] = lease - return s, EtcdResponse{Revision: s.Revision, LeaseGrant: &LeaseGrantReponse{}} - case LeaseRevoke: - //Delete the keys attached to the lease - keyDeleted := false - for key, _ := range s.Leases[request.LeaseRevoke.LeaseID].Keys { - //same as delete. - if _, ok := s.KeyValues[key]; ok { - if !keyDeleted { - keyDeleted = true - } - delete(s.KeyValues, key) - delete(s.KeyLeases, key) - } - } - //delete the lease - delete(s.Leases, request.LeaseRevoke.LeaseID) - if keyDeleted { - s.Revision += 1 - } - return s, EtcdResponse{Revision: s.Revision, LeaseRevoke: &LeaseRevokeResponse{}} - case Defragment: - return s, defragmentResponse() - default: - panic(fmt.Sprintf("Unknown request type: %v", request.Type)) - } -} - -func detachFromOldLease(s EtcdState, key string) EtcdState { - if oldLeaseId, ok := s.KeyLeases[key]; ok { - delete(s.Leases[oldLeaseId].Keys, key) - delete(s.KeyLeases, key) - } - return s -} - -func attachToNewLease(s EtcdState, leaseID int64, key string) EtcdState { - s.KeyLeases[key] = leaseID - s.Leases[leaseID].Keys[key] = leased - return s -} diff --git a/tests/linearizability/model/model_test.go b/tests/linearizability/model/model_test.go deleted file mode 100644 index d6800263c3b..00000000000 --- a/tests/linearizability/model/model_test.go +++ /dev/null @@ -1,837 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestModelStep(t *testing.T) { - tcs := []struct { - name string - operations []testOperation - }{ - { - name: "First Get can start from non-empty value and non-zero revision", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("", 42)}, - }, - }, - { - name: "First Put can start from non-zero revision", - operations: []testOperation{ - {req: putRequest("key", "1"), resp: putResponse(42)}, - }, - }, - { - name: "First delete can start from non-zero revision", - operations: []testOperation{ - {req: deleteRequest("key"), resp: deleteResponse(0, 42)}, - }, - }, - { - name: "First Txn can start from non-zero revision", - operations: []testOperation{ - {req: txnRequest("key", "", "42"), resp: txnResponse(false, 42)}, - }, - }, - { - name: "Get response data should match put", - operations: []testOperation{ - {req: putRequest("key1", "11"), resp: putResponse(1)}, - {req: putRequest("key2", "12"), resp: putResponse(2)}, - {req: getRequest("key1"), resp: getResponse("11", 1), failure: true}, - {req: getRequest("key1"), resp: getResponse("12", 1), failure: true}, - {req: getRequest("key1"), resp: getResponse("12", 2), failure: true}, - {req: getRequest("key1"), resp: getResponse("11", 2)}, - {req: getRequest("key2"), resp: getResponse("11", 2), failure: true}, - {req: getRequest("key2"), resp: getResponse("12", 1), failure: true}, - {req: getRequest("key2"), resp: getResponse("11", 1), failure: true}, - {req: getRequest("key2"), 
resp: getResponse("12", 2)}, - }, - }, - { - name: "Get response data should match large put", - operations: []testOperation{ - {req: putRequest("key", "012345678901234567890"), resp: putResponse(1)}, - {req: getRequest("key"), resp: getResponse("123456789012345678901", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("012345678901234567890", 1)}, - {req: putRequest("key", "123456789012345678901"), resp: putResponse(2)}, - {req: getRequest("key"), resp: getResponse("123456789012345678901", 2)}, - {req: getRequest("key"), resp: getResponse("012345678901234567890", 2), failure: true}, - }, - }, - { - name: "Put must increase revision by 1", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("", 1)}, - {req: putRequest("key", "1"), resp: putResponse(1), failure: true}, - {req: putRequest("key", "1"), resp: putResponse(3), failure: true}, - {req: putRequest("key", "1"), resp: putResponse(2)}, - }, - }, - { - name: "Put can fail and be lost before get", - operations: []testOperation{ - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: getRequest("key"), resp: getResponse("2", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("1", 2), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 2), failure: true}, - }, - }, - { - name: "Put can fail and be lost before put", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("", 1)}, - {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "3"), resp: getResponse("", 2)}, - }, - }, - { - name: "Put can fail and be lost before delete", - operations: []testOperation{ - {req: deleteRequest("key"), resp: deleteResponse(0, 1)}, - {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(0, 
1)}, - }, - }, - { - name: "Put can fail and be lost before txn", - operations: []testOperation{ - // Txn failure - {req: getRequest("key"), resp: getResponse("", 1)}, - {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "2", "3"), resp: txnResponse(false, 1)}, - // Txn success - {req: putRequest("key", "2"), resp: putResponse(2)}, - {req: putRequest("key", "4"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "2", "5"), resp: txnResponse(true, 3)}, - }, - }, - { - name: "Put can fail and be lost before txn success", - operations: []testOperation{}, - }, - { - name: "Put can fail but be persisted and increase revision before get", - operations: []testOperation{ - // One failed request, one persisted. - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: putRequest("key", "2"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("3", 2), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 2)}, - // Two failed request, two persisted. - {req: putRequest("key", "3"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "4"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("4", 4)}, - }, - }, - { - name: "Put can fail but be persisted and increase revision before delete", - operations: []testOperation{ - // One failed request, one persisted. - {req: deleteRequest("key"), resp: deleteResponse(0, 1)}, - {req: putRequest("key", "1"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 1), failure: true}, - {req: deleteRequest("key"), resp: deleteResponse(1, 2), failure: true}, - {req: deleteRequest("key"), resp: deleteResponse(1, 3)}, - // Two failed request, two persisted. 
- {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: putRequest("key", "5"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "6"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 7)}, - // Two failed request, one persisted. - {req: putRequest("key", "8"), resp: putResponse(8)}, - {req: putRequest("key", "9"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "10"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 10)}, - }, - }, - { - name: "Put can fail but be persisted before txn", - operations: []testOperation{ - // Txn success - {req: getRequest("key"), resp: getResponse("", 1)}, - {req: putRequest("key", "2"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "2", ""), resp: txnResponse(true, 2), failure: true}, - {req: txnRequest("key", "2", ""), resp: txnResponse(true, 3)}, - // Txn failure - {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: txnRequest("key", "5", ""), resp: txnResponse(false, 4)}, - {req: putRequest("key", "5"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("5", 5)}, - }, - }, - { - name: "Delete only increases revision on success", - operations: []testOperation{ - {req: putRequest("key1", "11"), resp: putResponse(1)}, - {req: putRequest("key2", "12"), resp: putResponse(2)}, - {req: deleteRequest("key1"), resp: deleteResponse(1, 2), failure: true}, - {req: deleteRequest("key1"), resp: deleteResponse(1, 3)}, - {req: deleteRequest("key1"), resp: deleteResponse(0, 4), failure: true}, - {req: deleteRequest("key1"), resp: deleteResponse(0, 3)}, - }, - }, - { - name: "Delete not existing key", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("", 1)}, - {req: deleteRequest("key"), resp: deleteResponse(1, 2), failure: true}, - {req: deleteRequest("key"), resp: deleteResponse(0, 1)}, - }, - }, - 
{ - name: "Delete clears value", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: deleteRequest("key"), resp: deleteResponse(1, 2)}, - {req: getRequest("key"), resp: getResponse("1", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("1", 2), failure: true}, - {req: getRequest("key"), resp: getResponse("", 2)}, - }, - }, - { - name: "Delete can fail and be lost before get", - operations: []testOperation{ - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: getRequest("key"), resp: getResponse("", 2), failure: true}, - }, - }, - { - name: "Delete can fail and be lost before delete", - operations: []testOperation{ - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 1), failure: true}, - {req: deleteRequest("key"), resp: deleteResponse(1, 2)}, - }, - }, - { - name: "Delete can fail and be lost before put", - operations: []testOperation{ - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "1"), resp: putResponse(2)}, - }, - }, - { - name: "Delete can fail but be persisted before get", - operations: []testOperation{ - // One failed request, one persisted. - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("", 2)}, - // Two failed request, one persisted. 
- {req: putRequest("key", "3"), resp: putResponse(3)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("", 4)}, - }, - }, - { - name: "Delete can fail but be persisted before put", - operations: []testOperation{ - // One failed request, one persisted. - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "3"), resp: putResponse(3)}, - // Two failed request, one persisted. - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "5"), resp: putResponse(5)}, - }, - }, - { - name: "Delete can fail but be persisted before delete", - operations: []testOperation{ - // One failed request, one persisted. - {req: putRequest("key", "1"), resp: putResponse(1)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(0, 2)}, - {req: putRequest("key", "3"), resp: putResponse(3)}, - // Two failed request, one persisted. 
- {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(0, 4)}, - }, - }, - { - name: "Delete can fail but be persisted before txn", - operations: []testOperation{ - // Txn success - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "", "3"), resp: txnResponse(true, 3)}, - // Txn failure - {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: deleteRequest("key"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "4", "5"), resp: txnResponse(false, 5)}, - }, - }, - { - name: "Txn sets new value if value matches expected", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: txnResponse(true, 1), failure: true}, - {req: txnRequest("key", "1", "2"), resp: txnResponse(false, 2), failure: true}, - {req: txnRequest("key", "1", "2"), resp: txnResponse(false, 1), failure: true}, - {req: txnRequest("key", "1", "2"), resp: txnResponse(true, 2)}, - {req: getRequest("key"), resp: getResponse("1", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("1", 2), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 2)}, - }, - }, - { - name: "Txn can expect on empty key", - operations: []testOperation{ - {req: getRequest("key1"), resp: getResponse("", 1)}, - {req: txnRequest("key1", "", "2"), resp: txnResponse(true, 2)}, - {req: txnRequest("key2", "", "3"), resp: txnResponse(true, 3)}, - {req: txnRequest("key3", "4", "4"), resp: txnResponse(false, 4), failure: true}, - }, - }, - { - name: "Txn doesn't do anything if value doesn't match expected", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: 
txnRequest("key", "2", "3"), resp: txnResponse(true, 2), failure: true}, - {req: txnRequest("key", "2", "3"), resp: txnResponse(true, 1), failure: true}, - {req: txnRequest("key", "2", "3"), resp: txnResponse(false, 2), failure: true}, - {req: txnRequest("key", "2", "3"), resp: txnResponse(false, 1)}, - {req: getRequest("key"), resp: getResponse("2", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 2), failure: true}, - {req: getRequest("key"), resp: getResponse("3", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("3", 2), failure: true}, - {req: getRequest("key"), resp: getResponse("1", 1)}, - }, - }, - { - name: "Txn can fail and be lost before get", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: getRequest("key"), resp: getResponse("2", 2), failure: true}, - }, - }, - { - name: "Txn can fail and be lost before delete", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 2)}, - }, - }, - { - name: "Txn can fail and be lost before put", - operations: []testOperation{ - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "3"), resp: putResponse(2)}, - }, - }, - { - name: "Txn can fail but be persisted before get", - operations: []testOperation{ - // One failed request, one persisted. 
- {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("2", 1), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 2)}, - // Two failed request, two persisted. - {req: putRequest("key", "3"), resp: putResponse(3)}, - {req: txnRequest("key", "3", "4"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "4", "5"), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("5", 5)}, - }, - }, - { - name: "Txn can fail but be persisted before put", - operations: []testOperation{ - // One failed request, one persisted. - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "3"), resp: putResponse(3)}, - // Two failed request, two persisted. - {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: txnRequest("key", "4", "5"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "5", "6"), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "7"), resp: putResponse(7)}, - }, - }, - { - name: "Txn can fail but be persisted before delete", - operations: []testOperation{ - // One failed request, one persisted. - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 3)}, - // Two failed request, two persisted. 
- {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: txnRequest("key", "4", "5"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "5", "6"), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 7)}, - }, - }, - { - name: "Txn can fail but be persisted before txn", - operations: []testOperation{ - // One failed request, one persisted with success. - {req: getRequest("key"), resp: getResponse("1", 1)}, - {req: txnRequest("key", "1", "2"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "2", "3"), resp: txnResponse(true, 3)}, - // Two failed request, two persisted with success. - {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: txnRequest("key", "4", "5"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "5", "6"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "6", "7"), resp: txnResponse(true, 7)}, - // One failed request, one persisted with failure. - {req: putRequest("key", "8"), resp: putResponse(8)}, - {req: txnRequest("key", "8", "9"), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "8", "10"), resp: txnResponse(false, 9)}, - }, - }, - { - name: "Put with valid lease id should succeed. Put with invalid lease id should fail", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)}, - {req: putWithLeaseRequest("key", "3", 2), resp: putResponse(3), failure: true}, - {req: getRequest("key"), resp: getResponse("2", 2)}, - }, - }, - { - name: "Put with valid lease id should succeed. 
Put with expired lease id should fail", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)}, - {req: getRequest("key"), resp: getResponse("2", 2)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - {req: putWithLeaseRequest("key", "4", 1), resp: putResponse(4), failure: true}, - {req: getRequest("key"), resp: getResponse("", 3)}, - }, - }, - { - name: "Revoke should increment the revision", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - {req: getRequest("key"), resp: getResponse("", 3)}, - }, - }, - { - name: "Put following a PutWithLease will detach the key from the lease", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)}, - {req: putRequest("key", "3"), resp: putResponse(3)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - {req: getRequest("key"), resp: getResponse("3", 3)}, - }, - }, - { - name: "Change lease. 
Revoking older lease should not increment revision", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: leaseGrantRequest(2), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)}, - {req: putWithLeaseRequest("key", "3", 2), resp: putResponse(3)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - {req: getRequest("key"), resp: getResponse("3", 3)}, - {req: leaseRevokeRequest(2), resp: leaseRevokeResponse(4)}, - {req: getRequest("key"), resp: getResponse("", 4)}, - }, - }, - { - name: "Update key with same lease", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)}, - {req: putWithLeaseRequest("key", "3", 1), resp: putResponse(3)}, - {req: getRequest("key"), resp: getResponse("3", 3)}, - }, - }, - { - name: "Deleting a leased key - revoke should not increment revision", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "2", 1), resp: putResponse(2)}, - {req: deleteRequest("key"), resp: deleteResponse(1, 3)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(4), failure: true}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - }, - }, - { - name: "Lease a few keys - revoke should increment revision only once", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key1", "1", 1), resp: putResponse(2)}, - {req: putWithLeaseRequest("key2", "2", 1), resp: putResponse(3)}, - {req: putWithLeaseRequest("key3", "3", 1), resp: putResponse(4)}, - {req: putWithLeaseRequest("key4", "4", 1), resp: putResponse(5)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(6)}, - }, - }, - { - name: "Lease some keys then delete some of them. 
Revoke should increment revision since some keys were still leased", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key1", "1", 1), resp: putResponse(2)}, - {req: putWithLeaseRequest("key2", "2", 1), resp: putResponse(3)}, - {req: putWithLeaseRequest("key3", "3", 1), resp: putResponse(4)}, - {req: putWithLeaseRequest("key4", "4", 1), resp: putResponse(5)}, - {req: deleteRequest("key1"), resp: deleteResponse(1, 6)}, - {req: deleteRequest("key3"), resp: deleteResponse(1, 7)}, - {req: deleteRequest("key4"), resp: deleteResponse(1, 8)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(9)}, - {req: deleteRequest("key2"), resp: deleteResponse(0, 9)}, - {req: getRequest("key1"), resp: getResponse("", 9)}, - {req: getRequest("key2"), resp: getResponse("", 9)}, - {req: getRequest("key3"), resp: getResponse("", 9)}, - {req: getRequest("key4"), resp: getResponse("", 9)}, - }, - }, - { - name: "Lease some keys then delete all of them. 
Revoke should not increment", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key1", "1", 1), resp: putResponse(2)}, - {req: putWithLeaseRequest("key2", "2", 1), resp: putResponse(3)}, - {req: putWithLeaseRequest("key3", "3", 1), resp: putResponse(4)}, - {req: putWithLeaseRequest("key4", "4", 1), resp: putResponse(5)}, - {req: deleteRequest("key1"), resp: deleteResponse(1, 6)}, - {req: deleteRequest("key2"), resp: deleteResponse(1, 7)}, - {req: deleteRequest("key3"), resp: deleteResponse(1, 8)}, - {req: deleteRequest("key4"), resp: deleteResponse(1, 9)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(9)}, - }, - }, - { - name: "All request types", - operations: []testOperation{ - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: putWithLeaseRequest("key", "1", 1), resp: putResponse(2)}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: getRequest("key"), resp: getResponse("4", 4)}, - {req: txnRequest("key", "4", "5"), resp: txnResponse(true, 5)}, - {req: deleteRequest("key"), resp: deleteResponse(1, 6)}, - {req: defragmentRequest(), resp: defragmentResponse()}, - }, - }, - { - name: "Defragment success between all other request types", - operations: []testOperation{ - {req: defragmentRequest(), resp: defragmentResponse()}, - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: defragmentRequest(), resp: defragmentResponse()}, - {req: putWithLeaseRequest("key", "1", 1), resp: putResponse(2)}, - {req: defragmentRequest(), resp: defragmentResponse()}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - {req: defragmentRequest(), resp: defragmentResponse()}, - {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: defragmentRequest(), resp: defragmentResponse()}, - {req: getRequest("key"), resp: getResponse("4", 4)}, - {req: defragmentRequest(), resp: 
defragmentResponse()}, - {req: txnRequest("key", "4", "5"), resp: txnResponse(true, 5)}, - {req: defragmentRequest(), resp: defragmentResponse()}, - {req: deleteRequest("key"), resp: deleteResponse(1, 6)}, - {req: defragmentRequest(), resp: defragmentResponse()}, - }, - }, - { - name: "Defragment failures between all other request types", - operations: []testOperation{ - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - {req: leaseGrantRequest(1), resp: leaseGrantResponse(1)}, - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - {req: putWithLeaseRequest("key", "1", 1), resp: putResponse(2)}, - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - {req: leaseRevokeRequest(1), resp: leaseRevokeResponse(3)}, - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - {req: putRequest("key", "4"), resp: putResponse(4)}, - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - {req: getRequest("key"), resp: getResponse("4", 4)}, - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - {req: txnRequest("key", "4", "5"), resp: txnResponse(true, 5)}, - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - {req: deleteRequest("key"), resp: deleteResponse(1, 6)}, - {req: defragmentRequest(), resp: failedResponse(errors.New("failed"))}, - }, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - state := Etcd.Init() - for _, op := range tc.operations { - ok, newState := Etcd.Step(state, op.req, op.resp) - if ok != !op.failure { - t.Logf("state: %v", state) - t.Errorf("Unexpected operation result, expect: %v, got: %v, operation: %s", !op.failure, ok, Etcd.DescribeOperation(op.req, op.resp)) - } - if ok { - state = newState - t.Logf("state: %v", state) - } - } - }) - } -} - -type testOperation struct { - req EtcdRequest - resp EtcdResponse - failure bool -} - -func TestModelDescribe(t *testing.T) { - tcs := []struct { 
- req EtcdRequest - resp EtcdResponse - expectDescribe string - }{ - { - req: getRequest("key1"), - resp: getResponse("", 1), - expectDescribe: `get("key1") -> nil, rev: 1`, - }, - { - req: getRequest("key2"), - resp: getResponse("2", 2), - expectDescribe: `get("key2") -> "2", rev: 2`, - }, - { - req: getRequest("key2b"), - resp: getResponse("01234567890123456789", 2), - expectDescribe: `get("key2b") -> hash: 2945867837, rev: 2`, - }, - { - req: putRequest("key3", "3"), - resp: putResponse(3), - expectDescribe: `put("key3", "3", nil) -> ok, rev: 3`, - }, - { - req: putWithLeaseRequest("key3b", "3b", 3), - resp: putResponse(3), - expectDescribe: `put("key3b", "3b", 3) -> ok, rev: 3`, - }, - { - req: putRequest("key3c", "01234567890123456789"), - resp: putResponse(3), - expectDescribe: `put("key3c", hash: 2945867837, nil) -> ok, rev: 3`, - }, - { - req: putRequest("key4", "4"), - resp: failedResponse(errors.New("failed")), - expectDescribe: `put("key4", "4", nil) -> err: "failed"`, - }, - { - req: putRequest("key4b", "4b"), - resp: unknownResponse(42), - expectDescribe: `put("key4b", "4b", nil) -> unknown, rev: 42`, - }, - { - req: deleteRequest("key5"), - resp: deleteResponse(1, 5), - expectDescribe: `delete("key5") -> deleted: 1, rev: 5`, - }, - { - req: deleteRequest("key6"), - resp: failedResponse(errors.New("failed")), - expectDescribe: `delete("key6") -> err: "failed"`, - }, - { - req: txnRequest("key7", "7", "77"), - resp: txnResponse(false, 7), - expectDescribe: `if(key7=="7").then(put("key7", "77", nil)) -> txn failed, rev: 7`, - }, - { - req: txnRequest("key8", "8", "88"), - resp: txnResponse(true, 8), - expectDescribe: `if(key8=="8").then(put("key8", "88", nil)) -> ok, rev: 8`, - }, - { - req: txnRequest("key9", "9", "99"), - resp: failedResponse(errors.New("failed")), - expectDescribe: `if(key9=="9").then(put("key9", "99", nil)) -> err: "failed"`, - }, - { - req: defragmentRequest(), - resp: defragmentResponse(), - expectDescribe: `defragment() -> ok`, - 
}, - } - for _, tc := range tcs { - assert.Equal(t, tc.expectDescribe, Etcd.DescribeOperation(tc.req, tc.resp)) - } -} - -func TestModelResponseMatch(t *testing.T) { - tcs := []struct { - resp1 EtcdResponse - resp2 EtcdResponse - expectMatch bool - }{ - { - resp1: getResponse("a", 1), - resp2: getResponse("a", 1), - expectMatch: true, - }, - { - resp1: getResponse("a", 1), - resp2: getResponse("b", 1), - expectMatch: false, - }, - { - resp1: getResponse("a", 1), - resp2: getResponse("a", 2), - expectMatch: false, - }, - { - resp1: getResponse("a", 1), - resp2: failedResponse(errors.New("failed request")), - expectMatch: false, - }, - { - resp1: getResponse("a", 1), - resp2: unknownResponse(1), - expectMatch: true, - }, - { - resp1: getResponse("a", 1), - resp2: unknownResponse(0), - expectMatch: false, - }, - { - resp1: putResponse(3), - resp2: putResponse(3), - expectMatch: true, - }, - { - resp1: putResponse(3), - resp2: putResponse(4), - expectMatch: false, - }, - { - resp1: putResponse(3), - resp2: failedResponse(errors.New("failed request")), - expectMatch: false, - }, - { - resp1: putResponse(3), - resp2: unknownResponse(3), - expectMatch: true, - }, - { - resp1: putResponse(3), - resp2: unknownResponse(0), - expectMatch: false, - }, - { - resp1: deleteResponse(1, 5), - resp2: deleteResponse(1, 5), - expectMatch: true, - }, - { - resp1: deleteResponse(1, 5), - resp2: deleteResponse(0, 5), - expectMatch: false, - }, - { - resp1: deleteResponse(1, 5), - resp2: deleteResponse(1, 6), - expectMatch: false, - }, - { - resp1: deleteResponse(1, 5), - resp2: failedResponse(errors.New("failed request")), - expectMatch: false, - }, - { - resp1: deleteResponse(1, 5), - resp2: unknownResponse(5), - expectMatch: true, - }, - { - resp1: deleteResponse(0, 5), - resp2: unknownResponse(0), - expectMatch: false, - }, - { - resp1: deleteResponse(1, 5), - resp2: unknownResponse(0), - expectMatch: false, - }, - { - resp1: deleteResponse(0, 5), - resp2: unknownResponse(2), - 
expectMatch: false, - }, - { - resp1: txnResponse(false, 7), - resp2: txnResponse(false, 7), - expectMatch: true, - }, - { - resp1: txnResponse(true, 7), - resp2: txnResponse(false, 7), - expectMatch: false, - }, - { - resp1: txnResponse(false, 7), - resp2: txnResponse(false, 8), - expectMatch: false, - }, - { - resp1: txnResponse(false, 7), - resp2: failedResponse(errors.New("failed request")), - expectMatch: false, - }, - { - resp1: txnResponse(true, 7), - resp2: unknownResponse(7), - expectMatch: true, - }, - { - resp1: txnResponse(false, 7), - resp2: unknownResponse(7), - expectMatch: true, - }, - { - resp1: txnResponse(true, 7), - resp2: unknownResponse(0), - expectMatch: false, - }, - { - resp1: txnResponse(false, 7), - resp2: unknownResponse(0), - expectMatch: false, - }, - } - for i, tc := range tcs { - assert.Equal(t, tc.expectMatch, Match(tc.resp1, tc.resp2), "%d %+v %+v", i, tc.resp1, tc.resp2) - } -} diff --git a/tests/linearizability/traffic.go b/tests/linearizability/traffic.go deleted file mode 100644 index ed41305c84d..00000000000 --- a/tests/linearizability/traffic.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package linearizability - -import ( - "context" - "fmt" - "math/rand" - "strings" - "time" - - "golang.org/x/time/rate" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/tests/v3/linearizability/identity" -) - -var ( - DefaultLeaseTTL int64 = 7200 - RequestTimeout = 40 * time.Millisecond -) - -type TrafficRequestType string - -const ( - Get TrafficRequestType = "get" - Put TrafficRequestType = "put" - LargePut TrafficRequestType = "largePut" - Delete TrafficRequestType = "delete" - PutWithLease TrafficRequestType = "putWithLease" - LeaseRevoke TrafficRequestType = "leaseRevoke" - CompareAndSet TrafficRequestType = "compareAndSet" - Defragment TrafficRequestType = "defragment" -) - -type Traffic interface { - Run(ctx context.Context, clientId int, c *recordingClient, limiter *rate.Limiter, ids identity.Provider, lm identity.LeaseIdStorage) -} - -type traffic struct { - keyCount int - writes []requestChance - leaseTTL int64 - largePutSize int -} - -type requestChance struct { - operation TrafficRequestType - chance int -} - -func (t traffic) Run(ctx context.Context, clientId int, c *recordingClient, limiter *rate.Limiter, ids identity.Provider, lm identity.LeaseIdStorage) { - - for { - select { - case <-ctx.Done(): - return - default: - } - key := fmt.Sprintf("%d", rand.Int()%t.keyCount) - // Execute one read per one write to avoid operation history include too many failed writes when etcd is down. - resp, err := t.Read(ctx, c, limiter, key) - if err != nil { - continue - } - // Provide each write with unique id to make it easier to validate operation history. 
- t.Write(ctx, c, limiter, key, fmt.Sprintf("%d", ids.RequestId()), lm, clientId, resp) - } -} - -func (t traffic) Read(ctx context.Context, c *recordingClient, limiter *rate.Limiter, key string) ([]*mvccpb.KeyValue, error) { - getCtx, cancel := context.WithTimeout(ctx, RequestTimeout) - resp, err := c.Get(getCtx, key) - cancel() - if err == nil { - limiter.Wait(ctx) - } - return resp, err -} - -func (t traffic) Write(ctx context.Context, c *recordingClient, limiter *rate.Limiter, key string, newValue string, lm identity.LeaseIdStorage, cid int, lastValues []*mvccpb.KeyValue) error { - writeCtx, cancel := context.WithTimeout(ctx, RequestTimeout) - - var err error - switch t.pickWriteRequest() { - case Put: - err = c.Put(writeCtx, key, newValue) - case LargePut: - err = c.Put(writeCtx, key, randString(t.largePutSize)) - case Delete: - err = c.Delete(writeCtx, key) - case CompareAndSet: - var expectValue string - if len(lastValues) != 0 { - expectValue = string(lastValues[0].Value) - } - err = c.CompareAndSet(writeCtx, key, expectValue, newValue) - case PutWithLease: - leaseId := lm.LeaseId(cid) - if leaseId == 0 { - leaseId, err = c.LeaseGrant(writeCtx, t.leaseTTL) - if err == nil { - lm.AddLeaseId(cid, leaseId) - limiter.Wait(ctx) - } - } - if leaseId != 0 { - putCtx, putCancel := context.WithTimeout(ctx, RequestTimeout) - err = c.PutWithLease(putCtx, key, newValue, leaseId) - putCancel() - } - case LeaseRevoke: - leaseId := lm.LeaseId(cid) - if leaseId != 0 { - err = c.LeaseRevoke(writeCtx, leaseId) - //if LeaseRevoke has failed, do not remove the mapping. 
- if err == nil { - lm.RemoveLeaseId(cid) - } - } - case Defragment: - err = c.Defragment(writeCtx) - default: - panic("invalid operation") - } - cancel() - if err == nil { - limiter.Wait(ctx) - } - return err -} - -func (t traffic) pickWriteRequest() TrafficRequestType { - sum := 0 - for _, op := range t.writes { - sum += op.chance - } - roll := rand.Int() % sum - for _, op := range t.writes { - if roll < op.chance { - return op.operation - } - roll -= op.chance - } - panic("unexpected") -} - -func randString(size int) string { - data := strings.Builder{} - data.Grow(size) - for i := 0; i < size; i++ { - data.WriteByte(byte(int('a') + rand.Intn(26))) - } - return data.String() -} diff --git a/tests/linearizability/watch.go b/tests/linearizability/watch.go deleted file mode 100644 index 2aaf59b1886..00000000000 --- a/tests/linearizability/watch.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package linearizability - -import ( - "context" - "sync" - "testing" - "time" - - "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/mvccpb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/tests/v3/framework/e2e" - "go.etcd.io/etcd/tests/v3/linearizability/model" -) - -func collectClusterWatchEvents(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster) [][]watchResponse { - mux := sync.Mutex{} - var wg sync.WaitGroup - memberResponses := make([][]watchResponse, len(clus.Procs)) - for i, member := range clus.Procs { - c, err := clientv3.New(clientv3.Config{ - Endpoints: member.EndpointsV3(), - Logger: zap.NewNop(), - DialKeepAliveTime: 1 * time.Millisecond, - DialKeepAliveTimeout: 5 * time.Millisecond, - }) - if err != nil { - t.Fatal(err) - } - - wg.Add(1) - go func(i int, c *clientv3.Client) { - defer wg.Done() - defer c.Close() - responses := watchMember(ctx, lg, c) - mux.Lock() - memberResponses[i] = responses - mux.Unlock() - }(i, c) - } - wg.Wait() - return memberResponses -} -func watchMember(ctx context.Context, lg *zap.Logger, c *clientv3.Client) (resps []watchResponse) { - var lastRevision int64 = 0 - for { - select { - case <-ctx.Done(): - return resps - default: - } - for resp := range c.Watch(ctx, "", clientv3.WithPrefix(), clientv3.WithRev(lastRevision+1)) { - resps = append(resps, watchResponse{resp, time.Now()}) - lastRevision = resp.Header.Revision - if resp.Err() != nil { - lg.Info("Watch error", zap.Error(resp.Err())) - } - } - } -} - -func validateWatchResponses(t *testing.T, responses [][]watchResponse) { - for _, memberResponses := range responses { - validateMemberWatchResponses(t, memberResponses) - } -} - -func validateMemberWatchResponses(t *testing.T, responses []watchResponse) { - var lastRevision int64 = 1 - for _, resp := range responses { - if resp.Header.Revision < lastRevision { - t.Errorf("Revision should never decrease") - } - if resp.Header.Revision == lastRevision && len(resp.Events) != 0 { - 
t.Errorf("Got two non-empty responses about same revision") - } - for _, event := range resp.Events { - if event.Kv.ModRevision != lastRevision+1 { - t.Errorf("Expect revision to grow by 1, last: %d, mod: %d", lastRevision, event.Kv.ModRevision) - } - lastRevision = event.Kv.ModRevision - } - if resp.Header.Revision != lastRevision { - t.Errorf("Expect response revision equal last event mod revision") - } - lastRevision = resp.Header.Revision - } -} - -func toWatchEvents(responses []watchResponse) (events []watchEvent) { - for _, resp := range responses { - for _, event := range resp.Events { - var op model.OperationType - switch event.Type { - case mvccpb.PUT: - op = model.Put - case mvccpb.DELETE: - op = model.Delete - } - events = append(events, watchEvent{ - Time: resp.time, - Revision: event.Kv.ModRevision, - Op: model.EtcdOperation{ - Type: op, - Key: string(event.Kv.Key), - Value: model.ToValueOrHash(string(event.Kv.Value)), - }, - }) - } - } - return events -} - -type watchResponse struct { - clientv3.WatchResponse - time time.Time -} - -type watchEvent struct { - Op model.EtcdOperation - Revision int64 - Time time.Time -} diff --git a/tests/manual/Makefile b/tests/manual/Makefile deleted file mode 100644 index 819f17c70f9..00000000000 --- a/tests/manual/Makefile +++ /dev/null @@ -1,283 +0,0 @@ -TMP_DOCKERFILE:=$(shell mktemp) -GO_VERSION ?= 1.19.5 -TMP_DIR_MOUNT_FLAG = --tmpfs=/tmp:exec -ifdef HOST_TMP_DIR - TMP_DIR_MOUNT_FLAG = --mount type=bind,source=$(HOST_TMP_DIR),destination=/tmp -endif - -# Example: -# make build-docker-test -# make compile-with-docker-test -# make build-docker-static-ip-test -# -# gcloud auth configure-docker -# make push-docker-static-ip-test -# -# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com -# make pull-docker-static-ip-test -# -# make docker-static-ip-test-certs-run -# make docker-static-ip-test-certs-metrics-proxy-run - -build-docker-static-ip-test: - $(info GO_VERSION: $(GO_VERSION)) - @sed 
's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./docker-static-ip/Dockerfile > $(TMP_DOCKERFILE) - docker build \ - --network=host \ - --tag gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \ - --file ./docker-static-ip/Dockerfile \ - $(TMP_DOCKERFILE) - -push-docker-static-ip-test: - $(info GO_VERSION: $(GO_VERSION)) - docker push gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) - -pull-docker-static-ip-test: - $(info GO_VERSION: $(GO_VERSION)) - docker pull gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) - -docker-static-ip-test-certs-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-static-ip/certs,destination=/certs \ - gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" - -docker-static-ip-test-certs-metrics-proxy-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-static-ip/certs-metrics-proxy,destination=/certs-metrics-proxy \ - gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-metrics-proxy/run.sh && rm -rf m*.etcd" - - - -# Example: -# make build-docker-test -# make compile-with-docker-test -# make build-docker-dns-test -# -# gcloud auth configure-docker -# make push-docker-dns-test -# -# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com -# make pull-docker-dns-test -# -# make docker-dns-test-insecure-run -# make docker-dns-test-certs-run -# make docker-dns-test-certs-gateway-run -# make 
docker-dns-test-certs-wildcard-run -# make docker-dns-test-certs-common-name-auth-run -# make docker-dns-test-certs-common-name-multi-run -# make docker-dns-test-certs-san-dns-run - -build-docker-dns-test: - $(info GO_VERSION: $(GO_VERSION)) - @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./docker-dns/Dockerfile > $(TMP_DOCKERFILE) - docker build \ - --network=host \ - --tag gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - --file ./docker-dns/Dockerfile \ - $(TMP_DOCKERFILE) - - docker run \ - --rm \ - --dns 127.0.0.1 \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig etcd.local" - -push-docker-dns-test: - $(info GO_VERSION: $(GO_VERSION)) - docker push gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) - -pull-docker-dns-test: - $(info GO_VERSION: $(GO_VERSION)) - docker pull gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) - -docker-dns-test-insecure-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/insecure,destination=/insecure \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /insecure/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs,destination=/certs \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-gateway-run: - $(info GO_VERSION: 
$(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-gateway,destination=/certs-gateway \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-wildcard-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-wildcard,destination=/certs-wildcard \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-common-name-auth-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-auth,destination=/certs-common-name-auth \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-common-name-auth/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-common-name-multi-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-multi,destination=/certs-common-name-multi \ - 
gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-common-name-multi/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-san-dns-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-san-dns,destination=/certs-san-dns \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-san-dns/run.sh && rm -rf m*.etcd" - - -# Example: -# make build-docker-test -# make compile-with-docker-test -# make build-docker-dns-srv-test -# gcloud auth configure-docker -# make push-docker-dns-srv-test -# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com -# make pull-docker-dns-srv-test -# make docker-dns-srv-test-certs-run -# make docker-dns-srv-test-certs-gateway-run -# make docker-dns-srv-test-certs-wildcard-run - -build-docker-dns-srv-test: - $(info GO_VERSION: $(GO_VERSION)) - @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' > $(TMP_DOCKERFILE) - docker build \ - --network=host \ - --tag gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - --file ./docker-dns-srv/Dockerfile \ - $(TMP_DOCKERFILE) - - docker run \ - --rm \ - --dns 127.0.0.1 \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig +noall +answer SRV _etcd-client-ssl._tcp.etcd.local && dig +noall +answer SRV _etcd-server-ssl._tcp.etcd.local && dig +noall +answer m1.etcd.local m2.etcd.local m3.etcd.local" - -push-docker-dns-srv-test: - $(info GO_VERSION: $(GO_VERSION)) - docker push gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) - -pull-docker-dns-srv-test: - $(info GO_VERSION: $(GO_VERSION)) - docker pull 
gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) - -docker-dns-srv-test-certs-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs,destination=/certs \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" - -docker-dns-srv-test-certs-gateway-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-gateway,destination=/certs-gateway \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd" - -docker-dns-srv-test-certs-wildcard-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-wildcard,destination=/certs-wildcard \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd" diff --git a/tests/manual/docker-dns-srv/Dockerfile b/tests/manual/docker-dns-srv/Dockerfile deleted file mode 100644 index dbc3f4bdc69..00000000000 --- a/tests/manual/docker-dns-srv/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -FROM ubuntu:18.04 - -RUN rm /bin/sh && ln -s /bin/bash /bin/sh -RUN echo 'debconf debconf/frontend select Noninteractive' | 
debconf-set-selections - -RUN apt-get -y update \ - && apt-get -y install \ - build-essential \ - gcc \ - apt-utils \ - pkg-config \ - software-properties-common \ - apt-transport-https \ - libssl-dev \ - sudo \ - bash \ - curl \ - tar \ - git \ - netcat \ - bind9 \ - dnsutils \ - && apt-get -y update \ - && apt-get -y upgrade \ - && apt-get -y autoremove \ - && apt-get -y autoclean - -ENV GOROOT /usr/local/go -ENV GOPATH /go -ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} -ENV GO_VERSION REPLACE_ME_GO_VERSION -ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang -RUN rm -rf ${GOROOT} \ - && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ - && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ - && go version \ - && go get -v -u github.com/mattn/goreman - -RUN mkdir -p /var/bind /etc/bind -RUN chown root:bind /var/bind /etc/bind - -ADD named.conf etcd.zone rdns.zone /etc/bind/ -RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone -ADD resolv.conf /etc/resolv.conf diff --git a/tests/manual/docker-dns-srv/certs-gateway/Procfile b/tests/manual/docker-dns-srv/certs-gateway/Procfile deleted file mode 100644 index 7e3c3d9368a..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/Procfile +++ /dev/null @@ -1,7 +0,0 @@ -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 
--data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -gateway: ./etcd gateway start --discovery-srv etcd.local --trusted-ca-file /certs-gateway/ca.crt --listen-addr 127.0.0.1:23790 diff --git a/tests/manual/docker-dns-srv/certs-gateway/ca-csr.json b/tests/manual/docker-dns-srv/certs-gateway/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git 
a/tests/manual/docker-dns-srv/certs-gateway/ca.crt b/tests/manual/docker-dns-srv/certs-gateway/ca.crt deleted file mode 100644 index 19b26c45551..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIUbQA3lX1hcR1W8D5wmmAwaLp4AWQwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTI5MDBaFw0yNzExMjkxOTI5 -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDdZjG+dJixdUuZLIlPVE/qvqNqbgIQy3Hrgq9OlPevLu3FAKIgTHoSKugq -jOuBjzAtmbGTky3PPmkjWrOUWKEUYMuJJzXA1fO2NALXle47NVyVVfuwCmDnaAAL -Sw4QTZKREoe3EwswbeYguQinCqazRwbXMzzfypIfaHAyGrqFCq12IvarrjfDcamm -egtPkxNNdj1QHbkeYXcp76LOSBRjD2B3bzZvyVv/wPORaGTFXQ0feGz/93/Y/E0z -BL5TdZ84qmgKxW04hxkhhuuxsL5zDNpbXcGm//Zw9qzO/AvtEux6ag9t0JziiEtj -zLz5M7yXivfG4oxEeLKTieS/1ZkbAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBR7XtZP3fc6ElgHl6hdSHLmrFWj -MzANBgkqhkiG9w0BAQsFAAOCAQEAPy3ol3CPyFxuWD0IGKde26p1mT8cdoaeRbOa -2Z3GMuRrY2ojaKMfXuroOi+5ZbR9RSvVXhVX5tEMOSy81tb5OGPZP24Eroh4CUfK -bw7dOeBNCm9tcmHkV+5frJwOgjN2ja8W8jBlV1flLx+Jpyk2PSGun5tQPsDlqzor -E8QQ2FzCzxoGiEpB53t5gKeX+mH6gS1c5igJ5WfsEGXBC4xJm/u8/sg30uCGP6kT -tCoQ8gnvGen2OqYJEfCIEk28/AZJvJ90TJFS3ExXJpyfImK9j5VcTohW+KvcX5xF -W7M6KCGVBQtophobt3v/Zs4f11lWck9xVFCPGn9+LI1dbJUIIQ== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns-srv/certs-gateway/gencert.json b/tests/manual/docker-dns-srv/certs-gateway/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - 
], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns-srv/certs-gateway/gencerts.sh b/tests/manual/docker-dns-srv/certs-gateway/gencerts.sh deleted file mode 100755 index af8663e09eb..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: *.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns-srv/certs-gateway/run.sh b/tests/manual/docker-dns-srv/certs-gateway/run.sh deleted file mode 100755 index d70cb1e7617..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/run.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs-gateway/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --discovery-srv etcd.local \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --discovery-srv etcd.local \ - put abc def - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --discovery-srv etcd.local \ - get abc - -./etcdctl \ - 
--cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --endpoints=127.0.0.1:23790 \ - put ghi jkl - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --endpoints=127.0.0.1:23790 \ - get ghi diff --git a/tests/manual/docker-dns-srv/certs-gateway/server-ca-csr.json b/tests/manual/docker-dns-srv/certs-gateway/server-ca-csr.json deleted file mode 100644 index 72bd3808288..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/server-ca-csr.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "hosts": [ - "m1.etcd.local", - "m2.etcd.local", - "m3.etcd.local", - "etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns-srv/certs-gateway/server.crt b/tests/manual/docker-dns-srv/certs-gateway/server.crt deleted file mode 100644 index ef591cc7cc9..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/server.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIENTCCAx2gAwIBAgIUcviGEkA57QgUUFUIuB23kO/jHWIwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTI5MDBaFw0yNzExMjkxOTI5 -MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL6rB1Kh08Fo -FieWqzB4WvKxSFjLWlNfAXbSC1IEPEc/2JOSTF/VfsEX7Xf4eDlTUIZ/TpMS4nUE -Jn0rOIxDJWieQgF99a88CKCwVeqyiQ1iGlI/Ls78P7712QJ1QvcYPBRCvAFo2VLg -TSNhq4taRtAnP690TJVKMSxHg7qtMIpiBLc8ryNbtNUkQHl7/puiBZVVFwHQZm6d -ZRkfMqXWs4+VKLTx0pqJaM0oWVISQlLWQV83buVsuDVyLAZu2MjRYZwBj9gQwZDO 
-15VGvacjMU+l1+nLRuODrpGeGlxwfT57jqipbUtTsoZFsGxPdIWn14M6Pzw/mML4 -guYLKv3UqkkCAwEAAaOB1TCB0jAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI -KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFKYKYVPu -XPnZ2j0NORiNPUJpBnhkMB8GA1UdIwQYMBaAFHte1k/d9zoSWAeXqF1IcuasVaMz -MFMGA1UdEQRMMEqCDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0 -Y2QubG9jYWyCCmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0B -AQsFAAOCAQEAK40lD6Nx/V6CaShL95fQal7mFp/LXiyrlFTqCqrCruVnntwpukSx -I864bNMxVSTStEA3NM5V4mGuYjRvdjS65LBhaS1MQDPb4ofPj0vnxDOx6fryRIsB -wYKDuT4LSQ7pV/hBfL/bPb+itvb24G4/ECbduOprrywxmZskeEm/m0WqUb1A08Hv -6vDleyt382Wnxahq8txhMU+gNLTGVne60hhfLR+ePK7MJ4oyk3yeUxsmsnBkYaOu -gYOak5nWzRa09dLq6/vHQLt6n0AB0VurMAjshzO2rsbdOkD233sdkvKiYpayAyEf -Iu7S5vNjP9jiUgmws6G95wgJOd2xv54D4Q== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns-srv/certs-gateway/server.key.insecure b/tests/manual/docker-dns-srv/certs-gateway/server.key.insecure deleted file mode 100644 index 623457b5dab..00000000000 --- a/tests/manual/docker-dns-srv/certs-gateway/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAvqsHUqHTwWgWJ5arMHha8rFIWMtaU18BdtILUgQ8Rz/Yk5JM -X9V+wRftd/h4OVNQhn9OkxLidQQmfSs4jEMlaJ5CAX31rzwIoLBV6rKJDWIaUj8u -zvw/vvXZAnVC9xg8FEK8AWjZUuBNI2Gri1pG0Cc/r3RMlUoxLEeDuq0wimIEtzyv -I1u01SRAeXv+m6IFlVUXAdBmbp1lGR8ypdazj5UotPHSmolozShZUhJCUtZBXzdu -5Wy4NXIsBm7YyNFhnAGP2BDBkM7XlUa9pyMxT6XX6ctG44OukZ4aXHB9PnuOqKlt -S1OyhkWwbE90hafXgzo/PD+YwviC5gsq/dSqSQIDAQABAoIBAEAOsb0fRUdbMuZG -BmmYZeXXjdjXKReNea5zzv3VEnNVjeu2YRZpYdZ5tXxy6+FGjm1BZCKhW5e4tz2i -QbNN88l8MezSZrJi1vs1gwgAx27JoNI1DALaWIhNjIT45HCjobuk2AkZMrpXRVM3 -wyxkPho8tXa6+efGL1MTC7yx5vb2dbhnEsjrPdUO0GLVP56bgrz7vRk+hE772uq2 -QDenZg+PcH+hOhptbY1h9CYotGWYXCpi0+yoHhsh5PTcEpyPmLWSkACsHovm3MIn -a5oU0uh28nVBfYE0Sk6I9XBERHVO/OrCvz4Y3ZbVyGpCdLcaMB5wI1P4a5ULV52+ -VPrALQkCgYEA+w85KYuL+eUjHeMqa8V8A9xgcl1+dvB8SXgfRRm5QTqxgetzurD9 -G7vgMex42nqgoW1XUx6i9roRk3Qn3D2NKvBJcpMohYcY3HcGkCsBwtNUCyOWKasS 
-Oj2q9LzPjVqTFII0zzarQ85XuuZyTRieFAMoYmsS8O/GcapKqYhPIDMCgYEAwmuR -ctnCNgoEj1NaLBSAcq7njONvYUFvbXO8BCyd1WeLZyz/krgXxuhQh9oXIccWAKX2 -uxIDaoWV8F5c8bNOkeebHzVHfaLpwl4IlLa/i5WTIc+IZmpBR0aiS021k/M3KkDg -KnQXAer6jEymT3lUL0AqZd+GX6DjFw61zPOFH5MCgYAnCiv6YN/IYTA/woZjMddi -Bk/dGNrEhgrdpdc++IwNL6JQsJtTaZhCSsnHGZ2FY9I8p/MPUtFGipKXGlXkcpHU -Hn9dWLLRaLud9MhJfNaORCxqewMrwZVZByPhYMbplS8P3lt16WtiZODRiGo3wN87 -/221OC8+1hpGrJNln3OmbwKBgDV8voEoY4PWcba0qcQix8vFTrK2B3hsNimYg4tq -cum5GOMDwDQvLWttkmotl9uVF/qJrj19ES+HHN8KNuvP9rexTj3hvI9V+JWepSG0 -vTG7rsTIgbAbX2Yqio/JC0Fu0ihvvLwxP/spGFDs7XxD1uNA9ekc+6znaFJ5m46N -GHy9AoGBAJmGEv5+rM3cucRyYYhE7vumXeCLXyAxxaf0f7+1mqRVO6uNGNGbNY6U -Heq6De4yc1VeAXUpkGQi/afPJNMU+fy8paCjFyzID1yLvdtFOG38KDbgMmj4t+cH -xTp2RT3MkcCWPq2+kXZeQjPdesPkzdB+nA8ckaSursV908n6AHcM ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns-srv/certs-wildcard/Procfile b/tests/manual/docker-dns-srv/certs-wildcard/Procfile deleted file mode 100644 index 3d5dc6eaee3..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/Procfile +++ /dev/null @@ -1,5 +0,0 @@ -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 
--initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr diff --git a/tests/manual/docker-dns-srv/certs-wildcard/ca-csr.json b/tests/manual/docker-dns-srv/certs-wildcard/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns-srv/certs-wildcard/ca.crt b/tests/manual/docker-dns-srv/certs-wildcard/ca.crt deleted file mode 100644 index c89d6531c94..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIUWzsBehxAkgLLYBUZEUpSjHkIaMowDQYJKoZIhvcNAQEL 
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTUxODAyMDBaFw0yNzExMTMxODAy -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQCxjHVNtcCSCz1w9AiN7zAql0ZsPN6MNQWJ2j3iPCvmy9oi0wqSfYXTs+xw -Y4Q+j0dfA54+PcyIOSBQCZBeLLIwCaXN+gLkMxYEWCCVgWYUa6UY+NzPKRCfkbwG -oE2Ilv3R1FWIpMqDVE2rLmTb3YxSiw460Ruv4l16kodEzfs4BRcqrEiobBwaIMLd -0rDJju7Q2TcioNji+HFoXV2aLN58LDgKO9AqszXxW88IKwUspfGBcsA4Zti/OHr+ -W+i/VxsxnQSJiAoKYbv9SkS8fUWw2hQ9SBBCKqE3jLzI71HzKgjS5TiQVZJaD6oK -cw8FjexOELZd4r1+/p+nQdKqwnb5AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRLfPxmhlZix1eTdBMAzMVlAnOV -gTANBgkqhkiG9w0BAQsFAAOCAQEAeT2NfOt3WsBLUVcnyGMeVRQ0gXazxJXD/Z+3 -2RF3KClqBLuGmPUZVl0FU841J6hLlwNjS33mye7k2OHrjJcouElbV3Olxsgh/EV0 -J7b7Wf4zWYHFNZz/VxwGHunsEZ+SCXUzU8OiMrEcHkOVzhtbC2veVPJzrESqd88z -m1MseGW636VIcrg4fYRS9EebRPFvlwfymMd+bqLky9KsUbjNupYd/TlhpAudrIzA -wO9ZUDb/0P44iOo+xURCoodxDTM0vvfZ8eJ6VZ/17HIf/a71kvk1oMqEhf060nmF -IxnbK6iUqqhV8DLE1869vpFvgbDdOxP7BeabN5FXEnZFDTLDqg== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns-srv/certs-wildcard/gencert.json b/tests/manual/docker-dns-srv/certs-wildcard/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns-srv/certs-wildcard/gencerts.sh b/tests/manual/docker-dns-srv/certs-wildcard/gencerts.sh deleted file mode 100755 index af8663e09eb..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! 
[[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: *.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns-srv/certs-wildcard/run.sh b/tests/manual/docker-dns-srv/certs-wildcard/run.sh deleted file mode 100755 index cc4000ef009..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/run.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs-wildcard/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-wildcard/ca.crt \ - --cert=/certs-wildcard/server.crt \ - --key=/certs-wildcard/server.key.insecure \ - --discovery-srv etcd.local \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs-wildcard/ca.crt \ - --cert=/certs-wildcard/server.crt \ - --key=/certs-wildcard/server.key.insecure \ - --discovery-srv etcd.local \ - put abc def - -./etcdctl \ - --cacert=/certs-wildcard/ca.crt \ - --cert=/certs-wildcard/server.crt \ - --key=/certs-wildcard/server.key.insecure \ - --discovery-srv etcd.local \ - get abc diff --git a/tests/manual/docker-dns-srv/certs-wildcard/server-ca-csr.json b/tests/manual/docker-dns-srv/certs-wildcard/server-ca-csr.json deleted file mode 100644 index fd9adae03eb..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/server-ca-csr.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 
2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "hosts": [ - "*.etcd.local", - "etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns-srv/certs-wildcard/server.crt b/tests/manual/docker-dns-srv/certs-wildcard/server.crt deleted file mode 100644 index 385f0321ca8..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEFjCCAv6gAwIBAgIUCIUuNuEPRjp/EeDBNHipRI/qoAcwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTUxODAyMDBaFw0yNzExMTMxODAy -MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMzoOebyKdXF -5QiVs0mB3cVqMRgRoRGWt9emIOsYCX89SBaRNOIAByop98Vb1GmUDNDv1qR4Oq+m -4JlWhgZniABWpekFw8mpN8wMIT86DoNnTe64ouLkDQRZDYOBO9I2+r4EuschRxNs -+Hh5W9JzX/eOomnOhaZfTp6EaxczRHnVmgkWuFUnacfUf7W2FE/HAYfjYpvXw5/+ -eT9AW+Jg/b9SkyU9XKEpWZT7NMqF9OXDXYdxHtRNTGxasLEqPZnG58mqR2QFU2me -/motY24faZpHo8i9ASb03Vy6xee2/FlS6cj2POCGQx3oLZsiQdgIOva7JrQtRsCn -e5P0Wk4qk+cCAwEAAaOBtjCBszAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI -KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFCI+fP2T -xgvJG68Xdgamg4lzGRX1MB8GA1UdIwQYMBaAFEt8/GaGVmLHV5N0EwDMxWUCc5WB -MDQGA1UdEQQtMCuCDCouZXRjZC5sb2NhbIIKZXRjZC5sb2NhbIIJbG9jYWxob3N0 -hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQASub3+YZAXJ8x8b55Hl7FkkIt+rML1 -LdgPHsolNntNXeSqVJ4oi4KvuaM0ueFf/+AlTusTAbXWbi/qiG5Tw24xyzY6NGgV -/vCs56YqNlFyr3bNp1QJlnV3JQ4d3KqosulJ5jk+InhjAKJKomMH01pYhhStRAKg -1fNwSyD34oyZpSQL0Z7X7wdaMGdOmzxwE99EG6jmYl/P7MiP6rC0WP1elIF4sCGM -jY6oewvIMj0zWloBf/NlzrcY7VKpPqvBnV65Tllyo5n4y1sc8y2uzgJO/QnVKqhp -Sdd/74mU8dSh3ALSOqkbmIBhqig21jP7GBgNCNdmsaR2LvPI97n1PYE7 ------END CERTIFICATE----- diff --git 
a/tests/manual/docker-dns-srv/certs-wildcard/server.key.insecure b/tests/manual/docker-dns-srv/certs-wildcard/server.key.insecure deleted file mode 100644 index 2b6595fa880..00000000000 --- a/tests/manual/docker-dns-srv/certs-wildcard/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAzOg55vIp1cXlCJWzSYHdxWoxGBGhEZa316Yg6xgJfz1IFpE0 -4gAHKin3xVvUaZQM0O/WpHg6r6bgmVaGBmeIAFal6QXDyak3zAwhPzoOg2dN7rii -4uQNBFkNg4E70jb6vgS6xyFHE2z4eHlb0nNf946iac6Fpl9OnoRrFzNEedWaCRa4 -VSdpx9R/tbYUT8cBh+Nim9fDn/55P0Bb4mD9v1KTJT1coSlZlPs0yoX05cNdh3Ee -1E1MbFqwsSo9mcbnyapHZAVTaZ7+ai1jbh9pmkejyL0BJvTdXLrF57b8WVLpyPY8 -4IZDHegtmyJB2Ag69rsmtC1GwKd7k/RaTiqT5wIDAQABAoIBAF0nTfuCKCa5WtA2 -TlWippGzHzKUASef32A4dEqsmNSxpW4tAV+lJ5yxi6S7hKui1Ni/0FLhHbzxHrZX -MYMD2j5dJfvz1Ph+55DqCstVt3dhpXpbkiGYD5rkaVJZlDqTKBbuy4LvzAI2zhbn -BSl9rik7PPbhHr1uIq3KAW2Arya7dlpPZiEX04Dg9xqZvxZkxt9IM25E+uzTWKSR -v5BRmijWiGJ6atujgmP7KcYtgBC5EDR9yZf2uK+hnsKEcH94TUkTnJriTcOCKVbb -isAuzsxStLpmyibfiLXD55aYjzr7KRVzQpoVXGJ4vJfs7lTxqxXBjUIsBJMPBcck -ATabIcECgYEA8C8JeKPmcA4KaGFSusF5OsXt4SM9jz5Kr7larA+ozuuR/z0m4pnx -AdjwQiGlhXaMtyziZ7Uwx+tmfnJDijpE/hUnkcAIKheDLXB/r1VpJdj/mqXtK49Y -mnOxV66TcWAmXav31TgmLVSj0SYLGEnvV4MPbgJroMg3VO7LnNbNL7cCgYEA2maB -Edbn4pJqUjVCZG68m0wQHmFZFOaoYZLeR3FgH+PQYIzUj96TP9XFpOwBmYAl2jiM -kQZ3Q6VQY37rwu0M+2BVFkQFnFbelH5jXbHDLdoeFDGCRnJkH2VG1kE/rPfzVsiz -NFDJD+17kPw3tTdHwDYGHwxyNuEoBQw3q6hfXVECgYBEUfzttiGMalIHkveHbBVh -5H9f9ThDkMKJ7b2fB+1KvrOO2QRAnO1zSxQ8m3mL10b7q+bS/TVdCNbkzPftT9nk -NHxG90rbPkjwGfoYE8GPJITApsYqB+J6PMKLYHtMWr9PEeWzXv9tEZBvo9SwGgfc -6sjuz/1xhMJIhIyilm9TTQKBgHRsYDGaVlK5qmPYcGQJhBFlItKPImW579jT6ho7 -nfph/xr49/cZt3U4B/w6sz+YyJTjwEsvHzS4U3o2lod6xojaeYE9EaCdzllqZp3z -vRAcThyFp+TV5fm2i2R7s+4I33dL1fv1dLlA57YKPcgkh+M26Vxzzg7jR+oo8SRY -xT2BAoGBAKNR60zpSQZ2SuqEoWcj1Nf+KloZv2tZcnsHhqhiugbYhZOQVyTCNipa -Ib3/BGERCyI7oWMk0yTTQK4wg3+0EsxQX10hYJ5+rd4btWac7G/tjo2+BSaTnWSW -0vWM/nu33Pq0JHYIo0q0Jee0evTgizqH9UJ3wI5LG29LKwurXxPW ------END RSA PRIVATE KEY----- diff --git 
a/tests/manual/docker-dns-srv/certs/Procfile b/tests/manual/docker-dns-srv/certs/Procfile deleted file mode 100644 index 9be48cb8718..00000000000 --- a/tests/manual/docker-dns-srv/certs/Procfile +++ /dev/null @@ -1,11 +0,0 @@ -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt 
--key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd4: ./etcd --name m4 --data-dir /tmp/m4.data --listen-client-urls https://127.0.0.1:13791 --advertise-client-urls https://m4.etcd.local:13791 --listen-peer-urls https://127.0.0.1:13880 --initial-advertise-peer-urls=https://m1.etcd.local:13880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd5: ./etcd --name m5 --data-dir /tmp/m5.data --listen-client-urls https://127.0.0.1:23791 --advertise-client-urls https://m5.etcd.local:23791 --listen-peer-urls https://127.0.0.1:23880 --initial-advertise-peer-urls=https://m5.etcd.local:23880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd6: ./etcd --name m6 --data-dir /tmp/m6.data --listen-client-urls https://127.0.0.1:33791 --advertise-client-urls https://m6.etcd.local:33791 --listen-peer-urls https://127.0.0.1:33880 --initial-advertise-peer-urls=https://m6.etcd.local:33880 --initial-cluster-token tkn --discovery-srv=etcd.local --discovery-srv-name=c1 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure 
--trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr diff --git a/tests/manual/docker-dns-srv/certs/ca-csr.json b/tests/manual/docker-dns-srv/certs/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns-srv/certs/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns-srv/certs/ca.crt b/tests/manual/docker-dns-srv/certs/ca.crt deleted file mode 100644 index ebe259d0bfc..00000000000 --- a/tests/manual/docker-dns-srv/certs/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDrjCCApagAwIBAgIUb8ICEcp5me1o5zF4mh4GKnf57hUwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODExMDkxNzQ2MDBaFw0yODExMDYxNzQ2 -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDEBKTfgg0MFy62Sslp8nJPLknl+qTO8ohan80CealThTMuRoGMYpXha0sx -d+mv13sm+vRwEMaRU0FTmxtE9nrM/DNfRoeDd+ZW+Q/hNRuQ0mf0xvmY/h25M+It -uaDbAD3m+UhmOCC1nzdwyBOxm4DQONMwMGtfCOZ8OkIVsKkubx3/pgRB/LdJZRdL -1KWGucjMFxEaTGdwAIxdRyPS9pIX9g+B3zC7T3sYk7YbCGyvi1KLVR45Lm1MPcFY -Gy3hU+CVHiljT6+87N+c98lv8wjnTFJXDkouLm6CxyxGgfGop8fHzpMpGcNmcN5t -Yb3exRWn9u9BfNVH1YEOfiRVB+ylAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQe5E9CeqoDpGgJ1u++mp72Ajvt6DAN -BgkqhkiG9w0BAQsFAAOCAQEAUCj9oKV43RyjvcqKSs00mFKctHZih4Kf0HWGC47M -ny8c/FzCcC66q9TZx1vuf2PHkLsY8Z8f7Rjig2G6hbPKwU05JSFzKCwJhnRSxX4f -ELDqQXbidlQ6wOcj2zoLSVC6WIjVmLyXCu0Zrcp+YwHyGb5x7SQcA1wNmJKOba+h 
-ooXl5Ea4R1bxK+43lB2bsFovJVhS+6iyBih6oMlLycaSu6c5X38i0mcxQu6Ul/Ua -I8nW1cAXnQC53VzQGkhfxnvWsc98XU/NzF778EaLwLECE7R4zkHWKSUktge1x+co -bRXtQ/C7BoEVaTmQnl211O3rA8gnZ0cmmNBO1S0hIiZIBQ== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns-srv/certs/gencert.json b/tests/manual/docker-dns-srv/certs/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns-srv/certs/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns-srv/certs/gencerts.sh b/tests/manual/docker-dns-srv/certs/gencerts.sh deleted file mode 100755 index af8663e09eb..00000000000 --- a/tests/manual/docker-dns-srv/certs/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: *.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns-srv/certs/run.sh b/tests/manual/docker-dns-srv/certs/run.sh deleted file mode 100755 index d70f355baeb..00000000000 --- a/tests/manual/docker-dns-srv/certs/run.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m{1,2,3,4,5,6}.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs/ca.crt \ - 
--cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --discovery-srv etcd.local \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --discovery-srv etcd.local \ - put abc def - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --discovery-srv etcd.local \ - get abc - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --discovery-srv etcd.local \ - --discovery-srv-name c1 \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --discovery-srv etcd.local \ - --discovery-srv-name c1 \ - put ghi jkl - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --discovery-srv etcd.local \ - --discovery-srv-name c1 \ - get ghi diff --git a/tests/manual/docker-dns-srv/certs/server-ca-csr.json b/tests/manual/docker-dns-srv/certs/server-ca-csr.json deleted file mode 100644 index 661de379991..00000000000 --- a/tests/manual/docker-dns-srv/certs/server-ca-csr.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "hosts": [ - "m1.etcd.local", - "m2.etcd.local", - "m3.etcd.local", - "m4.etcd.local", - "m5.etcd.local", - "m6.etcd.local", - "etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns-srv/certs/server.crt b/tests/manual/docker-dns-srv/certs/server.crt deleted file mode 100644 index 83c2fd9d42c..00000000000 --- a/tests/manual/docker-dns-srv/certs/server.crt +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEZTCCA02gAwIBAgIULBrfr3JYYypJkYr+LK0oWAqHsCowDQYJKoZIhvcNAQEL 
-BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODExMDkxNzQ3MDBaFw0yODExMDYxNzQ3 -MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALBFVoY2gbx/ -z9ciHrH6LzxIwIDmeVbOIMyooTun3iCtM8OjSkw15fl6WvM0KLKb6D2B+N7MLGa8 -T+KqKHIrzCudK21WGV8g5Pwc56fjRT796zQsyMjcjMlf9AEtP4ZdY4aap4r0d28m -ZiUx9hccUtC6b0AFVgBuHjGNw4Ym6zmz38ZWEfnJ/R71uccmQpB5CoOZ7dN1bCJa -gZqaWwRCYNG5XAQD2GMcn6r7oFijhlVO99auT04Et2lpoOzg2P4a8pPGgzsUCFOP -WnuqNh78p61AHnEpUM0eLzzENFAmSSzwMr9jFkNF4gMgLrn0t3M1JUrbzXWIk9EX -5G6pafkxXlkCAwEAAaOCAQQwggEAMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAU -BggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUtaRC -6qucn6KvF+/u/esMahneemswHwYDVR0jBBgwFoAUHuRPQnqqA6RoCdbvvpqe9gI7 -7egwgYAGA1UdEQR5MHeCDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0z -LmV0Y2QubG9jYWyCDW00LmV0Y2QubG9jYWyCDW01LmV0Y2QubG9jYWyCDW02LmV0 -Y2QubG9jYWyCCmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0B -AQsFAAOCAQEAFfdWclzi+J7vI0p/7F0UaJ54JkRx4zR9+qcmDHqRbLdiGOYsTiDq -AudryZjbCsl8koj9k1f7MvDGSIQpCp3jyAJpv/NE9NxuSagDO3vIvAuKfox2HcHV -RPyoo6igp9FY6F8af0h7CyCXgX0+4PFaLnyJgpQ3tV4jCKduyjCYkAiC1QwoNB8H -wZEw0zlyFml/5GlQoqtjJyZ7JFIJhrFIUbRIFO7gZZSIipsON7teOjA2HvYme33Y -uvx/FWr7GBXqpHUamQqWS6ixWBM/rj0lEViYtuWkitek41YHJuktxKs1+peXPjpb -rYCK5H6Bn/zLKOo2zikqfq41+g/mui3/jQ== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns-srv/certs/server.key.insecure b/tests/manual/docker-dns-srv/certs/server.key.insecure deleted file mode 100644 index 5030a91dc42..00000000000 --- a/tests/manual/docker-dns-srv/certs/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAsEVWhjaBvH/P1yIesfovPEjAgOZ5Vs4gzKihO6feIK0zw6NK -TDXl+Xpa8zQospvoPYH43swsZrxP4qoocivMK50rbVYZXyDk/Bznp+NFPv3rNCzI 
-yNyMyV/0AS0/hl1jhpqnivR3byZmJTH2FxxS0LpvQAVWAG4eMY3DhibrObPfxlYR -+cn9HvW5xyZCkHkKg5nt03VsIlqBmppbBEJg0blcBAPYYxyfqvugWKOGVU731q5P -TgS3aWmg7ODY/hryk8aDOxQIU49ae6o2HvynrUAecSlQzR4vPMQ0UCZJLPAyv2MW -Q0XiAyAuufS3czUlStvNdYiT0Rfkbqlp+TFeWQIDAQABAoIBAHj6LacRc7secVPy -a+S0k4SpXc1Z4L9N2z77ISVjUdVVaiiEQnLJrxuo+RDfpGrpC9xi/p5SvMqJxb4I -EJhDLO5mAS8aH3GljuLlJ6yXE6hm9u0pK2iHzexLeZjxKB8cqzjvnbuFiw7y6Lnw -bzhvTPtKaR4kS2EiMoDKDf5daaWAhaJSlLpVnSW7COrVd12vF8YhKkGeyoXEVrAH -GjdHmpZKI3qzvNJNe8ZQq8VXxMmQs8bryKFO1k7rN6ypMFILYuze7+x+DQ1/Kbee -UoCN6HIja5GGF77ZggDdyDMrcWv0t1ib/6mFV03m+Iv6n2GBOOkrDSi+rRACdRtF -5YRXSQECgYEA1sxrz6w0Etg6VrYsOZGSHKh8b/9agGSCtV2T3Jo4LSndFvzy78s0 -lVSuR6irflnaXdngSl8OyZ3s90egWzVpKTq9VCV/Mwk5cscUYaMSBNw75H1Yxfrq -wyiygL38m/gKZ6L1kjGEdFH80ODr26tQM/npruYRyd33R7/YO6QRJrkCgYEA0hUI -uEiZDdudtpifAODzVvuxptCH1V1sXJLpdy2XSk2PWBKJ+ePHVhkx9sOnIX1m7DuE -RsdFVunUz1jVEy4UYxMwpQK7KxZZB05daiwhUI5qMSuwwOKqQbyjCOBIAlgcRWow -fVbIWbOsza/a9bjgZ3QQFWAbCVxrAuG9Re2mJKECgYBUnZjG6YZl+goZSJBpaUAO -zAyhLg2f0HhxK9joqVQB7qDqwmCNOBaR0RcKoZZVIt5T5FVn1sSDhhPoYa344DR6 -Cmq08ESIfVTFM0mDIPMjOQLbAsnqy+qZULno327YnkCzDM4CdkFAdV/LhR9EnNru -br+wp29Qf4E/IYL0E7Cx+QKBgHpbGdsLHWl+0Zp5xZHjcpbkvRFlPte8M9KvFh79 -hLIX/jbThVzvlzfEMN+CEKNmwD0yZNY8VVxLkFC7ck5bdjBGCvzwXEa6G1wv/iRK -U5TxfVPqGGYfHf5veZ0/03DaFI0xTdCSbNoh1bFujN60sK5QYNWyRczr8L+a7nv9 -79hBAoGBAMytiRzt0hj06ww3oJSQjxwotJ19pnV52p84BQfsGEAgfVRqqADMyn5U -dkpT9q+IADivb1ELNWUl4af2levage/rBnaDzer0ywnl50J0TRu+DJppIGJIi3r4 -IufVehZ6F+pntM+UbMcBxNXr3cLzAaEHoIhyKq0UG4P4Ef3v6DeI ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns-srv/etcd.zone b/tests/manual/docker-dns-srv/etcd.zone deleted file mode 100644 index c80a07f0be8..00000000000 --- a/tests/manual/docker-dns-srv/etcd.zone +++ /dev/null @@ -1,32 +0,0 @@ -$TTL 86400 -@ IN SOA etcdns.local. root.etcdns.local. ( - 100500 ; Serial - 604800 ; Refresh - 86400 ; Retry - 2419200 ; Expire - 86400 ) ; Negative Cache TTL - IN NS ns.etcdns.local. 
- IN A 127.0.0.1 - -ns IN A 127.0.0.1 -m1 IN A 127.0.0.1 -m2 IN A 127.0.0.1 -m3 IN A 127.0.0.1 -m4 IN A 127.0.0.1 -m5 IN A 127.0.0.1 -m6 IN A 127.0.0.1 - -_etcd-client-ssl._tcp IN SRV 0 0 2379 m1.etcd.local. -_etcd-server-ssl._tcp IN SRV 0 0 2380 m1.etcd.local. -_etcd-client-ssl._tcp IN SRV 0 0 22379 m2.etcd.local. -_etcd-server-ssl._tcp IN SRV 0 0 22380 m2.etcd.local. -_etcd-client-ssl._tcp IN SRV 0 0 32379 m3.etcd.local. -_etcd-server-ssl._tcp IN SRV 0 0 32380 m3.etcd.local. - -; discovery-srv-name=c1 -_etcd-client-ssl-c1._tcp IN SRV 0 0 13791 m4.etcd.local. -_etcd-server-ssl-c1._tcp IN SRV 0 0 13880 m4.etcd.local. -_etcd-client-ssl-c1._tcp IN SRV 0 0 23791 m5.etcd.local. -_etcd-server-ssl-c1._tcp IN SRV 0 0 23880 m5.etcd.local. -_etcd-client-ssl-c1._tcp IN SRV 0 0 33791 m6.etcd.local. -_etcd-server-ssl-c1._tcp IN SRV 0 0 33880 m6.etcd.local. diff --git a/tests/manual/docker-dns-srv/named.conf b/tests/manual/docker-dns-srv/named.conf deleted file mode 100644 index 76ce0caa165..00000000000 --- a/tests/manual/docker-dns-srv/named.conf +++ /dev/null @@ -1,23 +0,0 @@ -options { - directory "/var/bind"; - listen-on { 127.0.0.1; }; - listen-on-v6 { none; }; - allow-transfer { - none; - }; - // If you have problems and are behind a firewall: - query-source address * port 53; - pid-file "/var/run/named/named.pid"; - allow-recursion { none; }; - recursion no; -}; - -zone "etcd.local" IN { - type main; - file "/etc/bind/etcd.zone"; -}; - -zone "0.0.127.in-addr.arpa" { - type main; - file "/etc/bind/rdns.zone"; -}; diff --git a/tests/manual/docker-dns-srv/rdns.zone b/tests/manual/docker-dns-srv/rdns.zone deleted file mode 100644 index d129188e400..00000000000 --- a/tests/manual/docker-dns-srv/rdns.zone +++ /dev/null @@ -1,17 +0,0 @@ -$TTL 86400 -@ IN SOA etcdns.local. root.etcdns.local. ( - 100500 ; Serial - 604800 ; Refresh - 86400 ; Retry - 2419200 ; Expire - 86400 ) ; Negative Cache TTL - IN NS ns.etcdns.local. - IN A 127.0.0.1 - -1 IN PTR m1.etcd.local. 
-1 IN PTR m2.etcd.local. -1 IN PTR m3.etcd.local. -1 IN PTR m4.etcd.local. -1 IN PTR m5.etcd.local. -1 IN PTR m6.etcd.local. - diff --git a/tests/manual/docker-dns-srv/resolv.conf b/tests/manual/docker-dns-srv/resolv.conf deleted file mode 100644 index bbc8559cd54..00000000000 --- a/tests/manual/docker-dns-srv/resolv.conf +++ /dev/null @@ -1 +0,0 @@ -nameserver 127.0.0.1 diff --git a/tests/manual/docker-dns/Dockerfile b/tests/manual/docker-dns/Dockerfile deleted file mode 100644 index 76dfe60b2c0..00000000000 --- a/tests/manual/docker-dns/Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -FROM ubuntu:18.04 - -RUN rm /bin/sh && ln -s /bin/bash /bin/sh -RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections - -RUN apt-get -y update \ - && apt-get -y install \ - build-essential \ - gcc \ - apt-utils \ - pkg-config \ - software-properties-common \ - apt-transport-https \ - libssl-dev \ - sudo \ - bash \ - curl \ - tar \ - git \ - netcat \ - bind9 \ - dnsutils \ - lsof \ - && apt-get -y update \ - && apt-get -y upgrade \ - && apt-get -y autoremove \ - && apt-get -y autoclean - -ENV GOROOT /usr/local/go -ENV GOPATH /go -ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} -ENV GO_VERSION REPLACE_ME_GO_VERSION -ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang -RUN rm -rf ${GOROOT} \ - && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ - && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ - && go version \ - && go get -v -u github.com/mattn/goreman - -RUN mkdir -p /var/bind /etc/bind -RUN chown root:bind /var/bind /etc/bind - -ADD named.conf etcd.zone rdns.zone /etc/bind/ -RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone -ADD resolv.conf /etc/resolv.conf diff --git a/tests/manual/docker-dns/certs-common-name-auth/Procfile b/tests/manual/docker-dns/certs-common-name-auth/Procfile deleted file mode 100644 index 2fb95f5fefa..00000000000 --- 
a/tests/manual/docker-dns/certs-common-name-auth/Procfile +++ /dev/null @@ -1,6 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-auth/server.crt --peer-key-file=/certs-common-name-auth/server.key.insecure --peer-trusted-ca-file=/certs-common-name-auth/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name-auth/server.crt --key-file=/certs-common-name-auth/server.key.insecure --trusted-ca-file=/certs-common-name-auth/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-auth/server.crt --peer-key-file=/certs-common-name-auth/server.key.insecure --peer-trusted-ca-file=/certs-common-name-auth/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name-auth/server.crt --key-file=/certs-common-name-auth/server.key.insecure --trusted-ca-file=/certs-common-name-auth/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 
--listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-auth/server.crt --peer-key-file=/certs-common-name-auth/server.key.insecure --peer-trusted-ca-file=/certs-common-name-auth/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn test-common-name --cert-file=/certs-common-name-auth/server.crt --key-file=/certs-common-name-auth/server.key.insecure --trusted-ca-file=/certs-common-name-auth/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr \ No newline at end of file diff --git a/tests/manual/docker-dns/certs-common-name-auth/ca-csr.json b/tests/manual/docker-dns/certs-common-name-auth/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns/certs-common-name-auth/ca.crt b/tests/manual/docker-dns/certs-common-name-auth/ca.crt deleted file mode 100644 index 00faeca22a5..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIUdASu5zT1US/6LPyKmczbC3NgdY4wDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTQwNjIzMDBaFw0yNzExMTIwNjIz -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT 
-ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDBbE44RP/Tk9l7KShzxQAypatoqDJQL32hyw8plZIfni5XFIlG2GwyjNvX -wiP6u0YcsApZKc58ytqcHQqMyk68OTTxcM+HVWvKHMKOBPBYgXeeVnD+7Ixuinq/ -X6RK3n2jEipFgE9FiAXDNICF3ZQz+HVNBSbzwCjBtIcYkinWHX+kgnQkFT1NnmuZ -uloz6Uh7/Ngn/XPNSsoMyLrh4TwDsx/fQEpVcrXMbxWux1xEHmfDzRKvE7VhSo39 -/mcpKBOwTg4jwh9tDjxWX4Yat+/cX0cGxQ7JSrdy14ESV5AGBmesGHd2SoWhZK9l -tWm1Eq0JYWD+Cd5yNrODTUxWRNs9AgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSZMjlLnc7Vv2mxRMebo5ezJ7gt -pzANBgkqhkiG9w0BAQsFAAOCAQEAA2d2nV4CXjp7xpTQrh8sHzSBDYUNr9DY5hej -52X6q8WV0N3QC7Utvv2Soz6Ol72/xoGajIJvqorsIBB5Ms3dgCzPMy3R01Eb3MzI -7KG/4AGVEiAKUBkNSD8PWD7bREnnv1g9tUftE7jWsgMaPIpi6KhzhyJsClT4UsKQ -6Lp+Be80S293LrlmUSdZ/v7FAvMzDGOLd2iTlTr1fXK6YJJEXpk3+HIi8nbUPvYQ -6O8iOtf5QoCm1yMLJQMFvNr51Z1EeF935HRj8U2MJP5jXPW4/UY2TAUBcWEhlNsK -6od+f1B8xGe/6KHvF0C8bg23kj8QphM/E7HCZiVgdm6FNf54AQ== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-common-name-auth/gencert.json b/tests/manual/docker-dns/certs-common-name-auth/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns/certs-common-name-auth/gencerts.sh b/tests/manual/docker-dns/certs-common-name-auth/gencerts.sh deleted file mode 100755 index 09819cf2239..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: m1/m2/m3.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns/certs-common-name-auth/run.sh b/tests/manual/docker-dns/certs-common-name-auth/run.sh deleted file mode 100755 index f46d642ae92..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/run.sh +++ /dev/null @@ -1,255 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs-common-name-auth/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379 \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - put abc def - -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get abc - -sleep 1s && printf "\n" -echo "Step 1. 
creating root role" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - role add root - -sleep 1s && printf "\n" -echo "Step 2. granting readwrite 'foo' permission to role 'root'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - role grant-permission root readwrite foo - -sleep 1s && printf "\n" -echo "Step 3. getting role 'root'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - role get root - -sleep 1s && printf "\n" -echo "Step 4. creating user 'root'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --interactive=false \ - user add root:123 - -sleep 1s && printf "\n" -echo "Step 5. granting role 'root' to user 'root'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - user grant-role root root - -sleep 1s && printf "\n" -echo "Step 6. 
getting user 'root'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - user get root - -sleep 1s && printf "\n" -echo "Step 7. enabling auth" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - auth enable - -sleep 1s && printf "\n" -echo "Step 8. writing 'foo' with 'root:123'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - put foo bar - -sleep 1s && printf "\n" -echo "Step 9. writing 'aaa' with 'root:123'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - put aaa bbb - -sleep 1s && printf "\n" -echo "Step 10. writing 'foo' without 'root:123'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - put foo bar - -sleep 1s && printf "\n" -echo "Step 11. 
reading 'foo' with 'root:123'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - get foo - -sleep 1s && printf "\n" -echo "Step 12. reading 'aaa' with 'root:123'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - get aaa - -sleep 1s && printf "\n" -echo "Step 13. creating a new user 'test-common-name:test-pass'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - --interactive=false \ - user add test-common-name:test-pass - -sleep 1s && printf "\n" -echo "Step 14. creating a role 'test-role'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - role add test-role - -sleep 1s && printf "\n" -echo "Step 15. granting readwrite 'aaa' --prefix permission to role 'test-role'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - role grant-permission test-role readwrite aaa --prefix - -sleep 1s && printf "\n" -echo "Step 16. 
getting role 'test-role'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - role get test-role - -sleep 1s && printf "\n" -echo "Step 17. granting role 'test-role' to user 'test-common-name'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=root:123 \ - user grant-role test-common-name test-role - -sleep 1s && printf "\n" -echo "Step 18. writing 'aaa' with 'test-common-name:test-pass'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=test-common-name:test-pass \ - put aaa bbb - -sleep 1s && printf "\n" -echo "Step 19. writing 'bbb' with 'test-common-name:test-pass'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=test-common-name:test-pass \ - put bbb bbb - -sleep 1s && printf "\n" -echo "Step 20. reading 'aaa' with 'test-common-name:test-pass'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=test-common-name:test-pass \ - get aaa - -sleep 1s && printf "\n" -echo "Step 21. 
reading 'bbb' with 'test-common-name:test-pass'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - --user=test-common-name:test-pass \ - get bbb - -sleep 1s && printf "\n" -echo "Step 22. writing 'aaa' with CommonName 'test-common-name'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - put aaa ccc - -sleep 1s && printf "\n" -echo "Step 23. reading 'aaa' with CommonName 'test-common-name'" -./etcdctl \ - --cacert=/certs-common-name-auth/ca.crt \ - --cert=/certs-common-name-auth/server.crt \ - --key=/certs-common-name-auth/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get aaa diff --git a/tests/manual/docker-dns/certs-common-name-auth/server-ca-csr.json b/tests/manual/docker-dns/certs-common-name-auth/server-ca-csr.json deleted file mode 100644 index 6a57789b1ab..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/server-ca-csr.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "test-common-name", - "hosts": [ - "m1.etcd.local", - "m2.etcd.local", - "m3.etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns/certs-common-name-auth/server.crt b/tests/manual/docker-dns/certs-common-name-auth/server.crt deleted file mode 100644 index b9719b2f013..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/server.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN 
CERTIFICATE----- -MIIERDCCAyygAwIBAgIUO500NxhwBHJsodbGKbo5NsW9/p8wDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTQwNjIzMDBaFw0yNzExMTIwNjIz -MDBaMH0xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTEZMBcGA1UEAxMQdGVzdC1jb21tb24tbmFtZTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAMRvVMj3+5jAhRng4izVm4zrvMBnHNMh2MOFVTp7 -wdhEF2en7pFsKzWgczewil6v4d6QzJpgB9yQzPT2q0SOvetpbqP950y6MdPHAF9D -qZd0+wC+RLdSmK5oQKzgZER/vH3eSbTa1UdwaLBHlT6PiTzGm+gEYL43gr3kle+A -9c7aT9pkJWQFTCSdqwcQopyHEwgrfPHC8Bdn804soG4HtR9Gg/R4xtlu7ir6LTHn -vpPBScaMZDUQ5UNrEMh8TM8/sXG6oxqo86r5wpVQt6vscnTMrTTUqq+Mo/OJnDAf -plaqkWX5NfIJ9tmE2V06hq1/ptQkl714Wb+ske+aJ2Poc/UCAwEAAaOByTCBxjAO -BgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwG -A1UdEwEB/wQCMAAwHQYDVR0OBBYEFEG2hXyVTpxLXTse3fXe0U/g0F8kMB8GA1Ud -IwQYMBaAFJkyOUudztW/abFEx5ujl7MnuC2nMEcGA1UdEQRAMD6CDW0xLmV0Y2Qu -bG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0Y2QubG9jYWyCCWxvY2FsaG9zdIcE -fwAAATANBgkqhkiG9w0BAQsFAAOCAQEADtH0NZBrWfXTUvTa3WDsa/JPBhiPu/kH -+gRxOD5UNeDX9+QAx/gxGHrCh4j51OUx55KylUe0qAPHHZ4vhgD2lCRBqFLYx69m -xRIzVnt5NCruriskxId1aFTZ5pln5KK5tTVkAp04MBHZOgv8giXdRWn+7TtMyJxj -wVGf8R7/bwJGPPJFrLNtN4EWwXv/a2/SEoZd8fkTxzw12TeJ8w1PnkH4Zer+nzNb -dH5f+OIBGGZ2fIWANX5g9JEJvvsxBBL8uoCrFE/YdnD0fLyhoplSOVEIvncQLHd8 -3QoIVQ5GXnreMF9vuuEU5LlSsqd/Zv5mAQNrbEAfAL+QZQsnHY12qQ== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-common-name-auth/server.key.insecure b/tests/manual/docker-dns/certs-common-name-auth/server.key.insecure deleted file mode 100644 index 07417b2552c..00000000000 --- a/tests/manual/docker-dns/certs-common-name-auth/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAxG9UyPf7mMCFGeDiLNWbjOu8wGcc0yHYw4VVOnvB2EQXZ6fu 
-kWwrNaBzN7CKXq/h3pDMmmAH3JDM9ParRI6962luo/3nTLox08cAX0Opl3T7AL5E -t1KYrmhArOBkRH+8fd5JtNrVR3BosEeVPo+JPMab6ARgvjeCveSV74D1ztpP2mQl -ZAVMJJ2rBxCinIcTCCt88cLwF2fzTiygbge1H0aD9HjG2W7uKvotMee+k8FJxoxk -NRDlQ2sQyHxMzz+xcbqjGqjzqvnClVC3q+xydMytNNSqr4yj84mcMB+mVqqRZfk1 -8gn22YTZXTqGrX+m1CSXvXhZv6yR75onY+hz9QIDAQABAoIBABiq+nS6X4gRNSXI -zd5ffMc3m152FHKXH4d+KPPNMsyb0Gyd9CGi+dIkMhPeQaIeaDjw6iDAynvyWyqw -B1X2rvbvKIvDiNZj03oK1YshDh0M/bBcNHjpEG9mfCi5jR3lBKCx14O0r2/nN95b -Puy6TbuqHU4HrrZ0diCuof2Prk6pd0EhQC+C3bZCcoWXOaRTqrMBTT6DdSMQrVKD -eGTXYqCzs/AlGKkOiErKtKWouNpkPpPiba1qp7YWXUasrXqPgPi4d97TmOShGIfc -zXNJT+e2rDX4OEVAJtOt6U2l9QG+PIhpH4P/ZYsvindm4VZBs+Vysrj4xkLgGBBP -ygOfBIECgYEA0IfP9Z9mzvCXiGrkrx2tN/k31cX674P/KwxPgSWM/AdXenYYzsmj -rVcoFx2eCFnBFdPz4BAqEfH70gtsG7OoTmoJSwN6wurIdGcFQwItrghgt9Qp46Dq -AIT9RXSpcB9AjM6p2reCjWcNeBVMrrHU3eaQitCxZbzuxvMMhMs/zzECgYEA8Sak -UhXFtNjxBW6EMNmTpjhShIZmxtPNzTJ5DtmARr8F+SMELp3JGJj/9Bm4TsvqJmGs -j9g/MVvSTjJlOuYPGJ5DBl3egZ5ZlRJx3I2qA4lFFCb71OJzuoR8YdHRlHnhJOu9 -2Jyrki1wrAefby8Fe/+5vswxq2u+Qurjya716AUCgYB+E06ZGzmmLfH/6Vi/wzqC -F+w5FAzGGNECbtv2ogReL/YktRgElgaee45ig2aTd+h0UQQmWL+Gv/3XHU7MZM+C -MTvTHZRwGlD9h3e37q49hRUsr1pwJE6157HU91al0k9NknlBIigNY9vR2VbWW+/u -BUMomkpWz2ax5CqScuvuUQKBgQCE+zYqPe9kpy1iPWuQNKuDQhPfGO6cPjiDK44u -biqa2MRGetTXkBNRCS48QeKtMS3SNJKgUDOo2GXE0W2ZaTxx6vQzEpidCeGEn0NC -yKw0fwIk9spwvt/qvxyIJNhZ9Ev/vDBYvyyt03kKpLl66ocvtfmMCbZqPWQSKs2q -bl0UsQKBgQDDrsPnuVQiv6l0J9VrZc0f5DYZIJmQij1Rcg/fL1Dv2mEpADrH2hkY -HI27Q15dfgvccAGbGXbZt3xi7TCLDDm+Kl9V9bR2e2EhqA84tFryiBZ5XSDRAWPU -UIjejblTgtzrTqUd75XUkNoKvJIGrLApmQiBJRQbcbwtmt2pWbziyQ== ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-common-name-multi/Procfile b/tests/manual/docker-dns/certs-common-name-multi/Procfile deleted file mode 100644 index ac1094c0897..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/Procfile +++ /dev/null @@ -1,6 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data 
--listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-multi/server-1.crt --peer-key-file=/certs-common-name-multi/server-1.key.insecure --peer-trusted-ca-file=/certs-common-name-multi/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-common-name-multi/server-1.crt --key-file=/certs-common-name-multi/server-1.key.insecure --trusted-ca-file=/certs-common-name-multi/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-multi/server-2.crt --peer-key-file=/certs-common-name-multi/server-2.key.insecure --peer-trusted-ca-file=/certs-common-name-multi/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-common-name-multi/server-2.crt --key-file=/certs-common-name-multi/server-2.key.insecure --trusted-ca-file=/certs-common-name-multi/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn 
--initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-common-name-multi/server-3.crt --peer-key-file=/certs-common-name-multi/server-3.key.insecure --peer-trusted-ca-file=/certs-common-name-multi/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-common-name-multi/server-3.crt --key-file=/certs-common-name-multi/server-3.key.insecure --trusted-ca-file=/certs-common-name-multi/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr \ No newline at end of file diff --git a/tests/manual/docker-dns/certs-common-name-multi/ca-csr.json b/tests/manual/docker-dns/certs-common-name-multi/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns/certs-common-name-multi/ca.crt b/tests/manual/docker-dns/certs-common-name-multi/ca.crt deleted file mode 100644 index 2e9b32003df..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/ca.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID0jCCArqgAwIBAgIUd3UZnVmZFo8x9MWWhUrYQvZHLrQwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQCqgFTgSFl+ugXkZuiN5PXp84Zv05crwI5x2ePMnc2/3u1s7cQBvXQGCJcq 
-OwWD7tjcy4K2PDC0DLRa4Mkd8JpwADmf6ojbMH/3a1pXY2B3BJQwmNPFnxRJbDZL -Iti6syWKwyfLVb1KFCU08G+ZrWmGIXPWDiE+rTn/ArD/6WbQI1LYBFJm25NLpttM -mA3HnWoErNGY4Z/AR54ROdQSPL7RSUZBa0Kn1riXeOJ40/05qosR2O/hBSAGkD+m -5Rj+A6oek44zZqVzCSEncLsRJAKqgZIqsBrErAho72irEgTwv4OM0MyOCsY/9erf -hNYRSoQeX+zUvEvgToalfWGt6kT3AgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRDePNja5CK4zUfO5x1vzGvdmUF -CzAfBgNVHSMEGDAWgBRDePNja5CK4zUfO5x1vzGvdmUFCzANBgkqhkiG9w0BAQsF -AAOCAQEAZu0a3B7Ef/z5Ct99xgzPy4z9RwglqPuxk446hBWR5TYT9fzm+voHCAwb -MJEaQK3hvAz47qAjyR9/b+nBw4LRTMxg0WqB+UEEVwBGJxtfcOHx4mJHc3lgVJnR -LiEWtIND7lu5Ql0eOjSehQzkJZhUb4SnXD7yk64zukQQv9zlZYZCHPDAQ9LzR2vI -ii4yhwdWl7iiZ0lOyR4xqPB3Cx/2kjtuRiSkbpHGwWBJLng2ZqgO4K+gL3naNgqN -TRtdOSK3j/E5WtAeFUUT68Gjsg7yXxqyjUFq+piunFfQHhPB+6sPPy56OtIogOk4 -dFCfFAygYNrFKz366KY+7CbpB+4WKA== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-common-name-multi/gencert.json b/tests/manual/docker-dns/certs-common-name-multi/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns/certs-common-name-multi/gencerts.sh b/tests/manual/docker-dns/certs-common-name-multi/gencerts.sh deleted file mode 100755 index b2318fd0865..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/gencerts.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: m1/m2/m3.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr-1.json | cfssljson --bare ./server-1 -mv server-1.pem server-1.crt -mv server-1-key.pem server-1.key.insecure - -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr-2.json | cfssljson --bare ./server-2 -mv server-2.pem server-2.crt -mv server-2-key.pem server-2.key.insecure - -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr-3.json | cfssljson --bare ./server-3 -mv server-3.pem server-3.crt -mv server-3-key.pem server-3.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns/certs-common-name-multi/run.sh b/tests/manual/docker-dns/certs-common-name-multi/run.sh deleted file mode 100755 index 92c4ca0d8df..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/run.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs-common-name-multi/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-common-name-multi/ca.crt \ - --cert=/certs-common-name-multi/server-1.crt \ - --key=/certs-common-name-multi/server-1.key.insecure \ - --endpoints=https://m1.etcd.local:2379 \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs-common-name-multi/ca.crt \ - --cert=/certs-common-name-multi/server-2.crt \ - --key=/certs-common-name-multi/server-2.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ 
- put abc def - -./etcdctl \ - --cacert=/certs-common-name-multi/ca.crt \ - --cert=/certs-common-name-multi/server-3.crt \ - --key=/certs-common-name-multi/server-3.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get abc diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-1.crt b/tests/manual/docker-dns/certs-common-name-multi/server-1.crt deleted file mode 100644 index f10b2727753..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-1.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIUaDLXBmJpHrElwENdnVk9hvAvlKcwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw -MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAOb5CdovL9QCdgsxnCBikTbJko6r5mrF+eA47gDLcVbWrRW5 -d8eZYV1Fyn5qe80O6LB6LKPrRftxyAGABKqIBCHR57E97UsICC4lGycBWaav6cJ+ -7Spkpf8cSSDjjgb4KC6VVPf9MCsHxBYSTfme8JEFE+6KjlG8Mqt2yv/5aIyRYITN -WzXvV7wxS9aOgDdXLbojW9FJQCuzttOPfvINTyhtvUvCM8S61La5ymCdAdPpx1U9 -m5KC23k6ZbkAC8/jcOV+68adTUuMWLefPf9Ww3qMT8382k86gJgQjZuJDGUl3Xi5 -GXmO0GfrMh+v91yiaiqjsJCDp3uVcUSeH7qSkb0CAwEAAaOBqzCBqDAOBgNVHQ8B -Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB -/wQCMAAwHQYDVR0OBBYEFEwLLCuIHilzynJ7DlTrikyhy2TAMB8GA1UdIwQYMBaA -FEN482NrkIrjNR87nHW/Ma92ZQULMCkGA1UdEQQiMCCCDW0xLmV0Y2QubG9jYWyC -CWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAkERnrIIvkZHWsyih -mFNf/JmFHC+0/UAG9Ti9msRlr9j1fh+vBIid3FAIShX0zFXf+AtN/+Bz5SVvQHUT -tm71AK/vER1Ue059SIty+Uz5mNAjwtXy0WaUgSuF4uju7MkYD5yUnSGv1iBfm88a -q+q1Vd5m6PkOCfuyNQQm5RKUiJiO4OS+2F9/JOpyr0qqdQthOWr266CqXuvVhd+Z -oZZn5TLq5GHCaTxfngSqS3TXl55QEGl65SUgYdGqpIfaQt3QKq2dqVg/syLPkTJt 
-GNJVLxJuUIu0PLrfuWynUm+1mOOfwXd8NZVZITUxC7Tl5ecFbTaOzU/4a7Cyssny -Wr3dUg== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-1.key.insecure b/tests/manual/docker-dns/certs-common-name-multi/server-1.key.insecure deleted file mode 100644 index 61f2da4dfa4..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-1.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA5vkJ2i8v1AJ2CzGcIGKRNsmSjqvmasX54DjuAMtxVtatFbl3 -x5lhXUXKfmp7zQ7osHoso+tF+3HIAYAEqogEIdHnsT3tSwgILiUbJwFZpq/pwn7t -KmSl/xxJIOOOBvgoLpVU9/0wKwfEFhJN+Z7wkQUT7oqOUbwyq3bK//lojJFghM1b -Ne9XvDFL1o6AN1ctuiNb0UlAK7O2049+8g1PKG29S8IzxLrUtrnKYJ0B0+nHVT2b -koLbeTpluQALz+Nw5X7rxp1NS4xYt589/1bDeoxPzfzaTzqAmBCNm4kMZSXdeLkZ -eY7QZ+syH6/3XKJqKqOwkIOne5VxRJ4fupKRvQIDAQABAoIBAQCYQsXm6kJqTbEJ -kgutIa0+48TUfqen7Zja4kyrg3HU4DI75wb6MreHqFFj4sh4FoL4i6HP8XIx3wEN -VBo/XOj0bo6BPiSm2MWjvdxXa0Fxa/f6uneYAb+YHEps/vWKzJ6YjuLzlBnj0/vE -3Q5AJzHJOAK6tuY5JYp1lBsggYcVWiQSW6wGQRReU/B/GdFgglL1chqL33Dt11Uv -Y6+oJz/PyqzPLPHcPbhqyQRMOZXnhx+8/+ooq5IojqOHfpa9JQURcHY7isBnpI/G -ZAa8tZctgTqtL4hB1rxDhdq1fS2YC12lxkBZse4jszcm0tYzy2gWmNTH480uo/0J -GOxX7eP1AoGBAO7O+aLhQWrspWQ//8YFbPWNhyscQub+t6WYjc0wn9j0dz8vkhMw -rh5O8uMcZBMDQdq185BcB3aHInw9COWZEcWNIen4ZyNJa5VCN4FY0a2GtFSSGG3f -ilKmQ7cjB950q2jl1AR3t2H7yah+i1ZChzPx+GEe+51LcJZX8mMjGvwjAoGBAPeZ -qJ2W4O2dOyupAfnKpZZclrEBqlyg7Xj85u20eBMUqtaIEcI/u2kaotQPeuaekUH0 -b1ybr3sJBTp3qzHUaNV3iMfgrnbWEOkIV2TCReWQb1Fk93o3gilMIkhGLIhxwWpM -UpQy3JTjGG/Y6gIOs7YnOBGVMA0o+RvouwooU6ifAoGAH6D6H0CGUYsWPLjdP3To -gX1FMciEc+O4nw4dede+1BVM1emPB0ujRBBgywOvnXUI+9atc6k8s84iGyJaU056 -tBeFLl/gCSRoQ1SJ1W/WFY2JxMm0wpig0WGEBnV1TVlWeoY2FoFkoG2gv9hCzCHz -lkWuB+76lFKxjrgHOmoj4NECgYB+COmbzkGQsoh8IPuwe0bu0xKh54cgv4oiHBow -xbyZedu8eGcRyf9L8RMRfw/AdNbcC+Dj8xvQNTdEG8Y5BzaV8tLda7FjLHRPKr/R -ulJ6GJuRgyO2Qqsu+mI5B/+DNOSPh2pBpeJCp5a42GHFylYQUsZnrNlY2ZJ0cnND -KGPtYQKBgQDL30+BB95FtRUvFoJIWwASCp7TIqW7N7RGWgqmsXU0EZ0Mya4dquqG 
-rJ1QuXQIJ+xV060ehwJR+iDUAY2xUg3/LCoDD0rwBzSdh+NEKjOmRNFRtn7WT03Q -264E80r6VTRSN4sWQwAAbd1VF1uGO5tkzZdJGWGhQhvTUZ498dE+9Q== ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-2.crt b/tests/manual/docker-dns/certs-common-name-multi/server-2.crt deleted file mode 100644 index e319fade463..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-2.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIUHXDUS+Vry/Tquc6S6OoaeuGozrEwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw -MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAOO+FsO+6pwpv+5K+VQTYQb0lT0BjnM7Y2qSZIiTGCDp/M0P -yHSed4oTzxBeA9hEytczH/oddAUuSZNgag5sGFVgjFNdiZli4wQqJaMQRodivuUl -ZscqnWwtP3GYVAfg+t/4YdGB+dQRDQvHBl9BRYmUh2ixOA98OXKfNMr+u+3sh5Gy -dwx5ZEBRvgBcRrgCaIMsvVeIzHQBMHrNySAD1bGgm3xGdLeVPhAp24yUKZ5IbN6/ -+5hyCRARtGwLH/1Q/h10Sr5jxQi00eEXH+CNOvcerH6b2II/BxHIcqKd0u36pUfG -0KsY+ia0fvYi510V6Q0FAn45luEjHEk5ITN/LnMCAwEAAaOBqzCBqDAOBgNVHQ8B -Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB -/wQCMAAwHQYDVR0OBBYEFE69SZun6mXZe6cd3Cb2HWrK281MMB8GA1UdIwQYMBaA -FEN482NrkIrjNR87nHW/Ma92ZQULMCkGA1UdEQQiMCCCDW0yLmV0Y2QubG9jYWyC -CWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAI5nHHULV7eUJMsvv -zk1shv826kOwXbMX10iRaf49/r7TWBq0pbPapvf5VXRsZ5wlDrDzjaNstpsaow/j -fhZ1zpU0h1bdifxE+omFSWZjpVM8kQD/yzT34VdyA+P2HuxG8ZTa8r7wTGrooD60 -TjBBM5gFV4nGVe+KbApQ26KWr+P8biKaWe6MM/jAv6TNeXiWReHqyM5v404PZQXK -cIN+fBb8bQfuaKaN1dkOUI3uSHmVmeYc5OGNJ2QKL9Uzm1VGbbM+1BOLhmF53QSm -5m2B64lPKy+vpTcRLN7oW1FHZOKts+1OEaLMCyjWFKFbdcrmJI+AP2IB+V6ODECn -RwJDtA== ------END CERTIFICATE----- diff --git 
a/tests/manual/docker-dns/certs-common-name-multi/server-2.key.insecure b/tests/manual/docker-dns/certs-common-name-multi/server-2.key.insecure deleted file mode 100644 index 57c3e78cb32..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-2.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA474Ww77qnCm/7kr5VBNhBvSVPQGOcztjapJkiJMYIOn8zQ/I -dJ53ihPPEF4D2ETK1zMf+h10BS5Jk2BqDmwYVWCMU12JmWLjBColoxBGh2K+5SVm -xyqdbC0/cZhUB+D63/hh0YH51BENC8cGX0FFiZSHaLE4D3w5cp80yv677eyHkbJ3 -DHlkQFG+AFxGuAJogyy9V4jMdAEwes3JIAPVsaCbfEZ0t5U+ECnbjJQpnkhs3r/7 -mHIJEBG0bAsf/VD+HXRKvmPFCLTR4Rcf4I069x6sfpvYgj8HEchyop3S7fqlR8bQ -qxj6JrR+9iLnXRXpDQUCfjmW4SMcSTkhM38ucwIDAQABAoIBAQCHYF6N2zYAwDyL -/Ns65A4gIVF5Iyy3SM0u83h5St7j6dNRXhltYSlz1ZSXiRtF+paM16IhflKSJdKs -nXpNumm4jpy7jXWWzRZfSmJ3DNyv673H3rS6nZVYUYlOEBubV1wpuK8E5/tG2R/l -KVibVORuBPF9BSNq6RAJF6Q9KrExmvH4MmG/3Y+iYbZgn0OK1WHxzbeMzdI8OO4z -eg4gTKuMoRFt5B4rZmC5QiXGHdnUXRWfy+yPLTH3hfTek4JT98akFNS01Q4UAi9p -5cC3TOqDNiZdAkN83UKhW9TNAc/vJlq6d5oXW5R+yPt+d8yMvEch4KfpYo33j0oz -qB40pdJRAoGBAP8ZXnWXxhzLhZ4o+aKefnsUUJjaiVhhSRH/kGAAg65lc4IEnt+N -nzyNIwz/2vPv2Gq2BpStrTsTNKVSZCKgZhoBTavP60FaszDSM0bKHTWHW7zaQwc0 -bQG6YvvCiP0iwEzXw7S4BhdAl+x/5C30dUZgKMSDFzuBI187h6dQQNZpAoGBAOSL -/MBuRYBgrHIL9V1v9JGDBeawGc3j2D5c56TeDtGGv8WGeCuE/y9tn+LcKQ+bCGyi -qkW+hobro/iaXODwUZqSKaAVbxC7uBLBTRB716weMzrnD8zSTOiMWg/gh+FOnr/4 -ZfcBco2Pmm5qQ3ZKwVk2jsfLhz6ZKwMrjSaO1Zp7AoGBAJZsajPjRHI0XN0vgkyv -Mxv2lbQcoYKZE1JmpcbGZt/OePdBLEHcq/ozq2h98qmHU9FQ9r5zT0QXhiK6W8vD -U5GgFSHsH+hQyHtQZ+YlRmYLJEBPX9j+xAyR0M5uHwNNm6F0VbXaEdViRHOz0mR6 -0zClgUSnnGp9MtN0MgCqJSGJAoGAJYba3Jn+rYKyLhPKmSoN5Wq3KFbYFdeIpUzJ -+GdB1aOjj4Jx7utqn1YHv89YqqhRLM1U2hjbrAG7LdHi2Eh9jbzcOt3qG7xHEEVP -Kxq6ohdfYBean44UdMa+7wZ2KUeoh2r5CyLgtV/UArdOFnlV4Bk2PpYrwdqSlnWr -Op6PcksCgYEA6HmIHLRTGyOUzS82BEcs5an2mzhQ8XCNdYS6sDaYSiDu2qlPukyZ -jons6P4qpOxlP9Cr6DW7px2fUZrEuPUV8fRJOc+a5AtZ5TmV6N1uH/G1rKmmAMCc -jGAmTJW87QguauTpuUto5u6IhyO2CRsYEy8K1A/1HUQKl721faZBIMA= ------END RSA 
PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-3.crt b/tests/manual/docker-dns/certs-common-name-multi/server-3.crt deleted file mode 100644 index 294de533239..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-3.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIURfpNMXGb1/oZVwEWyc0Ofn7IItQwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xODAxMjAwNjAwMDBaFw0yODAxMTgwNjAw -MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBALgCDkDM4qayF6CFt1ZScKR8B+/7qrn1iQ/qYnzRHQ1hlkuS -b3TkQtt7amGAuoD42d8jLYYvHn2Pbmdhn0mtgYZpFfLFCg4O67ZbX54lBHi+yDEh -QhneM9Ovsc42A0EVvabINYtKR6B2YRN00QRXS5R1t+QmclpshFgY0+ITsxlJeygs -wojXthPEfjTQK04JUi5LTHP15rLVzDEd7MguCWdEWRnOu/mSfPHlyz2noUcKuy0M -awsnSMwf+KBwQMLbJhTXtA4MG2FYsm/2en3/oAc8/0Z8sMOX05F+b0MgHl+a31aQ -UHM5ykfDNm3hGQfzjQCx4y4hjDoFxbuXvsey6GMCAwEAAaOBqzCBqDAOBgNVHQ8B -Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB -/wQCMAAwHQYDVR0OBBYEFDMydqyg/s43/dJTMt25zJubI/CUMB8GA1UdIwQYMBaA -FEN482NrkIrjNR87nHW/Ma92ZQULMCkGA1UdEQQiMCCCDW0zLmV0Y2QubG9jYWyC -CWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAVs3VQjgx9CycaWKS -P6EvMtlqOkanJEe3zr69sI66cc2ZhfJ5xK38ox4oYpMOA131WRvwq0hjKhhZoVQ8 -aQ4yALi1XBltuIyEyrTX9GWAMeDzY95MdWKhyI8ps6/OOoXN596g9ZdOdIbZAMT4 -XAXm43WccM2W2jiKCEKcE4afIF8RiMIaFwG8YU8oHtnnNvxTVa0wrpcObtEtIzC5 -RJxzX9bkHCTHTgJog4OPChU4zffn18U/AVJ7MZ8gweVwhc4gGe0kwOJE+mLHcC5G -uoFSuVmAhYrH/OPpZhSDOaCED4dsF5jN25CbR3NufEBFRXBH20ZHNkNvbbBnYCBU -4+Rx5w== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-3.key.insecure b/tests/manual/docker-dns/certs-common-name-multi/server-3.key.insecure deleted file mode 
100644 index f931adb3881..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-3.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAuAIOQMziprIXoIW3VlJwpHwH7/uqufWJD+pifNEdDWGWS5Jv -dORC23tqYYC6gPjZ3yMthi8efY9uZ2GfSa2BhmkV8sUKDg7rtltfniUEeL7IMSFC -Gd4z06+xzjYDQRW9psg1i0pHoHZhE3TRBFdLlHW35CZyWmyEWBjT4hOzGUl7KCzC -iNe2E8R+NNArTglSLktMc/XmstXMMR3syC4JZ0RZGc67+ZJ88eXLPaehRwq7LQxr -CydIzB/4oHBAwtsmFNe0DgwbYViyb/Z6ff+gBzz/Rnyww5fTkX5vQyAeX5rfVpBQ -cznKR8M2beEZB/ONALHjLiGMOgXFu5e+x7LoYwIDAQABAoIBAQCY54RmjprNAHKn -vlXCEpFt7W8/GXcePg2ePxuGMtKcevpEZDPgA4oXDnAxA6J3Z9LMHFRJC8Cff9+z -YqjVtatLQOmvKdMYKYfvqfBD3ujfWVHLmaJvEnkor/flrnZ30BQfkoED9T6d9aDn -ZQwHOm8gt82OdfBSeZhkCIWReOM73622qJhmLWUUY3xEucRAFF6XffOLvJAT87Vu -pXKtCnQxhzxkUsCYNIOeH/pTX+XoLkysFBKxnrlbTeM0cEgWpYMICt/vsUrp6DHs -jygxR1EnT2/4ufe81aFSO4SzUZKJrz8zj4yIyDOR0Mp6FW+xMp8S0fDOywHhLlXn -xQOevmGBAoGBAOMQaWWs2FcxWvLfX95RyWPtkQ+XvmWlL5FR427TlLhtU6EPs0xZ -eeanMtQqSRHlDkatwc0XQk+s30/UJ+5i1iz3shLwtnZort/pbnyWrxkE9pcR0fgr -IklujJ8e8kQHpY75gOLmEiADrUITqvfbvSMsaG3h1VydPNU3JYTUuYmjAoGBAM91 -Atnri0PH3UKonAcMPSdwQ5NexqAD1JUk6KUoX2poXBXO3zXBFLgbMeJaWthbe+dG -Raw/zjBET/oRfDOssh+QTD8TutI9LA2+EN7TG7Kr6NFciz4Q2pioaimv9KUhJx+8 -HH2wCANYgkv69IWUFskF0uDCW9FQVvpepcctCJJBAoGAMlWxB5kJXErUnoJl/iKj -QkOnpI0+58l2ggBlKmw8y6VwpIOWe5ZaL4dg/Sdii1T7lS9vhsdhK8hmuIuPToka -cV13XDuANz99hKV6mKPOrP0srNCGez0UnLKk+aEik3IegVNN/v6BhhdKkRtLCybr -BqERhUpKwf0ZPyq6ZnfBqYECgYEAsiD2YcctvPVPtnyv/B02JTbvzwoB4kNntOgM -GkOgKe2Ro+gNIEq5T5uKKaELf9qNePeNu2jN0gPV6BI7YuNVzmRIE6ENOJfty573 -PVxm2/Nf5ORhatlt2MZC4aiDl4Xv4f/TNth/COBmgHbqngeZyOGHQBWiYQdqp2+9 -SFgSlAECgYEA1zLhxj6f+psM5Gpx56JJIEraHfyuyR1Oxii5mo7I3PLsbF/s6YDR -q9E64GoR5PdgCQlMm09f6wfT61NVwsYrbLlLET6tAiG0eNxXe71k1hUb6aa4DpNQ -IcS3E3hb5KREXUH5d+PKeD2qrf52mtakjn9b2aH2rQw2e2YNkIDV+XA= ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-1.json 
b/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-1.json deleted file mode 100644 index ae9fe36e980..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-1.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "etcd.local", - "hosts": [ - "m1.etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-2.json b/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-2.json deleted file mode 100644 index 5d938fb8a45..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-2.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "etcd.local", - "hosts": [ - "m2.etcd.local", - "127.0.0.1", - "localhost" - ] - } diff --git a/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-3.json b/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-3.json deleted file mode 100644 index 7b8ffcfae9f..00000000000 --- a/tests/manual/docker-dns/certs-common-name-multi/server-ca-csr-3.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "etcd.local", - "hosts": [ - "m3.etcd.local", - "127.0.0.1", - "localhost" - ] - } diff --git a/tests/manual/docker-dns/certs-gateway/Procfile b/tests/manual/docker-dns/certs-gateway/Procfile deleted file mode 100644 index 47b2aeba263..00000000000 --- a/tests/manual/docker-dns/certs-gateway/Procfile +++ /dev/null @@ -1,8 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 
--data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure --peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-gateway/server.crt --peer-key-file=/certs-gateway/server.key.insecure 
--peer-trusted-ca-file=/certs-gateway/ca.crt --peer-client-cert-auth --cert-file=/certs-gateway/server.crt --key-file=/certs-gateway/server.key.insecure --trusted-ca-file=/certs-gateway/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -gateway: ./etcd gateway start --endpoints https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 --trusted-ca-file /certs-gateway/ca.crt --listen-addr 127.0.0.1:23790 diff --git a/tests/manual/docker-dns/certs-gateway/ca-csr.json b/tests/manual/docker-dns/certs-gateway/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns/certs-gateway/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns/certs-gateway/ca.crt b/tests/manual/docker-dns/certs-gateway/ca.crt deleted file mode 100644 index 7e3814e92d6..00000000000 --- a/tests/manual/docker-dns/certs-gateway/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIUClliB9ECLPuQpOrlqLkeI1ib7zYwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTE3MDBaFw0yNzExMjkxOTE3 -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQCjClF0TCk2qrHUTjFgFv2jmV0yUqnP3SG/7eVCptcFKE7kcGAx+j06GfEP -UXmCV13cgE0dYYLtz7/g29BiZzlBLlLsmpBMM+S4nfVH9BGLbKCSnwp5ba816AuS -rc8+qmJ0fAo56snLQWoAlnZxZ1tVjAtj5ZrQP9QDK2djgyviPS4kqWQ7Ulbeqgs7 -rGz56xAsyMTWYlotgZTnnZ3Pckr1FHXhwkO1rFK5+oMZPh2HhvXL9wv0/TMAypUv -oQqDzUfUvYeaKr6qy1ADc53SQjqeTXg0jOShmnWM2zC7MwX+VPh+6ZApk3NLXwgv 
-6wT0U1tNfvctp8JvC7FqqCEny9hdAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQWI6eUGqKWkCjOKGAYd+5K6eh5 -GTANBgkqhkiG9w0BAQsFAAOCAQEAS3nIyLoGMsioLb89T1KMq+0NDDCx7R20EguT -qUvFUYKjzdxDA1RlZ2HzPxBJRwBc0Vf98pNtkWCkwUl5hxthndNQo7F9lLs/zNzp -bL4agho6kadIbcb4v/3g9XPSzqJ/ysfrwxZoBd7D+0PVGJjRTIJiN83Kt68IMx2b -8mFEBiMZiSJW+sRuKXMSJsubJE3QRn862y2ktq/lEJyYR6zC0MOeYR6BPIs/B6vU -8/iUbyk5ULc7NzWGytC+QKC3O9RTuA8MGF1aFaNSK7wDyrAlBZdxjWi52Mz3lJCK -ffBaVfvG55WKjwAqgNU17jK/Rxw1ev9mp4aCkXkD0KUTGLcoZw== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-gateway/gencert.json b/tests/manual/docker-dns/certs-gateway/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns/certs-gateway/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns/certs-gateway/gencerts.sh b/tests/manual/docker-dns/certs-gateway/gencerts.sh deleted file mode 100755 index af8663e09eb..00000000000 --- a/tests/manual/docker-dns/certs-gateway/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: *.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns/certs-gateway/run.sh b/tests/manual/docker-dns/certs-gateway/run.sh deleted file mode 100755 index 3110f3b4662..00000000000 --- a/tests/manual/docker-dns/certs-gateway/run.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs-gateway/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379 \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - put abc def - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get abc - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ - --endpoints=127.0.0.1:23790 \ - put ghi jkl - -./etcdctl \ - --cacert=/certs-gateway/ca.crt \ - --cert=/certs-gateway/server.crt \ - --key=/certs-gateway/server.key.insecure \ 
- --endpoints=127.0.0.1:23790 \ - get ghi diff --git a/tests/manual/docker-dns/certs-gateway/server-ca-csr.json b/tests/manual/docker-dns/certs-gateway/server-ca-csr.json deleted file mode 100644 index 77cdb408cf0..00000000000 --- a/tests/manual/docker-dns/certs-gateway/server-ca-csr.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "hosts": [ - "m1.etcd.local", - "m2.etcd.local", - "m3.etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns/certs-gateway/server.crt b/tests/manual/docker-dns/certs-gateway/server.crt deleted file mode 100644 index 688a5afe641..00000000000 --- a/tests/manual/docker-dns/certs-gateway/server.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEKTCCAxGgAwIBAgIUDOkW+H3KLeHEwsovqOUMKKfEuqQwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDExOTE3MDBaFw0yNzExMjkxOTE3 -MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANfu298kCxFY -KXAmdG5BeqnFoezAJQCtgv+ZRS0+OB4hVsahnNSsztEfIJnVSvYJTr1u+TGSbzBZ -q85ua3S92Mzo/71yoDlFjj1JfBmPdL1Ij1256LAwUYoPXgcACyiKpI1DnTlhwTvU -G41teQBo+u4sxr9beuNpLlehVbknH9JkTNaTbF9/B5hy5hQPomGvzPzzBNAfrb2B -EyqabnzoX4qv6cMsQSJrcOYQ8znnTPWa5WFP8rWujsvxOUjxikQn8d7lkzy+PHwq -zx69L9VzdoWyJgQ3m73SIMTgP+HL+OsxDfmbu++Ds+2i2Dgf/vdJku/rP+Wka7vn -yCM807xi96kCAwEAAaOByTCBxjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI -KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFAH+dsuv -L6qvUmB/w9eKl83+MGTtMB8GA1UdIwQYMBaAFBYjp5QaopaQKM4oYBh37krp6HkZ -MEcGA1UdEQRAMD6CDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0 
-Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAh049 -srxFkiH9Lp8le3fJkuY25T/MUrmfa10RdNSKgj3qcpCMnf9nQjIWtaQsjoZJ5MQc -VIT3gWDWK8SWlpx+O2cVEQDG0ccv7gc38YGywVhMoQ5HthTAjLCbNk4TdKJOIk7D -hmfs7BHDvjRPi38CFklLzdUQaVCcvB43TNA3Y9M75oP/UGOSe3lJz1KKXOI/t+vA -5U3yxwXlVNJVsZgeWAbXN9F6WbCZDsz+4Obpk/LV1NLqgLd/hHXzoOOWNw977S2b -+dOd95OJ/cq09OzKn/g26NgtHOl0xqol7wIwqJhweEEiVueyFxXD04jcsxdAFZSJ -9H6q3inNQaLyJHSYWQ== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-gateway/server.key.insecure b/tests/manual/docker-dns/certs-gateway/server.key.insecure deleted file mode 100644 index 6c0c16c0ba7..00000000000 --- a/tests/manual/docker-dns/certs-gateway/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA1+7b3yQLEVgpcCZ0bkF6qcWh7MAlAK2C/5lFLT44HiFWxqGc -1KzO0R8gmdVK9glOvW75MZJvMFmrzm5rdL3YzOj/vXKgOUWOPUl8GY90vUiPXbno -sDBRig9eBwALKIqkjUOdOWHBO9QbjW15AGj67izGv1t642kuV6FVuScf0mRM1pNs -X38HmHLmFA+iYa/M/PME0B+tvYETKppufOhfiq/pwyxBImtw5hDzOedM9ZrlYU/y -ta6Oy/E5SPGKRCfx3uWTPL48fCrPHr0v1XN2hbImBDebvdIgxOA/4cv46zEN+Zu7 -74Oz7aLYOB/+90mS7+s/5aRru+fIIzzTvGL3qQIDAQABAoIBABO8azA79R8Ctdbg -TOf+6B04SRKAhWFIep6t/ZqjAzINzgadot31ZXnLpIkq640NULsTt4cGYU9EAuX9 -RakH6RbhfO5t2aMiblu/qa4UZJEgXqosYc4ovGsn+GofYOW1tlCLC4XBH44+Vr5Y -cSTOc5DtWsUGsXazmF6+Cj3AC7KI+VWegHexGezyO0not8Q5L55TuH2lCW4sx9th -W4Q7jg2lrCvz4x8ZRIAXOGmBaDTZmMtVlEjezu+7xr8QDQsvUwj7a87HPjgXFesj -CbbCr8kaqEdZ23AVDZuLAKS4hWQlbacRhRAxMkomZkg5U6J/PC3ikIqfOda1zu1D -MTIOuwECgYEA8hFkISWVEzbaIZgO1BZl36wNaOLYIpX0CzlycptcEssbefLy7Nxo -TZ+m9AjF6TBPl4fO4edo00iiJMy6ZdhItduNWLO+usJEY9UdzHex7fCUeG8usUXQ -g4VGEvPGg88VEM45pkAgbga7kzkG2Ihfu6La5apbXeOpNpuC58DdlzkCgYEA5Fxl -/qGzLlTwioaaE+qpEX46MfbJl38nkeSf9B7J1ISc/fnDPcBPvcHaYELqyHM+7OFa -Gt9oBDrLgyP4ZgOTaHKHdofXjAMC97b9oa/Lrors5dMrf/fxTTe2X+Kab94E1Wbo -39kA3qzV/CT7EZWuqbHO3Bqkv/qe6ks0Tbahc/ECgYBuB2OpAWkyc6NQ08ohsxCZ -S55Ix5uQlPJ5y6Hu4BlI3ZNeqgSrjz/F0MTVdctnxDLZYLyzyDjImOJCseAj/NyH 
-9QTZhdIzF6x4aF2EG///dHQ4Del+YIp3zbNdV/sq3Izpt6NSoyFagarvL2OiNtK0 -+kBfVkDze1Dl5mfpKaxPWQKBgQC+gXqxJxKE92VIGyxUqzHqHwTLg9b/ZJuNMU5j -aH/1o8AYfJFtZY7gfeUA4zJckRAQq5rwyilLRgVbXNmvuRHzU4BA2OhvrF+Aag9D -IJXqAYnJ3RXwBtcuFOk3KqKt6mjb4qMpgy4flc5aMDunmtiARo6MvklswtZqHN0A -a/ha8QKBgQCqF/xCf5ORzVkikYYGsO910QXlzsyPdRJbhrBCRTsdhz/paT5GQQXr -y3ToUuKEoHfjFudUeGNOstjchWw+WgT9iqMJhtwV1nU1lkPyjmCQ2ONIP+13dZ+i -I/LDyMngtOKzvD5qpswY1Er+84+RVrtseQjXDC2NlrvDr5LnZDtGag== ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-san-dns/Procfile b/tests/manual/docker-dns/certs-san-dns/Procfile deleted file mode 100644 index 32298f8cbb9..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/Procfile +++ /dev/null @@ -1,6 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-san-dns/server-1.crt --peer-key-file=/certs-san-dns/server-1.key.insecure --peer-trusted-ca-file=/certs-san-dns/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-san-dns/server-1.crt --key-file=/certs-san-dns/server-1.key.insecure --trusted-ca-file=/certs-san-dns/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new 
--peer-cert-file=/certs-san-dns/server-2.crt --peer-key-file=/certs-san-dns/server-2.key.insecure --peer-trusted-ca-file=/certs-san-dns/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-san-dns/server-2.crt --key-file=/certs-san-dns/server-2.key.insecure --trusted-ca-file=/certs-san-dns/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-san-dns/server-3.crt --peer-key-file=/certs-san-dns/server-3.key.insecure --peer-trusted-ca-file=/certs-san-dns/ca.crt --peer-client-cert-auth --peer-cert-allowed-cn etcd.local --cert-file=/certs-san-dns/server-3.crt --key-file=/certs-san-dns/server-3.key.insecure --trusted-ca-file=/certs-san-dns/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr \ No newline at end of file diff --git a/tests/manual/docker-dns/certs-san-dns/ca-csr.json b/tests/manual/docker-dns/certs-san-dns/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns/certs-san-dns/ca.crt b/tests/manual/docker-dns/certs-san-dns/ca.crt deleted file mode 100644 index 2eaf8172cf4..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIIDrjCCApagAwIBAgIUV77P/m6U+QIMz7Ql0Q6xC3GO/fAwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDEN9lZnkS16bi42zl+iGlYSHGJn0uxiqhff1KRJlwbEBXr3ywllJLgnAA3 -XEQsBMYk0yEB82380flVJd6UMt+0n6bo5Mp2Z+X8eXZgVgB4uLz0APRhozO89I2D -wk74aTrV3wseCmN9ZOvG+2b1AzM6rwwnozhnoC2qlZ5yNZRSKMTRX+ZcDQ6FQopk -Kg+ACGyiU94bLJkd4Vj7oSOiParjtj1laGE88QAL8clkcT6enHlwVJDs7BF3SRBI -sBKlUnyC47mjR4v9KKkeZ7LHBcW9D7FZZYNg85mubVHfj8rZb1EAF+Kqskd6YpYz -ZezQVdJOyUrp8/+mSBaS2HpF4HjpAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTr390x+ChxCV+AkCnxh+5vgtoiyTAN -BgkqhkiG9w0BAQsFAAOCAQEAq+o4uF9xkJ/SzGgBePb3r/F0aNcBIY3XmCsGE4gd -0U/tqkGP10BKlermi87ADLxjBux+2n6eAHycac9mDynOr1d5GUVHK8BrAzKeabuP -Q8J2NQyVXpRF9z2EolLpw7J1n5CYJqsVMBjov33AKk9SmCFg3O4wD6oladWXT/Ie -ld2+EUS6TLzPNsU+AoPx64L0Aru05ynpPnlUB+DSXCBUckffmGgv0HEd5bU3QOl4 -9SUx35lk8nh7x+sHQblijuNNLi7bTIhzQTolJTCo3rd8YgSdnof0z5bROVTwymD5 -tWshIE4BP+ri+1NPKCe2KlcP3MIynKtx+obr5cLZjDHWoA== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-san-dns/gencert.json b/tests/manual/docker-dns/certs-san-dns/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns/certs-san-dns/gencerts.sh b/tests/manual/docker-dns/certs-san-dns/gencerts.sh deleted file mode 100755 index b2318fd0865..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/gencerts.sh +++ /dev/null @@ -1,42 +0,0 
@@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: m1/m2/m3.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr-1.json | cfssljson --bare ./server-1 -mv server-1.pem server-1.crt -mv server-1-key.pem server-1.key.insecure - -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr-2.json | cfssljson --bare ./server-2 -mv server-2.pem server-2.crt -mv server-2-key.pem server-2.key.insecure - -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr-3.json | cfssljson --bare ./server-3 -mv server-3.pem server-3.crt -mv server-3-key.pem server-3.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns/certs-san-dns/run.sh b/tests/manual/docker-dns/certs-san-dns/run.sh deleted file mode 100755 index f4227c58e1e..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/run.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts -echo "127.0.0.1 m1.etcd.local" >> /etc/hosts -echo "127.0.0.1 m2.etcd.local" >> /etc/hosts -echo "127.0.0.1 m3.etcd.local" >> /etc/hosts - -goreman -f /certs-san-dns/Procfile start & -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-san-dns/ca.crt \ - --cert=/certs-san-dns/server-1.crt \ - --key=/certs-san-dns/server-1.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - endpoint health --cluster - -printf "\nPut abc 
\n" -./etcdctl \ - --cacert=/certs-san-dns/ca.crt \ - --cert=/certs-san-dns/server-2.crt \ - --key=/certs-san-dns/server-2.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - put abc def - -printf "\nGet abc \n" -./etcdctl \ - --cacert=/certs-san-dns/ca.crt \ - --cert=/certs-san-dns/server-3.crt \ - --key=/certs-san-dns/server-3.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get abc - -printf "\nKill etcd server 1...\n" -kill $(lsof -t -i:2379) -sleep 7s - -printf "\nGet abc after killing server 1\n" -./etcdctl \ - --cacert=/certs-san-dns/ca.crt \ - --cert=/certs-san-dns/server-2.crt \ - --key=/certs-san-dns/server-2.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get abc -printf "\n\nDone!!!\n\n" - diff --git a/tests/manual/docker-dns/certs-san-dns/server-1.crt b/tests/manual/docker-dns/certs-san-dns/server-1.crt deleted file mode 100644 index c99fef83403..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-1.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIUYSODFGYUNAEskvyamAAxpZ8/86swDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy -MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMNEq66ZcntXibYne3W9L53JyMmGrJJi2FbVAEv76OraVnO5 -7qJNXjXZ3bOhQ3WDawbWBA5lNi1mwZcKVxM41PQXpez/6/ZkZliwNQFsDZ3WgPIx -mfcWWnoVPEKFrJTnKZm5/o+50w07yMGZLCgIS66oIcOGJ3G35/NKm+T94yKnRV2m -M1YvkmgU69MwQwbvGh1fypKB734wVp9Yz46FTuAoY8I63feYrSHKHXZf70rm3Kqm -iTU3jixWq86aI1dIRbAqObc5pgSoBwAczLjWvhhcO7n9KRkyzxjg+ZFPwRHiBWi1 
-ZU70D4XHZMdcAgu+2/IBXfGBZbKOyq9WN65N9tUCAwEAAaOBmjCBlzAOBgNVHQ8B -Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB -/wQCMAAwHQYDVR0OBBYEFJzBC8YE22RmOwKyxnH0SPC08zE4MB8GA1UdIwQYMBaA -FOvf3TH4KHEJX4CQKfGH7m+C2iLJMBgGA1UdEQQRMA+CDW0xLmV0Y2QubG9jYWww -DQYJKoZIhvcNAQELBQADggEBAKvIARZDTNcGAcu5SkrjB/mWlq7GaLqgnGARvMQ0 -O5IC6hPsOcIsTnGKzert2xkc6y7msYMOl4ddP5PgSIfpCtkmL6bACoros4ViWwl5 -Lg0YF3PQvwSL+h2StTE2pGrNp/eQL8HJD2Lhyac2vTAq01Vbh3ySrfQP9zjoH8U7 -+mJJk9VWAagU+ww17kq5VZL9iJnlFSxVLNo6dcNo/dU6eWqKWoZjAHl+/zhoSOuZ -tBRshTcFuLbBe59ULFoZ+Mt5Sa4+OuN5Jir4hQH6DS1ETd7hwsSvHf6KcIw9fIXz -h+PZ0ssNDq4Yr7i3dQS5xAQO1aO35Ru9q2ABt20E1dQGIyY= ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-san-dns/server-1.key.insecure b/tests/manual/docker-dns/certs-san-dns/server-1.key.insecure deleted file mode 100644 index 575ee5e82f7..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-1.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAw0Srrplye1eJtid7db0vncnIyYaskmLYVtUAS/vo6tpWc7nu -ok1eNdnds6FDdYNrBtYEDmU2LWbBlwpXEzjU9Bel7P/r9mRmWLA1AWwNndaA8jGZ -9xZaehU8QoWslOcpmbn+j7nTDTvIwZksKAhLrqghw4Yncbfn80qb5P3jIqdFXaYz -Vi+SaBTr0zBDBu8aHV/KkoHvfjBWn1jPjoVO4Chjwjrd95itIcoddl/vSubcqqaJ -NTeOLFarzpojV0hFsCo5tzmmBKgHABzMuNa+GFw7uf0pGTLPGOD5kU/BEeIFaLVl -TvQPhcdkx1wCC77b8gFd8YFlso7Kr1Y3rk321QIDAQABAoIBAQCl3c4LqNKDDQ+w -SAdqMsKgwIerD5fFXOsxjwsKgDgQTljDQrv+58NP8PmOnTxFNNWT3/VgGP8VP8TP -vPvMGylhEjligN151TzOtxa/V36VhWDQ2etT5IwEScd/Jjc74MQIjeI7SfiJtC/K -q4bDlpBbEvxjLrCQu0vu8IBN2o+2nWx8l7Jy0VrDuw5LQM90ZA7OcU7H2kE1ehbp -M5waHE0tdgHzlLqrVl0RlXh/FlIG7/cfQRL1rpD5T8llD7XshF2BhtXerk+QtC9b -It8xGnhd6e9Yk96KIN/3U/W5DORYwtq1r54r1OxZkUX3C0RqU2P3EcNvBHbbZydm -6xq6EfDBAoGBAM3LIHo4v96YPNHtj+GI9ZRr+o9UMrl3dcRTMkbEjFIqBdON8GrS -fdLSvZms+wqU8v7gNEZhhk7U9Y71pHUZsN5WAGHMCC6Q6/5lY2ObEEitrV7btrUe -75JNlSq52JT7L9NZRhD5ACqw9qrdUq0mNyPtrSV/J2DfubuBWcSLf58lAoGBAPLo -MGLyzuG5WTwPAkcB/T3Z5kNFlr8po9tuso5WDuXws7nIPR8yb6UIvP7JqWOgaHyh 
-YBA4aKC1T8gpAwVxZxJ9bbntxt13sxyuMZgA/CGn6FXCPbhAztnQDle81QcsMGXK -y2YbeMUVuMrowcjK6g8J9E9AkB4SDvme+xhEQgHxAoGBAIxtzRa5/Ov3dKFH+8PK -QtJqMIt3yDlZNEqo/wjdfGdg96LaG7G5O1UOq4TfTlt1MrAL7IAOcqj+lyZbp0Kl -KlU92Hrj0L199RwesYi5uo3tvf2Z7n5/wrlSKbUDJrDbC1Kse6x/TcbUBS6pYo53 -Im9o85s/vm5TnJk/9jKxgn/lAoGAVUbutc5IkzZe/ZbHVeZ84Zn+HN/xbGtR+1eB -mDbeRBuc/TwvOSSbzXSj5U8nCLLn+9krwIYNNV5yA/Nh/Ccz6Gnge8XeayH637bH -8nVmDurDxlfLE0StWgqQ/nxszXfWBeaMQeyjGY3mslXEspmKUn1MKAaikewFFd2a -iYptIgECgYEAr81jSoXyHSKpEEHzy5hyH+JOsUeWZVFduqkTTHNZe7MlXSSSZdhW -6TCjnA9HpzBlgTI8PwXXKEa2G7OCr4dHFBJSWCgzQTfd1hf5xiE7ca2bxiEC7SKF -H3TvfLCi9Dky9uFAXsp6SlI/x6Abm6CpqTlR19KyCo64LztaAmRkmNU= ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-san-dns/server-2.crt b/tests/manual/docker-dns/certs-san-dns/server-2.crt deleted file mode 100644 index 9c15aa05bb7..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-2.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIUDrW+8pB5rh4jfT8GQ3R9EqRLuzkwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy -MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBANHtpC3LDlC1MqHx/LT6vWA9DJApziy9Vh0f0SC1hFjRiFGp -yA8d4uWHg7ebEVj/hWyJPrYpNMSDXhmJVa8UtE6G3B2ZS4WZsjfKMYs0ydu8mjjV -FlfC6vuDGX3gUdI7XhW1KCmnFI0XfRaskS/khY31SMyblAZ0hDpRz/nQ3vyMSS7+ -xYgPn7SHNrJFz8+K3NB35lbvkBvYZvVJ0mONeIMB1BffHILzexiaXyHXeKTPw9yI -FSRTDlXQqY9afNpAAv12xW2Xa9chuQ5Q+5P8syRqePgjR+TVJkeUCpLunNHcxZTD -DoXqJjOlqy6OzdFGnGzvtDh/1/QL880/e6jOCcUCAwEAAaOBmjCBlzAOBgNVHQ8B -Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB -/wQCMAAwHQYDVR0OBBYEFNoiUFY7gFUJUtJpBXFVIFipiFo/MB8GA1UdIwQYMBaA 
-FOvf3TH4KHEJX4CQKfGH7m+C2iLJMBgGA1UdEQQRMA+CDW0yLmV0Y2QubG9jYWww -DQYJKoZIhvcNAQELBQADggEBAGUisaOqg4ps1XTqRnqnk/zajC0MeyayE4X2VLtO -gq04wT7N9nUmFAiL2uUBzK4IKrb68ZIGQbm/3NNJrKxauWvK79J9xbeOAnOQWIAx -VFA7uGw0JpiYFk6W9YzTCpNlIWEOEw5RaNIj8F5dAFqgqNDqd1zw1+04jIGlBTpD -v3LQjr8IvB/cmvnugwAnb8cKDlr1GO322/1otrJi2BpmjAi4FQmuxdyQTmgkQU7T -k2whauuwDrwVmc+LyoObbiiaJPi60lSABIttbUmFqWo9U+mBcbAtFE6EW6Wo1gFR -q7uKqwYjARW/h/amHhyiHkNnu+TjY1SL2+kk+EBAt0SSmq8= ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-san-dns/server-2.key.insecure b/tests/manual/docker-dns/certs-san-dns/server-2.key.insecure deleted file mode 100644 index 131ea6f4bf3..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-2.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA0e2kLcsOULUyofH8tPq9YD0MkCnOLL1WHR/RILWEWNGIUanI -Dx3i5YeDt5sRWP+FbIk+tik0xINeGYlVrxS0TobcHZlLhZmyN8oxizTJ27yaONUW -V8Lq+4MZfeBR0jteFbUoKacUjRd9FqyRL+SFjfVIzJuUBnSEOlHP+dDe/IxJLv7F -iA+ftIc2skXPz4rc0HfmVu+QG9hm9UnSY414gwHUF98cgvN7GJpfIdd4pM/D3IgV -JFMOVdCpj1p82kAC/XbFbZdr1yG5DlD7k/yzJGp4+CNH5NUmR5QKku6c0dzFlMMO -heomM6WrLo7N0UacbO+0OH/X9AvzzT97qM4JxQIDAQABAoIBAQCYEZ9mlLsv97JP -4a1/pErelhqtq7rwac8hldS17weKF266SVTkrm+YeYwOysPMRnzuXJUS+9J/r/HQ -ac2p4EOkxshYoJ02kFmrVEqDXqADDyJgnOtsc4Qo4ZTrvD1JHzxOWUZYtfGLK0Kv -1B3wJYghh1dO8DxQWMMYQ/92JboCEoVmO/vAcUH5V4qhZMEGvFm8AiaXnVi13myt -OAlfyQQ1CsnOoxxQhomzqNVrMjPelv5yLAq1Z5gXSeylc6y8NVWKsLbWJUj5IhqH -bmCw2V/1snJCJews/S/4wgDBibjldlUEPfjNwBoeRTl9DB6uCHzUiF98PB8MoDx5 -VaJiRHZZAoGBAOqVcgB+3gJ9Pf+6bUdL4NhKdr4wje2IAbeidQMXOsbp455b7NLj -/Z92tKOGJ2HBdGBzGkA4JbHcy/HBxDm6DXKWIIqYcOubDDDiBAYtEJhLG3Mqz4p8 -sp1QUICQoskCAP4gHc8/AeXKp1CQoU1dJksC4mZ66KQMdYaJ1f7gNxJ7AoGBAOUX -9mLDFjqpJ7IPt02I4yn/tlFI3GLwuO/yxEuCGt8T2CAXkc/cp+ojEI29ckwYpqv6 -D+FRPYqNN+c6OJWAR4U4OiuRQlShGZmBvn11BIn7ILZ3KnxvFXKkOzzFNU5oYczE -/L/z2SSKQfGlgDWmKWIoWt5D3TjMA7xysTgQIcC/AoGAFgyV+pXyKCm9ehv7yYfI -Sow1PQszS/BMuQX8GZ5FWA0D6A6b4/aqECMIN5aUfQvB9I7dGMwuPtmSEdc0qnhi 
-azLRPDW3521bZ/zWg/4YYTguDFUpzMqLv12dM3hk1J/rl/dM1f4GH6M8tsXhY3Qt -9T8AKMHEvCavpUWvZ5WLl6ECgYAgxmzZdE+Z1Nl5AAaZcRwOxiavOl1NSmMq8PBk -XRi7EXu6G6Ugt9DODnYv0QqpGF2//OaItba4O7vjuNCfktqolIK9+OokcWfYLley -WytrEiJ7+FB7vOi0ngpbh1s4/HYBda0zSQ+nyp/kkmjlRABnqp5VbiAYIBfovf/c -pXIuwQKBgQCGJBX7vmFcsL1qdG5d8jQr2K/dbTcU8sXQzUIXGQcCxePYOrO8Rcn2 -EMXAGIdOn6i2x0/rNn+EnPHhT6XC0hSOu52srL8BB9tbDYk3i+3ghUG5QI4dp+GQ -D1+HZD3SVrqjWlTU0aBB/NYMldIo9e3LU1ZUXTm2Rmg6Mre9ann6/w== ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-san-dns/server-3.crt b/tests/manual/docker-dns/certs-san-dns/server-3.crt deleted file mode 100644 index 3fb516db993..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-3.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIUNPjXxMAkrxdr1sZA7Gw+gYbVeLAwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xOTEwMDcyMjIyMDBaFw0yOTEwMDQyMjIy -MDBaMHcxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTETMBEGA1UEAxMKZXRjZC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBALwQOtWoCcO13D/7i96Bkb376WvoqYJw+yN9kYwVkpM1+EQd -3hzSNT0byRGeNtlXAd8tY/SpjTM7mnq5yIhNjhJ2eo5GO1YuJyDJe9WnfQ30rVfv -WzCV/BiwloaqX/tlgCJ3PVNAZdyCZ+ouRIggBUHCQo88LuKwpM9QrUmBCGFLD/M2 -PYKewGv+h9JwMRLxp5mARBS+bkUsQy9F7U/GZs/9xULXIo9l3Bj8Zqz6UMmtW+Y2 -lkK5wawG04bZwkr8lUzMC2AVKFidTuZsda9GP4OxKclW0ro0HtlYaiI7+a0xONZ6 -yuj4cYrs1KZ9z3uYji1Li8XFUb4g/v9dar0oK70CAwEAAaOBmjCBlzAOBgNVHQ8B -Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB -/wQCMAAwHQYDVR0OBBYEFATpeRk6Bxgf8LHU/wlw0iLQltEoMB8GA1UdIwQYMBaA -FOvf3TH4KHEJX4CQKfGH7m+C2iLJMBgGA1UdEQQRMA+CDW0zLmV0Y2QubG9jYWww -DQYJKoZIhvcNAQELBQADggEBADjH3ytTogX2BqnhYaVia31Zjy240iViU6BNCARq -PdBB5WCtti7yzonfS9Uytc9YLB4ln4Z0wZpRk3O0QGehHX5CDT5EL5zKwDQdoYG3 
-oKx9qOu2VyxDA/1hYdPvMW3aq4g/oE8nFjNbrFEVCuGLbJdfDnyJJFsvNRNqs8hS -xpfYLNH9lD4sD13vul7RJQJrvCjbaqQp9oLe9NZ9f+cBPGqATkicMWbABq4xbpCE -IY19SHk0WHRSem5jlbfF3O58Ow+LRR/Bn2/IYKpyidEixxu9VX06BDRH5GmG7wBd -5Y9YhmeyPCXiHHPar7m/Rmel82RLI+/qomKh9pii3u357yY= ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-san-dns/server-3.key.insecure b/tests/manual/docker-dns/certs-san-dns/server-3.key.insecure deleted file mode 100644 index b64e3bad369..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-3.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAvBA61agJw7XcP/uL3oGRvfvpa+ipgnD7I32RjBWSkzX4RB3e -HNI1PRvJEZ422VcB3y1j9KmNMzuaernIiE2OEnZ6jkY7Vi4nIMl71ad9DfStV+9b -MJX8GLCWhqpf+2WAInc9U0Bl3IJn6i5EiCAFQcJCjzwu4rCkz1CtSYEIYUsP8zY9 -gp7Aa/6H0nAxEvGnmYBEFL5uRSxDL0XtT8Zmz/3FQtcij2XcGPxmrPpQya1b5jaW -QrnBrAbThtnCSvyVTMwLYBUoWJ1O5mx1r0Y/g7EpyVbSujQe2VhqIjv5rTE41nrK -6PhxiuzUpn3Pe5iOLUuLxcVRviD+/11qvSgrvQIDAQABAoIBAG1ny7JsFXIjpEQc -pJwHKLArkvnR2nsmGxPkgv3JtwGpDgsijQqbR5mLRofXUPVTZqVdFJ9K2/gIHrBy -0DRrWdFn15hZRz+1jdHHJSGAVIH/67AScSxstMHwSUGCcGAiBk8Gq0h5WEjWHHnh -/MBsUGKXDn2hd20tclOhDY6LYEKolRPFjfBmPRdhdR5A6RS+U+jx1yFsWa6cUjv6 -kInlE5yMdhEOuA/QnVvcaAsKb5CKAuCtAkmFH3fjDp3nkhYFXJy4DTsVRMAfsr5s -SpsKt272URd5fLeZ5QlOb82QCvJr9GushkkKk7N5TMh5C/r74zpROdLTRlXD4I2q -yvnSv8kCgYEA+HRjeRRxujVWo7YSnHYJ/xConrCSekfRMvIXvSq43E+I/t5SlPl8 -YoJYhGWzZ7A/szqTvTW/v2blScd+X4KiK0TX8tTQFvWEBBcZhLILUB/ZiIfi/6ZG -fxe+BAmTMSBThknnRsvAA4jkTvErdpBhhRltyjdLunEEjnfSzJJORHMCgYEAwcZU -TpAfo4ni1Am9Nskk/5LjmPX5u+qfPNJfe6dfO+BoMA51XuAagqZhdsSwTGoxs5xQ -cKmNFA6QmAQnPZK7+QYwmDUXb8/Dtz/d5jylsZdYRHYr4hx3DcKFFEyhlPqrj44k -HxparrkDIq7nVz1t3YMVXYJM/5k2cx/VHlTD8w8CgYEA6Ypl0nNwL4thpENKHT4r -SVG8XmY1WbHWKCA+Rjc5SwWMDZ6nW5dj3ykM0W7Tg5y9U9i09L7oPZ8X2hEmbdra -Wve8UWrPKzWe4UVhXEULs0Ys8VRiANKoI2EK4LqrXBs5x9oCBp8RH4F2semqZCl1 -MWpktBbkHR2NHenuARNpdJcCgYBzlY3sXuPAdRssR7Lp3wmGuWOxdefFQ6pAaWwz -Ih8YZD9Bix5PvXWSwRQZ+DEBI8cJ0A/bZAeXEykExFVz0Pb3D84kvGaCd3fS8vG1 
-yC89w30POT3r3fbV6lXfSeaIKw3yz2KUeu/kkM9h/NpZm3bRTsOLx5GOVSG5gh9p -vD412QKBgFxq4rsxJC6+QZvRZaJDcmTHSytbAw3B5Lyv6G+xLBUqc27KjQzCved1 -9Ofzy7KEC3AtKiq3Y0q5q01Rzk5ZYCh6lVe2tw36Muw1bvZjqblGm9X2VRO8Ui2Q -4WOdvIP4z5ZTJQXdIahKAYOyxiYFIvCkvS5SYoKkgWNSzFNKvQtH ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/certs-san-dns/server-ca-csr-1.json b/tests/manual/docker-dns/certs-san-dns/server-ca-csr-1.json deleted file mode 100644 index 6927354930b..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-ca-csr-1.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "etcd.local", - "hosts": [ - "m1.etcd.local" - ] -} diff --git a/tests/manual/docker-dns/certs-san-dns/server-ca-csr-2.json b/tests/manual/docker-dns/certs-san-dns/server-ca-csr-2.json deleted file mode 100644 index 4e6debef1ed..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-ca-csr-2.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "etcd.local", - "hosts": [ - "m2.etcd.local" - ] - } diff --git a/tests/manual/docker-dns/certs-san-dns/server-ca-csr-3.json b/tests/manual/docker-dns/certs-san-dns/server-ca-csr-3.json deleted file mode 100644 index af67a615b8e..00000000000 --- a/tests/manual/docker-dns/certs-san-dns/server-ca-csr-3.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "etcd.local", - "hosts": [ - "m3.etcd.local" - ] - } diff --git a/tests/manual/docker-dns/certs-wildcard/Procfile b/tests/manual/docker-dns/certs-wildcard/Procfile deleted file mode 100644 index 
d53a22a799e..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/Procfile +++ /dev/null @@ -1,6 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn 
--initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs-wildcard/server.crt --peer-key-file=/certs-wildcard/server.key.insecure --peer-trusted-ca-file=/certs-wildcard/ca.crt --peer-client-cert-auth --cert-file=/certs-wildcard/server.crt --key-file=/certs-wildcard/server.key.insecure --trusted-ca-file=/certs-wildcard/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr \ No newline at end of file diff --git a/tests/manual/docker-dns/certs-wildcard/ca-csr.json b/tests/manual/docker-dns/certs-wildcard/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns/certs-wildcard/ca.crt b/tests/manual/docker-dns/certs-wildcard/ca.crt deleted file mode 100644 index 23ee34f4a4d..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIUanA77pXfEz2idrPSlIoPrSo6MmcwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA5MDBaFw0yNzExMTEwNDA5 -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDqtw5G6XZ4N2uuc7TAoiXI+IXA/H+IJIbHrVFQ3LIzLDaS6AmVWw4yT4o2 -X/1IbR5TU6dCnGxuHPutnfnG87is5Oxk1HfIy5cfpf75St3uQycJRcr3Bui/fEZ0 -IZaoRyklcYGI8Y+VfaSADl++EP7UU0X7cc263rZulJXkqp4HihDTPixBgVDruNWf 
-Yfa2K/Zhiq+zj3hE6s/cBn2pIdY6SMlQ1P0uT/Y5oBTTJFBxeqw+Sz/NXgKgErQg -Za/gNHQWzyRoYHiOGQylvsiXr6tgdk29f0Z6gTQy8FQpwOXYERJr45zh8KvE+FJK -MaWUhGW7hkv85JDZSsmDZ6lVYIfhAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBS+p7B3RLjI8HOOPvVhqtBQNRmH -ZTANBgkqhkiG9w0BAQsFAAOCAQEAFWHLvzzTRQJYjVDxBuXrNZkhFsGAoCYoXhAK -1nXmqLb9/dPMxjkB4ptkQNuP8cMCMPMlapoLkHxEihN1sWZwJRfWShRTK2cQ2kd6 -IKH/M3/ido1PqN/CxhfqvMj3ap3ZkV81nvwn3XhciCGca1CyLzij9RroO0Ee+R3h -mK5A38I1YeRMNOnNAJAW+5scaVtPe6famG2p/OcswobF+ojeZIQJcuk7/FP5iXGA -UfG5WaW3bVfSr5aUGtf/RYZvYu3kWZlAzGaey5iLutRc7f63Ma4jjEEauiGLqQ+6 -F17Feafs2ibRr1wes11O0B/9Ivx9qM/CFgEYhJfp/nBgY/UZXw== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs-wildcard/gencert.json b/tests/manual/docker-dns/certs-wildcard/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns/certs-wildcard/gencerts.sh b/tests/manual/docker-dns/certs-wildcard/gencerts.sh deleted file mode 100755 index af8663e09eb..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: *.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns/certs-wildcard/run.sh b/tests/manual/docker-dns/certs-wildcard/run.sh deleted file mode 100755 index f0374d90996..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/run.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs-wildcard/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-wildcard/ca.crt \ - --cert=/certs-wildcard/server.crt \ - --key=/certs-wildcard/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379 \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs-wildcard/ca.crt \ - --cert=/certs-wildcard/server.crt \ - --key=/certs-wildcard/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - put abc def - -./etcdctl \ - --cacert=/certs-wildcard/ca.crt \ - --cert=/certs-wildcard/server.crt \ - --key=/certs-wildcard/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get abc diff --git a/tests/manual/docker-dns/certs-wildcard/server-ca-csr.json b/tests/manual/docker-dns/certs-wildcard/server-ca-csr.json deleted file mode 100644 index 616bf11f8f1..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/server-ca-csr.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "key": { - 
"algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "hosts": [ - "*.etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns/certs-wildcard/server.crt b/tests/manual/docker-dns/certs-wildcard/server.crt deleted file mode 100644 index a51cd0b9492..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIECjCCAvKgAwIBAgIUQ0AgAKntDzHW4JxYheDkVMow5ykwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA5MDBaFw0yNzExMTEwNDA5 -MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANMqNEozhdLm -K5ATSkgIOyQmBmoUCgiWB+P52YWzfmwaWwQP2FFs3qih2c3DHHH7s2zdceXKT2ZN -lvSO8yj08slLPYSC4LQ3su8njGJlasJ28JMjRqshnH3umxFXf9+aPcZ5yYkoXE9V -fzsnBMJz8hI6K2j4Q6sJe+v/0pdz8MpbdIPnmL9qfVpuD6JqmDCZiQOJ8lpMuqqD -60uLjtLv/JKjgdqe5C4psERVm09fg3vOZckv9CC6a4MupeXo2il6femZnPrxC8LX -u2KT3njEjoyzEu2NSdy+BUJDVLgKSh8s2TC8ViNfiFONQo6L1y78ZAyCDrRbTgN9 -Nu1Ou/yzqHkCAwEAAaOBqjCBpzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI -KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFC83cRfE -/EKcz7GJKmgDLUBi3kRSMB8GA1UdIwQYMBaAFL6nsHdEuMjwc44+9WGq0FA1GYdl -MCgGA1UdEQQhMB+CDCouZXRjZC5sb2NhbIIJbG9jYWxob3N0hwR/AAABMA0GCSqG -SIb3DQEBCwUAA4IBAQCI7estG86E9IEGREfYul1ej8hltpiAxucmsI0i0lyRHOGa -dss3CKs6TWe5LWXThCIJ2WldI/VgPe63Ezz7WuP3EJxt9QclYArIklS/WN+Bjbn7 -6b8KAtGQkFh7hhjoyilBixpGjECcc7lbriXoEpmUZj9DYQymXWtjKeUJCfQjseNS -V/fmsPph8QveN+pGCypdQ9EA4LGXErg4DQMIo40maYf9/uGBMIrddi930llB0wAh -lsGNUDkrKKJVs2PiVsy8p8sF1h7zAQ+gSqk3ZuWjrTqIIMHtRfIaNICimc7wEy1t -u5fbySMusy1PRAwHVdl5yPxx++KlHyBNowh/9OJh ------END CERTIFICATE----- diff --git 
a/tests/manual/docker-dns/certs-wildcard/server.key.insecure b/tests/manual/docker-dns/certs-wildcard/server.key.insecure deleted file mode 100644 index ac56ed4ea32..00000000000 --- a/tests/manual/docker-dns/certs-wildcard/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA0yo0SjOF0uYrkBNKSAg7JCYGahQKCJYH4/nZhbN+bBpbBA/Y -UWzeqKHZzcMccfuzbN1x5cpPZk2W9I7zKPTyyUs9hILgtDey7yeMYmVqwnbwkyNG -qyGcfe6bEVd/35o9xnnJiShcT1V/OycEwnPyEjoraPhDqwl76//Sl3Pwylt0g+eY -v2p9Wm4PomqYMJmJA4nyWky6qoPrS4uO0u/8kqOB2p7kLimwRFWbT1+De85lyS/0 -ILprgy6l5ejaKXp96Zmc+vELwte7YpPeeMSOjLMS7Y1J3L4FQkNUuApKHyzZMLxW -I1+IU41CjovXLvxkDIIOtFtOA3027U67/LOoeQIDAQABAoIBAH/sM104NTv8QCu5 -4+gbRGizuHMOzL1C1mjfdU0v3chzduvRBYTeZUzXL/Ec3+CVUK8Ev/krREp/epGQ -//Gx4lrbf9sExkem7nk/Biadtb00/KzGVAtcA0evArXQwiCdegsAwHycvL861ibp -jlKWlvE/2AhxTd0Rk8b2ZYdmr1qGTesIy7S4ilj1B8aYWnZglhSyyU7TqLhYmsWo -3B1ufNpkPCzo97bJmc1/bqXCIQXi/HkkDxJRFa/vESebiy2wdgkWflybW37vLaN0 -mox44uXpVYtZuuGyxdKjX6T2EOglZztXlC8gdxrnFS5leyBEu+7ABS5OvHgnlOX5 -80MyUpkCgYEA/4xpEBltbeJPH52Lla8VrcW3nGWPnfY8xUSnjKBspswTQPu389EO -ayM3DewcpIfxFu/BlMzKz0lm77QQZIu3gIJoEu8IXzUa3fJ2IavRKPSvbNFj5Icl -kVX+mE4BtF+tnAjDWiX9qaNXZcU7b0/q0yXzL35WB4H7Op4axqBir/sCgYEA04m3 -4LtRKWgObQXqNaw+8yEvznWdqVlJngyKoJkSVnqwWRuin9eZDfc84genxxT0rGI9 -/3Fw8enfBVIYGLR5V5aYmGfYyRCkN4aeRc0zDlInm0x2UcZShT8D0LktufwRYZh8 -Ui6+iiIBELwxyyWfuybH5hhstbdFazfu1yNA+xsCgYB47tORYNceVyimh4HU9iRG -NfjsNEvArxSXLiQ0Mn74eD7sU7L72QT/wox9NC1h10tKVW/AoSGg8tWZvha73jqa -wBvMSf4mQBVUzzcEPDEhNpoF3xlsvmAS5SU0okXAPD8GRkdcU/o02y2y5aF4zdMM -1Tq+UQUZTHO9i7CUKrZJHQKBgQC+FueRn0ITv1oXRlVs3dfDi3L2SGLhJ0csK4D3 -SBZed+m4aUj98jOrhRzE0LRIBeDId4/W2A3ylYK/uUHGEYdo2f9OFSONqtKmwuW/ -O+JBYDoPJ+q7GUhWTIYVLhKVKppD5U7yWucGIgBrFXJ5Ztnex76iWhh2Qray3pRV -52whOQKBgHVBI4F7pkn6id9W4sx2LqrVjpjw6vTDepIRK0SXBIQp34WnCL5CERDJ -pks203i42Ww7IadufepkGQOfwuik9wVRNWrNp4oKle6oNK9oK3ihuyb+5DtyKwDm -5sQUYUXc5E3qDQhHCGDzbT7wP+bCDnWKgvV6smshuQSW8M+tFIOQ ------END RSA PRIVATE KEY----- diff --git 
a/tests/manual/docker-dns/certs/Procfile b/tests/manual/docker-dns/certs/Procfile deleted file mode 100644 index a7f68a6c989..00000000000 --- a/tests/manual/docker-dns/certs/Procfile +++ /dev/null @@ -1,6 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn 
--initial-cluster=m1=https://m1.etcd.local:2380,m2=https://m2.etcd.local:22380,m3=https://m3.etcd.local:32380 --initial-cluster-state new --peer-cert-file=/certs/server.crt --peer-key-file=/certs/server.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server.crt --key-file=/certs/server.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth --logger=zap --log-outputs=stderr \ No newline at end of file diff --git a/tests/manual/docker-dns/certs/ca-csr.json b/tests/manual/docker-dns/certs/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-dns/certs/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-dns/certs/ca.crt b/tests/manual/docker-dns/certs/ca.crt deleted file mode 100644 index 4a17292de68..00000000000 --- a/tests/manual/docker-dns/certs/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIUCeu/ww6+XbCM3m8m6fp17t8bjOcwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA2MDBaFw0yNzExMTEwNDA2 -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQCgH8KMvldAoQjWA5YQoEOQgRyjZ3hkKdTQcFBj3OR8OyhiNJ+4oEJ/AqyJ -b41G9NGd+88hRSrcCeUBrUY3nWVhqzclCe7mQ1IyordmuKxekmPD/uvzcbySzpJT -qGEwNEiiBcr4mSQiGA5yMgBLKLpKw27t0ncVn/Qt0rKtqwLUYYWGEfADLw7+6iDK -xzCxLeXV/cB1VtFZa62j3KRJR4XQ/QosqwZw2dRGF/jUZgmsRYYK8noOvqY/uRPV -sqwGAKq0B0zOMp185dFrzJVD+LHZgSS9GLGmvRgttwayDuYSOny7WXugQ28fCaRX 
-p+53s1eBb5cHCGSko48f2329cnlFAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSgglhjDWaAJm9ju5x1YMArtH7c -yjANBgkqhkiG9w0BAQsFAAOCAQEAK6IGimbnP9oFSvwNGmXjEtn/vE82dDhQJv8k -oiAsx0JurXBYybvu/MLaBJVQ6bF77hW/fzvhMOzLNEMGY1ql80TmfaTqyPpTN85I -6YhXOViZEQJvH17lVA8d57aSve0WPZqBqS3xI0dGpn/Ji6JPrjKCrgjeukXXHR+L -MScK1lpxaCjD45SMJCzANsMnIKTiKN8RnIcSmnrr/gGl7bC6Y7P84xUGgYu2hvNG -1DZBcelmzbZYk2DtbrR0Ed6IFD1Tz4RAEuKJfInjgAP2da41j4smoecXOsJMGVl5 -5RX7ba3Hohys6la8jSS3opCPKkwEN9mQaB++iN1qoZFY4qB9gg== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs/gencert.json b/tests/manual/docker-dns/certs/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-dns/certs/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-dns/certs/gencerts.sh b/tests/manual/docker-dns/certs/gencerts.sh deleted file mode 100755 index af8663e09eb..00000000000 --- a/tests/manual/docker-dns/certs/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: *.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-dns/certs/run.sh b/tests/manual/docker-dns/certs/run.sh deleted file mode 100755 index 05be3e64251..00000000000 --- a/tests/manual/docker-dns/certs/run.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /certs/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379 \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - put abc def - -./etcdctl \ - --cacert=/certs/ca.crt \ - --cert=/certs/server.crt \ - --key=/certs/server.key.insecure \ - --endpoints=https://m1.etcd.local:2379,https://m2.etcd.local:22379,https://m3.etcd.local:32379 \ - get abc - -printf "\nWriting v2 key...\n" -curl -L https://127.0.0.1:2379/v2/keys/queue \ - --cacert /certs/ca.crt \ - --cert /certs/server.crt \ - --key /certs/server.key.insecure \ - -X POST \ - -d value=data - -printf "\nWriting v2 key...\n" -curl -L https://m1.etcd.local:2379/v2/keys/queue \ - --cacert /certs/ca.crt \ - --cert /certs/server.crt \ - --key /certs/server.key.insecure \ - -X POST \ - -d value=data - 
-printf "\nWriting v3 key...\n" -curl -L https://127.0.0.1:2379/v3/kv/put \ - --cacert /certs/ca.crt \ - --cert /certs/server.crt \ - --key /certs/server.key.insecure \ - -X POST \ - -d '{"key": "Zm9v", "value": "YmFy"}' - -printf "\n\nWriting v3 key...\n" -curl -L https://m1.etcd.local:2379/v3/kv/put \ - --cacert /certs/ca.crt \ - --cert /certs/server.crt \ - --key /certs/server.key.insecure \ - -X POST \ - -d '{"key": "Zm9v", "value": "YmFy"}' - -printf "\n\nReading v3 key...\n" -curl -L https://m1.etcd.local:2379/v3/kv/range \ - --cacert /certs/ca.crt \ - --cert /certs/server.crt \ - --key /certs/server.key.insecure \ - -X POST \ - -d '{"key": "Zm9v"}' - -printf "\n\nFetching 'curl https://m1.etcd.local:2379/metrics'...\n" -curl \ - --cacert /certs/ca.crt \ - --cert /certs/server.crt \ - --key /certs/server.key.insecure \ - -L https://m1.etcd.local:2379/metrics | grep Put | tail -3 - -printf "\n\nDone!!!\n\n" diff --git a/tests/manual/docker-dns/certs/server-ca-csr.json b/tests/manual/docker-dns/certs/server-ca-csr.json deleted file mode 100644 index 77cdb408cf0..00000000000 --- a/tests/manual/docker-dns/certs/server-ca-csr.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "hosts": [ - "m1.etcd.local", - "m2.etcd.local", - "m3.etcd.local", - "127.0.0.1", - "localhost" - ] -} diff --git a/tests/manual/docker-dns/certs/server.crt b/tests/manual/docker-dns/certs/server.crt deleted file mode 100644 index 928e3cf5db6..00000000000 --- a/tests/manual/docker-dns/certs/server.crt +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEKTCCAxGgAwIBAgIUUwtQlOqMccWY8MOaSaWutEjlMrgwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzExMTMwNDA2MDBaFw0yNzExMTEwNDA2 
-MDBaMGIxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE -BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALyYH7bL79If -75AezzSpkuTOPAGBzPlGFLM5QS4jrt6fJBpElAUV2VmZm+isVsTs2X63md1t4s3Y -44soYK02HONUxUXxbeW7S8yJYSplG5hCJpFiSVP0GyVojQ04OLO1yI5m82fWJNi6 -9PgTmb3+/YD08TKbjjJ4FB0kqoFJE4qoUNNpbkpQxHW4cx9iyWbE9gwyGoC76ftr -DC4J5HavmZ/y51rq1VWrO/d9rmCEUN++M8FcGt6D4WVQ54sWafl4Q1HafBq3FAT5 -swpqi6aDDFKYYTdvjFEmJ2uWacak8NO+vjTt8fTfSFBUYcxweVWIDm6xU8kR8Lwy -aNxD26jQ9GMCAwEAAaOByTCBxjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI -KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFELi+Ig+ -uxXrOvjoacXjcCjtfHcsMB8GA1UdIwQYMBaAFKCCWGMNZoAmb2O7nHVgwCu0ftzK -MEcGA1UdEQRAMD6CDW0xLmV0Y2QubG9jYWyCDW0yLmV0Y2QubG9jYWyCDW0zLmV0 -Y2QubG9jYWyCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAn6e8 -LPd53xQGiicDHN8+WkUS7crr+A+bIfY0nbWUf1H7zwNxpHHnKgVRHc4MKpRY4f+E -M2bEYdNJZDrjFYIWWlFDteVKZevH2dB3weiCAYWPYuiR9dGH6NvVbPcEMwarPBW4 -mLsm9Nl/r7YBxXx73rhfxyBbhTuDwKtY/BAMi+ZO4msnuWiiSiQEUrEmzm9PWhAD -CgNjxCL3xoGyIJGj1xev0PYo+iFrAd9Pkfg2+FaSYXtNPbZX229yHmxU7GbOJumx -5vGQMRtzByq7wqw1dZpITlgbDPJc5jdIRKGnusQ96GXLORSQcP+tfG4NhreYYpI1 -69Y78gNCTl0uGmI21g== ------END CERTIFICATE----- diff --git a/tests/manual/docker-dns/certs/server.key.insecure b/tests/manual/docker-dns/certs/server.key.insecure deleted file mode 100644 index 08784a7c65d..00000000000 --- a/tests/manual/docker-dns/certs/server.key.insecure +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAvJgftsvv0h/vkB7PNKmS5M48AYHM+UYUszlBLiOu3p8kGkSU -BRXZWZmb6KxWxOzZfreZ3W3izdjjiyhgrTYc41TFRfFt5btLzIlhKmUbmEImkWJJ -U/QbJWiNDTg4s7XIjmbzZ9Yk2Lr0+BOZvf79gPTxMpuOMngUHSSqgUkTiqhQ02lu -SlDEdbhzH2LJZsT2DDIagLvp+2sMLgnkdq+Zn/LnWurVVas7932uYIRQ374zwVwa -3oPhZVDnixZp+XhDUdp8GrcUBPmzCmqLpoMMUphhN2+MUSYna5ZpxqTw076+NO3x -9N9IUFRhzHB5VYgObrFTyRHwvDJo3EPbqND0YwIDAQABAoIBAQC0YCbM9YZ9CRBe -Xik9rAYTknBv3I6Hx5BaziLaF0TUJY8pFHgh2QDVooYsZlBi7kki2kVuNAAdcxhG 
-ayrz33KHtvcq6zt54sYfbTGik6tt1679k+ygQDOKdtGZWDFbKD0Wfb7FjFoXc9CC -SHTd9DjPkvXxujepa5GJQh1Vo+ftz2I+8e6LeoiBZJM1IosfrpxKg02UnWrLia7o -i8eoXIyMAJHuNUGpGl33WnckyMGDsVKMc2DVG2exfVBZ37lAemYOLRKmd4AwUk2l -ztd71sXQodLk++1BqaS9cc9yvsNiBjGL3Ehm7uUcLH1k3VHd4ArcGhiqffKzQuSE -Dhm8GXNZAoGBAMrXOAdnfLlxYKCqOaj0JwN0RusWBP3cC7jluS5UmeTROPnBa0Fb -219YtiXkDrWtoiwLvvPXobem0/juPkiGnprGcOsPUGa6pV3TPJ40BiIfh9/vt7fr -Bko2SqEA9U0FxredcOFoCPxX9k9EDWxhF/nD20amvRHKK/wv995iXKxHAoGBAO4F -GILNxBHlH5F++dbSSSTcZUTXvuBr7JQkbMK+luSikEtaSW9IO2bf65LtqjaWp4Ds -rENCQAB3PmI111Rjwrk7925W0JCHw/+UArlVoM3K2q1zhYUWAn9L3v+qUTN2TLu1 -Tso3OkCrQ5aa812tffW3hZHOWJ+aZp2nnBnruDEFAoGAGJDCD1uAJnFNs4eKHiUb -iHaPlC8BgcEhhk4EBFFopeaU0OKU28CFK+HxhVs+UNBrgIwXny5xPm2s5EvuLRho -ovP/fuhG43odRuSrRbmlOIK7EOrWRCbphxlWJnOYQbC+ZURjBFl2JSF+ChGC0qpb -nfsTVlYhNcNXWl5w1XTyJkcCgYEAp07XquJeh0GqTgiWL8XC+nEdkiWhG3lhY8Sy -2rVDtdT7XqxJYDrC3o5Ztf7vnc2KUpqKgACqomkvZbN49+3j63bWdy35Dw8P27A7 -tfEVxnJoAnJokWMmQDqhts8OowDt8SgCCSyG+vwn10518QxJtRXaguIr84yBwyIV -HTdPUs0CgYBIAxoPD9/6R2swClvln15sjaIXDp5rYLbm6mWU8fBURU2fdUw3VBlJ -7YVgQ4GnKiCI7NueBBNRhjXA3KDkFyZw0/oKe2uc/4Gdyx1/L40WbYOaxJD2vIAf -FZ4pK9Yq5Rp3XiCNm0eURBlNM+fwXOQin2XdzDRoEq1B5JalQO87lA== ------END RSA PRIVATE KEY----- diff --git a/tests/manual/docker-dns/etcd.zone b/tests/manual/docker-dns/etcd.zone deleted file mode 100644 index 03c15fe8e66..00000000000 --- a/tests/manual/docker-dns/etcd.zone +++ /dev/null @@ -1,14 +0,0 @@ -$TTL 86400 -@ IN SOA etcdns.local. root.etcdns.local. ( - 100500 ; Serial - 604800 ; Refresh - 86400 ; Retry - 2419200 ; Expire - 86400 ) ; Negative Cache TTL - IN NS ns.etcdns.local. 
- IN A 127.0.0.1 - -ns IN A 127.0.0.1 -m1 IN A 127.0.0.1 -m2 IN A 127.0.0.1 -m3 IN A 127.0.0.1 diff --git a/tests/manual/docker-dns/insecure/Procfile b/tests/manual/docker-dns/insecure/Procfile deleted file mode 100644 index 46d15367bd6..00000000000 --- a/tests/manual/docker-dns/insecure/Procfile +++ /dev/null @@ -1,6 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://m1.etcd.local:2379 --listen-peer-urls http://127.0.0.1:2380 --initial-advertise-peer-urls=http://m1.etcd.local:2380 --initial-cluster-token tkn --initial-cluster=m1=http://m1.etcd.local:2380,m2=http://m2.etcd.local:22380,m3=http://m3.etcd.local:32380 --host-whitelist "localhost,127.0.0.1,m1.etcd.local" --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://m2.etcd.local:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls=http://m2.etcd.local:22380 --initial-cluster-token tkn --initial-cluster=m1=http://m1.etcd.local:2380,m2=http://m2.etcd.local:22380,m3=http://m3.etcd.local:32380 --host-whitelist "localhost,127.0.0.1,m1.etcd.local" --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://m3.etcd.local:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls=http://m3.etcd.local:32380 --initial-cluster-token tkn --initial-cluster=m1=http://m1.etcd.local:2380,m2=http://m2.etcd.local:22380,m3=http://m3.etcd.local:32380 --host-whitelist "localhost,127.0.0.1,m1.etcd.local" --logger=zap --log-outputs=stderr \ No newline at end of file diff --git a/tests/manual/docker-dns/insecure/run.sh b/tests/manual/docker-dns/insecure/run.sh deleted file mode 100755 index 32c963487d1..00000000000 --- 
a/tests/manual/docker-dns/insecure/run.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data - -/etc/init.d/bind9 start - -# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost -cat /dev/null >/etc/hosts - -goreman -f /insecure/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --endpoints=http://m1.etcd.local:2379 \ - endpoint health --cluster - -./etcdctl \ - --endpoints=http://m1.etcd.local:2379,http://m2.etcd.local:22379,http://m3.etcd.local:32379 \ - put abc def - -./etcdctl \ - --endpoints=http://m1.etcd.local:2379,http://m2.etcd.local:22379,http://m3.etcd.local:32379 \ - get abc - -printf "\nWriting v2 key...\n" -curl \ - -L http://127.0.0.1:2379/v2/keys/queue \ - -X POST \ - -d value=data - -printf "\nWriting v2 key...\n" -curl \ - -L http://m1.etcd.local:2379/v2/keys/queue \ - -X POST \ - -d value=data - -printf "\nWriting v3 key...\n" -curl \ - -L http://127.0.0.1:2379/v3/kv/put \ - -X POST \ - -d '{"key": "Zm9v", "value": "YmFy"}' - -printf "\n\nWriting v3 key...\n" -curl \ - -L http://m1.etcd.local:2379/v3/kv/put \ - -X POST \ - -d '{"key": "Zm9v", "value": "YmFy"}' - -printf "\n\nReading v3 key...\n" -curl \ - -L http://m1.etcd.local:2379/v3/kv/range \ - -X POST \ - -d '{"key": "Zm9v"}' - -printf "\n\nFetching 'curl http://m1.etcd.local:2379/metrics'...\n" -curl \ - -L http://m1.etcd.local:2379/metrics | grep Put | tail -3 - -name1=$(base64 <<< "/election-prefix") -val1=$(base64 <<< "v1") -data1="{\"name\":\"${name1}\", \"value\":\"${val1}\"}" - -printf "\n\nCampaign: ${data1}\n" -result1=$(curl -L http://m1.etcd.local:2379/v3/election/campaign -X POST -d "${data1}") -echo ${result1} - -# should not panic servers -val2=$(base64 <<< "v2") -data2="{\"value\": \"${val2}\"}" -printf "\n\nProclaim (wrong-format): ${data2}\n" -curl \ - -L http://m1.etcd.local:2379/v3/election/proclaim \ - -X POST \ - -d "${data2}" - -printf "\n\nProclaim (wrong-format)...\n" -curl \ - -L 
http://m1.etcd.local:2379/v3/election/proclaim \ - -X POST \ - -d '}' - -printf "\n\nProclaim (wrong-format)...\n" -curl \ - -L http://m1.etcd.local:2379/v3/election/proclaim \ - -X POST \ - -d '{"value": "Zm9v"}' - -printf "\n\nDone!!!\n\n" diff --git a/tests/manual/docker-dns/named.conf b/tests/manual/docker-dns/named.conf deleted file mode 100644 index 76ce0caa165..00000000000 --- a/tests/manual/docker-dns/named.conf +++ /dev/null @@ -1,23 +0,0 @@ -options { - directory "/var/bind"; - listen-on { 127.0.0.1; }; - listen-on-v6 { none; }; - allow-transfer { - none; - }; - // If you have problems and are behind a firewall: - query-source address * port 53; - pid-file "/var/run/named/named.pid"; - allow-recursion { none; }; - recursion no; -}; - -zone "etcd.local" IN { - type main; - file "/etc/bind/etcd.zone"; -}; - -zone "0.0.127.in-addr.arpa" { - type main; - file "/etc/bind/rdns.zone"; -}; diff --git a/tests/manual/docker-dns/rdns.zone b/tests/manual/docker-dns/rdns.zone deleted file mode 100644 index fb71b30b1fa..00000000000 --- a/tests/manual/docker-dns/rdns.zone +++ /dev/null @@ -1,13 +0,0 @@ -$TTL 86400 -@ IN SOA etcdns.local. root.etcdns.local. ( - 100500 ; Serial - 604800 ; Refresh - 86400 ; Retry - 2419200 ; Expire - 86400 ) ; Negative Cache TTL - IN NS ns.etcdns.local. - IN A 127.0.0.1 - -1 IN PTR m1.etcd.local. -1 IN PTR m2.etcd.local. -1 IN PTR m3.etcd.local. 
diff --git a/tests/manual/docker-dns/resolv.conf b/tests/manual/docker-dns/resolv.conf deleted file mode 100644 index bbc8559cd54..00000000000 --- a/tests/manual/docker-dns/resolv.conf +++ /dev/null @@ -1 +0,0 @@ -nameserver 127.0.0.1 diff --git a/tests/manual/docker-static-ip/Dockerfile b/tests/manual/docker-static-ip/Dockerfile deleted file mode 100644 index d5f7913be7b..00000000000 --- a/tests/manual/docker-static-ip/Dockerfile +++ /dev/null @@ -1,37 +0,0 @@ -FROM ubuntu:18.04 - -RUN rm /bin/sh && ln -s /bin/bash /bin/sh -RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections - -RUN apt-get -y update \ - && apt-get -y install \ - build-essential \ - gcc \ - apt-utils \ - pkg-config \ - software-properties-common \ - apt-transport-https \ - libssl-dev \ - sudo \ - bash \ - curl \ - tar \ - git \ - netcat \ - bind9 \ - dnsutils \ - && apt-get -y update \ - && apt-get -y upgrade \ - && apt-get -y autoremove \ - && apt-get -y autoclean - -ENV GOROOT /usr/local/go -ENV GOPATH /go -ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH} -ENV GO_VERSION REPLACE_ME_GO_VERSION -ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang -RUN rm -rf ${GOROOT} \ - && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \ - && mkdir -p ${GOPATH}/src ${GOPATH}/bin \ - && go version \ - && go get -v -u github.com/mattn/goreman diff --git a/tests/manual/docker-static-ip/certs-metrics-proxy/Procfile b/tests/manual/docker-static-ip/certs-metrics-proxy/Procfile deleted file mode 100644 index 4cf163c174b..00000000000 --- a/tests/manual/docker-static-ip/certs-metrics-proxy/Procfile +++ /dev/null @@ -1,8 +0,0 @@ -# Use goreman to run `go get github.com/mattn/goreman` -etcd1: ./etcd --name m1 --data-dir /tmp/m1.data --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://localhost:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://localhost:2380 --initial-cluster-token tkn 
--initial-cluster=m1=https://localhost:2380,m2=https://localhost:22380,m3=https://localhost:32380 --initial-cluster-state new --peer-cert-file=/certs-metrics-proxy/server.crt --peer-key-file=/certs-metrics-proxy/server.key.insecure --peer-trusted-ca-file=/certs-metrics-proxy/ca.crt --peer-client-cert-auth --cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --client-cert-auth --listen-metrics-urls=https://localhost:2378,http://localhost:9379 --logger=zap --log-outputs=stderr - -etcd2: ./etcd --name m2 --data-dir /tmp/m2.data --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://localhost:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://localhost:22380 --initial-cluster-token tkn --initial-cluster=m1=https://localhost:2380,m2=https://localhost:22380,m3=https://localhost:32380 --initial-cluster-state new --peer-cert-file=/certs-metrics-proxy/server.crt --peer-key-file=/certs-metrics-proxy/server.key.insecure --peer-trusted-ca-file=/certs-metrics-proxy/ca.crt --peer-client-cert-auth --cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --client-cert-auth --listen-metrics-urls=https://localhost:22378,http://localhost:29379 --logger=zap --log-outputs=stderr - -etcd3: ./etcd --name m3 --data-dir /tmp/m3.data --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://localhost:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://localhost:32380 --initial-cluster-token tkn --initial-cluster=m1=https://localhost:2380,m2=https://localhost:22380,m3=https://localhost:32380 --initial-cluster-state new --peer-cert-file=/certs-metrics-proxy/server.crt --peer-key-file=/certs-metrics-proxy/server.key.insecure --peer-trusted-ca-file=/certs-metrics-proxy/ca.crt --peer-client-cert-auth 
--cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --client-cert-auth --listen-metrics-urls=https://localhost:32378,http://localhost:39379 --logger=zap --log-outputs=stderr - -proxy: ./etcd grpc-proxy start --advertise-client-url=localhost:23790 --listen-addr=localhost:23790 --endpoints=https://localhost:2379,https://localhost:22379,https://localhost:32379 --data-dir=/tmp/proxy.data --cacert=/certs-metrics-proxy/ca.crt --cert=/certs-metrics-proxy/server.crt --key=/certs-metrics-proxy/server.key.insecure --trusted-ca-file=/certs-metrics-proxy/ca.crt --cert-file=/certs-metrics-proxy/server.crt --key-file=/certs-metrics-proxy/server.key.insecure --metrics-addr=http://localhost:9378 diff --git a/tests/manual/docker-static-ip/certs-metrics-proxy/ca-csr.json b/tests/manual/docker-static-ip/certs-metrics-proxy/ca-csr.json deleted file mode 100644 index ecafabaadd3..00000000000 --- a/tests/manual/docker-static-ip/certs-metrics-proxy/ca-csr.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "O": "etcd", - "OU": "etcd Security", - "L": "San Francisco", - "ST": "California", - "C": "USA" - } - ], - "CN": "ca", - "ca": { - "expiry": "87600h" - } -} diff --git a/tests/manual/docker-static-ip/certs-metrics-proxy/ca.crt b/tests/manual/docker-static-ip/certs-metrics-proxy/ca.crt deleted file mode 100644 index 0d8dc386b38..00000000000 --- a/tests/manual/docker-static-ip/certs-metrics-proxy/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIUYWIIesEznr7VfYawvmttxxmOfeUwDQYJKoZIhvcNAQEL -BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl -Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzEyMDYyMTUzMDBaFw0yNzEyMDQyMTUz -MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE 
-BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT -ZWN1cml0eTELMAkGA1UEAxMCY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDDN/cW7rl/qz59gF3csnDhp5BAxVY7n0+inzZO+MZIdkCFuus6Klc6mWMY -/ZGvpWxVDgQvYBs310eq4BrM2BjwWNfgqIn6bHVwwGfngojcDEHlZHw1e9sdBlO5 -e/rNONpNtMUjUeukhzFwPOdsUfweAGsqj4VYJV+kkS3uGmCGIj+3wIF411FliiQP -WiyLG16BwR1Vem2qOotCRgCawKSb4/wKfF8dvv00IjP5Jcy+aXLQ4ULW1fvj3cRR -JLdZmZ/PF0Cqm75qw2IqzIhRB5b1e8HyRPeNtEZ7frNLZyFhLgHJbRFF5WooFX79 -q9py8dERBXOxCKrSdqEOre0OU/4pAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwIBBjAS -BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBS+CaA8UIkIJT9xhXff4p143UuW -7TANBgkqhkiG9w0BAQsFAAOCAQEAK7lScAUi+R68oxxmgZ/pdEr9wsMj4xtss+GO -UDgzxudpT1nYQ2iBudC3LIuTiaUHUSseVleXEKeNbKhKhVhlIwhmPxiOgbbFu9hr -e2Z87SjtdlbE/KcYFw0W/ukWYxYrq08BB19w2Mqd8J5CnLcj4/0iiH1uARo1swFy -GUYAJ2I147sHIDbbmLKuxbdf4dcrkf3D4inBOLcRhS/MzaXfdMFntzJDQAo5YwFI -zZ4TRGOhj8IcU1Cn5SVufryWy3qJ+sKHDYsGQQ/ArBXwQnO3NAFCpEN9rDDuQVmH -+ATHDFBQZcGfN4GDh74FGnliRjip2sO4oWTfImmgJGGAn+P2CA== ------END CERTIFICATE----- diff --git a/tests/manual/docker-static-ip/certs-metrics-proxy/gencert.json b/tests/manual/docker-static-ip/certs-metrics-proxy/gencert.json deleted file mode 100644 index 09b67267bb2..00000000000 --- a/tests/manual/docker-static-ip/certs-metrics-proxy/gencert.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "signing": { - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "87600h" - } - } -} diff --git a/tests/manual/docker-static-ip/certs-metrics-proxy/gencerts.sh b/tests/manual/docker-static-ip/certs-metrics-proxy/gencerts.sh deleted file mode 100755 index af8663e09eb..00000000000 --- a/tests/manual/docker-static-ip/certs-metrics-proxy/gencerts.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -if ! [[ "$0" =~ "./gencerts.sh" ]]; then - echo "must be run from 'fixtures'" - exit 255 -fi - -if ! 
which cfssl; then - echo "cfssl is not installed" - exit 255 -fi - -cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca -mv ca.pem ca.crt -openssl x509 -in ca.crt -noout -text - -# generate wildcard certificates DNS: *.etcd.local -cfssl gencert \ - --ca ./ca.crt \ - --ca-key ./ca-key.pem \ - --config ./gencert.json \ - ./server-ca-csr.json | cfssljson --bare ./server -mv server.pem server.crt -mv server-key.pem server.key.insecure - -rm -f *.csr *.pem *.stderr *.txt diff --git a/tests/manual/docker-static-ip/certs-metrics-proxy/run.sh b/tests/manual/docker-static-ip/certs-metrics-proxy/run.sh deleted file mode 100755 index 9b171f19028..00000000000 --- a/tests/manual/docker-static-ip/certs-metrics-proxy/run.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/sh -rm -rf /tmp/m1.data /tmp/m2.data /tmp/m3.data /tmp/proxy.data - -goreman -f /certs-metrics-proxy/Procfile start & - -# TODO: remove random sleeps -sleep 7s - -./etcdctl \ - --cacert=/certs-metrics-proxy/ca.crt \ - --cert=/certs-metrics-proxy/server.crt \ - --key=/certs-metrics-proxy/server.key.insecure \ - --endpoints=https://localhost:2379 \ - endpoint health --cluster - -./etcdctl \ - --cacert=/certs-metrics-proxy/ca.crt \ - --cert=/certs-metrics-proxy/server.crt \ - --key=/certs-metrics-proxy/server.key.insecure \ - --endpoints=https://localhost:2379,https://localhost:22379,https://localhost:32379 \ - put abc def - -./etcdctl \ - --cacert=/certs-metrics-proxy/ca.crt \ - --cert=/certs-metrics-proxy/server.crt \ - --key=/certs-metrics-proxy/server.key.insecure \ - --endpoints=https://localhost:2379,https://localhost:22379,https://localhost:32379 \ - get abc - -################# -sleep 3s && printf "\n\n" && echo "curl https://localhost:2378/metrics" -curl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - -L https://localhost:2378/metrics | grep Put | tail -3 - -sleep 3s && printf "\n" && echo "curl 
https://localhost:2379/metrics" -curl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - -L https://localhost:2379/metrics | grep Put | tail -3 - -sleep 3s && printf "\n" && echo "curl http://localhost:9379/metrics" -curl -L http://localhost:9379/metrics | grep Put | tail -3 -################# - -################# -sleep 3s && printf "\n\n" && echo "curl https://localhost:22378/metrics" -curl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - -L https://localhost:22378/metrics | grep Put | tail -3 - -sleep 3s && printf "\n" && echo "curl https://localhost:22379/metrics" -curl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - -L https://localhost:22379/metrics | grep Put | tail -3 - -sleep 3s && printf "\n" && echo "curl http://localhost:29379/metrics" -curl -L http://localhost:29379/metrics | grep Put | tail -3 -################# - -################# -sleep 3s && printf "\n\n" && echo "curl https://localhost:32378/metrics" -curl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - -L https://localhost:32378/metrics | grep Put | tail -3 - -sleep 3s && printf "\n" && echo "curl https://localhost:32379/metrics" -curl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - -L https://localhost:32379/metrics | grep Put | tail -3 - -sleep 3s && printf "\n" && echo "curl http://localhost:39379/metrics" -curl -L http://localhost:39379/metrics | grep Put | tail -3 -################# - -################# -sleep 3s && printf "\n\n" && echo "Requests to gRPC proxy localhost:23790" -./etcdctl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert 
/certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - --endpoints=localhost:23790 \ - put ghi jkl - -./etcdctl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - --endpoints=localhost:23790 \ - get ghi - -sleep 3s && printf "\n" && echo "Requests to gRPC proxy https://localhost:23790/metrics" -curl \ - --cacert /certs-metrics-proxy/ca.crt \ - --cert /certs-metrics-proxy/server.crt \ - --key /certs-metrics-proxy/server.key.insecure \ - -L https://localhost:23790/metrics | grep Put | tail -3 - -sleep 3s && printf "\n" && echo "Requests to gRPC proxy http://localhost:9378/metrics" -curl -L http://localhost:9378/metrics | grep Put | tail -3 -< 0 { - watchPutLimit = rate.Limit(watchPutRate) - } - - limit := rate.NewLimiter(watchPutLimit, 1) + limit := rate.NewLimiter(rate.Limit(watchPutRate), 1) for _, cc := range clients { go func(c *clientv3.Client) { for op := range putreqc { @@ -233,7 +230,6 @@ func benchPutWatches(clients []*clientv3.Client, wk *watchedKeys) { bar.Finish() close(r.Results()) fmt.Printf("Watch events received summary:\n%s", <-rc) - } func recvWatchChan(wch clientv3.WatchChan, results chan<- report.Result, nrRxed *int32) { diff --git a/tools/benchmark/cmd/watch_get.go b/tools/benchmark/cmd/watch_get.go index c70091b9c63..f4ec4d6573b 100644 --- a/tools/benchmark/cmd/watch_get.go +++ b/tools/benchmark/cmd/watch_get.go @@ -20,11 +20,11 @@ import ( "sync" "time" - v3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/report" + v3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/report" - "github.com/cheggaaa/pb/v3" "github.com/spf13/cobra" + "gopkg.in/cheggaaa/pb.v1" ) // watchGetCmd represents the watch command @@ -72,6 +72,7 @@ func watchGetFunc(cmd *cobra.Command, args []string) { } bar = pb.New(watchGetTotalWatchers * watchEvents) + bar.Format("Bom !") bar.Start() // report from trying to do 
serialized gets with concurrent watchers diff --git a/tools/benchmark/cmd/watch_latency.go b/tools/benchmark/cmd/watch_latency.go index 31ceb3b327e..54f49e7e84b 100644 --- a/tools/benchmark/cmd/watch_latency.go +++ b/tools/benchmark/cmd/watch_latency.go @@ -21,12 +21,12 @@ import ( "sync" "time" - "github.com/cheggaaa/pb/v3" + "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/pkg/report" + "github.com/spf13/cobra" "golang.org/x/time/rate" - - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/pkg/v3/report" + "gopkg.in/cheggaaa/pb.v1" ) // watchLatencyCmd represents the watch latency command @@ -67,6 +67,7 @@ func watchLatencyFunc(cmd *cobra.Command, args []string) { } bar = pb.New(watchLTotal) + bar.Format("Bom !") bar.Start() limiter := rate.NewLimiter(rate.Limit(watchLPutRate), watchLPutRate) diff --git a/tools/benchmark/doc.go b/tools/benchmark/doc.go deleted file mode 100644 index 0e4dc629450..00000000000 --- a/tools/benchmark/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// benchmark is a program for benchmarking etcd v3 API performance. 
-package main diff --git a/tools/benchmark/main.go b/tools/benchmark/main.go index 97c03141fa7..ab69117a535 100644 --- a/tools/benchmark/main.go +++ b/tools/benchmark/main.go @@ -18,7 +18,7 @@ import ( "fmt" "os" - "go.etcd.io/etcd/v3/tools/benchmark/cmd" + "github.com/ls-2018/etcd_cn/tools/benchmark/cmd" ) func main() { diff --git a/tools/benchmark/read.md b/tools/benchmark/read.md new file mode 100644 index 00000000000..089a9ee1539 --- /dev/null +++ b/tools/benchmark/read.md @@ -0,0 +1,24 @@ +串行化的读请求 +线性读请求 + + + +#线性读请求 +export IP_1=127.0.0.1:2379 +benchmark --endpoints=${IP_1} --conns=1 --clients=1 range KEY --total=10000 +benchmark --endpoints=${IP_1},${IP_2},${IP_3} --conns=1 --clients=1 range KEY --total=10000 +benchmark --endpoints=${IP_1},${IP_2},${IP_3} --conns=1 --clients=100 range KEY --total=10000 + +#对每个etcd节点发起可串行化读请求,并对各个节点的数据求和 +for endpoint in ${IP_1} ${IP_2} ${IP_3};do + benchmark --endpoints=$endpoint conns=1 --clients=1 range KEY --total=10000 +done +for endpoint in ${IP_1} ${IP_2} ${IP_3};do + benchmark --endpoints=$endpoint conns=100 --clients=100 range KEY --total=10000 +done + +benchmark --endpoints=${IP_1} --conns=1 --clients=1 put --key-size=8 --sequential-keys --total=10000 --val-size=256 +benchmark --endpoints=${IP_1} --conns=100 --clients=100 put --key-size=8 --sequential-keys --total=10000 --val-size=256 +benchmark --endpoints=${IP_1},${IP_2},${IP_3} --conns=100 --clients=100 put --key-size=8 --sequential-keys --total=10000 --val-size=256 + + diff --git a/tools/etcd-dump-db/README.md b/tools/etcd-dump-db/README.md index 2def87ba4ee..31583490781 100644 --- a/tools/etcd-dump-db/README.md +++ b/tools/etcd-dump-db/README.md @@ -1,36 +1,6 @@ -# etcd-dump-db +### etcd-dump-db -`etcd-dump-db` inspects etcd db files. - -## Installation - -Install the tool by running the following command from the etcd source directory. - -``` - $ go install -v ./tools/etcd-dump-db -``` - -The installation will place executables in the $GOPATH/bin. 
If $GOPATH environment variable is not set, the tool will be installed into the $HOME/go/bin. You can also find out the installed location by running the following command from the etcd source directory. Make sure that $PATH is set accordingly in your environment. - -``` - $ go list -f "{{.Target}}" ./tools/etcd-dump-db -``` - -Alternatively, instead of installing the tool, you can use it by simply running the following command from the etcd source directory. - -``` - $ go run ./tools/etcd-dump-db -``` - -## Usage - -The following command should output the usage per the latest development. - -``` - $ etcd-dump-db --help -``` - -An example of usage detail is provided below. +etcd-dump-db inspects etcd db files. ``` Usage: @@ -47,7 +17,6 @@ Flags: Use "etcd-dump-db [command] --help" for more information about a command. ``` - #### list-bucket [data dir or db file path] Lists all buckets. @@ -67,7 +36,6 @@ members_removed meta ``` - #### hash [data dir or db file path] Computes the hash of db file. @@ -90,7 +58,6 @@ db path: agent03/agent.etcd/member/snap/db Hash: 3700260467 ``` - #### iterate-bucket [data dir or db file path] Lists key-value pairs in reverse order. 
diff --git a/tools/etcd-dump-db/backend.go b/tools/etcd-dump-db/backend.go index 50619d95da7..d87b293f7bd 100644 --- a/tools/etcd-dump-db/backend.go +++ b/tools/etcd-dump-db/backend.go @@ -19,14 +19,14 @@ import ( "fmt" "path/filepath" - "go.uber.org/zap" + "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets" + "github.com/ls-2018/etcd_cn/offical/api/v3/authpb" + + "github.com/ls-2018/etcd_cn/etcd/lease/leasepb" + "github.com/ls-2018/etcd_cn/etcd/mvcc/backend" + "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb" bolt "go.etcd.io/bbolt" - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/server/v3/lease/leasepb" - "go.etcd.io/etcd/server/v3/storage/backend" - "go.etcd.io/etcd/server/v3/storage/schema" ) func snapDir(dataDir string) string { @@ -34,7 +34,7 @@ func snapDir(dataDir string) string { } func getBuckets(dbPath string) (buckets []string, err error) { - db, derr := bolt.Open(dbPath, 0600, &bolt.Options{Timeout: flockTimeout}) + db, derr := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: flockTimeout}) if derr != nil { return nil, fmt.Errorf("failed to open bolt DB %v", derr) } @@ -53,19 +53,17 @@ func getBuckets(dbPath string) (buckets []string, err error) { type decoder func(k, v []byte) -// key is the bucket name, and value is the function to decode K/V in the bucket. 
var decoders = map[string]decoder{ "key": keyDecoder, "lease": leaseDecoder, "auth": authDecoder, "authRoles": authRolesDecoder, "authUsers": authUsersDecoder, - "meta": metaDecoder, } type revision struct { - main int64 - sub int64 + main int64 // 一个全局递增的主版本号,随put/txn/delete事务递增,一个事务内的key main版本号是一致的 + sub int64 // 一个事务内的子版本号,从0开始随事务内put/delete操作递增 } func bytesToRev(bytes []byte) revision { @@ -75,10 +73,6 @@ func bytesToRev(bytes []byte) revision { } } -func defaultDecoder(k, v []byte) { - fmt.Printf("key=%q, value=%q\n", k, v) -} - func keyDecoder(k, v []byte) { rev := bytesToRev(k) var kv mvccpb.KeyValue @@ -90,7 +84,7 @@ func keyDecoder(k, v []byte) { func bytesToLeaseID(bytes []byte) int64 { if len(bytes) != 8 { - panic(fmt.Errorf("lease ID must be 8-byte")) + panic(fmt.Errorf("lease ID必须是8-byte")) } return int64(binary.BigEndian.Uint64(bytes)) } @@ -101,7 +95,7 @@ func leaseDecoder(k, v []byte) { if err := lpb.Unmarshal(v); err != nil { panic(err) } - fmt.Printf("lease ID=%016x, TTL=%ds, remaining TTL=%ds\n", leaseID, lpb.TTL, lpb.RemainingTTL) + fmt.Printf("lease ID=%016x, TTL=%ds\n", leaseID, lpb.TTL) } func authDecoder(k, v []byte) { @@ -128,22 +122,11 @@ func authUsersDecoder(k, v []byte) { if err != nil { panic(err) } - fmt.Printf("user=%q, roles=%q, option=%v\n", user.Name, user.Roles, user.Options) -} - -func metaDecoder(k, v []byte) { - if string(k) == string(schema.MetaConsistentIndexKeyName) || string(k) == string(schema.MetaTermKeyName) { - fmt.Printf("key=%q, value=%v\n", k, binary.BigEndian.Uint64(v)) - } else if string(k) == string(schema.ScheduledCompactKeyName) || string(k) == string(schema.FinishedCompactKeyName) { - rev := bytesToRev(v) - fmt.Printf("key=%q, value=%v\n", k, rev) - } else { - defaultDecoder(k, v) - } + fmt.Printf("user=%q, roles=%q, password=%q, option=%v\n", user.Name, user.Roles, string(user.Password), user.Options) } func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error) { - db, err := 
bolt.Open(dbPath, 0600, &bolt.Options{Timeout: flockTimeout}) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: flockTimeout}) if err != nil { return fmt.Errorf("failed to open bolt DB %v", err) } @@ -164,7 +147,7 @@ func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error) if dec, ok := decoders[bucket]; decode && ok { dec(k, v) } else { - defaultDecoder(k, v) + fmt.Printf("key=%q, value=%q\n", k, v) } limit-- @@ -179,8 +162,8 @@ func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error) } func getHash(dbPath string) (hash uint32, err error) { - b := backend.NewDefaultBackend(zap.NewNop(), dbPath) - return b.Hash(schema.DefaultIgnores) + b := backend.NewDefaultBackend(dbPath) + return b.Hash(buckets.DefaultIgnores) } // TODO: revert by revision and find specified hash value diff --git a/tools/etcd-dump-db/main.go b/tools/etcd-dump-db/main.go index f82d91f7689..a56120c9d9d 100644 --- a/tools/etcd-dump-db/main.go +++ b/tools/etcd-dump-db/main.go @@ -47,9 +47,11 @@ var ( } ) -var flockTimeout time.Duration -var iterateBucketLimit uint64 -var iterateBucketDecode bool +var ( + flockTimeout time.Duration + iterateBucketLimit uint64 + iterateBucketDecode bool +) func init() { rootCommand.PersistentFlags().DurationVar(&flockTimeout, "timeout", 10*time.Second, "time to wait to obtain a file lock on db file, 0 to block indefinitely") diff --git a/tools/etcd-dump-logs/README.md b/tools/etcd-dump-logs/README.md index d92ba1b5e3e..d917039fa23 100644 --- a/tools/etcd-dump-logs/README.md +++ b/tools/etcd-dump-logs/README.md @@ -1,36 +1,6 @@ -# etcd-dump-logs +### etcd-dump-logs -`etcd-dump-logs` dumps the log from data directory. - -## Installation - -Install the tool by running the following command from the etcd source directory. - -``` - $ go install -v ./tools/etcd-dump-logs -``` - -The installation will place executables in the $GOPATH/bin. 
If $GOPATH environment variable is not set, the tool will be installed into the $HOME/go/bin. You can also find out the installed location by running the following command from the etcd source directory. Make sure that $PATH is set accordingly in your environment. - -``` - $ go list -f "{{.Target}}" ./tools/etcd-dump-logs -``` - -Alternatively, instead of installing the tool, you can use it by simply running the following command from the etcd source directory. - -``` - $ go run ./tools/etcd-dump-logs -``` - -## Usage - -The following command should output the usage per the latest development. - -``` - $ etcd-dump-logs --help -``` - -An example of usage detail is provided below. +etcd-dump-logs dumps the log from data directory. ``` Usage: @@ -43,11 +13,9 @@ Usage: - data_dir/member/wal/0000000000000000-0000000000000000.wal Flags: - -wal-dir string - If set, dumps WAL from the informed path, rather than following the - standard 'data_dir/member/wal/' location + -entry-type string - If set, filters output by entry type. Must be one or more than one of: + If set, filters output by entry type.必须是one or more than one of: ConfigChange, Normal, Request, InternalRaftRequest, IRRRange, IRRPut, IRRDeleteRange, IRRTxn, IRRCompaction, IRRLeaseGrant, IRRLeaseRevoke @@ -60,6 +28,7 @@ Flags: must process hex encoded lines of binary input (from etcd-dump-logs) and output a hex encoded line of binary for each input line ``` + #### etcd-dump-logs -entry-type [data dir] Filter entries by type from WAL log. @@ -95,12 +64,15 @@ term index type data Entry types (ConfigChange,IRRCompaction) count is : 5 ``` -#### etcd-dump-logs -stream-decoder [data dir] -Decode each entry based on logic in the passed decoder. Decoder status and decoded data are listed in separated tab/columns in the ouput. For parsing purpose, the output from decoder are expected to be in format of "|". Please refer to [decoder_correctoutputformat.sh] as an example. 
+#### etcd-dump-logs -stream-decoder [data dir] -However, if the decoder output format is not as expected, "decoder_status" will be "decoder output format is not right, print output anyway", and all output from decoder will be considered as "decoded_data" +Decode each entry based on logic in the passed decoder. Decoder status and decoded data are listed in separated +tab/columns in the ouput. For parsing purpose, the output from decoder are expected to backend in format of " +|". Please refer to [decoder_correctoutputformat.sh] as an example. +However, if the decoder output format is not as expected, "decoder_status" will backend "decoder output format is not right, +print output anyway", and all output from decoder will backend considered as "decoded_data" ``` $ etcd-dump-logs -stream-decoder decoder_correctoutputformat.sh /tmp/datadir @@ -144,7 +116,8 @@ term index type data decoder_status decoded_data Entry types () count is : 8 ``` -#### etcd-dump-logs -start-index [data dir] + +#### etcd-dump-logs -start-index [data dir] Only shows WAL log entries after the specified start-index number, exclusively. @@ -162,4 +135,5 @@ term index type data 27 34 norm ??? Entry types () count is : 4 ``` + [decoder_correctoutputformat.sh]: ./testdecoder/decoder_correctoutputformat.sh diff --git a/tools/etcd-dump-logs/doc.go b/tools/etcd-dump-logs/doc.go index 234ee92fa6f..87ca8db708d 100644 --- a/tools/etcd-dump-logs/doc.go +++ b/tools/etcd-dump-logs/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// etcd-dump-logs is a program for analyzing etcd server write ahead logs. +// etcd-dump-logs is a program for analyzing etcd etcd write ahead logs. 
package main diff --git a/tools/etcd-dump-logs/etcd-dump-log_test.go b/tools/etcd-dump-logs/etcd-dump-log_test.go deleted file mode 100644 index ae79d3fea94..00000000000 --- a/tools/etcd-dump-logs/etcd-dump-log_test.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "go.uber.org/zap/zaptest" - - "go.etcd.io/etcd/api/v3/authpb" - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/raft/v3/raftpb" -) - -func TestEtcdDumpLogEntryType(t *testing.T) { - // directory where the command is - binDir, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - // TODO(ptabor): The test does not run by default from ./scripts/test.sh. 
- dumpLogsBinary := path.Join(binDir + "/etcd-dump-logs") - if !fileutil.Exist(dumpLogsBinary) { - t.Skipf("%q does not exist", dumpLogsBinary) - } - - decoderCorrectOutputFormat := filepath.Join(binDir, "/testdecoder/decoder_correctoutputformat.sh") - decoderWrongOutputFormat := filepath.Join(binDir, "/testdecoder/decoder_wrongoutputformat.sh") - - p := t.TempDir() - - mustCreateWalLog(t, p) - - argtests := []struct { - name string - args []string - fileExpected string - }{ - {"no entry-type", []string{p}, "expectedoutput/listAll.output"}, - {"confchange entry-type", []string{"-entry-type", "ConfigChange", p}, "expectedoutput/listConfigChange.output"}, - {"normal entry-type", []string{"-entry-type", "Normal", p}, "expectedoutput/listNormal.output"}, - {"request entry-type", []string{"-entry-type", "Request", p}, "expectedoutput/listRequest.output"}, - {"internalRaftRequest entry-type", []string{"-entry-type", "InternalRaftRequest", p}, "expectedoutput/listInternalRaftRequest.output"}, - {"range entry-type", []string{"-entry-type", "IRRRange", p}, "expectedoutput/listIRRRange.output"}, - {"put entry-type", []string{"-entry-type", "IRRPut", p}, "expectedoutput/listIRRPut.output"}, - {"del entry-type", []string{"-entry-type", "IRRDeleteRange", p}, "expectedoutput/listIRRDeleteRange.output"}, - {"txn entry-type", []string{"-entry-type", "IRRTxn", p}, "expectedoutput/listIRRTxn.output"}, - {"compaction entry-type", []string{"-entry-type", "IRRCompaction", p}, "expectedoutput/listIRRCompaction.output"}, - {"lease grant entry-type", []string{"-entry-type", "IRRLeaseGrant", p}, "expectedoutput/listIRRLeaseGrant.output"}, - {"lease revoke entry-type", []string{"-entry-type", "IRRLeaseRevoke", p}, "expectedoutput/listIRRLeaseRevoke.output"}, - {"confchange and txn entry-type", []string{"-entry-type", "ConfigChange,IRRCompaction", p}, "expectedoutput/listConfigChangeIRRCompaction.output"}, - {"decoder_correctoutputformat", []string{"-stream-decoder", 
decoderCorrectOutputFormat, p}, "expectedoutput/decoder_correctoutputformat.output"}, - {"decoder_wrongoutputformat", []string{"-stream-decoder", decoderWrongOutputFormat, p}, "expectedoutput/decoder_wrongoutputformat.output"}, - } - - for _, argtest := range argtests { - t.Run(argtest.name, func(t *testing.T) { - cmd := exec.Command(dumpLogsBinary, argtest.args...) - actual, err := cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - expected, err := os.ReadFile(path.Join(binDir, argtest.fileExpected)) - if err != nil { - t.Fatal(err) - } - - assert.EqualValues(t, string(expected), string(actual)) - // The output files contains a lot of trailing whitespaces... difficult to diagnose without printing them explicitly. - // TODO(ptabor): Get rid of the whitespaces both in code and the test-files. - assert.EqualValues(t, strings.ReplaceAll(string(expected), " ", "_"), strings.ReplaceAll(string(actual), " ", "_")) - }) - } - -} - -func mustCreateWalLog(t *testing.T, path string) { - memberdir := filepath.Join(path, "member") - err := os.Mkdir(memberdir, 0744) - if err != nil { - t.Fatal(err) - } - waldir := walDir(path) - snapdir := snapDir(path) - - w, err := wal.Create(zaptest.NewLogger(t), waldir, nil) - if err != nil { - t.Fatal(err) - } - - err = os.Mkdir(snapdir, 0744) - if err != nil { - t.Fatal(err) - } - - ents := make([]raftpb.Entry, 0) - - // append entries into wal log - appendConfigChangeEnts(&ents) - appendNormalRequestEnts(&ents) - appendNormalIRREnts(&ents) - appendUnknownNormalEnts(&ents) - - // force commit newly appended entries - err = w.Save(raftpb.HardState{}, ents) - if err != nil { - t.Fatal(err) - } - w.Close() -} - -func appendConfigChangeEnts(ents *[]raftpb.Entry) { - configChangeData := []raftpb.ConfChange{ - {ID: 1, Type: raftpb.ConfChangeAddNode, NodeID: 2, Context: []byte("")}, - {ID: 2, Type: raftpb.ConfChangeRemoveNode, NodeID: 2, Context: []byte("")}, - {ID: 3, Type: raftpb.ConfChangeUpdateNode, NodeID: 2, Context: []byte("")}, - 
{ID: 4, Type: raftpb.ConfChangeAddLearnerNode, NodeID: 3, Context: []byte("")}, - } - configChangeEntries := []raftpb.Entry{ - {Term: 1, Index: 1, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(&configChangeData[0])}, - {Term: 2, Index: 2, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(&configChangeData[1])}, - {Term: 2, Index: 3, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(&configChangeData[2])}, - {Term: 2, Index: 4, Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(&configChangeData[3])}, - } - *ents = append(*ents, configChangeEntries...) -} - -func appendNormalRequestEnts(ents *[]raftpb.Entry) { - a := true - b := false - - requests := []etcdserverpb.Request{ - {ID: 0, Method: "", Path: "/path0", Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: true, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 9, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b}, - {ID: 1, Method: "QGET", Path: "/path1", Val: "{\"0\":\"1\",\"2\":[\"3\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 9, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b}, - {ID: 2, Method: "SYNC", Path: "/path2", Val: "{\"0\":\"1\",\"2\":[\"3\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b}, - {ID: 3, Method: "DELETE", Path: "/path3", Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &a, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, Quorum: false, Time: 1, Stream: false, Refresh: &b}, - {ID: 4, Method: "RANDOM", Path: "/path4/superlong" + strings.Repeat("/path", 30), Val: "{\"hey\":\"ho\",\"hi\":[\"yo\"]}", Dir: false, PrevValue: "", PrevIndex: 0, PrevExist: &b, Expiration: 2, Wait: false, Since: 1, Recursive: false, Sorted: false, 
Quorum: false, Time: 1, Stream: false, Refresh: &b}, - } - - for i, request := range requests { - var currentry raftpb.Entry - currentry.Term = 3 - currentry.Index = uint64(i + 5) - currentry.Type = raftpb.EntryNormal - currentry.Data = pbutil.MustMarshal(&request) - *ents = append(*ents, currentry) - } -} - -func appendNormalIRREnts(ents *[]raftpb.Entry) { - irrrange := &etcdserverpb.RangeRequest{Key: []byte("1"), RangeEnd: []byte("hi"), Limit: 6, Revision: 1, SortOrder: 1, SortTarget: 0, Serializable: false, KeysOnly: false, CountOnly: false, MinModRevision: 0, MaxModRevision: 20000, MinCreateRevision: 0, MaxCreateRevision: 20000} - - irrput := &etcdserverpb.PutRequest{Key: []byte("foo1"), Value: []byte("bar1"), Lease: 1, PrevKv: false, IgnoreValue: false, IgnoreLease: true} - - irrdeleterange := &etcdserverpb.DeleteRangeRequest{Key: []byte("0"), RangeEnd: []byte("9"), PrevKv: true} - - delInRangeReq := &etcdserverpb.RequestOp{Request: &etcdserverpb.RequestOp_RequestDeleteRange{ - RequestDeleteRange: &etcdserverpb.DeleteRangeRequest{ - Key: []byte("a"), RangeEnd: []byte("b"), - }, - }, - } - - irrtxn := &etcdserverpb.TxnRequest{Success: []*etcdserverpb.RequestOp{delInRangeReq}, Failure: []*etcdserverpb.RequestOp{delInRangeReq}} - - irrcompaction := &etcdserverpb.CompactionRequest{Revision: 0, Physical: true} - - irrleasegrant := &etcdserverpb.LeaseGrantRequest{TTL: 1, ID: 1} - - irrleaserevoke := &etcdserverpb.LeaseRevokeRequest{ID: 2} - - irralarm := &etcdserverpb.AlarmRequest{Action: 3, MemberID: 4, Alarm: 5} - - irrauthenable := &etcdserverpb.AuthEnableRequest{} - - irrauthdisable := &etcdserverpb.AuthDisableRequest{} - - irrauthenticate := &etcdserverpb.InternalAuthenticateRequest{Name: "myname", Password: "password", SimpleToken: "token"} - - irrauthuseradd := &etcdserverpb.AuthUserAddRequest{Name: "name1", Password: "pass1", Options: &authpb.UserAddOptions{NoPassword: false}} - - irrauthuserdelete := &etcdserverpb.AuthUserDeleteRequest{Name: "name1"} - - 
irrauthuserget := &etcdserverpb.AuthUserGetRequest{Name: "name1"} - - irrauthuserchangepassword := &etcdserverpb.AuthUserChangePasswordRequest{Name: "name1", Password: "pass2"} - - irrauthusergrantrole := &etcdserverpb.AuthUserGrantRoleRequest{User: "user1", Role: "role1"} - - irrauthuserrevokerole := &etcdserverpb.AuthUserRevokeRoleRequest{Name: "user2", Role: "role2"} - - irrauthuserlist := &etcdserverpb.AuthUserListRequest{} - - irrauthrolelist := &etcdserverpb.AuthRoleListRequest{} - - irrauthroleadd := &etcdserverpb.AuthRoleAddRequest{Name: "role2"} - - irrauthroledelete := &etcdserverpb.AuthRoleDeleteRequest{Role: "role1"} - - irrauthroleget := &etcdserverpb.AuthRoleGetRequest{Role: "role3"} - - perm := &authpb.Permission{ - PermType: authpb.WRITE, - Key: []byte("Keys"), - RangeEnd: []byte("RangeEnd"), - } - - irrauthrolegrantpermission := &etcdserverpb.AuthRoleGrantPermissionRequest{Name: "role3", Perm: perm} - - irrauthrolerevokepermission := &etcdserverpb.AuthRoleRevokePermissionRequest{Role: "role3", Key: []byte("key"), RangeEnd: []byte("rangeend")} - - irrs := []etcdserverpb.InternalRaftRequest{ - {ID: 5, Range: irrrange}, - {ID: 6, Put: irrput}, - {ID: 7, DeleteRange: irrdeleterange}, - {ID: 8, Txn: irrtxn}, - {ID: 9, Compaction: irrcompaction}, - {ID: 10, LeaseGrant: irrleasegrant}, - {ID: 11, LeaseRevoke: irrleaserevoke}, - {ID: 12, Alarm: irralarm}, - {ID: 13, AuthEnable: irrauthenable}, - {ID: 14, AuthDisable: irrauthdisable}, - {ID: 15, Authenticate: irrauthenticate}, - {ID: 16, AuthUserAdd: irrauthuseradd}, - {ID: 17, AuthUserDelete: irrauthuserdelete}, - {ID: 18, AuthUserGet: irrauthuserget}, - {ID: 19, AuthUserChangePassword: irrauthuserchangepassword}, - {ID: 20, AuthUserGrantRole: irrauthusergrantrole}, - {ID: 21, AuthUserRevokeRole: irrauthuserrevokerole}, - {ID: 22, AuthUserList: irrauthuserlist}, - {ID: 23, AuthRoleList: irrauthrolelist}, - {ID: 24, AuthRoleAdd: irrauthroleadd}, - {ID: 25, AuthRoleDelete: irrauthroledelete}, - {ID: 26, 
AuthRoleGet: irrauthroleget}, - {ID: 27, AuthRoleGrantPermission: irrauthrolegrantpermission}, - {ID: 28, AuthRoleRevokePermission: irrauthrolerevokepermission}, - } - - for i, irr := range irrs { - var currentry raftpb.Entry - currentry.Term = uint64(i + 4) - currentry.Index = uint64(i + 10) - currentry.Type = raftpb.EntryNormal - currentry.Data = pbutil.MustMarshal(&irr) - *ents = append(*ents, currentry) - } -} - -func appendUnknownNormalEnts(ents *[]raftpb.Entry) { - var currentry raftpb.Entry - currentry.Term = 27 - currentry.Index = 34 - currentry.Type = raftpb.EntryNormal - currentry.Data = []byte("?") - *ents = append(*ents, currentry) -} diff --git a/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output b/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output index a9fcd1a3ac6..1937054cad0 100644 --- a/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output +++ b/tools/etcd-dump-logs/expectedoutput/decoder_correctoutputformat.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. 
WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data decoder_status decoded_data 1 1 conf method=ConfChangeAddNode id=2 ERROR jhjaajjjahjbbbjj @@ -29,7 +29,7 @@ term index type data decoder_status decoded_data 15 21 norm ID:16 auth_user_add: > OK jhajebddajjajefefafdfecaabjegjfagcgccaaajj 16 22 norm ID:17 auth_user_delete: OK jhaaeaddjgjajefefafdfeca 17 23 norm ID:18 auth_user_get: OK jhabfbddjgjajefefafdfeca - 18 24 norm ID:19 auth_user_change_password:" > OK jhacfaddjejajefefafdfecaabjegjfagcgccb + 18 24 norm ID:19 auth_user_change_password: OK jhacfaddjejajefefafdfecaabjegjfagcgccb 19 25 norm ID:20 auth_user_grant_role: OK jhadhbdejejajegegcfegbcaabjegbfffcfeca 20 26 norm ID:21 auth_user_revoke_role: OK jhaehadejejajegegcfegbcbabjegbfffcfecb 21 27 norm ID:22 auth_user_list:<> ERROR jhafibdejj diff --git a/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output b/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output index 65e7dd3a4f9..90341eb62c1 100644 --- a/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output +++ b/tools/etcd-dump-logs/expectedoutput/decoder_wrongoutputformat.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. 
WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data decoder_status decoded_data 1 1 conf method=ConfChangeAddNode id=2 decoder output format is not right, print output anyway jhjaajjjahjbbbjj @@ -29,7 +29,7 @@ term index type data decoder_status decoded_data 15 21 norm ID:16 auth_user_add: > decoder output format is not right, print output anyway jhajebddajjajefefafdfecaabjegjfagcgccaaajj 16 22 norm ID:17 auth_user_delete: decoder output format is not right, print output anyway jhaaeaddjgjajefefafdfeca 17 23 norm ID:18 auth_user_get: decoder output format is not right, print output anyway jhabfbddjgjajefefafdfeca - 18 24 norm ID:19 auth_user_change_password:" > decoder output format is not right, print output anyway jhacfaddjejajefefafdfecaabjegjfagcgccb + 18 24 norm ID:19 auth_user_change_password: decoder output format is not right, print output anyway jhacfaddjejajefefafdfecaabjegjfagcgccb 19 25 norm ID:20 auth_user_grant_role: decoder output format is not right, print output anyway jhadhbdejejajegegcfegbcaabjegbfffcfeca 20 26 norm ID:21 auth_user_revoke_role: decoder output format is not right, print output anyway jhaehadejejajegegcfegbcbabjegbfffcfecb 21 27 norm ID:22 auth_user_list:<> decoder output format is not right, print output anyway jhafibdejj diff --git a/tools/etcd-dump-logs/expectedoutput/listAll.output b/tools/etcd-dump-logs/expectedoutput/listAll.output index a1168ec20c2..3a6f41bb9c9 100644 --- a/tools/etcd-dump-logs/expectedoutput/listAll.output +++ b/tools/etcd-dump-logs/expectedoutput/listAll.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. 
WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 1 1 conf method=ConfChangeAddNode id=2 @@ -29,7 +29,7 @@ term index type data 15 21 norm ID:16 auth_user_add: > 16 22 norm ID:17 auth_user_delete: 17 23 norm ID:18 auth_user_get: - 18 24 norm ID:19 auth_user_change_password:" > + 18 24 norm ID:19 auth_user_change_password: 19 25 norm ID:20 auth_user_grant_role: 20 26 norm ID:21 auth_user_revoke_role: 21 27 norm ID:22 auth_user_list:<> diff --git a/tools/etcd-dump-logs/expectedoutput/listConfigChange.output b/tools/etcd-dump-logs/expectedoutput/listConfigChange.output index acfb23c1535..1b2dc9ec90f 100644 --- a/tools/etcd-dump-logs/expectedoutput/listConfigChange.output +++ b/tools/etcd-dump-logs/expectedoutput/listConfigChange.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 1 1 conf method=ConfChangeAddNode id=2 diff --git a/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output b/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output index 15b34dccd9b..c262c2e8aac 100644 --- a/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output +++ b/tools/etcd-dump-logs/expectedoutput/listConfigChangeIRRCompaction.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. 
WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 1 1 conf method=ConfChangeAddNode id=2 diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output b/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output index cc764466dbb..493545884a9 100644 --- a/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output +++ b/tools/etcd-dump-logs/expectedoutput/listIRRCompaction.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 8 14 norm ID:9 compaction: diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output b/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output index 67716775300..85e71d39602 100644 --- a/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output +++ b/tools/etcd-dump-logs/expectedoutput/listIRRDeleteRange.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 6 12 norm ID:7 delete_range: diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output index 6f9f8ecb9a3..5cba3a497a7 100644 --- a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output +++ b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseGrant.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. 
WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 9 15 norm ID:10 lease_grant: diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output index 33fafaf3417..d67d05d0232 100644 --- a/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output +++ b/tools/etcd-dump-logs/expectedoutput/listIRRLeaseRevoke.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 10 16 norm ID:11 lease_revoke: diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRPut.output b/tools/etcd-dump-logs/expectedoutput/listIRRPut.output index 672dae54c3c..397ae60c7b6 100644 --- a/tools/etcd-dump-logs/expectedoutput/listIRRPut.output +++ b/tools/etcd-dump-logs/expectedoutput/listIRRPut.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 5 11 norm ID:6 put: diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRRange.output b/tools/etcd-dump-logs/expectedoutput/listIRRRange.output index 832587c91b1..422e0374930 100644 --- a/tools/etcd-dump-logs/expectedoutput/listIRRRange.output +++ b/tools/etcd-dump-logs/expectedoutput/listIRRRange.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. 
WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 4 10 norm ID:5 range: diff --git a/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output b/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output index 9c5c1183319..92a7d0a26e7 100644 --- a/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output +++ b/tools/etcd-dump-logs/expectedoutput/listIRRTxn.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 7 13 norm ID:8 txn: > failure: > > diff --git a/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output b/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output index de2a0b41574..da1ccb9a6e3 100644 --- a/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output +++ b/tools/etcd-dump-logs/expectedoutput/listInternalRaftRequest.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 4 10 norm ID:5 range: @@ -20,7 +20,7 @@ term index type data 15 21 norm ID:16 auth_user_add: > 16 22 norm ID:17 auth_user_delete: 17 23 norm ID:18 auth_user_get: - 18 24 norm ID:19 auth_user_change_password:" > + 18 24 norm ID:19 auth_user_change_password: 19 25 norm ID:20 auth_user_grant_role: 20 26 norm ID:21 auth_user_revoke_role: 21 27 norm ID:22 auth_user_list:<> diff --git a/tools/etcd-dump-logs/expectedoutput/listNormal.output b/tools/etcd-dump-logs/expectedoutput/listNormal.output index 5a584c31209..37a1bb80cd6 100644 --- a/tools/etcd-dump-logs/expectedoutput/listNormal.output +++ b/tools/etcd-dump-logs/expectedoutput/listNormal.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. 
WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 3 5 norm noop @@ -25,7 +25,7 @@ term index type data 15 21 norm ID:16 auth_user_add: > 16 22 norm ID:17 auth_user_delete: 17 23 norm ID:18 auth_user_get: - 18 24 norm ID:19 auth_user_change_password:" > + 18 24 norm ID:19 auth_user_change_password: 19 25 norm ID:20 auth_user_grant_role: 20 26 norm ID:21 auth_user_revoke_role: 21 27 norm ID:22 auth_user_list:<> diff --git a/tools/etcd-dump-logs/expectedoutput/listRequest.output b/tools/etcd-dump-logs/expectedoutput/listRequest.output index 04defbdc0eb..5201ff9d366 100644 --- a/tools/etcd-dump-logs/expectedoutput/listRequest.output +++ b/tools/etcd-dump-logs/expectedoutput/listRequest.output @@ -3,7 +3,7 @@ empty Start dumping log entries from snapshot. WAL metadata: nodeID=0 clusterID=0 term=0 commitIndex=0 vote=0 -WAL entries: 34 +WAL entries: lastIndex=34 term index type data 3 5 norm noop diff --git a/tools/etcd-dump-logs/main.go b/tools/etcd-dump-logs/main.go index 6eb6dcf3610..2e012f351d7 100644 --- a/tools/etcd-dump-logs/main.go +++ b/tools/etcd-dump-logs/main.go @@ -29,15 +29,14 @@ import ( "strings" "time" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/types" + "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap" + "github.com/ls-2018/etcd_cn/etcd/wal" + "github.com/ls-2018/etcd_cn/etcd/wal/walpb" + "github.com/ls-2018/etcd_cn/offical/etcdserverpb" + "github.com/ls-2018/etcd_cn/pkg/pbutil" + "github.com/ls-2018/etcd_cn/raft/raftpb" "go.uber.org/zap" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" ) const ( @@ -46,20 +45,17 @@ const ( func main() { snapfile := flag.String("start-snap", "", "The base name of snapshot file to start dumping") - waldir := 
flag.String("wal-dir", "", "If set, dumps WAL from the informed path, rather than following the standard 'data_dir/member/wal/' location") index := flag.Uint64("start-index", 0, "The index to start dumping") // Default entry types are Normal and ConfigChange - entrytype := flag.String("entry-type", defaultEntryTypes, `If set, filters output by entry type. Must be one or more than one of: + entrytype := flag.String("entry-type", defaultEntryTypes, `If set, filters output by entry type.必须是one or more than one of: ConfigChange, Normal, Request, InternalRaftRequest, IRRRange, IRRPut, IRRDeleteRange, IRRTxn, IRRCompaction, IRRLeaseGrant, IRRLeaseRevoke, IRRLeaseCheckpoint`) streamdecoder := flag.String("stream-decoder", "", `The name of an executable decoding tool, the executable must process hex encoded lines of binary input (from etcd-dump-logs) and output a hex encoded line of binary for each input line`) - raw := flag.Bool("raw", false, "Read the logs in the low-level form") flag.Parse() - lg := zap.NewExample() if len(flag.Args()) != 1 { log.Fatalf("Must provide data-dir argument (got %+v)", flag.Args()) @@ -70,37 +66,6 @@ and output a hex encoded line of binary for each input line`) log.Fatal("start-snap and start-index flags cannot be used together.") } - if !*raw { - ents := readUsingReadAll(lg, index, snapfile, dataDir, waldir) - - fmt.Printf("WAL entries: %d\n", len(ents)) - if len(ents) > 0 { - fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index) - } - - fmt.Printf("%4s\t%10s\ttype\tdata", "term", "index") - if *streamdecoder != "" { - fmt.Print("\tdecoder_status\tdecoded_data") - } - fmt.Println() - - listEntriesType(*entrytype, *streamdecoder, ents) - } else { - if *snapfile != "" || - *entrytype != defaultEntryTypes || - *streamdecoder != "" { - log.Fatalf("Flags --entry-type, --stream-decoder, --entrytype not supported in the RAW mode.") - } - - wd := *waldir - if wd == "" { - wd = walDir(dataDir) - } - readRaw(index, wd, os.Stdout) - } -} - -func 
readUsingReadAll(lg *zap.Logger, index *uint64, snapfile *string, dataDir string, waldir *string) []raftpb.Entry { var ( walsnap walpb.Snapshot snapshot *raftpb.Snapshot @@ -114,36 +79,31 @@ func readUsingReadAll(lg *zap.Logger, index *uint64, snapfile *string, dataDir s walsnap.Index = *index } else { if *snapfile == "" { - ss := snap.New(lg, snapDir(dataDir)) + ss := snap.New(zap.NewExample(), snapDir(dataDir)) snapshot, err = ss.Load() } else { - snapshot, err = snap.Read(lg, filepath.Join(snapDir(dataDir), *snapfile)) + snapshot, err = snap.Read(zap.NewExample(), filepath.Join(snapDir(dataDir), *snapfile)) } switch err { case nil: walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term nodes := genIDSlice(snapshot.Metadata.ConfState.Voters) - confStateJSON, err := json.Marshal(snapshot.Metadata.ConfState) + confstateJson, err := json.Marshal(snapshot.Metadata.ConfState) if err != nil { - confStateJSON = []byte(fmt.Sprintf("confstate err: %v", err)) + confstateJson = []byte(fmt.Sprintf("confstate err: %v", err)) } fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s confstate=%s\n", - walsnap.Term, walsnap.Index, nodes, confStateJSON) + walsnap.Term, walsnap.Index, nodes, confstateJson) case snap.ErrNoSnapshot: - fmt.Print("Snapshot:\nempty\n") + fmt.Printf("Snapshot:\nempty\n") default: log.Fatalf("Failed loading snapshot: %v", err) } fmt.Println("Start dumping log entries from snapshot.") } - wd := *waldir - if wd == "" { - wd = walDir(dataDir) - } - - w, err := wal.OpenForRead(zap.NewExample(), wd, walsnap) + w, err := wal.OpenForRead(zap.NewExample(), walDir(dataDir), walsnap) if err != nil { log.Fatalf("Failed opening WAL: %v", err) } @@ -156,7 +116,17 @@ func readUsingReadAll(lg *zap.Logger, index *uint64, snapfile *string, dataDir s vid := types.ID(state.Vote) fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n", id, cid, state.Term, state.Commit, vid) - return ents + + fmt.Printf("WAL entries:\n") + 
fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index) + + fmt.Printf("%4s\t%10s\ttype\tdata", "term", "index") + if *streamdecoder != "" { + fmt.Printf("\tdecoder_status\tdecoded_data") + } + fmt.Println() + + listEntriesType(*entrytype, *streamdecoder, ents) } func walDir(dataDir string) string { return filepath.Join(dataDir, "member", "wal") } @@ -262,10 +232,6 @@ type EntryPrinter func(e raftpb.Entry) func printInternalRaftRequest(entry raftpb.Entry) { var rr etcdserverpb.InternalRaftRequest if err := rr.Unmarshal(entry.Data); err == nil { - // Ensure we don't log user password - if rr.AuthUserChangePassword != nil && rr.AuthUserChangePassword.Password != "" { - rr.AuthUserChangePassword.Password = "" - } fmt.Printf("%4d\t%10d\tnorm\t%s", entry.Term, entry.Index, rr.String()) } } @@ -276,10 +242,10 @@ func printUnknownNormal(entry raftpb.Entry) { func printConfChange(entry raftpb.Entry) { fmt.Printf("%4d\t%10d", entry.Term, entry.Index) - fmt.Print("\tconf") - var r raftpb.ConfChange + fmt.Printf("\tconf") + var r raftpb.ConfChangeV1 if err := r.Unmarshal(entry.Data); err != nil { - fmt.Print("\t???") + fmt.Printf("\t???") } else { fmt.Printf("\tmethod=%s id=%s", r.Type, types.ID(r.NodeID)) } @@ -291,7 +257,7 @@ func printRequest(entry raftpb.Entry) { fmt.Printf("%4d\t%10d\tnorm", entry.Term, entry.Index) switch r.Method { case "": - fmt.Print("\tnoop") + fmt.Printf("\tnoop") case "SYNC": fmt.Printf("\tmethod=SYNC time=%q", time.Unix(0, r.Time).UTC()) case "QGET", "DELETE": @@ -309,7 +275,8 @@ func evaluateEntrytypeFlag(entrytype string) []EntryFilter { entrytypelist = strings.Split(entrytype, ",") } - validRequest := map[string][]EntryFilter{"ConfigChange": {passConfChange}, + validRequest := map[string][]EntryFilter{ + "ConfigChange": {passConfChange}, "Normal": {passInternalRaftRequest, passRequest, passUnknownNormal}, "Request": {passRequest}, "InternalRaftRequest": {passInternalRaftRequest}, @@ -338,13 +305,15 @@ IRRCompaction, IRRLeaseGrant, 
IRRLeaseRevoke, IRRLeaseCheckpoint`, et) return filters } -// listEntriesType filters and prints entries based on the entry-type flag, +// listEntriesType filters and prints entries based on the entry-type flag, func listEntriesType(entrytype string, streamdecoder string, ents []raftpb.Entry) { entryFilters := evaluateEntrytypeFlag(entrytype) - printerMap := map[string]EntryPrinter{"InternalRaftRequest": printInternalRaftRequest, - "Request": printRequest, - "ConfigChange": printConfChange, - "UnknownNormal": printUnknownNormal} + printerMap := map[string]EntryPrinter{ + "InternalRaftRequest": printInternalRaftRequest, + "Request": printRequest, + "ConfigChange": printConfChange, + "UnknownNormal": printUnknownNormal, + } var stderr bytes.Buffer args := strings.Split(streamdecoder, " ") cmd := exec.Command(args[0], args[1:]...) @@ -394,9 +363,9 @@ func listEntriesType(entrytype string, streamdecoder string, ents []raftpb.Entry return } - decoderStatus, decodedData := parseDecoderOutput(decoderoutput) + decoder_status, decoded_data := parseDecoderOutput(decoderoutput) - fmt.Printf("\t%s\t%s", decoderStatus, decodedData) + fmt.Printf("\t%s\t%s", decoder_status, decoded_data) } } @@ -415,19 +384,19 @@ func listEntriesType(entrytype string, streamdecoder string, ents []raftpb.Entry } func parseDecoderOutput(decoderoutput string) (string, string) { - var decoderStatus string - var decodedData string + var decoder_status string + var decoded_data string output := strings.Split(decoderoutput, "|") switch len(output) { case 1: - decoderStatus = "decoder output format is not right, print output anyway" - decodedData = decoderoutput + decoder_status = "decoder output format is not right, print output anyway" + decoded_data = decoderoutput case 2: - decoderStatus = output[0] - decodedData = output[1] + decoder_status = output[0] + decoded_data = output[1] default: - decoderStatus = output[0] + "(*WARNING: data might contain deliminator used by etcd-dump-logs)" - decodedData = 
strings.Join(output[1:], "") + decoder_status = output[0] + "(*WARNING: data might contain deliminator used by etcd-dump-logs)" + decoded_data = strings.Join(output[1:], "") } - return decoderStatus, decodedData + return decoder_status, decoded_data } diff --git a/tools/etcd-dump-logs/raw.go b/tools/etcd-dump-logs/raw.go deleted file mode 100644 index 2c1bed7696a..00000000000 --- a/tools/etcd-dump-logs/raw.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "errors" - "fmt" - "io" - "log" - "os" - "path/filepath" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/server/v3/storage/wal" - "go.etcd.io/etcd/server/v3/storage/wal/walpb" - "go.etcd.io/raft/v3/raftpb" -) - -func readRaw(fromIndex *uint64, waldir string, out io.Writer) { - var walReaders []fileutil.FileReader - dirEntry, err := os.ReadDir(waldir) - if err != nil { - log.Fatalf("Error: Failed to read directory '%s' error:%v", waldir, err) - } - for _, e := range dirEntry { - finfo, err := e.Info() - if err != nil { - log.Fatalf("Error: failed to get fileInfo of file: %s, error: %v", e.Name(), err) - } - if filepath.Ext(finfo.Name()) != ".wal" { - log.Printf("Warning: Ignoring not .wal file: %s", finfo.Name()) - continue - } - f, err := os.Open(filepath.Join(waldir, finfo.Name())) - if err != nil { - log.Printf("Error: Failed to read file: %s . error:%v", finfo.Name(), err) - } - walReaders = append(walReaders, fileutil.NewFileReader(f)) - } - decoder := wal.NewDecoderAdvanced(true, walReaders...) - // The variable is used to not pollute log with multiple continuous crc errors. 
- crcDesync := false - for { - rec := walpb.Record{} - err := decoder.Decode(&rec) - if err == nil || errors.Is(err, walpb.ErrCRCMismatch) { - if err != nil && !crcDesync { - log.Printf("Error: Reading entry failed with CRC error: %c", err) - crcDesync = true - } - printRec(&rec, fromIndex, out) - if rec.Type == wal.CrcType { - decoder.UpdateCRC(rec.Crc) - crcDesync = false - } - continue - } - if errors.Is(err, io.EOF) { - fmt.Fprintf(out, "EOF: All entries were processed.\n") - break - } else if errors.Is(err, io.ErrUnexpectedEOF) { - fmt.Fprintf(out, "ErrUnexpectedEOF: The last record might be corrupted, error: %v.\n", err) - break - } else { - log.Printf("Error: Reading failed: %v", err) - break - } - } -} - -func printRec(rec *walpb.Record, fromIndex *uint64, out io.Writer) { - switch rec.Type { - case wal.MetadataType: - var metadata etcdserverpb.Metadata - pbutil.MustUnmarshal(&metadata, rec.Data) - fmt.Fprintf(out, "Metadata: %s\n", metadata.String()) - case wal.CrcType: - fmt.Fprintf(out, "CRC: %d\n", rec.Crc) - case wal.EntryType: - e := wal.MustUnmarshalEntry(rec.Data) - if fromIndex == nil || e.Index >= *fromIndex { - fmt.Fprintf(out, "Entry: %s\n", e.String()) - } - case wal.SnapshotType: - var snap walpb.Snapshot - pbutil.MustUnmarshal(&snap, rec.Data) - if fromIndex == nil || snap.Index >= *fromIndex { - fmt.Fprintf(out, "Snapshot: %s\n", snap.String()) - } - case wal.StateType: - var state raftpb.HardState - pbutil.MustUnmarshal(&state, rec.Data) - if fromIndex == nil || state.Commit >= *fromIndex { - fmt.Fprintf(out, "HardState: %s\n", state.String()) - } - default: - log.Printf("Unexpected WAL log type: %d", rec.Type) - } -} diff --git a/tools/etcd-dump-logs/raw_test.go b/tools/etcd-dump-logs/raw_test.go deleted file mode 100644 index d1509ec4ea3..00000000000 --- a/tools/etcd-dump-logs/raw_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may 
not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package main - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_readRaw(t *testing.T) { - path := t.TempDir() - mustCreateWalLog(t, path) - var out bytes.Buffer - readRaw(nil, walDir(path), &out) - assert.Equal(t, - `CRC: 0 -Metadata: -Snapshot: -Entry: Term:1 Index:1 Type:EntryConfChange Data:"\010\001\020\000\030\002\"\000" -Entry: Term:2 Index:2 Type:EntryConfChange Data:"\010\002\020\001\030\002\"\000" -Entry: Term:2 Index:3 Type:EntryConfChange Data:"\010\003\020\002\030\002\"\000" -Entry: Term:2 Index:4 Type:EntryConfChange Data:"\010\004\020\003\030\003\"\000" -Entry: Term:3 Index:5 Data:"\010\000\022\000\032\006/path0\"\030{\"hey\":\"ho\",\"hi\":[\"yo\"]}(\0012\0008\000@\000H\tP\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000" -Entry: Term:3 Index:6 Data:"\010\001\022\004QGET\032\006/path1\"\023{\"0\":\"1\",\"2\":[\"3\"]}(\0002\0008\000@\000H\tP\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000" -Entry: Term:3 Index:7 Data:"\010\002\022\004SYNC\032\006/path2\"\023{\"0\":\"1\",\"2\":[\"3\"]}(\0002\0008\000@\000H\002P\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000" -Entry: Term:3 Index:8 Data:"\010\003\022\006DELETE\032\006/path3\"\030{\"hey\":\"ho\",\"hi\":[\"yo\"]}(\0002\0008\000@\001H\002P\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000" -Entry: Term:3 Index:9 
Data:"\010\004\022\006RANDOM\032\246\001/path4/superlong/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path/path\"\030{\"hey\":\"ho\",\"hi\":[\"yo\"]}(\0002\0008\000@\000H\002P\000X\001`+"`"+`\000h\000p\000x\001\200\001\000\210\001\000" -Entry: Term:4 Index:10 Data:"\010\005\032\025\n\0011\022\002hi\030\006 \001(\001X\240\234\001h\240\234\001" -Entry: Term:5 Index:11 Data:"\010\006\"\020\n\004foo1\022\004bar1\030\0010\001" -Entry: Term:6 Index:12 Data:"\010\007*\010\n\0010\022\0019\030\001" -Entry: Term:7 Index:13 Data:"\010\0102\024\022\010\032\006\n\001a\022\001b\032\010\032\006\n\001a\022\001b" -Entry: Term:8 Index:14 Data:"\010\t:\002\020\001" -Entry: Term:9 Index:15 Data:"\010\nB\004\010\001\020\001" -Entry: Term:10 Index:16 Data:"\010\013J\002\010\002" -Entry: Term:11 Index:17 Data:"\010\014R\006\010\003\020\004\030\005" -Entry: Term:12 Index:18 Data:"\010\r\302>\000" -Entry: Term:13 Index:19 Data:"\010\016\232?\000" -Entry: Term:14 Index:20 Data:"\010\017\242?\031\n\006myname\022\010password\032\005token" -Entry: Term:15 Index:21 Data:"\010\020\342D\020\n\005name1\022\005pass1\032\000" -Entry: Term:16 Index:22 Data:"\010\021\352D\007\n\005name1" -Entry: Term:17 Index:23 Data:"\010\022\362D\007\n\005name1" -Entry: Term:18 Index:24 Data:"\010\023\372D\016\n\005name1\022\005pass2" -Entry: Term:19 Index:25 Data:"\010\024\202E\016\n\005user1\022\005role1" -Entry: Term:20 Index:26 Data:"\010\025\212E\016\n\005user2\022\005role2" -Entry: Term:21 Index:27 Data:"\010\026\222E\000" -Entry: Term:22 Index:28 Data:"\010\027\232E\000" -Entry: Term:23 Index:29 Data:"\010\030\202K\007\n\005role2" -Entry: Term:24 Index:30 Data:"\010\031\212K\007\n\005role1" -Entry: Term:25 Index:31 Data:"\010\032\222K\007\n\005role3" -Entry: Term:26 Index:32 Data:"\010\033\232K\033\n\005role3\022\022\010\001\022\004Keys\032\010RangeEnd" -Entry: Term:27 Index:33 
Data:"\010\034\242K\026\n\005role3\022\003key\032\010rangeend" -Entry: Term:27 Index:34 Data:"?" -EOF: All entries were processed. -`, out.String()) -} diff --git a/tools/etcd-dump-metrics/README b/tools/etcd-dump-metrics/README new file mode 100644 index 00000000000..ba3fa2cc12a --- /dev/null +++ b/tools/etcd-dump-metrics/README @@ -0,0 +1,13 @@ + +go install -v ./tools/etcd-dump-metrics + +# for latest main branch +etcd-dump-metrics > Documentation/metrics/latest + +# Or download etcd v3.3.9 to ./bin +goreman start +etcd-dump-metrics --addr http://localhost:2379/metrics > Documentation/metrics/v3.3.9 + +# Or download etcd v3.3.9 to temporary directory to fetch metrics +etcd-dump-metrics --debug --download-versionCount v3.3.9 +etcd-dump-metrics --download-versionCount v3.3.9 > Documentation/metrics/v3.3.9 diff --git a/tools/etcd-dump-metrics/README.md b/tools/etcd-dump-metrics/README.md deleted file mode 100644 index 26fd61ef9bc..00000000000 --- a/tools/etcd-dump-metrics/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# etcd-dump-metrics - - `etcd-dump-metrics` provides metrics for the latest main branch, a given endpoint, or version. - -## Installation - -Install the tool by running the following command from the etcd source directory. - -``` - $ go install -v ./tools/etcd-dump-metrics -``` - -The installation will place executables in the $GOPATH/bin. If $GOPATH environment variable is not set, the tool will be -installed into the $HOME/go/bin. You can also find out the installed location by running the following command from the -etcd source directory. Make sure that $PATH is set accordingly in your environment. - -``` - $ go list -f "{{.Target}}" ./tools/etcd-dump-metrics -``` - -Alternatively, instead of installing the tool, you can use it by simply running the following command from the etcd source -directory. - -``` - $ go run ./tools/etcd-dump-metrics -``` - -## Usage - -The following command should output the usage per the latest development. 
- -``` - $ etcd-dump-metrics --help -``` - -An example of usage detail is provided below. - -### For the latest main branch -``` - $ etcd-dump-metrics -``` - -### For the provided endpoint -``` - $ goreman start - $ etcd-dump-metrics --addr http://localhost:2379/metrics -``` - -### Download specific version to temporary directory to fetch metrics -``` - $ etcd-dump-metrics --debug --download-ver v3.5.3 - $ etcd-dump-metrics --download-ver v3.5.3 -``` diff --git a/tools/etcd-dump-metrics/etcd.go b/tools/etcd-dump-metrics/etcd.go index e64d3e1819c..01d8ae3acdf 100644 --- a/tools/etcd-dump-metrics/etcd.go +++ b/tools/etcd-dump-metrics/etcd.go @@ -17,13 +17,14 @@ package main import ( "context" "fmt" + "io/ioutil" "net/url" "os" "strings" "time" - clientv3 "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/embed" + clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3" + "github.com/ls-2018/etcd_cn/etcd/embed" "go.uber.org/zap" ) @@ -40,10 +41,10 @@ func newEmbedURLs(n int) (urls []url.URL) { func setupEmbedCfg(cfg *embed.Config, curls, purls, ics []url.URL) { cfg.Logger = "zap" cfg.LogOutputs = []string{"/dev/null"} - // []string{"stderr"} to enable server logging + // []string{"stderr"} to enable etcd logging var err error - cfg.Dir, err = os.MkdirTemp(os.TempDir(), fmt.Sprintf("%016X", time.Now().UnixNano())) + cfg.Dir, err = ioutil.TempDir(os.TempDir(), fmt.Sprintf("%016X", time.Now().UnixNano())) if err != nil { panic(err) } @@ -70,7 +71,7 @@ func getCommand(exec, name, dir, cURL, pURL, cluster string) (args []string) { "--data-dir", dir, "--listen-client-urls", cURL, "--advertise-client-urls", cURL, - "--listen-peer-urls", pURL, + "--listen-peer-urls", pURL, // 集群节点之间通信监听的URL;如果指定的IP是0.0.0.0,那么etcd 会监昕所有网卡的指定端口 "--initial-advertise-peer-urls", pURL, "--initial-cluster", cluster, "--initial-cluster-token=tkn", diff --git a/tools/etcd-dump-metrics/install_darwin.go b/tools/etcd-dump-metrics/install_darwin.go index 8f30fb65205..567b72467c2 100644 --- 
a/tools/etcd-dump-metrics/install_darwin.go +++ b/tools/etcd-dump-metrics/install_darwin.go @@ -13,18 +13,19 @@ // limitations under the License. //go:build darwin +// +build darwin package main import ( "fmt" - "io" + "io/ioutil" "net/http" "os" "os/exec" "path/filepath" - "go.etcd.io/etcd/client/pkg/v3/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" ) const downloadURL = `https://storage.googleapis.com/etcd/%s/etcd-%s-darwin-amd64.zip` @@ -38,13 +39,13 @@ func install(ver, dir string) (string, error) { } defer resp.Body.Close() - d, err := io.ReadAll(resp.Body) + d, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } zipPath := filepath.Join(dir, "etcd.zip") - if err = os.WriteFile(zipPath, d, fileutil.PrivateFileMode); err != nil { + if err = ioutil.WriteFile(zipPath, d, fileutil.PrivateFileMode); err != nil { return "", err } diff --git a/tools/etcd-dump-metrics/install_linux.go b/tools/etcd-dump-metrics/install_linux.go index c2a2bfa035a..6384cbf129e 100644 --- a/tools/etcd-dump-metrics/install_linux.go +++ b/tools/etcd-dump-metrics/install_linux.go @@ -13,18 +13,18 @@ // limitations under the License. 
//go:build linux +// +build linux package main import ( "fmt" - "io" + "io/ioutil" "net/http" - "os" "os/exec" "path/filepath" - "go.etcd.io/etcd/client/pkg/v3/fileutil" + "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil" ) const downloadURL = `https://storage.googleapis.com/etcd/%s/etcd-%s-linux-amd64.tar.gz` @@ -38,13 +38,13 @@ func install(ver, dir string) (string, error) { } defer resp.Body.Close() - d, err := io.ReadAll(resp.Body) + d, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } tarPath := filepath.Join(dir, "etcd.tar.gz") - if err = os.WriteFile(tarPath, d, fileutil.PrivateFileMode); err != nil { + if err = ioutil.WriteFile(tarPath, d, fileutil.PrivateFileMode); err != nil { return "", err } diff --git a/tools/etcd-dump-metrics/install_windows.go b/tools/etcd-dump-metrics/install_windows.go index 71873a80b88..07eb15b3209 100644 --- a/tools/etcd-dump-metrics/install_windows.go +++ b/tools/etcd-dump-metrics/install_windows.go @@ -13,6 +13,7 @@ // limitations under the License. 
//go:build windows +// +build windows package main diff --git a/tools/etcd-dump-metrics/main.go b/tools/etcd-dump-metrics/main.go index ac97d5b86b4..b37ae902224 100644 --- a/tools/etcd-dump-metrics/main.go +++ b/tools/etcd-dump-metrics/main.go @@ -17,15 +17,14 @@ package main import ( "flag" - "fmt" + "io/ioutil" "net/url" "os" "os/exec" "path/filepath" "time" - "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/server/v3/embed" + "github.com/ls-2018/etcd_cn/etcd/embed" "go.uber.org/zap" ) @@ -34,7 +33,7 @@ var lg *zap.Logger func init() { var err error - lg, err = logutil.CreateDefaultZapLogger(zap.InfoLevel) + lg, err = zap.NewProduction() if err != nil { panic(err) } @@ -50,11 +49,7 @@ func main() { panic("specify either 'addr' or 'download-ver'") } if *debug { - var err error - lg, err = logutil.CreateDefaultZapLogger(zap.DebugLevel) - if err != nil { - panic(err) - } + lg = zap.NewExample() } ep := *addr @@ -63,7 +58,7 @@ func main() { ver := *downloadVer // download release binary to temporary directory - d, err := os.MkdirTemp(os.TempDir(), ver) + d, err := ioutil.TempDir(os.TempDir(), ver) if err != nil { panic(err) } @@ -185,5 +180,4 @@ func main() { write(ep) lg.Debug("fetching metrics", zap.String("endpoint", ep)) - fmt.Println(getMetrics(ep)) } diff --git a/tools/etcd-dump-metrics/metrics.go b/tools/etcd-dump-metrics/metrics.go deleted file mode 100644 index 643dc5fe154..00000000000 --- a/tools/etcd-dump-metrics/metrics.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "io" - "net/http" - "sort" - "strings" - "time" - - "go.etcd.io/etcd/client/pkg/v3/transport" - - "go.uber.org/zap" -) - -func fetchMetrics(ep string) (lines []string, err error) { - tr, err := transport.NewTimeoutTransport(transport.TLSInfo{}, time.Second, time.Second, time.Second) - if err != nil { - return nil, err - } - cli := &http.Client{Transport: tr} - resp, err := cli.Get(ep) - if err != nil { - return nil, err - } - defer resp.Body.Close() - b, rerr := io.ReadAll(resp.Body) - if rerr != nil { - return nil, rerr - } - lines = strings.Split(string(b), "\n") - return lines, nil -} - -func getMetrics(ep string) (m metricSlice) { - lines, err := fetchMetrics(ep) - if err != nil { - lg.Panic("failed to fetch metrics", zap.Error(err)) - } - mss := parse(lines) - sort.Sort(metricSlice(mss)) - return mss -} - -func (mss metricSlice) String() (s string) { - ver := "unknown" - for i, v := range mss { - if strings.HasPrefix(v.name, "etcd_server_version") { - ver = v.metrics[0] - } - s += v.String() - if i != len(mss)-1 { - s += "\n\n" - } - } - return "# server version: " + ver + "\n\n" + s -} - -type metricSlice []metric - -func (mss metricSlice) Len() int { - return len(mss) -} - -func (mss metricSlice) Less(i, j int) bool { - return mss[i].name < mss[j].name -} - -func (mss metricSlice) Swap(i, j int) { - mss[i], mss[j] = mss[j], mss[i] -} - -type metric struct { - // raw data for debugging purposes - raw []string - - // metrics name - name string - - // metrics description - desc string - - // metrics type - tp string - - // aggregates of "grpc_server_handled_total" - grpcCodes []string - - // keep fist 1 and last 4 if histogram or summary - // otherwise, keep only 1 - metrics []string -} - -func (m metric) String() (s string) { - s += fmt.Sprintf("# name: %q\n", m.name) - s += fmt.Sprintf("# description: %q\n", m.desc) 
- s += fmt.Sprintf("# type: %q\n", m.tp) - if len(m.grpcCodes) > 0 { - s += "# gRPC codes: \n" - for _, c := range m.grpcCodes { - s += fmt.Sprintf("# - %q\n", c) - } - } - s += strings.Join(m.metrics, "\n") - return s -} - -func parse(lines []string) (mss []metric) { - m := metric{raw: make([]string, 0), metrics: make([]string, 0)} - for _, line := range lines { - if strings.HasPrefix(line, "# HELP ") { - // add previous metric and initialize - if m.name != "" { - mss = append(mss, m) - } - m = metric{raw: make([]string, 0), metrics: make([]string, 0)} - - m.raw = append(m.raw, line) - ss := strings.Split(strings.Replace(line, "# HELP ", "", 1), " ") - m.name, m.desc = ss[0], strings.Join(ss[1:], " ") - continue - } - - if strings.HasPrefix(line, "# TYPE ") { - m.raw = append(m.raw, line) - m.tp = strings.Split(strings.Replace(line, "# TYPE "+m.tp, "", 1), " ")[1] - continue - } - - m.raw = append(m.raw, line) - m.metrics = append(m.metrics, strings.Split(line, " ")[0]) - } - if m.name != "" { - mss = append(mss, m) - } - - // aggregate - for i := range mss { - /* - munge data for: - etcd_network_active_peers{Local="c6c9b5143b47d146",Remote="fbdddd08d7e1608b"} - etcd_network_peer_sent_bytes_total{To="c6c9b5143b47d146"} - etcd_network_peer_received_bytes_total{From="0"} - etcd_network_peer_received_bytes_total{From="fd422379fda50e48"} - etcd_network_peer_round_trip_time_seconds_bucket{To="91bc3c398fb3c146",le="0.0001"} - etcd_network_peer_round_trip_time_seconds_bucket{To="fd422379fda50e48",le="0.8192"} - etcd_network_peer_round_trip_time_seconds_bucket{To="fd422379fda50e48",le="+Inf"} - etcd_network_peer_round_trip_time_seconds_sum{To="fd422379fda50e48"} - etcd_network_peer_round_trip_time_seconds_count{To="fd422379fda50e48"} - */ - if mss[i].name == "etcd_network_active_peers" { - mss[i].metrics = []string{`etcd_network_active_peers{Local="LOCAL_NODE_ID",Remote="REMOTE_PEER_NODE_ID"}`} - } - if mss[i].name == "etcd_network_peer_sent_bytes_total" { - 
mss[i].metrics = []string{`etcd_network_peer_sent_bytes_total{To="REMOTE_PEER_NODE_ID"}`} - } - if mss[i].name == "etcd_network_peer_received_bytes_total" { - mss[i].metrics = []string{`etcd_network_peer_received_bytes_total{From="REMOTE_PEER_NODE_ID"}`} - } - if mss[i].tp == "histogram" || mss[i].tp == "summary" { - if mss[i].name == "etcd_network_peer_round_trip_time_seconds" { - for j := range mss[i].metrics { - l := mss[i].metrics[j] - if strings.Contains(l, `To="`) && strings.Contains(l, `le="`) { - k1 := strings.Index(l, `To="`) - k2 := strings.Index(l, `",le="`) - mss[i].metrics[j] = l[:k1+4] + "REMOTE_PEER_NODE_ID" + l[k2:] - } - if strings.HasPrefix(l, "etcd_network_peer_round_trip_time_seconds_sum") { - mss[i].metrics[j] = `etcd_network_peer_round_trip_time_seconds_sum{To="REMOTE_PEER_NODE_ID"}` - } - if strings.HasPrefix(l, "etcd_network_peer_round_trip_time_seconds_count") { - mss[i].metrics[j] = `etcd_network_peer_round_trip_time_seconds_count{To="REMOTE_PEER_NODE_ID"}` - } - } - mss[i].metrics = aggSort(mss[i].metrics) - } - } - - // aggregate gRPC RPC metrics - if mss[i].name == "grpc_server_handled_total" { - pfx := `grpc_server_handled_total{grpc_code="` - codes, metrics := make(map[string]struct{}), make(map[string]struct{}) - for _, v := range mss[i].metrics { - v2 := strings.Replace(v, pfx, "", 1) - idx := strings.Index(v2, `",grpc_method="`) - code := v2[:idx] - v2 = v2[idx:] - codes[code] = struct{}{} - v2 = pfx + "CODE" + v2 - metrics[v2] = struct{}{} - } - mss[i].grpcCodes = sortMap(codes) - mss[i].metrics = sortMap(metrics) - } - - } - return mss -} diff --git a/tools/etcd-dump-metrics/utils.go b/tools/etcd-dump-metrics/utils.go deleted file mode 100644 index 6a9d093b3f2..00000000000 --- a/tools/etcd-dump-metrics/utils.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2018 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import "sort" - -func aggSort(ss []string) (sorted []string) { - set := make(map[string]struct{}) - for _, s := range ss { - set[s] = struct{}{} - } - sorted = make([]string, 0, len(set)) - for k := range set { - sorted = append(sorted, k) - } - sort.Strings(sorted) - return sorted -} - -func sortMap(set map[string]struct{}) (sorted []string) { - sorted = make([]string, 0, len(set)) - for k := range set { - sorted = append(sorted, k) - } - sort.Strings(sorted) - return sorted -} diff --git a/tools/local-tester/README.md b/tools/local-tester/README.md index cbd39e6f31e..caa346b70b9 100644 --- a/tools/local-tester/README.md +++ b/tools/local-tester/README.md @@ -1,6 +1,8 @@ # etcd local-tester -The etcd local-tester runs a fault injected cluster using local processes. It sets up an etcd cluster with unreliable network bridges on its peer and client interfaces. The cluster runs with a constant stream of `Put` requests to simulate client usage. A fault injection script periodically kills cluster members and disrupts bridge connectivity. +The etcd local-tester runs a fault injected cluster using local processes. It sets up an etcd cluster with unreliable +network bridges on its peer and client interfaces. The cluster runs with a constant stream of `Put` requests to simulate +client usage. A fault injection script periodically kills cluster members and disrupts bridge connectivity. 
# Requirements @@ -8,10 +10,11 @@ local-tester depends on `goreman` to manage its processes and `bash` to run faul # Building -local-tester needs `etcd`, `benchmark`, and `bridge` binaries. To build these binaries, run the following from the etcd repository root: +local-tester needs `etcd`, `benchmark`, and `bridge` binaries. To build these binaries, run the following from the etcd +repository root: ```sh -./scripts/build.sh +./build.sh pushd tools/benchmark/ && go build && popd pushd tools/local-tester/bridge && go build && popd ``` diff --git a/tools/local-tester/bridge/bridge.go b/tools/local-tester/bridge/bridge.go index c0e635c3f5f..e4ce615b658 100644 --- a/tools/local-tester/bridge/bridge.go +++ b/tools/local-tester/bridge/bridge.go @@ -19,11 +19,14 @@ import ( "flag" "fmt" "io" + "io/ioutil" "log" "math/rand" "net" "sync" "time" + + cm "github.com/ls-2018/etcd_cn/code_debug/conn" ) type bridgeConn struct { @@ -73,7 +76,7 @@ func timeBridge(b *bridgeConn) { func blackhole(b *bridgeConn) { log.Println("blackholing connection", b.String()) - io.Copy(io.Discard, b.in) + io.Copy(ioutil.Discard, b.in) b.Close() } @@ -186,8 +189,10 @@ type config struct { rxDelay string } -type acceptFaultFunc func() -type connFaultFunc func(*bridgeConn) +type ( + acceptFaultFunc func() + connFaultFunc func(*bridgeConn) +) func main() { var cfg config @@ -206,8 +211,8 @@ func main() { flag.BoolVar(&cfg.corruptSend, "corrupt-send", false, "corrupt packets sent to destination") flag.BoolVar(&cfg.reorder, "reorder", false, "reorder packet delivery") - flag.StringVar(&cfg.txDelay, "tx-delay", "0", "duration to delay client transmission to server") - flag.StringVar(&cfg.rxDelay, "rx-delay", "0", "duration to delay client receive from server") + flag.StringVar(&cfg.txDelay, "tx-delay", "0", "duration to delay client transmission to etcd") + flag.StringVar(&cfg.rxDelay, "rx-delay", "0", "duration to delay client receive from etcd") flag.Parse() @@ -238,7 +243,6 @@ func main() { 
log.Fatal(err) } l = newListener - } acceptFaults = append(acceptFaults, f) } @@ -303,7 +307,7 @@ func main() { if err != nil { log.Fatal(err) } - + cm.PrintConn("main", conn) r := rand.Intn(len(connFaults)) if rand.Intn(100) >= int(100.0*cfg.connFaultRate) { r = 0 diff --git a/tools/local-tester/bridge/dispatch.go b/tools/local-tester/bridge/dispatch.go index 2aae79db664..b385cefe07a 100644 --- a/tools/local-tester/bridge/dispatch.go +++ b/tools/local-tester/bridge/dispatch.go @@ -70,7 +70,7 @@ func (d *dispatcherPool) flush() { // sort by sockets; preserve the packet ordering within a socket pktmap := make(map[io.Writer][]dispatchPacket) - var outs []io.Writer + outs := []io.Writer{} for _, pkt := range pkts { opkts, ok := pktmap[pkt.out] if !ok { @@ -103,7 +103,7 @@ func (d *dispatcherPool) Copy(w io.Writer, f fetchFunc) error { return err } - var pkts []dispatchPacket + pkts := []dispatchPacket{} for len(b) > 0 { pkt := b if len(b) > dispatchPacketBytes { diff --git a/tools/mod/go.mod b/tools/mod/go.mod deleted file mode 100644 index 76679b65ec4..00000000000 --- a/tools/mod/go.mod +++ /dev/null @@ -1,90 +0,0 @@ -module go.etcd.io/etcd/tools/v3 - -go 1.19 - -require ( - github.com/alexkohler/nakedret v1.0.1 - github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03 - github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e - github.com/gogo/protobuf v1.3.2 - github.com/google/addlicense v1.1.1 - github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535 - github.com/hexfusion/schwag v0.0.0-20211117114134-3ceb0191ccbf - github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f - github.com/mgechev/revive v1.2.5 - github.com/mikefarah/yq/v4 v4.30.8 - go.etcd.io/gofail v0.1.0 - go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116 - go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a - 
gotest.tools/gotestsum v1.9.0 - gotest.tools/v3 v3.4.0 - honnef.co/go/tools v0.4.0 - mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b -) - -require ( - github.com/BurntSushi/toml v1.2.1 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/a8m/envsubst v1.3.0 // indirect - github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 // indirect - github.com/alecthomas/participle/v2 v2.0.0-beta.5 // indirect - github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect - github.com/bmatcuk/doublestar/v4 v4.0.2 // indirect - github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348 // indirect - github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/dnephin/pflag v1.0.7 // indirect - github.com/elliotchance/orderedmap v1.5.0 // indirect - github.com/fatih/color v1.14.1 // indirect - github.com/fatih/structtag v1.2.0 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.19.9 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/strfmt v0.21.0 // indirect - github.com/go-openapi/swag v0.19.15 // indirect - github.com/go-stack/stack v1.8.0 // indirect - github.com/goccy/go-json v0.10.0 // indirect - github.com/goccy/go-yaml v1.9.8 // indirect - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/jinzhu/copier v0.3.5 // indirect - github.com/josharian/intern v1.0.0 // indirect - 
github.com/magiconair/properties v1.8.7 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f // indirect - go.mongodb.org/mongo-driver v1.7.3 // indirect - golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect - golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/term v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect - golang.org/x/tools v0.5.0 // indirect - golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect - google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/tools/mod/go.sum b/tools/mod/go.sum deleted file mode 100644 index 3173a9bdb38..00000000000 --- a/tools/mod/go.sum +++ /dev/null @@ -1,427 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= 
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/a8m/envsubst v1.3.0 h1:GmXKmVssap0YtlU3E230W98RWtWCyIZzjtf1apWWyAg= -github.com/a8m/envsubst v1.3.0/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY= -github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 h1:bYOD6QJnBJY79MJQR1i9cyQePG5oNDZXDKL2bhN/uvE= -github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19/go.mod h1:HcqyLXmWoESd/vPSbCPqvgw5l5cMM5PtoqFOnXLjSeM= -github.com/alecthomas/assert/v2 v2.0.3 h1:WKqJODfOiQG0nEJKFKzDIG3E29CN2/4zR9XGJzKIkbg= -github.com/alecthomas/participle/v2 v2.0.0-beta.5 h1:y6dsSYVb1G5eK6mgmy+BgI3Mw35a3WghArZ/Hbebrjo= -github.com/alecthomas/participle/v2 v2.0.0-beta.5/go.mod h1:RC764t6n4L8D8ITAJv0qdokritYSNR3wV5cVwmIEaMM= -github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= -github.com/alexkohler/nakedret v1.0.1 h1:cYUUKLoQ//kuZ3ww60tGjniwOOZW0cfnClt6SGX5qr0= -github.com/alexkohler/nakedret v1.0.1/go.mod h1:FIP5ubTIqmK2D35Xct6bjnYc4O027gqCYLqXLQM4xuY= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA= -github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= -github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348 h1:cy5GCEZLUCshCGCRRUjxHrDUqkB4l5cuUt3ShEckQEo= -github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348/go.mod h1:f/miWtG3SSuTxKsNK3o58H1xl+XV6ZIfbC6p7lPPB8U= -github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03 h1:0wUHjDfbCAROEAZ96zAJGwcNMkPIheFaIjtQyv3QqfM= -github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03/go.mod h1:uFE9hX+zXEwvyUThZ4gDb9vkAwc5DoHUnRSEpH0VrOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e h1:vHRufSa2k8tfkcDdia1vJFa+oiBvvPxW94mg76PPAoA= -github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e/go.mod h1:4xMOusJ7xxc84WclVxKT8+lNfGYDwojOUC2OQNCwcj4= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dimchansky/utfbom v1.1.1 
h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= -github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= -github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= -github.com/elliotchance/orderedmap v1.5.0 h1:1IsExUsjv5XNBD3ZdC7jkAAqLWOOKdbPTmkHx63OsBg= -github.com/elliotchance/orderedmap v1.5.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= 
-github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/strfmt v0.21.0 h1:hX2qEZKmYks+t0hKeb4VTJpUm2UYsdL3+DCid5swxIs= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 
v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi 
v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/goccy/go-json v0.10.0 h1:mXKd9Qw4NuzShiRlOXKews24ufknHO7gx30lsDyokKA= -github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-yaml v1.9.8 h1:5gMyLUeU1/6zl+WFfR1hN7D2kf+1/eRGa7DFtToiBvQ= -github.com/goccy/go-yaml v1.9.8/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/addlicense v1.1.1 h1:jpVf9qPbU8rz5MxKo7d+RMcNHkqxi4YJi/laauX4aAE= -github.com/google/addlicense v1.1.1/go.mod h1:Sm/DHu7Jk+T5miFHHehdIjbi4M5+dJDRS3Cq0rncIxA= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= -github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535 h1:BGeD3v3lyKZy+ocGtprXiDXjIiXvZDfuyII7Lym7GbQ= -github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535/go.mod h1:xV7b0Cn2irnP1jU+mMYvqPAPuFPNjtgB+rvKu/dLIz4= 
-github.com/hexfusion/schwag v0.0.0-20211117114134-3ceb0191ccbf h1:beD8tmKm5FqWVtm3eV4ty16y1Vu044zh8Juw7swlpr8= -github.com/hexfusion/schwag v0.0.0-20211117114134-3ceb0191ccbf/go.mod h1:wSgrm+n3LvHOVxUJo2ha5ffLqRmt6+oGoD6J/suB66c= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= -github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text 
v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.9 
h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f h1:Kc3s6QFyh9DLgInXpWKuG+8I7R7lXbnP7mcoOVIt6KY= -github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f/go.mod h1:AmCV4WB3cDMZqgPk+OUQKumliiQS4ZYsBt3AXekyuAU= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.2.5 h1:UF9AR8pOAuwNmhXj2odp4mxv9Nx2qUIwVz8ZsU+Mbec= -github.com/mgechev/revive v1.2.5/go.mod h1:nFOXent79jMTISAfOAasKfy0Z2Ejq0WX7Qn/KAdYopI= -github.com/mikefarah/yq/v4 v4.30.8 h1:EHovseqMJs9kvE25/2k6VnDs4CrBZN+DFbybUhpPAGM= -github.com/mikefarah/yq/v4 v4.30.8/go.mod h1:8D30GDxhu3+KXll0aFV5msGcdgYRZSPOPVBTbgUQ7Dc= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.5 
h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= 
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f h1:92ZQJRegaqnKjz9HY9an696Sw5EmAqRv0eie/U2IE6k= -github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f/go.mod h1:wxUiQ1klFJmwnM41kQI7IT2g8jjOKbtuL54LdjkxAI0= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod 
h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= -go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116 h1:QQiUXlqz+d96jyNG71NE+IGTgOK6Xlhdx+PzvfbLHlQ= -go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116/go.mod h1:F9kog+iVAuvPJucb1dkYcDcbV0g4uyGEHllTP5NrXiw= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a h1:Znv2XJyAf/fsJsFNt9toO8uyXwwHQ44wxqsvdSxipj4= -go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a/go.mod h1:eMshmuwXLWZrjHXN8ZgYrOMQRSbHqi5M84DEZWhG+o4= -go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE= -golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= -golang.org/x/term v0.4.0/go.mod 
h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884 h1:fiNLklpBwWK1mth30Hlwk+fcdBmIALlgF5iy77O37Ig= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/gotestsum v1.9.0 h1:Jbo/0k/sIOXIJu51IZxEAt27n77xspFEfL6SqKUR72A= -gotest.tools/gotestsum v1.9.0/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= -gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.4.0 h1:lyXVV1c8wUBJRKqI8JgIpT8TW1VDagfYYaxbKa/HoL8= -honnef.co/go/tools v0.4.0/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= -mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b h1:C8Pi6noat8BcrL9WnSRYeQ63fpkJk3hKVHtF5731kIw= -mvdan.cc/unparam v0.0.0-20220316160445-06cc5682983b/go.mod h1:WqFWCt8MGPoFSYGsQSiIORRlYVhkJsIk+n2MY6rhNbA= diff --git a/tools/mod/install_all.sh b/tools/mod/install_all.sh index 2f099b5ea70..7de5fb01473 100755 --- a/tools/mod/install_all.sh +++ b/tools/mod/install_all.sh @@ -1,4 +1,4 @@ #!/usr/bin/env bash cd ./tools/mod || exit 2 -go list --tags tools -f '{{ join .Imports "\n" }}' | xargs go install +go list --tags tools -f '{{ join .Imports "\n" }}' | xargs gobin -p diff --git a/tools/mod/libs.go b/tools/mod/libs.go index 48d537e2dc0..1a1191f93ce 100644 --- a/tools/mod/libs.go +++ b/tools/mod/libs.go @@ -13,6 +13,7 @@ // limitations under 
the License. //go:build libs +// +build libs // This file implements that pattern: // https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module diff --git a/tools/mod/tools.go b/tools/mod/tools.go index 5a7e375d1f9..2bb81989ca6 100644 --- a/tools/mod/tools.go +++ b/tools/mod/tools.go @@ -13,6 +13,7 @@ // limitations under the License. //go:build tools +// +build tools // This file implements that pattern: // https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module @@ -24,7 +25,6 @@ import ( _ "github.com/alexkohler/nakedret" _ "github.com/chzchzchz/goword" _ "github.com/coreos/license-bill-of-materials" - _ "github.com/google/addlicense" _ "github.com/gordonklaus/ineffassign" _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway" _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger" @@ -32,12 +32,8 @@ import ( _ "github.com/hexfusion/schwag" _ "github.com/mdempsky/unconvert" _ "github.com/mgechev/revive" - _ "github.com/mikefarah/yq/v4" - _ "go.etcd.io/gofail" + _ "github.com/mikefarah/yq/v3" _ "go.etcd.io/protodoc" - _ "go.etcd.io/raft/v3" - _ "gotest.tools/gotestsum" - _ "gotest.tools/v3" _ "honnef.co/go/tools/cmd/staticcheck" _ "mvdan.cc/unparam" ) diff --git a/tools/proto-annotations/cmd/etcd_version.go b/tools/proto-annotations/cmd/etcd_version.go deleted file mode 100644 index 466c2bfdcf5..00000000000 --- a/tools/proto-annotations/cmd/etcd_version.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "bytes" - "fmt" - "io" - "sort" - - "github.com/coreos/go-semver/semver" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - "go.etcd.io/etcd/server/v3/storage/wal" -) - -var ( - // externalPackages that are not expected to have etcd version annotation. - externalPackages = []string{"io.prometheus.client", "grpc.binarylog.v1", "google.protobuf", "google.rpc", "google.api", "raftpb"} -) - -// printEtcdVersion writes etcd_version proto annotation to stdout and returns any errors encountered when reading annotation. -func printEtcdVersion() []error { - var errs []error - annotations, err := allEtcdVersionAnnotations() - if err != nil { - errs = append(errs, err) - return errs - } - sort.Slice(annotations, func(i, j int) bool { - return annotations[i].fullName < annotations[j].fullName - }) - output := &bytes.Buffer{} - for _, a := range annotations { - newErrs := a.Validate() - if len(newErrs) == 0 { - err := a.PrintLine(output) - if err != nil { - errs = append(errs, err) - return errs - } - } - errs = append(errs, newErrs...) - } - if len(errs) == 0 { - fmt.Print(output) - } - return errs -} - -func allEtcdVersionAnnotations() (annotations []etcdVersionAnnotation, err error) { - var fileAnnotations []etcdVersionAnnotation - protoregistry.GlobalFiles.RangeFiles(func(file protoreflect.FileDescriptor) bool { - pkg := string(file.Package()) - for _, externalPkg := range externalPackages { - if pkg == externalPkg { - return true - } - } - fileAnnotations, err = fileEtcdVersionAnnotations(file) - if err != nil { - return false - } - annotations = append(annotations, fileAnnotations...) 
- return true - }) - return annotations, err -} - -func fileEtcdVersionAnnotations(file protoreflect.FileDescriptor) (annotations []etcdVersionAnnotation, err error) { - err = wal.VisitFileDescriptor(file, func(path protoreflect.FullName, ver *semver.Version) error { - a := etcdVersionAnnotation{fullName: path, version: ver} - annotations = append(annotations, a) - return nil - }) - return annotations, err -} - -type etcdVersionAnnotation struct { - fullName protoreflect.FullName - version *semver.Version -} - -func (a etcdVersionAnnotation) Validate() (errs []error) { - if a.version == nil { - return nil - } - if a.version.Major == 0 { - errs = append(errs, fmt.Errorf("%s: etcd_version major version should not be zero", a.fullName)) - } - if a.version.Patch != 0 { - errs = append(errs, fmt.Errorf("%s: etcd_version patch version should be zero", a.fullName)) - } - if a.version.PreRelease != "" { - errs = append(errs, fmt.Errorf("%s: etcd_version should not be prerelease", a.fullName)) - } - if a.version.Metadata != "" { - errs = append(errs, fmt.Errorf("%s: etcd_version should not have metadata", a.fullName)) - } - return errs -} - -func (a etcdVersionAnnotation) PrintLine(out io.Writer) error { - if a.version == nil { - _, err := fmt.Fprintf(out, "%s: \"\"\n", a.fullName) - return err - } - _, err := fmt.Fprintf(out, "%s: \"%d.%d\"\n", a.fullName, a.version.Major, a.version.Minor) - return err -} diff --git a/tools/proto-annotations/cmd/root.go b/tools/proto-annotations/cmd/root.go deleted file mode 100644 index 2b4ff9530a0..00000000000 --- a/tools/proto-annotations/cmd/root.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" -) - -const ( - EtcdVersionAnnotation = "etcd_version" -) - -func RootCmd() *cobra.Command { - var annotation string - cmd := &cobra.Command{ - Use: "proto-annotation", - Short: "Proto-annotations prints a dump of annotations used by all protobuf definitions used by Etcd.", - Long: `Tool used to extract values of a specific proto annotation used by protobuf definitions used by Etcd. -Created to ensure that all newly introduced proto definitions have a etcd_version_* annotation, by analysing diffs between generated by this tool. - -Proto annotations is printed to stdout in format: -: "" - - -For example: -''' -etcdserverpb.Member: "3.0" -etcdserverpb.Member.ID: "" -etcdserverpb.Member.clientURLs: "" -etcdserverpb.Member.isLearner: "3.4" -etcdserverpb.Member.name: "" -etcdserverpb.Member.peerURLs: "" -''' - -Any errors in proto will be printed to stderr. -`, - RunE: func(cmd *cobra.Command, args []string) error { - return runProtoAnnotation(annotation) - }, - } - cmd.Flags().StringVar(&annotation, "annotation", "", "Specify what proto annotation to read. Options: etcd_version") - cmd.MarkFlagRequired("annotation") - return cmd -} - -func runProtoAnnotation(annotation string) error { - var errs []error - switch annotation { - case EtcdVersionAnnotation: - errs = printEtcdVersion() - default: - return fmt.Errorf("unknown annotation %q. 
Options: %q", annotation, EtcdVersionAnnotation) - } - if len(errs) != 0 { - for _, err := range errs { - fmt.Fprintln(os.Stderr, err) - } - return fmt.Errorf("failed reading anotation") - } - return nil -} diff --git a/tools/proto-annotations/main.go b/tools/proto-annotations/main.go deleted file mode 100644 index a350ef82b92..00000000000 --- a/tools/proto-annotations/main.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2021 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "fmt" - "os" - - "go.etcd.io/etcd/v3/tools/proto-annotations/cmd" -) - -func main() { - if err := cmd.RootCmd().Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} diff --git a/tools/rw-heatmaps/README.md b/tools/rw-heatmaps/README.md deleted file mode 100644 index 893ea9871cd..00000000000 --- a/tools/rw-heatmaps/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# etcd/tools/rw-heatmaps - -`etcd/tools/rw-heatmaps` is the mixed read/write performance evaluation tool for etcd clusters. - -## Execute - -### Benchmark -To get a mixed read/write performance evaluation result: -```sh -# run with default configurations and specify the working directory -./rw-benchmark.sh -w ${WORKING_DIR} -``` -`rw-benchmark.sh` will automatically use the etcd binary compiled under `etcd/bin/` directory. - -Note: the result csv file will be saved to current working directory. The working directory is where etcd database is saved. 
The working directory is designed for scenarios where a different mounted disk is preferred. - -### Plot Graphs -To generate two images (read and write) based on the benchmark result csv file: -```sh -# to generate a pair of read & write images from one data csv file -./plot_data.py ${FIRST_CSV_FILE} -t ${IMAGE_TITLE} -o ${OUTPUT_IMAGE_NAME} - - -# to generate a pair of read & write images by comparing two data csv files -./plot_data.py ${FIRST_CSV_FILE} ${SECOND_CSV_FILE} -t ${IMAGE_TITLE} -o ${OUTPUT_IMAGE_NAME} -``` diff --git a/tools/rw-heatmaps/plot_data.py b/tools/rw-heatmaps/plot_data.py deleted file mode 100755 index 217eb6f8a8b..00000000000 --- a/tools/rw-heatmaps/plot_data.py +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env python3 -import sys -import os -import argparse -import logging -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.colors as colors - -logging.basicConfig(format='[%(levelname)s %(asctime)s %(name)s] %(message)s') -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) - -params = None - - -def parse_args(): - parser = argparse.ArgumentParser( - description='plot graph using mixed read/write result file.') - parser.add_argument('input_file_a', type=str, - help='first input data files in csv format. (required)') - parser.add_argument('input_file_b', type=str, nargs='?', - help='second input data files in csv format. 
(optional)') - parser.add_argument('-t', '--title', dest='title', type=str, required=True, - help='plot graph title string') - parser.add_argument('-z', '--zero-centered', dest='zero', action='store_true', required=False, - help='plot the improvement graph with white color represents 0.0', - default=True) - parser.add_argument('--no-zero-centered', dest='zero', action='store_false', required=False, - help='plot the improvement graph without white color represents 0.0') - parser.add_argument('-o', '--output-image-file', dest='output', type=str, required=True, - help='output image filename') - parser.add_argument('-F', '--output-format', dest='format', type=str, default='png', - help='output image file format. default: jpg') - return parser.parse_args() - - -def load_data_files(*args): - df_list = [] - try: - for i in args: - if i is not None: - logger.debug('loading csv file {}'.format(i)) - df_list.append(pd.read_csv(i)) - except FileNotFoundError as e: - logger.error(str(e)) - sys.exit(1) - res = [] - try: - for df in df_list: - param_df = df[df['type'] == 'PARAM'] - param_str = '' - if len(param_df) != 0: - param_str = param_df['comment'].iloc[0] - new_df = df[df['type'] == 'DATA'][[ - 'ratio', 'conn_size', 'value_size']].copy() - cols = [x for x in df.columns if x.find('iter') != -1] - tmp = [df[df['type'] == 'DATA'][x].str.split(':') for x in cols] - - read_df = [x.apply(lambda x: float(x[0])) for x in tmp] - read_avg = sum(read_df) / len(read_df) - new_df['read'] = read_avg - - write_df = [x.apply(lambda x: float(x[1])) for x in tmp] - write_avg = sum(write_df) / len(write_df) - new_df['write'] = write_avg - - new_df['ratio'] = new_df['ratio'].astype(float) - new_df['conn_size'] = new_df['conn_size'].astype(int) - new_df['value_size'] = new_df['value_size'].astype(int) - res.append({ - 'dataframe': new_df, - 'param': param_str - }) - except Exception as e: - logger.error(str(e)) - sys.exit(1) - return res - - -# This is copied directly from matplotlib source 
code. Some early versions of matplotlib -# do not have CenteredNorm class -class CenteredNorm(colors.Normalize): - - def __init__(self, vcenter=0, halfrange=None, clip=False): - """ - Normalize symmetrical data around a center (0 by default). - - Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change - around the center. - - Useful when mapping symmetrical data around a conceptual center - e.g., data that range from -2 to 4, with 0 as the midpoint, and - with equal rates of change around that midpoint. - - Parameters - ---------- - vcenter : float, default: 0 - The data value that defines ``0.5`` in the normalization. - halfrange : float, optional - The range of data values that defines a range of ``0.5`` in the - normalization, so that *vcenter* - *halfrange* is ``0.0`` and - *vcenter* + *halfrange* is ``1.0`` in the normalization. - Defaults to the largest absolute difference to *vcenter* for - the values in the dataset. - - Examples - -------- - This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0 - (assuming equal rates of change above and below 0.0): - - >>> import matplotlib.colors as mcolors - >>> norm = mcolors.CenteredNorm(halfrange=4.0) - >>> data = [-2., 0., 4.] - >>> norm(data) - array([0.25, 0.5 , 1. ]) - """ - self._vcenter = vcenter - self.vmin = None - self.vmax = None - # calling the halfrange setter to set vmin and vmax - self.halfrange = halfrange - self.clip = clip - - def _set_vmin_vmax(self): - """ - Set *vmin* and *vmax* based on *vcenter* and *halfrange*. - """ - self.vmax = self._vcenter + self._halfrange - self.vmin = self._vcenter - self._halfrange - - def autoscale(self, A): - """ - Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*. 
- """ - A = np.asanyarray(A) - self._halfrange = max(self._vcenter-A.min(), - A.max()-self._vcenter) - self._set_vmin_vmax() - - def autoscale_None(self, A): - """Set *vmin* and *vmax*.""" - A = np.asanyarray(A) - if self._halfrange is None and A.size: - self.autoscale(A) - - @property - def vcenter(self): - return self._vcenter - - @vcenter.setter - def vcenter(self, vcenter): - self._vcenter = vcenter - if self.vmax is not None: - # recompute halfrange assuming vmin and vmax represent - # min and max of data - self._halfrange = max(self._vcenter-self.vmin, - self.vmax-self._vcenter) - self._set_vmin_vmax() - - @property - def halfrange(self): - return self._halfrange - - @halfrange.setter - def halfrange(self, halfrange): - if halfrange is None: - self._halfrange = None - self.vmin = None - self.vmax = None - else: - self._halfrange = abs(halfrange) - - def __call__(self, value, clip=None): - if self._halfrange is not None: - # enforce symmetry, reset vmin and vmax - self._set_vmin_vmax() - return super().__call__(value, clip=clip) - - -# plot type is the type of the data to plot. 
Either 'read' or 'write' -def plot_data(title, plot_type, cmap_name_default, *args): - if len(args) == 1: - fig_size = (12, 16) - df0 = args[0]['dataframe'] - df0param = args[0]['param'] - fig = plt.figure(figsize=fig_size) - count = 0 - for val, df in df0.groupby('ratio'): - count += 1 - plt.subplot(4, 2, count) - plt.tripcolor(df['conn_size'], df['value_size'], df[plot_type]) - plt.title('R/W Ratio {:.4f} [{:.2f}, {:.2f}]'.format(val, df[plot_type].min(), - df[plot_type].max())) - plt.yscale('log', base=2) - plt.ylabel('Value Size') - plt.xscale('log', base=2) - plt.xlabel('Connections Amount') - plt.colorbar() - plt.tight_layout() - fig.suptitle('{} [{}]\n{}'.format(title, plot_type.upper(), df0param)) - elif len(args) == 2: - fig_size = (12, 26) - df0 = args[0]['dataframe'] - df0param = args[0]['param'] - df1 = args[1]['dataframe'] - df1param = args[1]['param'] - fig = plt.figure(figsize=fig_size) - col = 0 - delta_df = df1.copy() - delta_df[[plot_type]] = ((df1[[plot_type]] - df0[[plot_type]]) / - df0[[plot_type]]) * 100 - for tmp in [df0, df1, delta_df]: - row = 0 - for val, df in tmp.groupby('ratio'): - pos = row * 3 + col + 1 - plt.subplot(8, 3, pos) - norm = None - if col == 2: - cmap_name = 'bwr' - if params.zero: - norm = CenteredNorm() - else: - cmap_name = cmap_name_default - plt.tripcolor(df['conn_size'], df['value_size'], df[plot_type], - norm=norm, - cmap=plt.get_cmap(cmap_name)) - if row == 0: - if col == 0: - plt.title('{}\nR/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format( - os.path.basename(params.input_file_a), - val, df[plot_type].min(), df[plot_type].max())) - elif col == 1: - plt.title('{}\nR/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format( - os.path.basename(params.input_file_b), - val, df[plot_type].min(), df[plot_type].max())) - elif col == 2: - plt.title('Gain\nR/W Ratio {:.4f} [{:.2f}%, {:.2f}%]'.format(val, df[plot_type].min(), - df[plot_type].max())) - else: - if col == 2: - plt.title('R/W Ratio {:.4f} [{:.2f}%, {:.2f}%]'.format(val, 
df[plot_type].min(), - df[plot_type].max())) - else: - plt.title('R/W Ratio {:.4f} [{:.1f}, {:.1f}]'.format(val, df[plot_type].min(), - df[plot_type].max())) - plt.yscale('log', base=2) - plt.ylabel('Value Size') - plt.xscale('log', base=2) - plt.xlabel('Connections Amount') - - if col == 2: - plt.colorbar(format='%.2f%%') - else: - plt.colorbar() - plt.tight_layout() - row += 1 - col += 1 - fig.suptitle('{} [{}]\n{} {}\n{} {}'.format( - title, plot_type.upper(), os.path.basename(params.input_file_a), df0param, - os.path.basename(params.input_file_b), df1param)) - else: - raise Exception('invalid plot input data') - fig.subplots_adjust(top=0.93) - plt.savefig("{}_{}.{}".format(params.output, plot_type, - params.format), format=params.format) - - -def main(): - global params - logging.basicConfig() - params = parse_args() - result = load_data_files(params.input_file_a, params.input_file_b) - for i in [('read', 'viridis'), ('write', 'plasma')]: - plot_type, cmap_name = i - plot_data(params.title, plot_type, cmap_name, *result) - - -if __name__ == '__main__': - main() diff --git a/tools/rw-heatmaps/rw-benchmark.sh b/tools/rw-heatmaps/rw-benchmark.sh deleted file mode 100755 index 66d89cdd8a5..00000000000 --- a/tools/rw-heatmaps/rw-benchmark.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/bin/bash - -#set -x - -RATIO_LIST="1/128 1/8 1/4 1/2 2/1 4/1 8/1 128/1" -VALUE_SIZE_POWER_RANGE="8 14" -CONN_CLI_COUNT_POWER_RANGE="5 11" -REPEAT_COUNT=5 -RUN_COUNT=200000 - -KEY_SIZE=256 -KEY_SPACE_SIZE=$((1024 * 64)) -BACKEND_SIZE="$((20 * 1024 * 1024 * 1024))" -RANGE_RESULT_LIMIT=100 -CLIENT_PORT="23790" - -COMMIT= - -ETCD_ROOT_DIR="$(cd $(dirname $0) && pwd)/../.." 
-ETCD_BIN_DIR="${ETCD_ROOT_DIR}/bin" -ETCD_BIN="${ETCD_BIN_DIR}/etcd" -ETCD_BM_BIN="${ETCD_BIN_DIR}/tools/benchmark" - -WORKING_DIR="$(mktemp -d)" -CURRENT_DIR="$(pwd -P)" -OUTPUT_FILE="${CURRENT_DIR}/result-$(date '+%Y%m%d%H%M').csv" - -trap ctrl_c INT - -CURRENT_ETCD_PID= - -function ctrl_c() { - # capture ctrl-c and kill server - echo "terminating..." - kill_etcd_server ${CURRENT_ETCD_PID} - exit 0 -} - -function quit() { - if [ ! -z ${CURRENT_ETCD_PID} ]; then - kill_etcd_server ${CURRENT_ETCD_PID} - fi - exit $1 -} - -function check_prerequisite() { - # check initial parameters - if [ -f "${OUTPUT_FILE}" ]; then - echo "file ${OUTPUT_FILE} already exists." - exit 1 - fi - pushd ${ETCD_ROOT_DIR} > /dev/null - COMMIT=$(git log --pretty=format:'%h' -n 1) - if [ $? -ne 0 ]; then - COMMIT=N/A - fi - popd > /dev/null - cat >"${OUTPUT_FILE}" </dev/null & - return $! -} - -function init_etcd_db() { - #initialize etcd database - if [ ! -x ${ETCD_BM_BIN} ]; then - echo "no etcd benchmark binary found at: ${ETCD_BM_BIN}" - quit -1 - fi - echo "initialize etcd database..." - ${ETCD_BM_BIN} put --sequential-keys \ - --key-space-size=${KEY_SPACE_SIZE} \ - --val-size=${VALUE_SIZE} --key-size=${KEY_SIZE} \ - --endpoints http://127.0.0.1:${CLIENT_PORT} \ - --total=${KEY_SPACE_SIZE} \ - &>/dev/null -} - -function kill_etcd_server() { - # kill etcd server - ETCD_PID=$1 - if [ -z "$(ps aux | grep etcd | awk "{print \$2}")" ]; then - echo "failed to find the etcd instance to kill: ${ETCD_PID}" - return - fi - echo "kill etcd server instance" - kill -9 ${ETCD_PID} - wait ${ETCD_PID} 2>/dev/null - sleep 5 -} - - -while getopts ":w:c:p:l:vh" OPTION; do - case $OPTION in - h) - echo "usage: $(basename $0) [-h] [-w WORKING_DIR] [-c RUN_COUNT] [-p PORT] [-l RANGE_QUERY_LIMIT] [-v]" >&2 - exit 1 - ;; - w) - WORKING_DIR="${OPTARG}" - ;; - c) - RUN_COUNT="${OPTARG}" - ;; - p) - CLIENT_PORT="${OPTARG}" - ;; - v) - set -x - ;; - l) - RANGE_RESULT_LIMIT="${OPTARG}" - ;; - \?) 
- echo "usage: $(basename $0) [-h] [-w WORKING_DIR] [-c RUN_COUNT] [-p PORT] [-l RANGE_QUERY_LIMIT] [-v]" >&2 - exit 1 - ;; - esac -done -shift "$((${OPTIND} - 1))" - -check_prerequisite - -pushd "${WORKING_DIR}" > /dev/null - -# progress stats management -ITER_TOTAL=$(($(echo ${RATIO_LIST} | wc | awk "{print \$2}") * \ - $(seq ${VALUE_SIZE_POWER_RANGE} | wc | awk "{print \$2}") * \ - $(seq ${CONN_CLI_COUNT_POWER_RANGE} | wc | awk "{print \$2}"))) -ITER_CURRENT=0 -PERCENTAGE_LAST_PRINT=0 -PERCENTAGE_PRINT_THRESHOLD=5 - -for RATIO_STR in ${RATIO_LIST}; do - RATIO=$(echo "scale=4; ${RATIO_STR}" | bc -l) - for VALUE_SIZE_POWER in $(seq ${VALUE_SIZE_POWER_RANGE}); do - VALUE_SIZE=$((2 ** ${VALUE_SIZE_POWER})) - for CONN_CLI_COUNT_POWER in $(seq ${CONN_CLI_COUNT_POWER_RANGE}); do - - # progress stats management - ITER_CURRENT=$((${ITER_CURRENT} + 1)) - PERCENTAGE_CURRENT=$(echo "scale=3; ${ITER_CURRENT}/${ITER_TOTAL}*100" | bc -l) - if [ "$(echo "${PERCENTAGE_CURRENT} - ${PERCENTAGE_LAST_PRINT} > ${PERCENTAGE_PRINT_THRESHOLD}" | - bc -l)" -eq 1 ]; then - PERCENTAGE_LAST_PRINT=${PERCENTAGE_CURRENT} - echo "${PERCENTAGE_CURRENT}% completed" - fi - - CONN_CLI_COUNT=$((2 ** ${CONN_CLI_COUNT_POWER})) - - run_etcd_server - CURRENT_ETCD_PID=$! - sleep 5 - - init_etcd_db - - START=$(date +%s) - LINE="DATA,${RATIO},${CONN_CLI_COUNT},${VALUE_SIZE}" - echo -n "run with setting [${LINE}]" - for i in $(seq ${REPEAT_COUNT}); do - echo -n "." - QPS=$(${ETCD_BM_BIN} txn-mixed "" \ - --conns=${CONN_CLI_COUNT} --clients=${CONN_CLI_COUNT} \ - --total=${RUN_COUNT} \ - --endpoints "http://127.0.0.1:${CLIENT_PORT}" \ - --rw-ratio ${RATIO} --limit ${RANGE_RESULT_LIMIT} \ - --val-size ${VALUE_SIZE} \ - 2>/dev/null | grep "Requests/sec" | awk "{print \$2}") - if [ $? -ne 0 ]; then - echo "benchmark command failed: $?" 
- quit -1 - fi - RD_QPS=$(echo -e "${QPS}" | sed -n '1 p') - WR_QPS=$(echo -e "${QPS}" | sed -n '2 p') - if [ -z "${RD_QPS}" ]; then - RD_QPS=0 - fi - if [ -z "${WR_QPS}" ]; then - WR_QPS=0 - fi - LINE="${LINE},${RD_QPS}:${WR_QPS}" - done - LINE="${LINE}," - END=$(date +%s) - DIFF=$((${END} - ${START})) - echo "took ${DIFF} seconds" - - cat >>"${OUTPUT_FILE}" < /dev/null